From 01d79cee53df99e9a255b85c926d475d38dce127 Mon Sep 17 00:00:00 2001
From: Packit Service
Date: Dec 15 2020 18:00:40 +0000
Subject: Prepare for a new update

Reverting patches so we can apply the latest update and changes can be seen in the spec file and sources.

---

diff --git a/cts/CTS.py.in b/cts/CTS.py.in
index 091bb1f..c418318 100644
--- a/cts/CTS.py.in
+++ b/cts/CTS.py.in
@@ -546,7 +546,7 @@ class ClusterManager(UserDict):
         if self.rsh(node, self.templates["StopCmd"]) == 0:
             # Make sure we can continue even if corosync leaks
             # fdata-* is the old name
-            #self.rsh(node, "rm -rf /dev/shm/qb-* /dev/shm/fdata-*")
+            #self.rsh(node, "rm -f /dev/shm/qb-* /dev/shm/fdata-*")
             self.ShouldBeStatus[node] = "down"
             self.cluster_stable(self.Env["DeadTime"])
             return 1
diff --git a/cts/CTSaudits.py b/cts/CTSaudits.py
index cc82171..b7e0827 100755
--- a/cts/CTSaudits.py
+++ b/cts/CTSaudits.py
@@ -233,7 +233,7 @@ class FileAudit(ClusterAudit):
                 for line in lsout:
                     self.CM.debug("ps[%s]: %s" % (node, line))
 
-                self.CM.rsh(node, "rm -rf /dev/shm/qb-*")
+                self.CM.rsh(node, "rm -f /dev/shm/qb-*")
 
             else:
                 self.CM.debug("Skipping %s" % node)
diff --git a/cts/cli/regression.upgrade.exp b/cts/cli/regression.upgrade.exp
index 50b22df..28ca057 100644
--- a/cts/cli/regression.upgrade.exp
+++ b/cts/cli/regression.upgrade.exp
@@ -79,11 +79,8 @@ update_validation debug: Configuration valid for schema: pacemaker-3.2
 update_validation debug: pacemaker-3.2-style configuration is also valid for pacemaker-3.3
 update_validation debug: Testing 'pacemaker-3.3' validation (17 of X)
 update_validation debug: Configuration valid for schema: pacemaker-3.3
-update_validation debug: pacemaker-3.3-style configuration is also valid for pacemaker-3.4
-update_validation debug: Testing 'pacemaker-3.4' validation (18 of X)
-update_validation debug: Configuration valid for schema: pacemaker-3.4
-update_validation trace: Stopping at pacemaker-3.4
-update_validation info: Transformed the configuration from pacemaker-2.10 to pacemaker-3.4
+update_validation trace: Stopping at pacemaker-3.3
+update_validation info: Transformed the configuration from pacemaker-2.10 to pacemaker-3.3
 =#=#=#= Current cib after: Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping) =#=#=#=
diff --git a/cts/cli/regression.validity.exp b/cts/cli/regression.validity.exp
index 4407074..46e54b5 100644
--- a/cts/cli/regression.validity.exp
+++ b/cts/cli/regression.validity.exp
@@ -105,11 +105,7 @@ update_validation debug: Testing 'pacemaker-3.3' validation (17 of X)
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 update_validation trace: pacemaker-3.3 validation failed
-update_validation debug: Testing 'pacemaker-3.4' validation (18 of X)
-element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
-element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
-update_validation trace: pacemaker-3.4 validation failed
-Cannot upgrade configuration (claiming schema pacemaker-1.2) to at least pacemaker-3.0 because it does not validate with any schema from pacemaker-1.2 to pacemaker-3.4
+Cannot upgrade configuration (claiming schema pacemaker-1.2) to at least pacemaker-3.0 because it does not validate with any schema from pacemaker-1.2 to pacemaker-3.3
 =#=#=#= End test: Run crm_simulate with invalid CIB (enum violation) - Invalid configuration (78) =#=#=#=
 * Passed: crm_simulate - Run crm_simulate with invalid CIB (enum violation)
 =#=#=#= Begin test: Try to make resulting CIB invalid (unrecognized validate-with) =#=#=#=
@@ -202,10 +198,7 @@ update_validation trace: pacemaker-3.2 validation failed
 update_validation debug: Testing 'pacemaker-3.3' validation (17 of X)
 element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
 update_validation trace: pacemaker-3.3 validation failed
-update_validation debug: Testing 'pacemaker-3.4' validation (18 of X)
-element cib: Relax-NG validity error : Invalid attribute validate-with for element cib
-update_validation trace: pacemaker-3.4 validation failed
-Cannot upgrade configuration (claiming schema pacemaker-9999.0) to at least pacemaker-3.0 because it does not validate with any schema from unknown to pacemaker-3.4
+Cannot upgrade configuration (claiming schema pacemaker-9999.0) to at least pacemaker-3.0 because it does not validate with any schema from unknown to pacemaker-3.3
 =#=#=#= End test: Run crm_simulate with invalid CIB (unrecognized validate-with) - Invalid configuration (78) =#=#=#=
 * Passed: crm_simulate - Run crm_simulate with invalid CIB (unrecognized validate-with)
 =#=#=#= Begin test: Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1) =#=#=#=
@@ -293,11 +286,8 @@ update_validation debug: Configuration valid for schema: pacemaker-3.2
 update_validation debug: pacemaker-3.2-style configuration is also valid for pacemaker-3.3
 update_validation debug: Testing 'pacemaker-3.3' validation (17 of X)
 update_validation debug: Configuration valid for schema: pacemaker-3.3
-update_validation debug: pacemaker-3.3-style configuration is also valid for pacemaker-3.4
-update_validation debug: Testing 'pacemaker-3.4' validation (18 of X)
-update_validation debug: Configuration valid for schema: pacemaker-3.4
-update_validation trace: Stopping at pacemaker-3.4
-update_validation info: Transformed the configuration from pacemaker-1.2 to pacemaker-3.4
+update_validation trace: Stopping at pacemaker-3.3
+update_validation info: Transformed the configuration from pacemaker-1.2 to pacemaker-3.3
 unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
@@ -403,8 +393,6 @@ element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
-element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
-element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 =#=#=#= Current cib after: Make resulting CIB invalid, and without validate-with attribute =#=#=#=
@@ -462,8 +450,6 @@ validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
 validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
-validity.bad.xml:10: element rsc_order: Relax-NG validity error : Invalid attribute first-action for element rsc_order
-validity.bad.xml:10: element rsc_order: Relax-NG validity error : Element constraints has extra content: rsc_order
 unpack_resources error: Resource start-up disabled since no STONITH resources have been defined
 unpack_resources error: Either configure some or disable STONITH with the stonith-enabled option
 unpack_resources error: NOTE: Clusters with shared data need STONITH to ensure data integrity
diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in
index 9e34379..5d72205 100644
--- a/cts/cts-scheduler.in
+++ b/cts/cts-scheduler.in
@@ -84,7 +84,6 @@ TESTS = [
     [ "params-0", "Params: No change" ],
     [ "params-1", "Params: Changed" ],
     [ "params-2", "Params: Resource definition" ],
-    [ "params-3", "Params: Restart instead of reload if start pending" ],
     [ "params-4", "Params: Reload" ],
     [ "params-5", "Params: Restart based on probe digest" ],
     [ "novell-251689", "Resource definition change + target_role=stopped" ],
@@ -478,11 +477,6 @@ TESTS = [
     [ "master-score-startup", "Use permanent master scores without LRM history" ],
     [ "failed-demote-recovery", "Recover resource in slave role after demote fails" ],
     [ "failed-demote-recovery-master", "Recover resource in master role after demote fails" ],
-    [ "on_fail_demote1", "Recovery with on-fail=\"demote\" on healthy cluster, remote, guest, and bundle nodes" ],
-    [ "on_fail_demote2", "Recovery with on-fail=\"demote\" with promotion on different node" ],
-    [ "on_fail_demote3", "Recovery with on-fail=\"demote\" with no promotion" ],
-    [ "on_fail_demote4", "Recovery with on-fail=\"demote\" on failed cluster, remote, guest, and bundle nodes" ],
-    [ "no_quorum_demote", "Promotable demotion and primitive stop with no-quorum-policy=\"demote\"" ],
 ],
 [
     [ "history-1", "Correctly parse stateful-1 resource state" ],
@@ -968,13 +962,6 @@ TESTS = [
     [ "shutdown-lock", "Ensure shutdown lock works properly" ],
     [ "shutdown-lock-expiration", "Ensure shutdown lock expiration works properly" ],
 ],
-[
-    [ "op-defaults", "Test op_defaults conditional expressions" ],
-    [ "op-defaults-2", "Test op_defaults AND'ed conditional expressions" ],
-    [ "op-defaults-3", "Test op_defaults precedence" ],
-    [ "rsc-defaults", "Test rsc_defaults conditional expressions" ],
-    [ "rsc-defaults-2", "Test rsc_defaults conditional expressions without type" ],
-],
 
 # @TODO: If pacemaker implements versioned attributes, uncomment these tests
 #[
diff --git a/cts/patterns.py b/cts/patterns.py
index 7eed90c..96d6471 100644
--- a/cts/patterns.py
+++ b/cts/patterns.py
@@ -21,10 +21,6 @@ class BasePatterns(object):
 
             # Logging bug in some versions of libvirtd
             r"libvirtd.*: internal error: Failed to parse PCI config address",
-
-            # pcs can log this when node is fenced, but fencing is OK in some
-            # tests (and we will catch it in pacemaker logs when not OK)
-            r"pcs.daemon:No response from: .* request: get_configs, error:",
         ]
         self.BadNews = []
         self.components = {}
diff --git a/cts/scheduler/no_quorum_demote.dot b/cts/scheduler/no_quorum_demote.dot
deleted file mode 100644
index ea5b30c..0000000
--- a/cts/scheduler/no_quorum_demote.dot
+++ /dev/null
@@ -1,22 +0,0 @@
- digraph "g" {
-"Cancel rsc1_monitor_10000 rhel7-1" -> "rsc1_demote_0 rhel7-1" [ style = bold]
-"Cancel rsc1_monitor_10000 rhel7-1" [ style=bold color="green" fontcolor="black"]
-"Fencing_monitor_120000 rhel7-1" [ style=dashed color="red" fontcolor="black"]
-"Fencing_start_0 rhel7-1" -> "Fencing_monitor_120000 rhel7-1" [ style = dashed]
"Fencing_monitor_120000 rhel7-1" [ style = dashed] -"Fencing_start_0 rhel7-1" [ style=dashed color="red" fontcolor="black"] -"Fencing_stop_0 rhel7-1" -> "Fencing_start_0 rhel7-1" [ style = dashed] -"Fencing_stop_0 rhel7-1" [ style=bold color="green" fontcolor="black"] -"rsc1-clone_demote_0" -> "rsc1-clone_demoted_0" [ style = bold] -"rsc1-clone_demote_0" -> "rsc1_demote_0 rhel7-1" [ style = bold] -"rsc1-clone_demote_0" [ style=bold color="green" fontcolor="orange"] -"rsc1-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] -"rsc1_demote_0 rhel7-1" -> "rsc1-clone_demoted_0" [ style = bold] -"rsc1_demote_0 rhel7-1" -> "rsc1_monitor_11000 rhel7-1" [ style = bold] -"rsc1_demote_0 rhel7-1" [ style=bold color="green" fontcolor="black"] -"rsc1_monitor_11000 rhel7-1" [ style=bold color="green" fontcolor="black"] -"rsc2_monitor_10000 rhel7-2" [ style=dashed color="red" fontcolor="black"] -"rsc2_start_0 rhel7-2" -> "rsc2_monitor_10000 rhel7-2" [ style = dashed] -"rsc2_start_0 rhel7-2" [ style=dashed color="red" fontcolor="black"] -"rsc2_stop_0 rhel7-2" -> "rsc2_start_0 rhel7-2" [ style = dashed] -"rsc2_stop_0 rhel7-2" [ style=bold color="green" fontcolor="black"] -} diff --git a/cts/scheduler/no_quorum_demote.exp b/cts/scheduler/no_quorum_demote.exp deleted file mode 100644 index 245574c..0000000 --- a/cts/scheduler/no_quorum_demote.exp +++ /dev/null @@ -1,81 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/cts/scheduler/no_quorum_demote.scores b/cts/scheduler/no_quorum_demote.scores deleted file mode 100644 index dddc57b..0000000 --- a/cts/scheduler/no_quorum_demote.scores +++ /dev/null @@ -1,72 +0,0 @@ -Allocation scores: -Using the original execution date of: 2020-06-17 17:26:35Z -pcmk__clone_allocate: rsc1-clone allocation score on rhel7-1: 0 -pcmk__clone_allocate: rsc1-clone allocation score on rhel7-2: 0 -pcmk__clone_allocate: rsc1-clone allocation score on rhel7-3: 0 -pcmk__clone_allocate: rsc1-clone allocation score on rhel7-4: 0 -pcmk__clone_allocate: rsc1-clone allocation score on rhel7-5: 0 -pcmk__clone_allocate: rsc1:0 allocation score on rhel7-1: 11 -pcmk__clone_allocate: rsc1:0 allocation score on rhel7-2: 0 -pcmk__clone_allocate: rsc1:0 allocation score on rhel7-3: 0 -pcmk__clone_allocate: rsc1:0 allocation score on rhel7-4: 0 -pcmk__clone_allocate: rsc1:0 allocation score on rhel7-5: 0 -pcmk__clone_allocate: rsc1:1 allocation score on rhel7-1: 0 -pcmk__clone_allocate: rsc1:1 allocation score on rhel7-2: 6 -pcmk__clone_allocate: rsc1:1 allocation score on rhel7-3: 0 -pcmk__clone_allocate: rsc1:1 allocation score on rhel7-4: 0 -pcmk__clone_allocate: rsc1:1 allocation score on rhel7-5: 0 -pcmk__clone_allocate: rsc1:2 allocation score on rhel7-1: 10 -pcmk__clone_allocate: rsc1:2 allocation score on rhel7-2: 5 -pcmk__clone_allocate: rsc1:2 allocation score on rhel7-3: 0 -pcmk__clone_allocate: rsc1:2 allocation score on rhel7-4: 0 -pcmk__clone_allocate: rsc1:2 allocation score on rhel7-5: 0 -pcmk__clone_allocate: rsc1:3 allocation score on rhel7-1: 10 -pcmk__clone_allocate: rsc1:3 allocation score on rhel7-2: 5 -pcmk__clone_allocate: rsc1:3 allocation score on rhel7-3: 0 -pcmk__clone_allocate: rsc1:3 allocation score on rhel7-4: 0 -pcmk__clone_allocate: rsc1:3 allocation score on rhel7-5: 0 -pcmk__clone_allocate: rsc1:4 allocation score on rhel7-1: 10 -pcmk__clone_allocate: rsc1:4 allocation score on rhel7-2: 5 -pcmk__clone_allocate: rsc1:4 
-pcmk__clone_allocate: rsc1:4 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc1:4 allocation score on rhel7-5: 0
-pcmk__native_allocate: Fencing allocation score on rhel7-1: 0
-pcmk__native_allocate: Fencing allocation score on rhel7-2: 0
-pcmk__native_allocate: Fencing allocation score on rhel7-3: 0
-pcmk__native_allocate: Fencing allocation score on rhel7-4: 0
-pcmk__native_allocate: Fencing allocation score on rhel7-5: 0
-pcmk__native_allocate: rsc1:0 allocation score on rhel7-1: 11
-pcmk__native_allocate: rsc1:0 allocation score on rhel7-2: 0
-pcmk__native_allocate: rsc1:0 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc1:0 allocation score on rhel7-4: -INFINITY
-pcmk__native_allocate: rsc1:0 allocation score on rhel7-5: -INFINITY
-pcmk__native_allocate: rsc1:1 allocation score on rhel7-1: -INFINITY
-pcmk__native_allocate: rsc1:1 allocation score on rhel7-2: 6
-pcmk__native_allocate: rsc1:1 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc1:1 allocation score on rhel7-4: -INFINITY
-pcmk__native_allocate: rsc1:1 allocation score on rhel7-5: -INFINITY
-pcmk__native_allocate: rsc1:2 allocation score on rhel7-1: -INFINITY
-pcmk__native_allocate: rsc1:2 allocation score on rhel7-2: -INFINITY
-pcmk__native_allocate: rsc1:2 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc1:2 allocation score on rhel7-4: -INFINITY
-pcmk__native_allocate: rsc1:2 allocation score on rhel7-5: -INFINITY
-pcmk__native_allocate: rsc1:3 allocation score on rhel7-1: -INFINITY
-pcmk__native_allocate: rsc1:3 allocation score on rhel7-2: -INFINITY
-pcmk__native_allocate: rsc1:3 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc1:3 allocation score on rhel7-4: -INFINITY
-pcmk__native_allocate: rsc1:3 allocation score on rhel7-5: -INFINITY
-pcmk__native_allocate: rsc1:4 allocation score on rhel7-1: -INFINITY
-pcmk__native_allocate: rsc1:4 allocation score on rhel7-2: -INFINITY
-pcmk__native_allocate: rsc1:4 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc1:4 allocation score on rhel7-4: -INFINITY
-pcmk__native_allocate: rsc1:4 allocation score on rhel7-5: -INFINITY
-pcmk__native_allocate: rsc2 allocation score on rhel7-1: 0
-pcmk__native_allocate: rsc2 allocation score on rhel7-2: 0
-pcmk__native_allocate: rsc2 allocation score on rhel7-3: 0
-pcmk__native_allocate: rsc2 allocation score on rhel7-4: 0
-pcmk__native_allocate: rsc2 allocation score on rhel7-5: 0
-rsc1:0 promotion score on rhel7-1: 10
-rsc1:1 promotion score on rhel7-2: 5
-rsc1:2 promotion score on none: 0
-rsc1:3 promotion score on none: 0
-rsc1:4 promotion score on none: 0
diff --git a/cts/scheduler/no_quorum_demote.summary b/cts/scheduler/no_quorum_demote.summary
deleted file mode 100644
index 9b69ca1..0000000
--- a/cts/scheduler/no_quorum_demote.summary
+++ /dev/null
@@ -1,38 +0,0 @@
-Using the original execution date of: 2020-06-17 17:26:35Z
-
-Current cluster status:
-Online: [ rhel7-1 rhel7-2 ]
-OFFLINE: [ rhel7-3 rhel7-4 rhel7-5 ]
-
- Fencing (stonith:fence_xvm): Started rhel7-1
- Clone Set: rsc1-clone [rsc1] (promotable)
- Masters: [ rhel7-1 ]
- Slaves: [ rhel7-2 ]
- Stopped: [ rhel7-3 rhel7-4 rhel7-5 ]
- rsc2 (ocf::pacemaker:Dummy): Started rhel7-2
-
-Transition Summary:
- * Stop Fencing ( rhel7-1 ) due to no quorum
- * Demote rsc1:0 ( Master -> Slave rhel7-1 )
- * Stop rsc2 ( rhel7-2 ) due to no quorum
-
-Executing cluster transition:
- * Resource action: Fencing stop on rhel7-1
- * Resource action: rsc1 cancel=10000 on rhel7-1
- * Pseudo action: rsc1-clone_demote_0
- * Resource action: rsc2 stop on rhel7-2
- * Resource action: rsc1 demote on rhel7-1
- * Pseudo action: rsc1-clone_demoted_0
- * Resource action: rsc1 monitor=11000 on rhel7-1
-Using the original execution date of: 2020-06-17 17:26:35Z
-
-Revised cluster status:
-Online: [ rhel7-1 rhel7-2 ]
-OFFLINE: [ rhel7-3 rhel7-4 rhel7-5 ]
-
- Fencing (stonith:fence_xvm): Stopped
- Clone Set: rsc1-clone [rsc1] (promotable)
- Slaves: [ rhel7-1 rhel7-2 ]
- Stopped: [ rhel7-3 rhel7-4 rhel7-5 ]
- rsc2 (ocf::pacemaker:Dummy): Stopped
-
diff --git a/cts/scheduler/no_quorum_demote.xml b/cts/scheduler/no_quorum_demote.xml
deleted file mode 100644
index 8497f0a..0000000
--- a/cts/scheduler/no_quorum_demote.xml
+++ /dev/null
@@ -1,224 +0,0 @@
diff --git a/cts/scheduler/on_fail_demote1.dot b/cts/scheduler/on_fail_demote1.dot
deleted file mode 100644
index d11c1c1..0000000
--- a/cts/scheduler/on_fail_demote1.dot
+++ /dev/null
@@ -1,64 +0,0 @@
- digraph "g" {
-"bundled_demote_0 stateful-bundle-0" -> "bundled_promote_0 stateful-bundle-0" [ style = bold]
-"bundled_demote_0 stateful-bundle-0" -> "stateful-bundle-master_demoted_0" [ style = bold]
-"bundled_demote_0 stateful-bundle-0" [ style=bold color="green" fontcolor="black"]
-"bundled_promote_0 stateful-bundle-0" -> "stateful-bundle-master_promoted_0" [ style = bold]
-"bundled_promote_0 stateful-bundle-0" [ style=bold color="green" fontcolor="black"]
-"lxc-ms-master_demote_0" -> "lxc-ms-master_demoted_0" [ style = bold]
-"lxc-ms-master_demote_0" -> "lxc-ms_demote_0 lxc2" [ style = bold]
-"lxc-ms-master_demote_0" [ style=bold color="green" fontcolor="orange"]
-"lxc-ms-master_demoted_0" -> "lxc-ms-master_promote_0" [ style = bold]
-"lxc-ms-master_demoted_0" [ style=bold color="green" fontcolor="orange"]
-"lxc-ms-master_promote_0" -> "lxc-ms_promote_0 lxc2" [ style = bold]
-"lxc-ms-master_promote_0" [ style=bold color="green" fontcolor="orange"]
-"lxc-ms-master_promoted_0" [ style=bold color="green" fontcolor="orange"]
-"lxc-ms_demote_0 lxc2" -> "lxc-ms-master_demoted_0" [ style = bold]
-"lxc-ms_demote_0 lxc2" -> "lxc-ms_promote_0 lxc2" [ style = bold]
-"lxc-ms_demote_0 lxc2" [ style=bold color="green" fontcolor="black"]
-"lxc-ms_promote_0 lxc2" -> "lxc-ms-master_promoted_0" [ style = bold]
-"lxc-ms_promote_0 lxc2" [ style=bold color="green" fontcolor="black"]
-"rsc1-clone_demote_0" -> "rsc1-clone_demoted_0" [ style = bold]
-"rsc1-clone_demote_0" -> "rsc1_demote_0 rhel7-4" [ style = bold]
-"rsc1-clone_demote_0" [ style=bold color="green" fontcolor="orange"]
-"rsc1-clone_demoted_0" -> "rsc1-clone_promote_0" [ style = bold]
-"rsc1-clone_demoted_0" [ style=bold color="green" fontcolor="orange"]
-"rsc1-clone_promote_0" -> "rsc1_promote_0 rhel7-4" [ style = bold]
-"rsc1-clone_promote_0" [ style=bold color="green" fontcolor="orange"]
-"rsc1-clone_promoted_0" [ style=bold color="green" fontcolor="orange"]
-"rsc1_demote_0 rhel7-4" -> "rsc1-clone_demoted_0" [ style = bold]
-"rsc1_demote_0 rhel7-4" -> "rsc1_promote_0 rhel7-4" [ style = bold]
-"rsc1_demote_0 rhel7-4" [ style=bold color="green" fontcolor="black"]
color="green" fontcolor="black"] -"rsc1_promote_0 rhel7-4" -> "rsc1-clone_promoted_0" [ style = bold] -"rsc1_promote_0 rhel7-4" [ style=bold color="green" fontcolor="black"] -"rsc2-master_demote_0" -> "rsc2-master_demoted_0" [ style = bold] -"rsc2-master_demote_0" -> "rsc2_demote_0 remote-rhel7-2" [ style = bold] -"rsc2-master_demote_0" [ style=bold color="green" fontcolor="orange"] -"rsc2-master_demoted_0" -> "rsc2-master_promote_0" [ style = bold] -"rsc2-master_demoted_0" [ style=bold color="green" fontcolor="orange"] -"rsc2-master_promote_0" -> "rsc2_promote_0 remote-rhel7-2" [ style = bold] -"rsc2-master_promote_0" [ style=bold color="green" fontcolor="orange"] -"rsc2-master_promoted_0" [ style=bold color="green" fontcolor="orange"] -"rsc2_demote_0 remote-rhel7-2" -> "rsc2-master_demoted_0" [ style = bold] -"rsc2_demote_0 remote-rhel7-2" -> "rsc2_promote_0 remote-rhel7-2" [ style = bold] -"rsc2_demote_0 remote-rhel7-2" [ style=bold color="green" fontcolor="black"] -"rsc2_promote_0 remote-rhel7-2" -> "rsc2-master_promoted_0" [ style = bold] -"rsc2_promote_0 remote-rhel7-2" [ style=bold color="green" fontcolor="black"] -"stateful-bundle-master_demote_0" -> "bundled_demote_0 stateful-bundle-0" [ style = bold] -"stateful-bundle-master_demote_0" -> "stateful-bundle-master_demoted_0" [ style = bold] -"stateful-bundle-master_demote_0" [ style=bold color="green" fontcolor="orange"] -"stateful-bundle-master_demoted_0" -> "stateful-bundle-master_promote_0" [ style = bold] -"stateful-bundle-master_demoted_0" -> "stateful-bundle_demoted_0" [ style = bold] -"stateful-bundle-master_demoted_0" [ style=bold color="green" fontcolor="orange"] -"stateful-bundle-master_promote_0" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] -"stateful-bundle-master_promote_0" [ style=bold color="green" fontcolor="orange"] -"stateful-bundle-master_promoted_0" -> "stateful-bundle_promoted_0" [ style = bold] -"stateful-bundle-master_promoted_0" [ style=bold color="green" fontcolor="orange"] -"stateful-bundle_demote_0" -> "stateful-bundle-master_demote_0" [ style = bold] -"stateful-bundle_demote_0" -> "stateful-bundle_demoted_0" [ style = bold] -"stateful-bundle_demote_0" [ style=bold color="green" fontcolor="orange"] -"stateful-bundle_demoted_0" -> "stateful-bundle_promote_0" [ style = bold] -"stateful-bundle_demoted_0" [ style=bold color="green" fontcolor="orange"] -"stateful-bundle_promote_0" -> "stateful-bundle-master_promote_0" [ style = bold] -"stateful-bundle_promote_0" [ style=bold color="green" fontcolor="orange"] -"stateful-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"] -} diff --git a/cts/scheduler/on_fail_demote1.exp b/cts/scheduler/on_fail_demote1.exp deleted file mode 100644 index ebe1dd5..0000000 --- a/cts/scheduler/on_fail_demote1.exp +++ /dev/null @@ -1,360 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff 
deleted file mode 100644
index 7df582f..0000000
--- a/cts/scheduler/on_fail_demote1.scores
+++ /dev/null
@@ -1,470 +0,0 @@
-Allocation scores:
-Using the original execution date of: 2020-06-16 19:23:21Z
-bundled:0 promotion score on stateful-bundle-0: 10
-bundled:1 promotion score on stateful-bundle-1: 5
-bundled:2 promotion score on stateful-bundle-2: 5
-lxc-ms:0 promotion score on lxc2: INFINITY
-lxc-ms:1 promotion score on lxc1: INFINITY
-pcmk__bundle_allocate: bundled:0 allocation score on stateful-bundle-0: 501
-pcmk__bundle_allocate: bundled:1 allocation score on stateful-bundle-1: 501
-pcmk__bundle_allocate: bundled:2 allocation score on stateful-bundle-2: 501
-pcmk__bundle_allocate: stateful-bundle allocation score on lxc1: 0
-pcmk__bundle_allocate: stateful-bundle allocation score on lxc2: 0
-pcmk__bundle_allocate: stateful-bundle allocation score on remote-rhel7-2: 0
-pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-0 allocation score on lxc1: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-0 allocation score on lxc2: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-0 allocation score on remote-rhel7-2: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-1 allocation score on lxc1: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-1 allocation score on lxc2: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-1 allocation score on remote-rhel7-2: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-2 allocation score on lxc1: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-2 allocation score on lxc2: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-2 allocation score on remote-rhel7-2: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on lxc1: 0
-pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on lxc2: 0
-pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on remote-rhel7-2: 0
-pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on lxc1: 0
-pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on lxc2: 0
-pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on remote-rhel7-2: 0
-pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on lxc1: 0
-pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on lxc2: 0
-pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on remote-rhel7-2: 0
-pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc1: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc2: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on remote-rhel7-2: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc1: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc2: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on remote-rhel7-2: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc1: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc2: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on remote-rhel7-2: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-master allocation score on lxc1: 0
-pcmk__bundle_allocate: stateful-bundle-master allocation score on lxc2: 0
-pcmk__bundle_allocate: stateful-bundle-master allocation score on remote-rhel7-2: 0
-pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-0: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-1: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-2: -INFINITY
-pcmk__clone_allocate: bundled:0 allocation score on stateful-bundle-0: INFINITY
-pcmk__clone_allocate: bundled:1 allocation score on stateful-bundle-1: INFINITY
-pcmk__clone_allocate: bundled:2 allocation score on stateful-bundle-2: INFINITY
-pcmk__clone_allocate: lxc-ms-master allocation score on lxc1: INFINITY
-pcmk__clone_allocate: lxc-ms-master allocation score on lxc2: INFINITY
-pcmk__clone_allocate: lxc-ms-master allocation score on remote-rhel7-2: 0
-pcmk__clone_allocate: lxc-ms-master allocation score on rhel7-1: 0
-pcmk__clone_allocate: lxc-ms-master allocation score on rhel7-3: 0
-pcmk__clone_allocate: lxc-ms-master allocation score on rhel7-4: 0
-pcmk__clone_allocate: lxc-ms-master allocation score on rhel7-5: 0
-pcmk__clone_allocate: lxc-ms:0 allocation score on lxc1: INFINITY
-pcmk__clone_allocate: lxc-ms:0 allocation score on lxc2: INFINITY
-pcmk__clone_allocate: lxc-ms:0 allocation score on remote-rhel7-2: 0
-pcmk__clone_allocate: lxc-ms:0 allocation score on rhel7-1: 0
-pcmk__clone_allocate: lxc-ms:0 allocation score on rhel7-3: 0
-pcmk__clone_allocate: lxc-ms:0 allocation score on rhel7-4: 0
-pcmk__clone_allocate: lxc-ms:0 allocation score on rhel7-5: 0
-pcmk__clone_allocate: lxc-ms:1 allocation score on lxc1: INFINITY
-pcmk__clone_allocate: lxc-ms:1 allocation score on lxc2: INFINITY
-pcmk__clone_allocate: lxc-ms:1 allocation score on remote-rhel7-2: 0
-pcmk__clone_allocate: lxc-ms:1 allocation score on rhel7-1: 0
-pcmk__clone_allocate: lxc-ms:1 allocation score on rhel7-3: 0
-pcmk__clone_allocate: lxc-ms:1 allocation score on rhel7-4: 0
-pcmk__clone_allocate: lxc-ms:1 allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc1-clone allocation score on lxc1: 0
-pcmk__clone_allocate: rsc1-clone allocation score on lxc2: 0
-pcmk__clone_allocate: rsc1-clone allocation score on remote-rhel7-2: 0
-pcmk__clone_allocate: rsc1-clone allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc1-clone allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc1-clone allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc1-clone allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc1:0 allocation score on lxc1: 0
-pcmk__clone_allocate: rsc1:0 allocation score on lxc2: 0
-pcmk__clone_allocate: rsc1:0 allocation score on remote-rhel7-2: 0
-pcmk__clone_allocate: rsc1:0 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc1:0 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc1:0 allocation score on rhel7-4: 11
-pcmk__clone_allocate: rsc1:0 allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc1:1 allocation score on lxc1: 0
-pcmk__clone_allocate: rsc1:1 allocation score on lxc2: 0
-pcmk__clone_allocate: rsc1:1 allocation score on remote-rhel7-2: 0
-pcmk__clone_allocate: rsc1:1 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc1:1 allocation score on rhel7-3: 6
-pcmk__clone_allocate: rsc1:1 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc1:1 allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc1:2 allocation score on lxc1: 0
-pcmk__clone_allocate: rsc1:2 allocation score on lxc2: 0
-pcmk__clone_allocate: rsc1:2 allocation score on remote-rhel7-2: 0
-pcmk__clone_allocate: rsc1:2 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc1:2 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc1:2 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc1:2 allocation score on rhel7-5: 6
-pcmk__clone_allocate: rsc1:3 allocation score on lxc1: 0
-pcmk__clone_allocate: rsc1:3 allocation score on lxc2: 0
-pcmk__clone_allocate: rsc1:3 allocation score on remote-rhel7-2: 0
-pcmk__clone_allocate: rsc1:3 allocation score on rhel7-1: 6
-pcmk__clone_allocate: rsc1:3 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc1:3 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc1:3 allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc1:4 allocation score on lxc1: 0
-pcmk__clone_allocate: rsc1:4 allocation score on lxc2: 0
-pcmk__clone_allocate: rsc1:4 allocation score on remote-rhel7-2: 6
-pcmk__clone_allocate: rsc1:4 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc1:4 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc1:4 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc1:4 allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc1:5 allocation score on lxc1: 0
-pcmk__clone_allocate: rsc1:5 allocation score on lxc2: 6
-pcmk__clone_allocate: rsc1:5 allocation score on remote-rhel7-2: 0
-pcmk__clone_allocate: rsc1:5 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc1:5 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc1:5 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc1:5 allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc1:6 allocation score on lxc1: 6
-pcmk__clone_allocate: rsc1:6 allocation score on lxc2: 0
-pcmk__clone_allocate: rsc1:6 allocation score on remote-rhel7-2: 0
-pcmk__clone_allocate: rsc1:6 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc1:6 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc1:6 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc1:6 allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc2-master allocation score on lxc1: 0
-pcmk__clone_allocate: rsc2-master allocation score on lxc2: 0
-pcmk__clone_allocate: rsc2-master allocation score on remote-rhel7-2: 0
-pcmk__clone_allocate: rsc2-master allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc2-master allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc2-master allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc2-master allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc2:0 allocation score on lxc1: 0
-pcmk__clone_allocate: rsc2:0 allocation score on lxc2: 0
-pcmk__clone_allocate: rsc2:0 allocation score on remote-rhel7-2: 0
-pcmk__clone_allocate: rsc2:0 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc2:0 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc2:0 allocation score on rhel7-4: 11
-pcmk__clone_allocate: rsc2:0 allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc2:1 allocation score on lxc1: 0
-pcmk__clone_allocate: rsc2:1 allocation score on lxc2: 0
-pcmk__clone_allocate: rsc2:1 allocation score on remote-rhel7-2: 0
-pcmk__clone_allocate: rsc2:1 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc2:1 allocation score on rhel7-3: 6
-pcmk__clone_allocate: rsc2:1 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc2:1 allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc2:2 allocation score on lxc1: 0
-pcmk__clone_allocate: rsc2:2 allocation score on lxc2: 0
-pcmk__clone_allocate: rsc2:2 allocation score on remote-rhel7-2: 0
-pcmk__clone_allocate: rsc2:2 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc2:2 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc2:2 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc2:2 allocation score on rhel7-5: 6
-pcmk__clone_allocate: rsc2:3 allocation score on lxc1: 0
-pcmk__clone_allocate: rsc2:3 allocation score on lxc2: 0
-pcmk__clone_allocate: rsc2:3 allocation score on remote-rhel7-2: 0
-pcmk__clone_allocate: rsc2:3 allocation score on rhel7-1: 6
-pcmk__clone_allocate: rsc2:3 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc2:3 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc2:3 allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc2:4 allocation score on lxc1: 0
-pcmk__clone_allocate: rsc2:4 allocation score on lxc2: 0
-pcmk__clone_allocate: rsc2:4 allocation score on remote-rhel7-2: 11
-pcmk__clone_allocate: rsc2:4 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc2:4 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc2:4 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc2:4 allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc2:5 allocation score on lxc1: 0
-pcmk__clone_allocate: rsc2:5 allocation score on lxc2: 6
-pcmk__clone_allocate: rsc2:5 allocation score on remote-rhel7-2: 0
-pcmk__clone_allocate: rsc2:5 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc2:5 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc2:5 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc2:5 allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc2:6 allocation score on lxc1: 6
-pcmk__clone_allocate: rsc2:6 allocation score on lxc2: 0
-pcmk__clone_allocate: rsc2:6 allocation score on remote-rhel7-2: 0
-pcmk__clone_allocate: rsc2:6 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc2:6 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc2:6 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc2:6 allocation score on rhel7-5: 0
-pcmk__clone_allocate: stateful-bundle-master allocation score on lxc1: -INFINITY
-pcmk__clone_allocate: stateful-bundle-master allocation score on lxc2: -INFINITY
-pcmk__clone_allocate: stateful-bundle-master allocation score on remote-rhel7-2: -INFINITY
-pcmk__clone_allocate: stateful-bundle-master allocation score on rhel7-1: -INFINITY
-pcmk__clone_allocate: stateful-bundle-master allocation score on rhel7-3: -INFINITY
-pcmk__clone_allocate: stateful-bundle-master allocation score on rhel7-4: -INFINITY
-pcmk__clone_allocate: stateful-bundle-master allocation score on rhel7-5: -INFINITY
-pcmk__clone_allocate: stateful-bundle-master allocation score on stateful-bundle-0: 0
-pcmk__clone_allocate: stateful-bundle-master allocation score on stateful-bundle-1: 0
-pcmk__clone_allocate: stateful-bundle-master allocation score on stateful-bundle-2: 0
-pcmk__native_allocate: Fencing allocation score on lxc1: -INFINITY
-pcmk__native_allocate: Fencing allocation score on lxc2: -INFINITY
-pcmk__native_allocate: Fencing allocation score on remote-rhel7-2: -INFINITY
-pcmk__native_allocate: Fencing allocation score on rhel7-1: 0
-pcmk__native_allocate: Fencing allocation score on rhel7-3: 0
-pcmk__native_allocate: Fencing allocation score on rhel7-4: 0
-pcmk__native_allocate: Fencing allocation score on rhel7-5: 0
-pcmk__native_allocate: bundled:0 allocation score on stateful-bundle-0: INFINITY
-pcmk__native_allocate: bundled:1 allocation score on stateful-bundle-1: INFINITY
-pcmk__native_allocate: bundled:2 allocation score on stateful-bundle-2: INFINITY
-pcmk__native_allocate: container1 allocation score on lxc1: -INFINITY
-pcmk__native_allocate: container1 allocation score on lxc2: -INFINITY
-pcmk__native_allocate: container1 allocation score on remote-rhel7-2: -INFINITY
-pcmk__native_allocate: container1 allocation score on rhel7-1: 0
-pcmk__native_allocate: container1 allocation score on rhel7-3: INFINITY
-pcmk__native_allocate: container1 allocation score on rhel7-4: 0
-pcmk__native_allocate: container1 allocation score on rhel7-5: 0
-pcmk__native_allocate: container2 allocation score on lxc1: -INFINITY
-pcmk__native_allocate: container2 allocation score on lxc2: -INFINITY
-pcmk__native_allocate: container2 allocation score on remote-rhel7-2: -INFINITY
-pcmk__native_allocate: container2 allocation score on rhel7-1: 0
-pcmk__native_allocate: container2 allocation score on rhel7-3: INFINITY
-pcmk__native_allocate: container2 allocation score on rhel7-4: 0
-pcmk__native_allocate: container2 allocation score on rhel7-5: 0
-pcmk__native_allocate: lxc-ms:0 allocation score on lxc1: -INFINITY
-pcmk__native_allocate: lxc-ms:0 allocation score on lxc2: INFINITY
-pcmk__native_allocate: lxc-ms:0 allocation score on remote-rhel7-2: 0
-pcmk__native_allocate: lxc-ms:0 allocation score on rhel7-1: 0
-pcmk__native_allocate: lxc-ms:0 allocation score on rhel7-3: 0
-pcmk__native_allocate: lxc-ms:0 allocation score on rhel7-4: 0
-pcmk__native_allocate: lxc-ms:0 allocation score on rhel7-5: 0
-pcmk__native_allocate: lxc-ms:1 allocation score on lxc1: INFINITY
-pcmk__native_allocate: lxc-ms:1 allocation score on lxc2: INFINITY
-pcmk__native_allocate: lxc-ms:1 allocation score on remote-rhel7-2: 0
-pcmk__native_allocate: lxc-ms:1 allocation score on rhel7-1: 0
-pcmk__native_allocate: lxc-ms:1 allocation score on rhel7-3: 0
-pcmk__native_allocate: lxc-ms:1 allocation score on rhel7-4: 0
-pcmk__native_allocate: lxc-ms:1 allocation score on rhel7-5: 0
-pcmk__native_allocate: lxc1 allocation score on lxc1: -INFINITY
-pcmk__native_allocate: lxc1 allocation score on lxc2: -INFINITY
-pcmk__native_allocate: lxc1 allocation score on remote-rhel7-2: -INFINITY
-pcmk__native_allocate: lxc1 allocation score on rhel7-1: -INFINITY
-pcmk__native_allocate: lxc1 allocation score on rhel7-3: 0
-pcmk__native_allocate: lxc1 allocation score on rhel7-4: -INFINITY
-pcmk__native_allocate: lxc1 allocation score on rhel7-5: -INFINITY
-pcmk__native_allocate: lxc2 allocation score on lxc1: -INFINITY
-pcmk__native_allocate: lxc2 allocation score on lxc2: -INFINITY
-pcmk__native_allocate: lxc2 allocation score on remote-rhel7-2: -INFINITY
-pcmk__native_allocate: lxc2 allocation score on rhel7-1: -INFINITY
-pcmk__native_allocate: lxc2 allocation score on rhel7-3: 0
-pcmk__native_allocate: lxc2 allocation score on rhel7-4: -INFINITY
-pcmk__native_allocate: lxc2 allocation score on rhel7-5: -INFINITY
-pcmk__native_allocate: remote-rhel7-2 allocation score on lxc1: -INFINITY
-pcmk__native_allocate: remote-rhel7-2 allocation score on lxc2: -INFINITY
-pcmk__native_allocate: remote-rhel7-2 allocation score on remote-rhel7-2: -INFINITY
-pcmk__native_allocate: remote-rhel7-2 allocation score on rhel7-1: 0
-pcmk__native_allocate: remote-rhel7-2 allocation score on rhel7-3: 0
-pcmk__native_allocate: remote-rhel7-2 allocation score on rhel7-4: 0
-pcmk__native_allocate: remote-rhel7-2 allocation score on rhel7-5: 0
-pcmk__native_allocate: rsc1:0 allocation score on lxc1: -INFINITY
-pcmk__native_allocate: rsc1:0 allocation score on lxc2: -INFINITY
-pcmk__native_allocate: rsc1:0 allocation score on remote-rhel7-2: -INFINITY
-pcmk__native_allocate: rsc1:0 allocation score on rhel7-1: -INFINITY
-pcmk__native_allocate: rsc1:0 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc1:0 allocation score on rhel7-4: 11
-pcmk__native_allocate: rsc1:0 allocation score on rhel7-5: -INFINITY
-pcmk__native_allocate: rsc1:1 allocation score on lxc1: 0
-pcmk__native_allocate: rsc1:1 allocation score on lxc2: 0
-pcmk__native_allocate: rsc1:1 allocation score on remote-rhel7-2: 0
-pcmk__native_allocate: rsc1:1 allocation score on rhel7-1: 0
-pcmk__native_allocate: rsc1:1 allocation score on rhel7-3: 6
-pcmk__native_allocate: rsc1:1 allocation score on rhel7-4: 0
-pcmk__native_allocate: rsc1:1 allocation score on rhel7-5: 0
-pcmk__native_allocate: rsc1:2 allocation score on lxc1: 0
-pcmk__native_allocate: rsc1:2 allocation score on lxc2: 0
-pcmk__native_allocate: rsc1:2 allocation score on remote-rhel7-2: 0
-pcmk__native_allocate: rsc1:2 allocation score on rhel7-1: 0
-pcmk__native_allocate: rsc1:2 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc1:2 allocation score on rhel7-4: 0
-pcmk__native_allocate: rsc1:2 allocation score on rhel7-5: 6
-pcmk__native_allocate: rsc1:3 allocation score on lxc1: 0
-pcmk__native_allocate: rsc1:3 allocation score on lxc2: 0
-pcmk__native_allocate: rsc1:3 allocation score on remote-rhel7-2: 0
-pcmk__native_allocate: rsc1:3 allocation score on rhel7-1: 6
-pcmk__native_allocate: rsc1:3 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc1:3 allocation score on rhel7-4: 0
-pcmk__native_allocate: rsc1:3 allocation score on rhel7-5: -INFINITY
-pcmk__native_allocate: rsc1:4 allocation score on lxc1: 0
-pcmk__native_allocate: rsc1:4 allocation score on lxc2: 0
-pcmk__native_allocate: rsc1:4 allocation score on remote-rhel7-2: 6
-pcmk__native_allocate: rsc1:4 allocation score on rhel7-1: -INFINITY
-pcmk__native_allocate: rsc1:4 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc1:4 allocation score on rhel7-4: 0
-pcmk__native_allocate: rsc1:4 allocation score on rhel7-5: -INFINITY
-pcmk__native_allocate: rsc1:5 allocation score on lxc1: 0
-pcmk__native_allocate: rsc1:5 allocation score on lxc2: 6
-pcmk__native_allocate: rsc1:5 allocation score on remote-rhel7-2: -INFINITY
-pcmk__native_allocate: rsc1:5 allocation score on rhel7-1: -INFINITY
-pcmk__native_allocate: rsc1:5 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc1:5 allocation score on rhel7-4: 0
-pcmk__native_allocate: rsc1:5 allocation score on rhel7-5: -INFINITY
-pcmk__native_allocate: rsc1:6 allocation score on lxc1: 6
-pcmk__native_allocate: rsc1:6 allocation score on lxc2: -INFINITY
-pcmk__native_allocate: rsc1:6 allocation score on remote-rhel7-2: -INFINITY
-pcmk__native_allocate: rsc1:6 allocation score on rhel7-1: -INFINITY
-pcmk__native_allocate: rsc1:6 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc1:6 allocation score on rhel7-4: 0
-pcmk__native_allocate: rsc1:6 allocation score on rhel7-5: -INFINITY
-pcmk__native_allocate: rsc2:0 allocation score on lxc1: 0
-pcmk__native_allocate: rsc2:0 allocation score on lxc2: 0
-pcmk__native_allocate: rsc2:0 allocation score on remote-rhel7-2: 0
-pcmk__native_allocate: rsc2:0 allocation score on rhel7-1: 0
-pcmk__native_allocate: rsc2:0 allocation score on rhel7-3: 0
-pcmk__native_allocate: rsc2:0 allocation score on rhel7-4: 11
-pcmk__native_allocate: rsc2:0 allocation score on rhel7-5: 0
-pcmk__native_allocate: rsc2:1 allocation score on lxc1: 0
-pcmk__native_allocate: rsc2:1 allocation score on lxc2: 0
-pcmk__native_allocate: rsc2:1 allocation score on remote-rhel7-2: 0
-pcmk__native_allocate: rsc2:1 allocation score on rhel7-1: 0
-pcmk__native_allocate: rsc2:1 allocation score on rhel7-3: 6
-pcmk__native_allocate: rsc2:1 allocation score on rhel7-4: -INFINITY
-pcmk__native_allocate: rsc2:1 allocation score on rhel7-5: 0
-pcmk__native_allocate: rsc2:2 allocation score on lxc1: 0
-pcmk__native_allocate: rsc2:2 allocation score on lxc2: 0
-pcmk__native_allocate: rsc2:2 allocation score on remote-rhel7-2: 0
-pcmk__native_allocate: rsc2:2 allocation score on rhel7-1: 0
-pcmk__native_allocate: rsc2:2 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc2:2 allocation score on rhel7-4: -INFINITY
-pcmk__native_allocate: rsc2:2 allocation score on rhel7-5: 6
-pcmk__native_allocate: rsc2:3 allocation score on lxc1: 0
-pcmk__native_allocate: rsc2:3 allocation score on lxc2: 0
-pcmk__native_allocate: rsc2:3 allocation score on remote-rhel7-2: 0
-pcmk__native_allocate: rsc2:3 allocation score on rhel7-1: 6
-pcmk__native_allocate: rsc2:3 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc2:3 allocation score on rhel7-4: -INFINITY
-pcmk__native_allocate: rsc2:3 allocation score on rhel7-5: -INFINITY
-pcmk__native_allocate: rsc2:4 allocation score on lxc1: -INFINITY
-pcmk__native_allocate: rsc2:4 allocation score on lxc2: -INFINITY
-pcmk__native_allocate: rsc2:4 allocation score on remote-rhel7-2: 11
-pcmk__native_allocate: rsc2:4 allocation score on rhel7-1: -INFINITY
-pcmk__native_allocate: rsc2:4 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc2:4 allocation score on rhel7-4: -INFINITY
-pcmk__native_allocate: rsc2:4 allocation score on rhel7-5: -INFINITY
-pcmk__native_allocate: rsc2:5 allocation score on lxc1: 0
-pcmk__native_allocate: rsc2:5 allocation score on lxc2: 6
-pcmk__native_allocate: rsc2:5 allocation score on remote-rhel7-2: 0
-pcmk__native_allocate: rsc2:5 allocation score on rhel7-1: -INFINITY
-pcmk__native_allocate: rsc2:5 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc2:5 allocation score on rhel7-4: -INFINITY
-pcmk__native_allocate: rsc2:5 allocation score on rhel7-5: -INFINITY
-pcmk__native_allocate: rsc2:6 allocation score on lxc1: 6
-pcmk__native_allocate: rsc2:6 allocation score on lxc2: -INFINITY
-pcmk__native_allocate: rsc2:6 allocation score on remote-rhel7-2: 0
-pcmk__native_allocate: rsc2:6 allocation score on rhel7-1: -INFINITY
-pcmk__native_allocate: rsc2:6 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc2:6 allocation score on rhel7-4: -INFINITY
-pcmk__native_allocate: rsc2:6 allocation score on rhel7-5: -INFINITY
-pcmk__native_allocate: stateful-bundle-0 allocation score on lxc1: -INFINITY
-pcmk__native_allocate: stateful-bundle-0 allocation score on lxc2: -INFINITY
-pcmk__native_allocate: stateful-bundle-0 allocation score on remote-rhel7-2: -INFINITY
-pcmk__native_allocate: stateful-bundle-0 allocation score on rhel7-1: 0
-pcmk__native_allocate: stateful-bundle-0 allocation score on rhel7-3: 0
-pcmk__native_allocate: stateful-bundle-0 allocation score on rhel7-4: 0
-pcmk__native_allocate: stateful-bundle-0 allocation score on rhel7-5: 10000
-pcmk__native_allocate: stateful-bundle-1 allocation score on lxc1: -INFINITY
-pcmk__native_allocate: stateful-bundle-1 allocation score on lxc2: -INFINITY
-pcmk__native_allocate: stateful-bundle-1 allocation score on remote-rhel7-2: -INFINITY
-pcmk__native_allocate: stateful-bundle-1 allocation score on rhel7-1: 10000
-pcmk__native_allocate: stateful-bundle-1 allocation score on rhel7-3: 0
-pcmk__native_allocate: stateful-bundle-1 allocation score on rhel7-4: 0
-pcmk__native_allocate: stateful-bundle-1 allocation score on rhel7-5: 0
-pcmk__native_allocate: stateful-bundle-2 allocation score on lxc1: -INFINITY
-pcmk__native_allocate: stateful-bundle-2 allocation score on lxc2: -INFINITY
-pcmk__native_allocate: stateful-bundle-2 allocation score on remote-rhel7-2: -INFINITY
-pcmk__native_allocate: stateful-bundle-2 allocation score on rhel7-1: 0
-pcmk__native_allocate: stateful-bundle-2 allocation score on rhel7-3: 0
-pcmk__native_allocate: stateful-bundle-2 allocation score on rhel7-4: 10000
-pcmk__native_allocate: stateful-bundle-2 allocation score on rhel7-5: 0
-pcmk__native_allocate: stateful-bundle-docker-0 allocation score on lxc1: -10000
-pcmk__native_allocate: stateful-bundle-docker-0 allocation score on lxc2: -10000
-pcmk__native_allocate: stateful-bundle-docker-0 allocation score on remote-rhel7-2: -10000
-pcmk__native_allocate: stateful-bundle-docker-0 allocation score on rhel7-1: 0
-pcmk__native_allocate: stateful-bundle-docker-0 allocation score on rhel7-3: 0
-pcmk__native_allocate: stateful-bundle-docker-0 allocation score on rhel7-4: 0
-pcmk__native_allocate: stateful-bundle-docker-0 allocation score on rhel7-5: 0
-pcmk__native_allocate: stateful-bundle-docker-1 allocation score on lxc1: -10000
-pcmk__native_allocate: stateful-bundle-docker-1 allocation score on lxc2: -10000
-pcmk__native_allocate: stateful-bundle-docker-1 allocation score on remote-rhel7-2: -10000
-pcmk__native_allocate: stateful-bundle-docker-1 allocation score on rhel7-1: 0
-pcmk__native_allocate: stateful-bundle-docker-1 allocation score on rhel7-3: 0
-pcmk__native_allocate: stateful-bundle-docker-1 allocation score on rhel7-4: 0
-pcmk__native_allocate: stateful-bundle-docker-1 allocation score on rhel7-5: -INFINITY
-pcmk__native_allocate: stateful-bundle-docker-2 allocation score on lxc1: -10000
-pcmk__native_allocate: stateful-bundle-docker-2 allocation score on lxc2: -10000
-pcmk__native_allocate: stateful-bundle-docker-2 allocation score on remote-rhel7-2: -10000
-pcmk__native_allocate: stateful-bundle-docker-2 allocation score on rhel7-1: -INFINITY
-pcmk__native_allocate: stateful-bundle-docker-2 allocation score on rhel7-3: 0
-pcmk__native_allocate: stateful-bundle-docker-2 allocation score on rhel7-4: 0
-pcmk__native_allocate: stateful-bundle-docker-2 allocation score on rhel7-5: -INFINITY
-pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc1: -INFINITY
-pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc2: -INFINITY
-pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on remote-rhel7-2: -INFINITY
-pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-1: -INFINITY
-pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-4: -INFINITY
-pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-5: 0
-pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc1: -INFINITY
-pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc2: -INFINITY
-pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on remote-rhel7-2: -INFINITY
-pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-1: 0
-pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-4: -INFINITY
-pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-5: -INFINITY
-pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc1: -INFINITY
-pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc2: -INFINITY
-pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on remote-rhel7-2: -INFINITY
-pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-1: -INFINITY
-pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-4: 0
-pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-5: -INFINITY
-rsc1:0 promotion score on rhel7-4: 10
-rsc1:1 promotion score on rhel7-3: 5
-rsc1:2 promotion score on rhel7-5: 5
-rsc1:3 promotion score on rhel7-1: 5
-rsc1:4 promotion score on remote-rhel7-2: 5
-rsc1:5 promotion score on lxc2: 5
-rsc1:6 promotion score on lxc1: 5
-rsc2:0 promotion score on rhel7-4: 10
-rsc2:1 promotion score on rhel7-3: 5
-rsc2:2 promotion score on rhel7-5: 5
-rsc2:3 promotion score on rhel7-1: 5
-rsc2:4 promotion score on remote-rhel7-2: 110
-rsc2:5 promotion score on lxc2: 5
-rsc2:6 promotion score on lxc1: 5
diff --git a/cts/scheduler/on_fail_demote1.summary b/cts/scheduler/on_fail_demote1.summary
deleted file mode 100644
index b173582..0000000
--- a/cts/scheduler/on_fail_demote1.summary
+++ /dev/null
@@ -1,86 +0,0 @@
-Using the original execution date of: 2020-06-16 19:23:21Z
-
-Current cluster status:
-Online: [ rhel7-1 rhel7-3 rhel7-4 rhel7-5 ]
-RemoteOnline: [ remote-rhel7-2 ]
-GuestOnline: [ lxc1:container1 lxc2:container2 stateful-bundle-0:stateful-bundle-docker-0 stateful-bundle-1:stateful-bundle-docker-1 stateful-bundle-2:stateful-bundle-docker-2 ]
-
- Fencing (stonith:fence_xvm): Started rhel7-4
- Clone Set: rsc1-clone [rsc1] (promotable)
- rsc1 (ocf::pacemaker:Stateful): FAILED Master rhel7-4
- Slaves: [ lxc1 lxc2 remote-rhel7-2 rhel7-1 rhel7-3 rhel7-5 ]
- Clone Set: rsc2-master [rsc2] (promotable)
- rsc2 (ocf::pacemaker:Stateful): FAILED Master remote-rhel7-2
- Slaves: [ lxc1 lxc2 rhel7-1 rhel7-3 rhel7-4 rhel7-5 ]
- remote-rhel7-2 (ocf::pacemaker:remote): Started rhel7-1
- container1 (ocf::heartbeat:VirtualDomain): Started rhel7-3
- container2 (ocf::heartbeat:VirtualDomain): Started rhel7-3
- Clone Set: lxc-ms-master [lxc-ms] (promotable)
- lxc-ms (ocf::pacemaker:Stateful): FAILED Master lxc2
- Slaves: [ lxc1 ]
- Stopped: [ remote-rhel7-2 rhel7-1 rhel7-3 rhel7-4 rhel7-5 ]
- Container bundle set: stateful-bundle [pcmktest:http]
- stateful-bundle-0 (192.168.122.131) (ocf::pacemaker:Stateful): FAILED Master rhel7-5
- stateful-bundle-1 (192.168.122.132) (ocf::pacemaker:Stateful): Slave rhel7-1
- stateful-bundle-2 (192.168.122.133) (ocf::pacemaker:Stateful): Slave rhel7-4
-
-Transition Summary:
- * Re-promote rsc1:0 ( Master rhel7-4 )
- * Re-promote rsc2:4 ( Master remote-rhel7-2 )
- * Re-promote lxc-ms:0 ( Master lxc2 )
- * Re-promote bundled:0 ( Master stateful-bundle-0 )
-
-Executing cluster transition:
- * Pseudo action: rsc1-clone_demote_0
- * Pseudo action: rsc2-master_demote_0
- * Pseudo action: lxc-ms-master_demote_0
- * Pseudo action: stateful-bundle_demote_0
- * Resource action: rsc1 demote on rhel7-4
- * Pseudo action: rsc1-clone_demoted_0
- * Pseudo action: rsc1-clone_promote_0
- * Resource action: rsc2 demote on remote-rhel7-2
- * Pseudo action: rsc2-master_demoted_0
- * Pseudo action: rsc2-master_promote_0
- * Resource action: lxc-ms demote on lxc2
- * Pseudo action: lxc-ms-master_demoted_0
lxc-ms-master_demoted_0 - * Pseudo action: lxc-ms-master_promote_0 - * Pseudo action: stateful-bundle-master_demote_0 - * Resource action: rsc1 promote on rhel7-4 - * Pseudo action: rsc1-clone_promoted_0 - * Resource action: rsc2 promote on remote-rhel7-2 - * Pseudo action: rsc2-master_promoted_0 - * Resource action: lxc-ms promote on lxc2 - * Pseudo action: lxc-ms-master_promoted_0 - * Resource action: bundled demote on stateful-bundle-0 - * Pseudo action: stateful-bundle-master_demoted_0 - * Pseudo action: stateful-bundle_demoted_0 - * Pseudo action: stateful-bundle_promote_0 - * Pseudo action: stateful-bundle-master_promote_0 - * Resource action: bundled promote on stateful-bundle-0 - * Pseudo action: stateful-bundle-master_promoted_0 - * Pseudo action: stateful-bundle_promoted_0 -Using the original execution date of: 2020-06-16 19:23:21Z - -Revised cluster status: -Online: [ rhel7-1 rhel7-3 rhel7-4 rhel7-5 ] -RemoteOnline: [ remote-rhel7-2 ] -GuestOnline: [ lxc1:container1 lxc2:container2 stateful-bundle-0:stateful-bundle-docker-0 stateful-bundle-1:stateful-bundle-docker-1 stateful-bundle-2:stateful-bundle-docker-2 ] - - Fencing (stonith:fence_xvm): Started rhel7-4 - Clone Set: rsc1-clone [rsc1] (promotable) - Masters: [ rhel7-4 ] - Slaves: [ lxc1 lxc2 remote-rhel7-2 rhel7-1 rhel7-3 rhel7-5 ] - Clone Set: rsc2-master [rsc2] (promotable) - Masters: [ remote-rhel7-2 ] - Slaves: [ lxc1 lxc2 rhel7-1 rhel7-3 rhel7-4 rhel7-5 ] - remote-rhel7-2 (ocf::pacemaker:remote): Started rhel7-1 - container1 (ocf::heartbeat:VirtualDomain): Started rhel7-3 - container2 (ocf::heartbeat:VirtualDomain): Started rhel7-3 - Clone Set: lxc-ms-master [lxc-ms] (promotable) - Masters: [ lxc2 ] - Slaves: [ lxc1 ] - Container bundle set: stateful-bundle [pcmktest:http] - stateful-bundle-0 (192.168.122.131) (ocf::pacemaker:Stateful): Master rhel7-5 - stateful-bundle-1 (192.168.122.132) (ocf::pacemaker:Stateful): Slave rhel7-1 - stateful-bundle-2 (192.168.122.133) (ocf::pacemaker:Stateful): Slave rhel7-4 - diff --git a/cts/scheduler/on_fail_demote1.xml b/cts/scheduler/on_fail_demote1.xml deleted file mode 100644 index 9f3ff20..0000000 --- a/cts/scheduler/on_fail_demote1.xml +++ /dev/null @@ -1,616 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/cts/scheduler/on_fail_demote2.dot b/cts/scheduler/on_fail_demote2.dot deleted file mode 
--- a/cts/scheduler/on_fail_demote2.dot
+++ /dev/null
@@ -1,22 +0,0 @@
- digraph "g" {
-"Cancel rsc1_monitor_10000 rhel7-4" -> "rsc1_demote_0 rhel7-4" [ style = bold]
-"Cancel rsc1_monitor_10000 rhel7-4" [ style=bold color="green" fontcolor="black"]
-"Cancel rsc1_monitor_11000 rhel7-3" -> "rsc1_promote_0 rhel7-3" [ style = bold]
-"Cancel rsc1_monitor_11000 rhel7-3" [ style=bold color="green" fontcolor="black"]
-"rsc1-clone_demote_0" -> "rsc1-clone_demoted_0" [ style = bold]
-"rsc1-clone_demote_0" -> "rsc1_demote_0 rhel7-4" [ style = bold]
-"rsc1-clone_demote_0" [ style=bold color="green" fontcolor="orange"]
-"rsc1-clone_demoted_0" -> "rsc1-clone_promote_0" [ style = bold]
-"rsc1-clone_demoted_0" [ style=bold color="green" fontcolor="orange"]
-"rsc1-clone_promote_0" -> "rsc1_promote_0 rhel7-3" [ style = bold]
-"rsc1-clone_promote_0" [ style=bold color="green" fontcolor="orange"]
-"rsc1-clone_promoted_0" [ style=bold color="green" fontcolor="orange"]
-"rsc1_demote_0 rhel7-4" -> "rsc1-clone_demoted_0" [ style = bold]
-"rsc1_demote_0 rhel7-4" -> "rsc1_monitor_11000 rhel7-4" [ style = bold]
-"rsc1_demote_0 rhel7-4" [ style=bold color="green" fontcolor="black"]
-"rsc1_monitor_10000 rhel7-3" [ style=bold color="green" fontcolor="black"]
-"rsc1_monitor_11000 rhel7-4" [ style=bold color="green" fontcolor="black"]
-"rsc1_promote_0 rhel7-3" -> "rsc1-clone_promoted_0" [ style = bold]
-"rsc1_promote_0 rhel7-3" -> "rsc1_monitor_10000 rhel7-3" [ style = bold]
-"rsc1_promote_0 rhel7-3" [ style=bold color="green" fontcolor="black"]
-}
diff --git a/cts/scheduler/on_fail_demote2.exp b/cts/scheduler/on_fail_demote2.exp
deleted file mode 100644
index 492e86f..0000000
--- a/cts/scheduler/on_fail_demote2.exp
+++ /dev/null
@@ -1,125 +0,0 @@
- [125 lines of XML not preserved in this copy of the patch]
diff --git a/cts/scheduler/on_fail_demote2.scores b/cts/scheduler/on_fail_demote2.scores
deleted file mode 100644
index 25aea90..0000000
--- a/cts/scheduler/on_fail_demote2.scores
+++ /dev/null
@@ -1,127 +0,0 @@
-Allocation scores:
-Using the original execution date of: 2020-06-16 19:23:21Z
-pcmk__clone_allocate: rsc1-clone allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc1-clone allocation score on rhel7-2: 0
-pcmk__clone_allocate: rsc1-clone allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc1-clone allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc1-clone allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc1:0 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc1:0 allocation score on rhel7-2: 0
-pcmk__clone_allocate: rsc1:0 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc1:0 allocation score on rhel7-4: 11
-pcmk__clone_allocate: rsc1:0 allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc1:1 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc1:1 allocation score on rhel7-2: 0
-pcmk__clone_allocate: rsc1:1 allocation score on rhel7-3: 6
-pcmk__clone_allocate: rsc1:1 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc1:1 allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc1:2 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc1:2 allocation score on rhel7-2: 0
-pcmk__clone_allocate: rsc1:2 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc1:2 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc1:2 allocation score on rhel7-5: 6
-pcmk__clone_allocate: rsc1:3 allocation score on rhel7-1: 6
-pcmk__clone_allocate: rsc1:3 allocation score on rhel7-2: 0
-pcmk__clone_allocate: rsc1:3 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc1:3 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc1:3 allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc1:4 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc1:4 allocation score on rhel7-2: 6
-pcmk__clone_allocate: rsc1:4 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc1:4 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc1:4 allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc2-master allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc2-master allocation score on rhel7-2: 0
-pcmk__clone_allocate: rsc2-master allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc2-master allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc2-master allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc2:0 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc2:0 allocation score on rhel7-2: 0
-pcmk__clone_allocate: rsc2:0 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc2:0 allocation score on rhel7-4: 11
-pcmk__clone_allocate: rsc2:0 allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc2:1 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc2:1 allocation score on rhel7-2: 0
-pcmk__clone_allocate: rsc2:1 allocation score on rhel7-3: 6
-pcmk__clone_allocate: rsc2:1 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc2:1 allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc2:2 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc2:2 allocation score on rhel7-2: 0
-pcmk__clone_allocate: rsc2:2 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc2:2 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc2:2 allocation score on rhel7-5: 6
-pcmk__clone_allocate: rsc2:3 allocation score on rhel7-1: 6
-pcmk__clone_allocate: rsc2:3 allocation score on rhel7-2: 0
-pcmk__clone_allocate: rsc2:3 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc2:3 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc2:3 allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc2:4 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc2:4 allocation score on rhel7-2: 6
-pcmk__clone_allocate: rsc2:4 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc2:4 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc2:4 allocation score on rhel7-5: 0
-pcmk__native_allocate: Fencing allocation score on rhel7-1: 0
-pcmk__native_allocate: Fencing allocation score on rhel7-2: 0
-pcmk__native_allocate: Fencing allocation score on rhel7-3: 0
-pcmk__native_allocate: Fencing allocation score on rhel7-4: 0
-pcmk__native_allocate: Fencing allocation score on rhel7-5: 0
-pcmk__native_allocate: rsc1:0 allocation score on rhel7-1: -INFINITY
-pcmk__native_allocate: rsc1:0 allocation score on rhel7-2: -INFINITY
-pcmk__native_allocate: rsc1:0 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc1:0 allocation score on rhel7-4: 11
-pcmk__native_allocate: rsc1:0 allocation score on rhel7-5: -INFINITY
-pcmk__native_allocate: rsc1:1 allocation score on rhel7-1: 0
-pcmk__native_allocate: rsc1:1 allocation score on rhel7-2: 0
-pcmk__native_allocate: rsc1:1 allocation score on rhel7-3: 6
-pcmk__native_allocate: rsc1:1 allocation score on rhel7-4: 0
-pcmk__native_allocate: rsc1:1 allocation score on rhel7-5: 0
-pcmk__native_allocate: rsc1:2 allocation score on rhel7-1: 0
-pcmk__native_allocate: rsc1:2 allocation score on rhel7-2: 0
-pcmk__native_allocate: rsc1:2 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc1:2 allocation score on rhel7-4: 0
-pcmk__native_allocate: rsc1:2 allocation score on rhel7-5: 6
-pcmk__native_allocate: rsc1:3 allocation score on rhel7-1: 6
-pcmk__native_allocate: rsc1:3 allocation score on rhel7-2: 0
-pcmk__native_allocate: rsc1:3 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc1:3 allocation score on rhel7-4: 0
-pcmk__native_allocate: rsc1:3 allocation score on rhel7-5: -INFINITY
-pcmk__native_allocate: rsc1:4 allocation score on rhel7-1: -INFINITY
-pcmk__native_allocate: rsc1:4 allocation score on rhel7-2: 6
-pcmk__native_allocate: rsc1:4 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc1:4 allocation score on rhel7-4: 0
-pcmk__native_allocate: rsc1:4 allocation score on rhel7-5: -INFINITY
-pcmk__native_allocate: rsc2:0 allocation score on rhel7-1: 0
-pcmk__native_allocate: rsc2:0 allocation score on rhel7-2: 0
-pcmk__native_allocate: rsc2:0 allocation score on rhel7-3: 0
-pcmk__native_allocate: rsc2:0 allocation score on rhel7-4: 11
-pcmk__native_allocate: rsc2:0 allocation score on rhel7-5: 0
-pcmk__native_allocate: rsc2:1 allocation score on rhel7-1: 0
-pcmk__native_allocate: rsc2:1 allocation score on rhel7-2: 0
-pcmk__native_allocate: rsc2:1 allocation score on rhel7-3: 6
-pcmk__native_allocate: rsc2:1 allocation score on rhel7-4: -INFINITY
-pcmk__native_allocate: rsc2:1 allocation score on rhel7-5: 0
-pcmk__native_allocate: rsc2:2 allocation score on rhel7-1: 0
-pcmk__native_allocate: rsc2:2 allocation score on rhel7-2: 0
-pcmk__native_allocate: rsc2:2 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc2:2 allocation score on rhel7-4: -INFINITY
-pcmk__native_allocate: rsc2:2 allocation score on rhel7-5: 6
-pcmk__native_allocate: rsc2:3 allocation score on rhel7-1: 6
-pcmk__native_allocate: rsc2:3 allocation score on rhel7-2: 0
-pcmk__native_allocate: rsc2:3 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc2:3 allocation score on rhel7-4: -INFINITY
-pcmk__native_allocate: rsc2:3 allocation score on rhel7-5: -INFINITY
-pcmk__native_allocate: rsc2:4 allocation score on rhel7-1: -INFINITY
-pcmk__native_allocate: rsc2:4 allocation score on rhel7-2: 6
-pcmk__native_allocate: rsc2:4 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc2:4 allocation score on rhel7-4: -INFINITY
-pcmk__native_allocate: rsc2:4 allocation score on rhel7-5: -INFINITY
-rsc1:0 promotion score on rhel7-4: -INFINITY
-rsc1:1 promotion score on rhel7-3: 5
-rsc1:2 promotion score on rhel7-5: 5
-rsc1:3 promotion score on rhel7-1: 5
-rsc1:4 promotion score on rhel7-2: 5
-rsc2:0 promotion score on rhel7-4: 10
-rsc2:1 promotion score on rhel7-3: 5
-rsc2:2 promotion score on rhel7-5: 5
-rsc2:3 promotion score on rhel7-1: 5
-rsc2:4 promotion score on rhel7-2: 5
diff --git a/cts/scheduler/on_fail_demote2.summary b/cts/scheduler/on_fail_demote2.summary
deleted file mode 100644
index 795a11d..0000000
--- a/cts/scheduler/on_fail_demote2.summary
+++ /dev/null
@@ -1,41 +0,0 @@
-Using the original execution date of: 2020-06-16 19:23:21Z
-
-Current cluster status:
-Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
-
- Fencing (stonith:fence_xvm): Started rhel7-1
- Clone Set: rsc1-clone [rsc1] (promotable)
- rsc1 (ocf::pacemaker:Stateful): FAILED Master rhel7-4
- Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ]
- Clone Set: rsc2-master [rsc2] (promotable)
- Masters: [ rhel7-4 ]
- Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ]
-
-Transition Summary:
- * Demote rsc1:0 ( Master -> Slave rhel7-4 )
- * Promote rsc1:1 ( Slave -> Master rhel7-3 )
-
-Executing cluster transition:
- * Resource action: rsc1 cancel=10000 on rhel7-4
- * Resource action: rsc1 cancel=11000 on rhel7-3
- * Pseudo action: rsc1-clone_demote_0
- * Resource action: rsc1 demote on rhel7-4
- * Pseudo action: rsc1-clone_demoted_0
- * Pseudo action: rsc1-clone_promote_0
- * Resource action: rsc1 monitor=11000 on rhel7-4
- * Resource action: rsc1 promote on rhel7-3
- * Pseudo action: rsc1-clone_promoted_0
- * Resource action: rsc1 monitor=10000 on rhel7-3
-Using the original execution date of: 2020-06-16 19:23:21Z
-
-Revised cluster status:
-Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
-
- Fencing (stonith:fence_xvm): Started rhel7-1
- Clone Set: rsc1-clone [rsc1] (promotable)
- Masters: [ rhel7-3 ]
- Slaves: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ]
- Clone Set: rsc2-master [rsc2] (promotable)
- Masters: [ rhel7-4 ]
- Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ]
-
diff --git a/cts/scheduler/on_fail_demote2.xml b/cts/scheduler/on_fail_demote2.xml
deleted file mode 100644
index ae91633..0000000
--- a/cts/scheduler/on_fail_demote2.xml
+++ /dev/null
@@ -1,221 +0,0 @@
- [221 lines of XML not preserved in this copy of the patch]
diff --git a/cts/scheduler/on_fail_demote3.dot b/cts/scheduler/on_fail_demote3.dot
deleted file mode 100644
index e78325b..0000000
--- a/cts/scheduler/on_fail_demote3.dot
+++ /dev/null
@@ -1,12 +0,0 @@
- digraph "g" {
-"Cancel rsc1_monitor_10000 rhel7-4" -> "rsc1_demote_0 rhel7-4" [ style = bold]
-"Cancel rsc1_monitor_10000 rhel7-4" [ style=bold color="green" fontcolor="black"]
-"rsc1-clone_demote_0" -> "rsc1-clone_demoted_0" [ style = bold]
-"rsc1-clone_demote_0" -> "rsc1_demote_0 rhel7-4" [ style = bold]
-"rsc1-clone_demote_0" [ style=bold color="green" fontcolor="orange"]
-"rsc1-clone_demoted_0" [ style=bold color="green" fontcolor="orange"]
-"rsc1_demote_0 rhel7-4" -> "rsc1-clone_demoted_0" [ style = bold]
-"rsc1_demote_0 rhel7-4" -> "rsc1_monitor_11000 rhel7-4" [ style = bold]
-"rsc1_demote_0 rhel7-4" [ style=bold color="green" fontcolor="black"]
-"rsc1_monitor_11000 rhel7-4" [ style=bold color="green" fontcolor="black"]
-}
diff --git a/cts/scheduler/on_fail_demote3.exp b/cts/scheduler/on_fail_demote3.exp
deleted file mode 100644
index ed6bd6d..0000000
--- a/cts/scheduler/on_fail_demote3.exp
+++ /dev/null
@@ -1,63 +0,0 @@
- [63 lines of XML not preserved in this copy of the patch]
diff --git a/cts/scheduler/on_fail_demote3.scores b/cts/scheduler/on_fail_demote3.scores
deleted file mode 100644
index a85639a..0000000
--- a/cts/scheduler/on_fail_demote3.scores
+++ /dev/null
@@ -1,127 +0,0 @@
-Allocation scores:
-Using the original execution date of: 2020-06-16 19:23:21Z
-pcmk__clone_allocate: rsc1-clone allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc1-clone allocation score on rhel7-2: 0
-pcmk__clone_allocate: rsc1-clone allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc1-clone allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc1-clone allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc1:0 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc1:0 allocation score on rhel7-2: 0
-pcmk__clone_allocate: rsc1:0 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc1:0 allocation score on rhel7-4: 11
-pcmk__clone_allocate: rsc1:0 allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc1:1 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc1:1 allocation score on rhel7-2: 0
-pcmk__clone_allocate: rsc1:1 allocation score on rhel7-3: 6
-pcmk__clone_allocate: rsc1:1 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc1:1 allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc1:2 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc1:2 allocation score on rhel7-2: 0
-pcmk__clone_allocate: rsc1:2 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc1:2 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc1:2 allocation score on rhel7-5: 6
-pcmk__clone_allocate: rsc1:3 allocation score on rhel7-1: 6
-pcmk__clone_allocate: rsc1:3 allocation score on rhel7-2: 0
-pcmk__clone_allocate: rsc1:3 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc1:3 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc1:3 allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc1:4 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc1:4 allocation score on rhel7-2: 6
-pcmk__clone_allocate: rsc1:4 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc1:4 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc1:4 allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc2-master allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc2-master allocation score on rhel7-2: 0
-pcmk__clone_allocate: rsc2-master allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc2-master allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc2-master allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc2:0 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc2:0 allocation score on rhel7-2: 0
-pcmk__clone_allocate: rsc2:0 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc2:0 allocation score on rhel7-4: 11
-pcmk__clone_allocate: rsc2:0 allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc2:1 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc2:1 allocation score on rhel7-2: 0
-pcmk__clone_allocate: rsc2:1 allocation score on rhel7-3: 6
-pcmk__clone_allocate: rsc2:1 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc2:1 allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc2:2 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc2:2 allocation score on rhel7-2: 0
-pcmk__clone_allocate: rsc2:2 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc2:2 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc2:2 allocation score on rhel7-5: 6
-pcmk__clone_allocate: rsc2:3 allocation score on rhel7-1: 6
-pcmk__clone_allocate: rsc2:3 allocation score on rhel7-2: 0
-pcmk__clone_allocate: rsc2:3 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc2:3 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc2:3 allocation score on rhel7-5: 0
-pcmk__clone_allocate: rsc2:4 allocation score on rhel7-1: 0
-pcmk__clone_allocate: rsc2:4 allocation score on rhel7-2: 6
-pcmk__clone_allocate: rsc2:4 allocation score on rhel7-3: 0
-pcmk__clone_allocate: rsc2:4 allocation score on rhel7-4: 0
-pcmk__clone_allocate: rsc2:4 allocation score on rhel7-5: 0
-pcmk__native_allocate: Fencing allocation score on rhel7-1: 0
-pcmk__native_allocate: Fencing allocation score on rhel7-2: 0
-pcmk__native_allocate: Fencing allocation score on rhel7-3: 0
-pcmk__native_allocate: Fencing allocation score on rhel7-4: 0
-pcmk__native_allocate: Fencing allocation score on rhel7-5: 0
-pcmk__native_allocate: rsc1:0 allocation score on rhel7-1: -INFINITY
-pcmk__native_allocate: rsc1:0 allocation score on rhel7-2: -INFINITY
-pcmk__native_allocate: rsc1:0 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc1:0 allocation score on rhel7-4: 11
-pcmk__native_allocate: rsc1:0 allocation score on rhel7-5: -INFINITY
-pcmk__native_allocate: rsc1:1 allocation score on rhel7-1: 0
-pcmk__native_allocate: rsc1:1 allocation score on rhel7-2: 0
-pcmk__native_allocate: rsc1:1 allocation score on rhel7-3: 6
-pcmk__native_allocate: rsc1:1 allocation score on rhel7-4: 0
-pcmk__native_allocate: rsc1:1 allocation score on rhel7-5: 0
-pcmk__native_allocate: rsc1:2 allocation score on rhel7-1: 0
-pcmk__native_allocate: rsc1:2 allocation score on rhel7-2: 0
-pcmk__native_allocate: rsc1:2 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc1:2 allocation score on rhel7-4: 0
-pcmk__native_allocate: rsc1:2 allocation score on rhel7-5: 6
-pcmk__native_allocate: rsc1:3 allocation score on rhel7-1: 6
-pcmk__native_allocate: rsc1:3 allocation score on rhel7-2: 0
-pcmk__native_allocate: rsc1:3 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc1:3 allocation score on rhel7-4: 0
-pcmk__native_allocate: rsc1:3 allocation score on rhel7-5: -INFINITY
-pcmk__native_allocate: rsc1:4 allocation score on rhel7-1: -INFINITY
-pcmk__native_allocate: rsc1:4 allocation score on rhel7-2: 6
-pcmk__native_allocate: rsc1:4 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc1:4 allocation score on rhel7-4: 0
-pcmk__native_allocate: rsc1:4 allocation score on rhel7-5: -INFINITY
-pcmk__native_allocate: rsc2:0 allocation score on rhel7-1: 0
-pcmk__native_allocate: rsc2:0 allocation score on rhel7-2: 0
-pcmk__native_allocate: rsc2:0 allocation score on rhel7-3: 0
-pcmk__native_allocate: rsc2:0 allocation score on rhel7-4: 11
-pcmk__native_allocate: rsc2:0 allocation score on rhel7-5: 0
-pcmk__native_allocate: rsc2:1 allocation score on rhel7-1: 0
-pcmk__native_allocate: rsc2:1 allocation score on rhel7-2: 0
-pcmk__native_allocate: rsc2:1 allocation score on rhel7-3: 6
-pcmk__native_allocate: rsc2:1 allocation score on rhel7-4: -INFINITY
-pcmk__native_allocate: rsc2:1 allocation score on rhel7-5: 0
-pcmk__native_allocate: rsc2:2 allocation score on rhel7-1: 0
-pcmk__native_allocate: rsc2:2 allocation score on rhel7-2: 0
-pcmk__native_allocate: rsc2:2 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc2:2 allocation score on rhel7-4: -INFINITY
-pcmk__native_allocate: rsc2:2 allocation score on rhel7-5: 6
-pcmk__native_allocate: rsc2:3 allocation score on rhel7-1: 6
-pcmk__native_allocate: rsc2:3 allocation score on rhel7-2: 0
-pcmk__native_allocate: rsc2:3 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc2:3 allocation score on rhel7-4: -INFINITY
-pcmk__native_allocate: rsc2:3 allocation score on rhel7-5: -INFINITY
-pcmk__native_allocate: rsc2:4 allocation score on rhel7-1: -INFINITY
-pcmk__native_allocate: rsc2:4 allocation score on rhel7-2: 6
-pcmk__native_allocate: rsc2:4 allocation score on rhel7-3: -INFINITY
-pcmk__native_allocate: rsc2:4 allocation score on rhel7-4: -INFINITY
-pcmk__native_allocate: rsc2:4 allocation score on rhel7-5: -INFINITY
-rsc1:0 promotion score on rhel7-4: -INFINITY
-rsc1:1 promotion score on rhel7-3: -INFINITY
-rsc1:2 promotion score on rhel7-5: -INFINITY
-rsc1:3 promotion score on rhel7-1: -INFINITY
-rsc1:4 promotion score on rhel7-2: -INFINITY
-rsc2:0 promotion score on rhel7-4: 10
-rsc2:1 promotion score on rhel7-3: 5
-rsc2:2 promotion score on rhel7-5: 5
-rsc2:3 promotion score on rhel7-1: 5
-rsc2:4 promotion score on rhel7-2: 5
diff --git a/cts/scheduler/on_fail_demote3.summary b/cts/scheduler/on_fail_demote3.summary
deleted file mode 100644
index f1173fd..0000000
--- a/cts/scheduler/on_fail_demote3.summary
+++ /dev/null
@@ -1,34 +0,0 @@
-Using the original execution date of: 2020-06-16 19:23:21Z
-
-Current cluster status:
-Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
-
- Fencing (stonith:fence_xvm): Started rhel7-1
- Clone Set: rsc1-clone [rsc1] (promotable)
- rsc1 (ocf::pacemaker:Stateful): FAILED Master rhel7-4
- Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ]
- Clone Set: rsc2-master [rsc2] (promotable)
- Masters: [ rhel7-4 ]
- Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ]
-
-Transition Summary:
- * Demote rsc1:0 ( Master -> Slave rhel7-4 )
-
-Executing cluster transition:
- * Resource action: rsc1 cancel=10000 on rhel7-4
- * Pseudo action: rsc1-clone_demote_0
- * Resource action: rsc1 demote on rhel7-4
- * Pseudo action: rsc1-clone_demoted_0
- * Resource action: rsc1 monitor=11000 on rhel7-4
-Using the original execution date of: 2020-06-16 19:23:21Z
-
-Revised cluster status:
-Online: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
-
- Fencing (stonith:fence_xvm): Started rhel7-1
- Clone Set: rsc1-clone [rsc1] (promotable)
- Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-4 rhel7-5 ]
- Clone Set: rsc2-master [rsc2] (promotable)
- Masters: [ rhel7-4 ]
- Slaves: [ rhel7-1 rhel7-2 rhel7-3 rhel7-5 ]
-
diff --git a/cts/scheduler/on_fail_demote3.xml b/cts/scheduler/on_fail_demote3.xml
deleted file mode 100644
index a7b6806..0000000
--- a/cts/scheduler/on_fail_demote3.xml
+++ /dev/null
@@ -1,221 +0,0 @@
- [221 lines of XML not preserved in this copy of the patch]
diff --git a/cts/scheduler/on_fail_demote4.dot b/cts/scheduler/on_fail_demote4.dot
deleted file mode 100644
index 4715cd3..0000000
--- a/cts/scheduler/on_fail_demote4.dot
+++ /dev/null
@@ -1,383 +0,0 @@
- digraph "g" {
-"Cancel rsc1_monitor_11000 rhel7-3" -> "rsc1_promote_0 rhel7-3" [ style = bold]
-"Cancel rsc1_monitor_11000 rhel7-3" [ style=bold color="green" fontcolor="black"]
-"Cancel rsc2_monitor_11000 rhel7-3" -> "rsc2_promote_0 rhel7-3" [ style = bold]
-"Cancel rsc2_monitor_11000 rhel7-3" [ style=bold color="green" fontcolor="black"]
-"Fencing_monitor_120000 rhel7-5" [ style=bold color="green" fontcolor="black"]
-"Fencing_start_0 rhel7-5" -> "Fencing_monitor_120000 rhel7-5" [ style = bold]
-"Fencing_start_0 rhel7-5" [ style=bold color="green" fontcolor="black"]
-"Fencing_stop_0 rhel7-4" -> "Fencing_start_0 rhel7-5" [ style = bold]
-"Fencing_stop_0 rhel7-4" [ style=bold color="green" fontcolor="orange"]
-"bundled_demote_0 stateful-bundle-0" -> "bundled_promote_0 stateful-bundle-0" [ style = bold]
-"bundled_demote_0 stateful-bundle-0" -> "bundled_stop_0 stateful-bundle-0" [ style = bold]
-"bundled_demote_0 stateful-bundle-0" -> "stateful-bundle-master_demoted_0" [ style = bold]
-"bundled_demote_0 stateful-bundle-0" [ style=bold color="green" fontcolor="orange"]
style=bold color="green" fontcolor="orange"] -"bundled_monitor_10000 stateful-bundle-0" [ style=bold color="green" fontcolor="black"] -"bundled_monitor_11000 stateful-bundle-2" [ style=bold color="green" fontcolor="black"] -"bundled_promote_0 stateful-bundle-0" -> "bundled_monitor_10000 stateful-bundle-0" [ style = bold] -"bundled_promote_0 stateful-bundle-0" -> "stateful-bundle-master_promoted_0" [ style = bold] -"bundled_promote_0 stateful-bundle-0" [ style=bold color="green" fontcolor="black"] -"bundled_start_0 stateful-bundle-0" -> "bundled_monitor_10000 stateful-bundle-0" [ style = bold] -"bundled_start_0 stateful-bundle-0" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] -"bundled_start_0 stateful-bundle-0" -> "bundled_start_0 stateful-bundle-2" [ style = bold] -"bundled_start_0 stateful-bundle-0" -> "stateful-bundle-master_running_0" [ style = bold] -"bundled_start_0 stateful-bundle-0" [ style=bold color="green" fontcolor="black"] -"bundled_start_0 stateful-bundle-2" -> "bundled_monitor_11000 stateful-bundle-2" [ style = bold] -"bundled_start_0 stateful-bundle-2" -> "stateful-bundle-master_running_0" [ style = bold] -"bundled_start_0 stateful-bundle-2" [ style=bold color="green" fontcolor="black"] -"bundled_stop_0 stateful-bundle-0" -> "bundled_start_0 stateful-bundle-0" [ style = bold] -"bundled_stop_0 stateful-bundle-0" -> "stateful-bundle-master_stopped_0" [ style = bold] -"bundled_stop_0 stateful-bundle-0" [ style=bold color="green" fontcolor="orange"] -"bundled_stop_0 stateful-bundle-2" -> "bundled_start_0 stateful-bundle-2" [ style = bold] -"bundled_stop_0 stateful-bundle-2" -> "bundled_stop_0 stateful-bundle-0" [ style = bold] -"bundled_stop_0 stateful-bundle-2" -> "stateful-bundle-master_stopped_0" [ style = bold] -"bundled_stop_0 stateful-bundle-2" [ style=bold color="green" fontcolor="orange"] -"container2_monitor_20000 rhel7-3" [ style=bold color="green" fontcolor="black"] -"container2_start_0 rhel7-3" -> "container2_monitor_20000 rhel7-3" [ style = bold] -"container2_start_0 rhel7-3" -> "lxc-ms_promote_0 lxc2" [ style = bold] -"container2_start_0 rhel7-3" -> "lxc-ms_start_0 lxc2" [ style = bold] -"container2_start_0 rhel7-3" -> "lxc2_start_0 rhel7-3" [ style = bold] -"container2_start_0 rhel7-3" -> "rsc1_start_0 lxc2" [ style = bold] -"container2_start_0 rhel7-3" -> "rsc2_start_0 lxc2" [ style = bold] -"container2_start_0 rhel7-3" [ style=bold color="green" fontcolor="black"] -"container2_stop_0 rhel7-3" -> "container2_start_0 rhel7-3" [ style = bold] -"container2_stop_0 rhel7-3" -> "stonith 'reboot' lxc2" [ style = bold] -"container2_stop_0 rhel7-3" [ style=bold color="green" fontcolor="black"] -"lxc-ms-master_demote_0" -> "lxc-ms-master_demoted_0" [ style = bold] -"lxc-ms-master_demote_0" -> "lxc-ms_demote_0 lxc2" [ style = bold] -"lxc-ms-master_demote_0" [ style=bold color="green" fontcolor="orange"] -"lxc-ms-master_demoted_0" -> "lxc-ms-master_promote_0" [ style = bold] -"lxc-ms-master_demoted_0" -> "lxc-ms-master_start_0" [ style = bold] -"lxc-ms-master_demoted_0" -> "lxc-ms-master_stop_0" [ style = bold] -"lxc-ms-master_demoted_0" [ style=bold color="green" fontcolor="orange"] -"lxc-ms-master_promote_0" -> "lxc-ms_promote_0 lxc2" [ style = bold] -"lxc-ms-master_promote_0" [ style=bold color="green" fontcolor="orange"] -"lxc-ms-master_promoted_0" [ style=bold color="green" fontcolor="orange"] -"lxc-ms-master_running_0" -> "lxc-ms-master_promote_0" [ style = bold] -"lxc-ms-master_running_0" [ style=bold color="green" fontcolor="orange"] 
-"lxc-ms-master_start_0" -> "lxc-ms-master_running_0" [ style = bold] -"lxc-ms-master_start_0" -> "lxc-ms_start_0 lxc2" [ style = bold] -"lxc-ms-master_start_0" [ style=bold color="green" fontcolor="orange"] -"lxc-ms-master_stop_0" -> "lxc-ms-master_stopped_0" [ style = bold] -"lxc-ms-master_stop_0" -> "lxc-ms_stop_0 lxc2" [ style = bold] -"lxc-ms-master_stop_0" [ style=bold color="green" fontcolor="orange"] -"lxc-ms-master_stopped_0" -> "lxc-ms-master_promote_0" [ style = bold] -"lxc-ms-master_stopped_0" -> "lxc-ms-master_start_0" [ style = bold] -"lxc-ms-master_stopped_0" [ style=bold color="green" fontcolor="orange"] -"lxc-ms_demote_0 lxc2" -> "lxc-ms-master_demoted_0" [ style = bold] -"lxc-ms_demote_0 lxc2" -> "lxc-ms_promote_0 lxc2" [ style = bold] -"lxc-ms_demote_0 lxc2" -> "lxc-ms_stop_0 lxc2" [ style = bold] -"lxc-ms_demote_0 lxc2" [ style=bold color="green" fontcolor="orange"] -"lxc-ms_monitor_10000 lxc2" [ style=bold color="green" fontcolor="black"] -"lxc-ms_promote_0 lxc2" -> "lxc-ms-master_promoted_0" [ style = bold] -"lxc-ms_promote_0 lxc2" -> "lxc-ms_monitor_10000 lxc2" [ style = bold] -"lxc-ms_promote_0 lxc2" [ style=bold color="green" fontcolor="black"] -"lxc-ms_start_0 lxc2" -> "lxc-ms-master_running_0" [ style = bold] -"lxc-ms_start_0 lxc2" -> "lxc-ms_monitor_10000 lxc2" [ style = bold] -"lxc-ms_start_0 lxc2" -> "lxc-ms_promote_0 lxc2" [ style = bold] -"lxc-ms_start_0 lxc2" [ style=bold color="green" fontcolor="black"] -"lxc-ms_stop_0 lxc2" -> "lxc-ms-master_stopped_0" [ style = bold] -"lxc-ms_stop_0 lxc2" -> "lxc-ms_start_0 lxc2" [ style = bold] -"lxc-ms_stop_0 lxc2" [ style=bold color="green" fontcolor="orange"] -"lxc2_monitor_30000 rhel7-3" [ style=bold color="green" fontcolor="black"] -"lxc2_start_0 rhel7-3" -> "lxc-ms_monitor_10000 lxc2" [ style = bold] -"lxc2_start_0 rhel7-3" -> "lxc-ms_promote_0 lxc2" [ style = bold] -"lxc2_start_0 rhel7-3" -> "lxc-ms_start_0 lxc2" [ style = bold] -"lxc2_start_0 rhel7-3" -> "lxc2_monitor_30000 rhel7-3" [ style = bold] -"lxc2_start_0 rhel7-3" -> "rsc1_monitor_11000 lxc2" [ style = bold] -"lxc2_start_0 rhel7-3" -> "rsc1_start_0 lxc2" [ style = bold] -"lxc2_start_0 rhel7-3" -> "rsc2_monitor_11000 lxc2" [ style = bold] -"lxc2_start_0 rhel7-3" -> "rsc2_start_0 lxc2" [ style = bold] -"lxc2_start_0 rhel7-3" [ style=bold color="green" fontcolor="black"] -"lxc2_stop_0 rhel7-3" -> "container2_stop_0 rhel7-3" [ style = bold] -"lxc2_stop_0 rhel7-3" -> "lxc2_start_0 rhel7-3" [ style = bold] -"lxc2_stop_0 rhel7-3" [ style=bold color="green" fontcolor="black"] -"remote-rhel7-2_monitor_60000 rhel7-1" [ style=bold color="green" fontcolor="black"] -"remote-rhel7-2_start_0 rhel7-1" -> "remote-rhel7-2_monitor_60000 rhel7-1" [ style = bold] -"remote-rhel7-2_start_0 rhel7-1" [ style=bold color="green" fontcolor="black"] -"remote-rhel7-2_stop_0 rhel7-1" -> "remote-rhel7-2_start_0 rhel7-1" [ style = bold] -"remote-rhel7-2_stop_0 rhel7-1" [ style=bold color="green" fontcolor="black"] -"rsc1-clone_demote_0" -> "rsc1-clone_demoted_0" [ style = bold] -"rsc1-clone_demote_0" -> "rsc1_demote_0 rhel7-4" [ style = bold] -"rsc1-clone_demote_0" [ style=bold color="green" fontcolor="orange"] -"rsc1-clone_demoted_0" -> "rsc1-clone_promote_0" [ style = bold] -"rsc1-clone_demoted_0" -> "rsc1-clone_start_0" [ style = bold] -"rsc1-clone_demoted_0" -> "rsc1-clone_stop_0" [ style = bold] -"rsc1-clone_demoted_0" [ style=bold color="green" fontcolor="orange"] -"rsc1-clone_promote_0" -> "rsc1_promote_0 rhel7-3" [ style = bold] -"rsc1-clone_promote_0" [ style=bold color="green" 
fontcolor="orange"] -"rsc1-clone_promoted_0" [ style=bold color="green" fontcolor="orange"] -"rsc1-clone_running_0" -> "rsc1-clone_promote_0" [ style = bold] -"rsc1-clone_running_0" [ style=bold color="green" fontcolor="orange"] -"rsc1-clone_start_0" -> "rsc1-clone_running_0" [ style = bold] -"rsc1-clone_start_0" -> "rsc1_start_0 lxc2" [ style = bold] -"rsc1-clone_start_0" [ style=bold color="green" fontcolor="orange"] -"rsc1-clone_stop_0" -> "rsc1-clone_stopped_0" [ style = bold] -"rsc1-clone_stop_0" -> "rsc1_stop_0 lxc2" [ style = bold] -"rsc1-clone_stop_0" -> "rsc1_stop_0 remote-rhel7-2" [ style = bold] -"rsc1-clone_stop_0" -> "rsc1_stop_0 rhel7-4" [ style = bold] -"rsc1-clone_stop_0" [ style=bold color="green" fontcolor="orange"] -"rsc1-clone_stopped_0" -> "rsc1-clone_promote_0" [ style = bold] -"rsc1-clone_stopped_0" -> "rsc1-clone_start_0" [ style = bold] -"rsc1-clone_stopped_0" [ style=bold color="green" fontcolor="orange"] -"rsc1_demote_0 rhel7-4" -> "rsc1-clone_demoted_0" [ style = bold] -"rsc1_demote_0 rhel7-4" -> "rsc1_stop_0 rhel7-4" [ style = bold] -"rsc1_demote_0 rhel7-4" [ style=bold color="green" fontcolor="orange"] -"rsc1_monitor_10000 rhel7-3" [ style=bold color="green" fontcolor="black"] -"rsc1_monitor_11000 lxc2" [ style=bold color="green" fontcolor="black"] -"rsc1_promote_0 rhel7-3" -> "rsc1-clone_promoted_0" [ style = bold] -"rsc1_promote_0 rhel7-3" -> "rsc1_monitor_10000 rhel7-3" [ style = bold] -"rsc1_promote_0 rhel7-3" [ style=bold color="green" fontcolor="black"] -"rsc1_start_0 lxc2" -> "rsc1-clone_running_0" [ style = bold] -"rsc1_start_0 lxc2" -> "rsc1_monitor_11000 lxc2" [ style = bold] -"rsc1_start_0 lxc2" [ style=bold color="green" fontcolor="black"] -"rsc1_stop_0 lxc2" -> "rsc1-clone_stopped_0" [ style = bold] -"rsc1_stop_0 lxc2" -> "rsc1_start_0 lxc2" [ style = bold] -"rsc1_stop_0 lxc2" [ style=bold color="green" fontcolor="orange"] -"rsc1_stop_0 remote-rhel7-2" -> "remote-rhel7-2_stop_0 rhel7-1" [ style = bold] -"rsc1_stop_0 remote-rhel7-2" -> "rsc1-clone_stopped_0" [ style = bold] -"rsc1_stop_0 remote-rhel7-2" [ style=bold color="green" fontcolor="orange"] -"rsc1_stop_0 rhel7-4" -> "rsc1-clone_stopped_0" [ style = bold] -"rsc1_stop_0 rhel7-4" [ style=bold color="green" fontcolor="orange"] -"rsc2-master_demote_0" -> "rsc2-master_demoted_0" [ style = bold] -"rsc2-master_demote_0" -> "rsc2_demote_0 remote-rhel7-2" [ style = bold] -"rsc2-master_demote_0" [ style=bold color="green" fontcolor="orange"] -"rsc2-master_demoted_0" -> "rsc2-master_promote_0" [ style = bold] -"rsc2-master_demoted_0" -> "rsc2-master_start_0" [ style = bold] -"rsc2-master_demoted_0" -> "rsc2-master_stop_0" [ style = bold] -"rsc2-master_demoted_0" [ style=bold color="green" fontcolor="orange"] -"rsc2-master_promote_0" -> "rsc2_promote_0 rhel7-3" [ style = bold] -"rsc2-master_promote_0" [ style=bold color="green" fontcolor="orange"] -"rsc2-master_promoted_0" [ style=bold color="green" fontcolor="orange"] -"rsc2-master_running_0" -> "rsc2-master_promote_0" [ style = bold] -"rsc2-master_running_0" [ style=bold color="green" fontcolor="orange"] -"rsc2-master_start_0" -> "rsc2-master_running_0" [ style = bold] -"rsc2-master_start_0" -> "rsc2_start_0 lxc2" [ style = bold] -"rsc2-master_start_0" [ style=bold color="green" fontcolor="orange"] -"rsc2-master_stop_0" -> "rsc2-master_stopped_0" [ style = bold] -"rsc2-master_stop_0" -> "rsc2_stop_0 lxc2" [ style = bold] -"rsc2-master_stop_0" -> "rsc2_stop_0 remote-rhel7-2" [ style = bold] -"rsc2-master_stop_0" -> "rsc2_stop_0 rhel7-4" [ style = 
bold] -"rsc2-master_stop_0" [ style=bold color="green" fontcolor="orange"] -"rsc2-master_stopped_0" -> "rsc2-master_promote_0" [ style = bold] -"rsc2-master_stopped_0" -> "rsc2-master_start_0" [ style = bold] -"rsc2-master_stopped_0" [ style=bold color="green" fontcolor="orange"] -"rsc2_demote_0 remote-rhel7-2" -> "rsc2-master_demoted_0" [ style = bold] -"rsc2_demote_0 remote-rhel7-2" -> "rsc2_stop_0 remote-rhel7-2" [ style = bold] -"rsc2_demote_0 remote-rhel7-2" [ style=bold color="green" fontcolor="orange"] -"rsc2_monitor_10000 rhel7-3" [ style=bold color="green" fontcolor="black"] -"rsc2_monitor_11000 lxc2" [ style=bold color="green" fontcolor="black"] -"rsc2_promote_0 rhel7-3" -> "rsc2-master_promoted_0" [ style = bold] -"rsc2_promote_0 rhel7-3" -> "rsc2_monitor_10000 rhel7-3" [ style = bold] -"rsc2_promote_0 rhel7-3" [ style=bold color="green" fontcolor="black"] -"rsc2_start_0 lxc2" -> "rsc2-master_running_0" [ style = bold] -"rsc2_start_0 lxc2" -> "rsc2_monitor_11000 lxc2" [ style = bold] -"rsc2_start_0 lxc2" [ style=bold color="green" fontcolor="black"] -"rsc2_stop_0 lxc2" -> "rsc2-master_stopped_0" [ style = bold] -"rsc2_stop_0 lxc2" -> "rsc2_start_0 lxc2" [ style = bold] -"rsc2_stop_0 lxc2" [ style=bold color="green" fontcolor="orange"] -"rsc2_stop_0 remote-rhel7-2" -> "remote-rhel7-2_stop_0 rhel7-1" [ style = bold] -"rsc2_stop_0 remote-rhel7-2" -> "rsc2-master_stopped_0" [ style = bold] -"rsc2_stop_0 remote-rhel7-2" [ style=bold color="green" fontcolor="orange"] -"rsc2_stop_0 rhel7-4" -> "rsc2-master_stopped_0" [ style = bold] -"rsc2_stop_0 rhel7-4" [ style=bold color="green" fontcolor="orange"] -"stateful-bundle-0_monitor_30000 rhel7-5" [ style=bold color="green" fontcolor="black"] -"stateful-bundle-0_start_0 rhel7-5" -> "bundled_monitor_10000 stateful-bundle-0" [ style = bold] -"stateful-bundle-0_start_0 rhel7-5" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] -"stateful-bundle-0_start_0 rhel7-5" -> "bundled_start_0 stateful-bundle-0" [ style = bold] -"stateful-bundle-0_start_0 rhel7-5" -> "stateful-bundle-0_monitor_30000 rhel7-5" [ style = bold] -"stateful-bundle-0_start_0 rhel7-5" [ style=bold color="green" fontcolor="black"] -"stateful-bundle-0_stop_0 rhel7-5" -> "stateful-bundle-0_start_0 rhel7-5" [ style = bold] -"stateful-bundle-0_stop_0 rhel7-5" -> "stateful-bundle-docker-0_stop_0 rhel7-5" [ style = bold] -"stateful-bundle-0_stop_0 rhel7-5" [ style=bold color="green" fontcolor="black"] -"stateful-bundle-2_monitor_30000 rhel7-3" [ style=bold color="green" fontcolor="black"] -"stateful-bundle-2_start_0 rhel7-3" -> "bundled_monitor_11000 stateful-bundle-2" [ style = bold] -"stateful-bundle-2_start_0 rhel7-3" -> "bundled_start_0 stateful-bundle-2" [ style = bold] -"stateful-bundle-2_start_0 rhel7-3" -> "stateful-bundle-2_monitor_30000 rhel7-3" [ style = bold] -"stateful-bundle-2_start_0 rhel7-3" [ style=bold color="green" fontcolor="black"] -"stateful-bundle-2_stop_0 rhel7-4" -> "stateful-bundle-2_start_0 rhel7-3" [ style = bold] -"stateful-bundle-2_stop_0 rhel7-4" -> "stateful-bundle-docker-2_stop_0 rhel7-4" [ style = bold] -"stateful-bundle-2_stop_0 rhel7-4" [ style=bold color="green" fontcolor="orange"] -"stateful-bundle-docker-0_monitor_60000 rhel7-5" [ style=bold color="green" fontcolor="black"] -"stateful-bundle-docker-0_start_0 rhel7-5" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] -"stateful-bundle-docker-0_start_0 rhel7-5" -> "bundled_start_0 stateful-bundle-0" [ style = bold] -"stateful-bundle-docker-0_start_0 rhel7-5" -> 
"stateful-bundle-0_start_0 rhel7-5" [ style = bold] -"stateful-bundle-docker-0_start_0 rhel7-5" -> "stateful-bundle-docker-0_monitor_60000 rhel7-5" [ style = bold] -"stateful-bundle-docker-0_start_0 rhel7-5" -> "stateful-bundle_running_0" [ style = bold] -"stateful-bundle-docker-0_start_0 rhel7-5" [ style=bold color="green" fontcolor="black"] -"stateful-bundle-docker-0_stop_0 rhel7-5" -> "stateful-bundle-docker-0_start_0 rhel7-5" [ style = bold] -"stateful-bundle-docker-0_stop_0 rhel7-5" -> "stateful-bundle_stopped_0" [ style = bold] -"stateful-bundle-docker-0_stop_0 rhel7-5" -> "stonith 'reboot' stateful-bundle-0" [ style = bold] -"stateful-bundle-docker-0_stop_0 rhel7-5" [ style=bold color="green" fontcolor="black"] -"stateful-bundle-docker-2_monitor_60000 rhel7-3" [ style=bold color="green" fontcolor="black"] -"stateful-bundle-docker-2_start_0 rhel7-3" -> "bundled_start_0 stateful-bundle-2" [ style = bold] -"stateful-bundle-docker-2_start_0 rhel7-3" -> "stateful-bundle-2_start_0 rhel7-3" [ style = bold] -"stateful-bundle-docker-2_start_0 rhel7-3" -> "stateful-bundle-docker-2_monitor_60000 rhel7-3" [ style = bold] -"stateful-bundle-docker-2_start_0 rhel7-3" -> "stateful-bundle_running_0" [ style = bold] -"stateful-bundle-docker-2_start_0 rhel7-3" [ style=bold color="green" fontcolor="black"] -"stateful-bundle-docker-2_stop_0 rhel7-4" -> "stateful-bundle-docker-2_start_0 rhel7-3" [ style = bold] -"stateful-bundle-docker-2_stop_0 rhel7-4" -> "stateful-bundle-ip-192.168.122.133_stop_0 rhel7-4" [ style = bold] -"stateful-bundle-docker-2_stop_0 rhel7-4" -> "stateful-bundle_stopped_0" [ style = bold] -"stateful-bundle-docker-2_stop_0 rhel7-4" [ style=bold color="green" fontcolor="orange"] -"stateful-bundle-ip-192.168.122.133_monitor_60000 rhel7-3" [ style=bold color="green" fontcolor="black"] -"stateful-bundle-ip-192.168.122.133_start_0 rhel7-3" -> "stateful-bundle-docker-2_start_0 rhel7-3" [ style = bold] -"stateful-bundle-ip-192.168.122.133_start_0 rhel7-3" -> "stateful-bundle-ip-192.168.122.133_monitor_60000 rhel7-3" [ style = bold] -"stateful-bundle-ip-192.168.122.133_start_0 rhel7-3" [ style=bold color="green" fontcolor="black"] -"stateful-bundle-ip-192.168.122.133_stop_0 rhel7-4" -> "stateful-bundle-ip-192.168.122.133_start_0 rhel7-3" [ style = bold] -"stateful-bundle-ip-192.168.122.133_stop_0 rhel7-4" [ style=bold color="green" fontcolor="orange"] -"stateful-bundle-master_demote_0" -> "bundled_demote_0 stateful-bundle-0" [ style = bold] -"stateful-bundle-master_demote_0" -> "stateful-bundle-master_demoted_0" [ style = bold] -"stateful-bundle-master_demote_0" [ style=bold color="green" fontcolor="orange"] -"stateful-bundle-master_demoted_0" -> "stateful-bundle-master_promote_0" [ style = bold] -"stateful-bundle-master_demoted_0" -> "stateful-bundle-master_start_0" [ style = bold] -"stateful-bundle-master_demoted_0" -> "stateful-bundle-master_stop_0" [ style = bold] -"stateful-bundle-master_demoted_0" -> "stateful-bundle_demoted_0" [ style = bold] -"stateful-bundle-master_demoted_0" [ style=bold color="green" fontcolor="orange"] -"stateful-bundle-master_promote_0" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] -"stateful-bundle-master_promote_0" [ style=bold color="green" fontcolor="orange"] -"stateful-bundle-master_promoted_0" -> "stateful-bundle_promoted_0" [ style = bold] -"stateful-bundle-master_promoted_0" [ style=bold color="green" fontcolor="orange"] -"stateful-bundle-master_running_0" -> "stateful-bundle-master_promote_0" [ style = bold] 
-"stateful-bundle-master_running_0" -> "stateful-bundle_running_0" [ style = bold] -"stateful-bundle-master_running_0" [ style=bold color="green" fontcolor="orange"] -"stateful-bundle-master_start_0" -> "bundled_start_0 stateful-bundle-0" [ style = bold] -"stateful-bundle-master_start_0" -> "bundled_start_0 stateful-bundle-2" [ style = bold] -"stateful-bundle-master_start_0" -> "stateful-bundle-master_running_0" [ style = bold] -"stateful-bundle-master_start_0" [ style=bold color="green" fontcolor="orange"] -"stateful-bundle-master_stop_0" -> "bundled_stop_0 stateful-bundle-0" [ style = bold] -"stateful-bundle-master_stop_0" -> "bundled_stop_0 stateful-bundle-2" [ style = bold] -"stateful-bundle-master_stop_0" -> "stateful-bundle-master_stopped_0" [ style = bold] -"stateful-bundle-master_stop_0" [ style=bold color="green" fontcolor="orange"] -"stateful-bundle-master_stopped_0" -> "stateful-bundle-master_promote_0" [ style = bold] -"stateful-bundle-master_stopped_0" -> "stateful-bundle-master_start_0" [ style = bold] -"stateful-bundle-master_stopped_0" -> "stateful-bundle_stopped_0" [ style = bold] -"stateful-bundle-master_stopped_0" [ style=bold color="green" fontcolor="orange"] -"stateful-bundle_demote_0" -> "stateful-bundle-master_demote_0" [ style = bold] -"stateful-bundle_demote_0" -> "stateful-bundle_demoted_0" [ style = bold] -"stateful-bundle_demote_0" [ style=bold color="green" fontcolor="orange"] -"stateful-bundle_demoted_0" -> "stateful-bundle_promote_0" [ style = bold] -"stateful-bundle_demoted_0" -> "stateful-bundle_start_0" [ style = bold] -"stateful-bundle_demoted_0" -> "stateful-bundle_stop_0" [ style = bold] -"stateful-bundle_demoted_0" [ style=bold color="green" fontcolor="orange"] -"stateful-bundle_promote_0" -> "stateful-bundle-master_promote_0" [ style = bold] -"stateful-bundle_promote_0" [ style=bold color="green" fontcolor="orange"] -"stateful-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"] -"stateful-bundle_running_0" -> "stateful-bundle_promote_0" [ style = bold] -"stateful-bundle_running_0" [ style=bold color="green" fontcolor="orange"] -"stateful-bundle_start_0" -> "stateful-bundle-docker-0_start_0 rhel7-5" [ style = bold] -"stateful-bundle_start_0" -> "stateful-bundle-docker-2_start_0 rhel7-3" [ style = bold] -"stateful-bundle_start_0" -> "stateful-bundle-master_start_0" [ style = bold] -"stateful-bundle_start_0" [ style=bold color="green" fontcolor="orange"] -"stateful-bundle_stop_0" -> "bundled_stop_0 stateful-bundle-0" [ style = bold] -"stateful-bundle_stop_0" -> "bundled_stop_0 stateful-bundle-2" [ style = bold] -"stateful-bundle_stop_0" -> "stateful-bundle-docker-0_stop_0 rhel7-5" [ style = bold] -"stateful-bundle_stop_0" -> "stateful-bundle-docker-2_stop_0 rhel7-4" [ style = bold] -"stateful-bundle_stop_0" -> "stateful-bundle-master_stop_0" [ style = bold] -"stateful-bundle_stop_0" [ style=bold color="green" fontcolor="orange"] -"stateful-bundle_stopped_0" -> "stateful-bundle_promote_0" [ style = bold] -"stateful-bundle_stopped_0" -> "stateful-bundle_start_0" [ style = bold] -"stateful-bundle_stopped_0" [ style=bold color="green" fontcolor="orange"] -"stonith 'reboot' lxc2" -> "Fencing_start_0 rhel7-5" [ style = bold] -"stonith 'reboot' lxc2" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] -"stonith 'reboot' lxc2" -> "bundled_start_0 stateful-bundle-0" [ style = bold] -"stonith 'reboot' lxc2" -> "bundled_start_0 stateful-bundle-2" [ style = bold] -"stonith 'reboot' lxc2" -> "container2_start_0 rhel7-3" [ style = bold] -"stonith 
'reboot' lxc2" -> "lxc-ms-master_stop_0" [ style = bold] -"stonith 'reboot' lxc2" -> "lxc-ms_demote_0 lxc2" [ style = bold] -"stonith 'reboot' lxc2" -> "lxc-ms_promote_0 lxc2" [ style = bold] -"stonith 'reboot' lxc2" -> "lxc-ms_start_0 lxc2" [ style = bold] -"stonith 'reboot' lxc2" -> "lxc-ms_stop_0 lxc2" [ style = bold] -"stonith 'reboot' lxc2" -> "lxc2_start_0 rhel7-3" [ style = bold] -"stonith 'reboot' lxc2" -> "remote-rhel7-2_start_0 rhel7-1" [ style = bold] -"stonith 'reboot' lxc2" -> "rsc1-clone_stop_0" [ style = bold] -"stonith 'reboot' lxc2" -> "rsc1_promote_0 rhel7-3" [ style = bold] -"stonith 'reboot' lxc2" -> "rsc1_start_0 lxc2" [ style = bold] -"stonith 'reboot' lxc2" -> "rsc1_stop_0 lxc2" [ style = bold] -"stonith 'reboot' lxc2" -> "rsc2-master_stop_0" [ style = bold] -"stonith 'reboot' lxc2" -> "rsc2_promote_0 rhel7-3" [ style = bold] -"stonith 'reboot' lxc2" -> "rsc2_start_0 lxc2" [ style = bold] -"stonith 'reboot' lxc2" -> "rsc2_stop_0 lxc2" [ style = bold] -"stonith 'reboot' lxc2" -> "stateful-bundle-0_start_0 rhel7-5" [ style = bold] -"stonith 'reboot' lxc2" -> "stateful-bundle-2_start_0 rhel7-3" [ style = bold] -"stonith 'reboot' lxc2" -> "stateful-bundle-docker-0_start_0 rhel7-5" [ style = bold] -"stonith 'reboot' lxc2" -> "stateful-bundle-docker-2_start_0 rhel7-3" [ style = bold] -"stonith 'reboot' lxc2" -> "stateful-bundle-ip-192.168.122.133_start_0 rhel7-3" [ style = bold] -"stonith 'reboot' lxc2" [ style=bold color="green" fontcolor="orange"] -"stonith 'reboot' remote-rhel7-2" -> "Fencing_start_0 rhel7-5" [ style = bold] -"stonith 'reboot' remote-rhel7-2" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] -"stonith 'reboot' remote-rhel7-2" -> "bundled_start_0 stateful-bundle-0" [ style = bold] -"stonith 'reboot' remote-rhel7-2" -> "bundled_start_0 stateful-bundle-2" [ style = bold] -"stonith 'reboot' remote-rhel7-2" -> "container2_start_0 rhel7-3" [ style = bold] -"stonith 'reboot' remote-rhel7-2" -> "lxc-ms_promote_0 lxc2" [ style = bold] -"stonith 'reboot' remote-rhel7-2" -> "lxc-ms_start_0 lxc2" [ style = bold] -"stonith 'reboot' remote-rhel7-2" -> "lxc2_start_0 rhel7-3" [ style = bold] -"stonith 'reboot' remote-rhel7-2" -> "remote-rhel7-2_start_0 rhel7-1" [ style = bold] -"stonith 'reboot' remote-rhel7-2" -> "rsc1-clone_stop_0" [ style = bold] -"stonith 'reboot' remote-rhel7-2" -> "rsc1_promote_0 rhel7-3" [ style = bold] -"stonith 'reboot' remote-rhel7-2" -> "rsc1_start_0 lxc2" [ style = bold] -"stonith 'reboot' remote-rhel7-2" -> "rsc1_stop_0 remote-rhel7-2" [ style = bold] -"stonith 'reboot' remote-rhel7-2" -> "rsc2-master_stop_0" [ style = bold] -"stonith 'reboot' remote-rhel7-2" -> "rsc2_demote_0 remote-rhel7-2" [ style = bold] -"stonith 'reboot' remote-rhel7-2" -> "rsc2_promote_0 rhel7-3" [ style = bold] -"stonith 'reboot' remote-rhel7-2" -> "rsc2_start_0 lxc2" [ style = bold] -"stonith 'reboot' remote-rhel7-2" -> "rsc2_stop_0 remote-rhel7-2" [ style = bold] -"stonith 'reboot' remote-rhel7-2" -> "stateful-bundle-0_start_0 rhel7-5" [ style = bold] -"stonith 'reboot' remote-rhel7-2" -> "stateful-bundle-2_start_0 rhel7-3" [ style = bold] -"stonith 'reboot' remote-rhel7-2" -> "stateful-bundle-docker-0_start_0 rhel7-5" [ style = bold] -"stonith 'reboot' remote-rhel7-2" -> "stateful-bundle-docker-2_start_0 rhel7-3" [ style = bold] -"stonith 'reboot' remote-rhel7-2" -> "stateful-bundle-ip-192.168.122.133_start_0 rhel7-3" [ style = bold] -"stonith 'reboot' remote-rhel7-2" -> "stonith 'reboot' rhel7-4" [ style = bold] -"stonith 'reboot' remote-rhel7-2" [ 
style=bold color="green" fontcolor="black"] -"stonith 'reboot' rhel7-4" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] -"stonith 'reboot' rhel7-4" -> "bundled_start_0 stateful-bundle-0" [ style = bold] -"stonith 'reboot' rhel7-4" -> "bundled_start_0 stateful-bundle-2" [ style = bold] -"stonith 'reboot' rhel7-4" -> "container2_start_0 rhel7-3" [ style = bold] -"stonith 'reboot' rhel7-4" -> "lxc-ms_promote_0 lxc2" [ style = bold] -"stonith 'reboot' rhel7-4" -> "lxc-ms_start_0 lxc2" [ style = bold] -"stonith 'reboot' rhel7-4" -> "rsc1-clone_stop_0" [ style = bold] -"stonith 'reboot' rhel7-4" -> "rsc1_demote_0 rhel7-4" [ style = bold] -"stonith 'reboot' rhel7-4" -> "rsc1_promote_0 rhel7-3" [ style = bold] -"stonith 'reboot' rhel7-4" -> "rsc1_start_0 lxc2" [ style = bold] -"stonith 'reboot' rhel7-4" -> "rsc1_stop_0 rhel7-4" [ style = bold] -"stonith 'reboot' rhel7-4" -> "rsc2-master_stop_0" [ style = bold] -"stonith 'reboot' rhel7-4" -> "rsc2_promote_0 rhel7-3" [ style = bold] -"stonith 'reboot' rhel7-4" -> "rsc2_start_0 lxc2" [ style = bold] -"stonith 'reboot' rhel7-4" -> "rsc2_stop_0 rhel7-4" [ style = bold] -"stonith 'reboot' rhel7-4" -> "stateful-bundle-docker-0_start_0 rhel7-5" [ style = bold] -"stonith 'reboot' rhel7-4" -> "stateful-bundle-docker-2_start_0 rhel7-3" [ style = bold] -"stonith 'reboot' rhel7-4" -> "stateful-bundle-docker-2_stop_0 rhel7-4" [ style = bold] -"stonith 'reboot' rhel7-4" -> "stateful-bundle-ip-192.168.122.133_start_0 rhel7-3" [ style = bold] -"stonith 'reboot' rhel7-4" -> "stateful-bundle-ip-192.168.122.133_stop_0 rhel7-4" [ style = bold] -"stonith 'reboot' rhel7-4" -> "stonith 'reboot' stateful-bundle-2" [ style = bold] -"stonith 'reboot' rhel7-4" [ style=bold color="green" fontcolor="black"] -"stonith 'reboot' stateful-bundle-0" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] -"stonith 'reboot' stateful-bundle-0" -> "bundled_start_0 stateful-bundle-0" [ style = bold] -"stonith 'reboot' stateful-bundle-0" -> "bundled_start_0 stateful-bundle-2" [ style = bold] -"stonith 'reboot' stateful-bundle-0" -> "container2_start_0 rhel7-3" [ style = bold] -"stonith 'reboot' stateful-bundle-0" -> "lxc-ms_promote_0 lxc2" [ style = bold] -"stonith 'reboot' stateful-bundle-0" -> "lxc-ms_start_0 lxc2" [ style = bold] -"stonith 'reboot' stateful-bundle-0" -> "rsc1_promote_0 rhel7-3" [ style = bold] -"stonith 'reboot' stateful-bundle-0" -> "rsc1_start_0 lxc2" [ style = bold] -"stonith 'reboot' stateful-bundle-0" -> "rsc2_promote_0 rhel7-3" [ style = bold] -"stonith 'reboot' stateful-bundle-0" -> "rsc2_start_0 lxc2" [ style = bold] -"stonith 'reboot' stateful-bundle-0" -> "stateful-bundle-docker-0_start_0 rhel7-5" [ style = bold] -"stonith 'reboot' stateful-bundle-0" -> "stateful-bundle-docker-2_start_0 rhel7-3" [ style = bold] -"stonith 'reboot' stateful-bundle-0" -> "stateful-bundle-ip-192.168.122.133_start_0 rhel7-3" [ style = bold] -"stonith 'reboot' stateful-bundle-0" -> "stateful-bundle-master_stop_0" [ style = bold] -"stonith 'reboot' stateful-bundle-0" [ style=bold color="green" fontcolor="orange"] -"stonith 'reboot' stateful-bundle-2" -> "bundled_promote_0 stateful-bundle-0" [ style = bold] -"stonith 'reboot' stateful-bundle-2" -> "bundled_start_0 stateful-bundle-0" [ style = bold] -"stonith 'reboot' stateful-bundle-2" -> "bundled_start_0 stateful-bundle-2" [ style = bold] -"stonith 'reboot' stateful-bundle-2" -> "container2_start_0 rhel7-3" [ style = bold] -"stonith 'reboot' stateful-bundle-2" -> "lxc-ms_promote_0 lxc2" [ style = bold] -"stonith 'reboot' 
stateful-bundle-2" -> "lxc-ms_start_0 lxc2" [ style = bold] -"stonith 'reboot' stateful-bundle-2" -> "rsc1_promote_0 rhel7-3" [ style = bold] -"stonith 'reboot' stateful-bundle-2" -> "rsc1_start_0 lxc2" [ style = bold] -"stonith 'reboot' stateful-bundle-2" -> "rsc2_promote_0 rhel7-3" [ style = bold] -"stonith 'reboot' stateful-bundle-2" -> "rsc2_start_0 lxc2" [ style = bold] -"stonith 'reboot' stateful-bundle-2" -> "stateful-bundle-docker-0_start_0 rhel7-5" [ style = bold] -"stonith 'reboot' stateful-bundle-2" -> "stateful-bundle-docker-2_start_0 rhel7-3" [ style = bold] -"stonith 'reboot' stateful-bundle-2" -> "stateful-bundle-ip-192.168.122.133_start_0 rhel7-3" [ style = bold] -"stonith 'reboot' stateful-bundle-2" -> "stateful-bundle-master_stop_0" [ style = bold] -"stonith 'reboot' stateful-bundle-2" [ style=bold color="green" fontcolor="orange"] -} diff --git a/cts/scheduler/on_fail_demote4.exp b/cts/scheduler/on_fail_demote4.exp deleted file mode 100644 index 0789a12..0000000 --- a/cts/scheduler/on_fail_demote4.exp +++ /dev/null @@ -1,1818 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/cts/scheduler/on_fail_demote4.scores b/cts/scheduler/on_fail_demote4.scores deleted file mode 100644 index cde3fec..0000000 --- a/cts/scheduler/on_fail_demote4.scores +++ /dev/null @@ -1,470 +0,0 @@ -Allocation scores: -Using the original execution date of: 2020-06-16 19:23:21Z -bundled:0 promotion score on stateful-bundle-0: 10 -bundled:1 promotion score on stateful-bundle-1: 5 -bundled:2 promotion score on stateful-bundle-2: 5 -lxc-ms:0 promotion score on lxc2: INFINITY -lxc-ms:1 promotion score on lxc1: INFINITY -pcmk__bundle_allocate: bundled:0 allocation score on stateful-bundle-0: 501 -pcmk__bundle_allocate: bundled:1 allocation score on stateful-bundle-1: 501 -pcmk__bundle_allocate: bundled:2 allocation score on stateful-bundle-2: 501 -pcmk__bundle_allocate: stateful-bundle allocation score on lxc1: 0 -pcmk__bundle_allocate: stateful-bundle allocation score on lxc2: 0 -pcmk__bundle_allocate: stateful-bundle allocation score on remote-rhel7-2: 0 -pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-0 allocation score on lxc1: -INFINITY -pcmk__bundle_allocate: stateful-bundle-0 allocation score on lxc2: -INFINITY -pcmk__bundle_allocate: stateful-bundle-0 allocation score on remote-rhel7-2: -INFINITY -pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-1 allocation score on lxc1: -INFINITY -pcmk__bundle_allocate: stateful-bundle-1 allocation score on lxc2: -INFINITY -pcmk__bundle_allocate: stateful-bundle-1 allocation score on remote-rhel7-2: -INFINITY -pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-2 allocation score on lxc1: 
-INFINITY -pcmk__bundle_allocate: stateful-bundle-2 allocation score on lxc2: -INFINITY -pcmk__bundle_allocate: stateful-bundle-2 allocation score on remote-rhel7-2: -INFINITY -pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on lxc1: 0 -pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on lxc2: 0 -pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on remote-rhel7-2: 0 -pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on lxc1: 0 -pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on lxc2: 0 -pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on remote-rhel7-2: 0 -pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on lxc1: 0 -pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on lxc2: 0 -pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on remote-rhel7-2: 0 -pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc1: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc2: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on remote-rhel7-2: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc1: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc2: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on remote-rhel7-2: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc1: 0 -pcmk__bundle_allocate: 
stateful-bundle-ip-192.168.122.133 allocation score on lxc2: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on remote-rhel7-2: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-master allocation score on lxc1: 0 -pcmk__bundle_allocate: stateful-bundle-master allocation score on lxc2: 0 -pcmk__bundle_allocate: stateful-bundle-master allocation score on remote-rhel7-2: 0 -pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-1: 0 -pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-3: 0 -pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-4: 0 -pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-5: 0 -pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-0: -INFINITY -pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-1: -INFINITY -pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-2: -INFINITY -pcmk__clone_allocate: bundled:0 allocation score on stateful-bundle-0: INFINITY -pcmk__clone_allocate: bundled:1 allocation score on stateful-bundle-1: INFINITY -pcmk__clone_allocate: bundled:2 allocation score on stateful-bundle-2: INFINITY -pcmk__clone_allocate: lxc-ms-master allocation score on lxc1: INFINITY -pcmk__clone_allocate: lxc-ms-master allocation score on lxc2: INFINITY -pcmk__clone_allocate: lxc-ms-master allocation score on remote-rhel7-2: 0 -pcmk__clone_allocate: lxc-ms-master allocation score on rhel7-1: 0 -pcmk__clone_allocate: lxc-ms-master allocation score on rhel7-3: 0 -pcmk__clone_allocate: lxc-ms-master allocation score on rhel7-4: 0 -pcmk__clone_allocate: lxc-ms-master allocation score on rhel7-5: 0 -pcmk__clone_allocate: lxc-ms:0 allocation score on lxc1: INFINITY -pcmk__clone_allocate: lxc-ms:0 allocation score on lxc2: INFINITY -pcmk__clone_allocate: lxc-ms:0 allocation score on remote-rhel7-2: 0 -pcmk__clone_allocate: lxc-ms:0 allocation score on rhel7-1: 0 -pcmk__clone_allocate: lxc-ms:0 allocation score on rhel7-3: 0 -pcmk__clone_allocate: lxc-ms:0 allocation score on rhel7-4: 0 -pcmk__clone_allocate: lxc-ms:0 allocation score on rhel7-5: 0 -pcmk__clone_allocate: lxc-ms:1 allocation score on lxc1: INFINITY -pcmk__clone_allocate: lxc-ms:1 allocation score on lxc2: INFINITY -pcmk__clone_allocate: lxc-ms:1 allocation score on remote-rhel7-2: 0 -pcmk__clone_allocate: lxc-ms:1 allocation score on rhel7-1: 0 -pcmk__clone_allocate: lxc-ms:1 allocation score on rhel7-3: 0 -pcmk__clone_allocate: lxc-ms:1 allocation score on rhel7-4: 0 -pcmk__clone_allocate: lxc-ms:1 allocation score on rhel7-5: 0 -pcmk__clone_allocate: rsc1-clone allocation score on lxc1: 0 -pcmk__clone_allocate: rsc1-clone allocation score on lxc2: 0 -pcmk__clone_allocate: rsc1-clone allocation score on remote-rhel7-2: 0 -pcmk__clone_allocate: rsc1-clone allocation score on rhel7-1: 0 -pcmk__clone_allocate: rsc1-clone allocation score on rhel7-3: 0 -pcmk__clone_allocate: rsc1-clone allocation score on rhel7-4: 0 -pcmk__clone_allocate: rsc1-clone allocation score on rhel7-5: 0 -pcmk__clone_allocate: rsc1:0 allocation score on lxc1: 0 -pcmk__clone_allocate: rsc1:0 
allocation score on lxc2: 0 -pcmk__clone_allocate: rsc1:0 allocation score on remote-rhel7-2: 0 -pcmk__clone_allocate: rsc1:0 allocation score on rhel7-1: 0 -pcmk__clone_allocate: rsc1:0 allocation score on rhel7-3: 0 -pcmk__clone_allocate: rsc1:0 allocation score on rhel7-4: 1 -pcmk__clone_allocate: rsc1:0 allocation score on rhel7-5: 0 -pcmk__clone_allocate: rsc1:1 allocation score on lxc1: 0 -pcmk__clone_allocate: rsc1:1 allocation score on lxc2: 0 -pcmk__clone_allocate: rsc1:1 allocation score on remote-rhel7-2: 0 -pcmk__clone_allocate: rsc1:1 allocation score on rhel7-1: 0 -pcmk__clone_allocate: rsc1:1 allocation score on rhel7-3: 6 -pcmk__clone_allocate: rsc1:1 allocation score on rhel7-4: 0 -pcmk__clone_allocate: rsc1:1 allocation score on rhel7-5: 0 -pcmk__clone_allocate: rsc1:2 allocation score on lxc1: 0 -pcmk__clone_allocate: rsc1:2 allocation score on lxc2: 0 -pcmk__clone_allocate: rsc1:2 allocation score on remote-rhel7-2: 0 -pcmk__clone_allocate: rsc1:2 allocation score on rhel7-1: 0 -pcmk__clone_allocate: rsc1:2 allocation score on rhel7-3: 0 -pcmk__clone_allocate: rsc1:2 allocation score on rhel7-4: 0 -pcmk__clone_allocate: rsc1:2 allocation score on rhel7-5: 6 -pcmk__clone_allocate: rsc1:3 allocation score on lxc1: 0 -pcmk__clone_allocate: rsc1:3 allocation score on lxc2: 0 -pcmk__clone_allocate: rsc1:3 allocation score on remote-rhel7-2: 0 -pcmk__clone_allocate: rsc1:3 allocation score on rhel7-1: 6 -pcmk__clone_allocate: rsc1:3 allocation score on rhel7-3: 0 -pcmk__clone_allocate: rsc1:3 allocation score on rhel7-4: 0 -pcmk__clone_allocate: rsc1:3 allocation score on rhel7-5: 0 -pcmk__clone_allocate: rsc1:4 allocation score on lxc1: 0 -pcmk__clone_allocate: rsc1:4 allocation score on lxc2: 0 -pcmk__clone_allocate: rsc1:4 allocation score on remote-rhel7-2: 1 -pcmk__clone_allocate: rsc1:4 allocation score on rhel7-1: 0 -pcmk__clone_allocate: rsc1:4 allocation score on rhel7-3: 0 -pcmk__clone_allocate: rsc1:4 allocation score on rhel7-4: 0 -pcmk__clone_allocate: rsc1:4 allocation score on rhel7-5: 0 -pcmk__clone_allocate: rsc1:5 allocation score on lxc1: 0 -pcmk__clone_allocate: rsc1:5 allocation score on lxc2: 6 -pcmk__clone_allocate: rsc1:5 allocation score on remote-rhel7-2: 0 -pcmk__clone_allocate: rsc1:5 allocation score on rhel7-1: 0 -pcmk__clone_allocate: rsc1:5 allocation score on rhel7-3: 0 -pcmk__clone_allocate: rsc1:5 allocation score on rhel7-4: 0 -pcmk__clone_allocate: rsc1:5 allocation score on rhel7-5: 0 -pcmk__clone_allocate: rsc1:6 allocation score on lxc1: 6 -pcmk__clone_allocate: rsc1:6 allocation score on lxc2: 0 -pcmk__clone_allocate: rsc1:6 allocation score on remote-rhel7-2: 0 -pcmk__clone_allocate: rsc1:6 allocation score on rhel7-1: 0 -pcmk__clone_allocate: rsc1:6 allocation score on rhel7-3: 0 -pcmk__clone_allocate: rsc1:6 allocation score on rhel7-4: 0 -pcmk__clone_allocate: rsc1:6 allocation score on rhel7-5: 0 -pcmk__clone_allocate: rsc2-master allocation score on lxc1: 0 -pcmk__clone_allocate: rsc2-master allocation score on lxc2: 0 -pcmk__clone_allocate: rsc2-master allocation score on remote-rhel7-2: 0 -pcmk__clone_allocate: rsc2-master allocation score on rhel7-1: 0 -pcmk__clone_allocate: rsc2-master allocation score on rhel7-3: 0 -pcmk__clone_allocate: rsc2-master allocation score on rhel7-4: 0 -pcmk__clone_allocate: rsc2-master allocation score on rhel7-5: 0 -pcmk__clone_allocate: rsc2:0 allocation score on lxc1: 0 -pcmk__clone_allocate: rsc2:0 allocation score on lxc2: 0 -pcmk__clone_allocate: rsc2:0 allocation score on remote-rhel7-2: 0 
-pcmk__clone_allocate: rsc2:0 allocation score on rhel7-1: 0 -pcmk__clone_allocate: rsc2:0 allocation score on rhel7-3: 0 -pcmk__clone_allocate: rsc2:0 allocation score on rhel7-4: 1 -pcmk__clone_allocate: rsc2:0 allocation score on rhel7-5: 0 -pcmk__clone_allocate: rsc2:1 allocation score on lxc1: 0 -pcmk__clone_allocate: rsc2:1 allocation score on lxc2: 0 -pcmk__clone_allocate: rsc2:1 allocation score on remote-rhel7-2: 0 -pcmk__clone_allocate: rsc2:1 allocation score on rhel7-1: 0 -pcmk__clone_allocate: rsc2:1 allocation score on rhel7-3: 6 -pcmk__clone_allocate: rsc2:1 allocation score on rhel7-4: 0 -pcmk__clone_allocate: rsc2:1 allocation score on rhel7-5: 0 -pcmk__clone_allocate: rsc2:2 allocation score on lxc1: 0 -pcmk__clone_allocate: rsc2:2 allocation score on lxc2: 0 -pcmk__clone_allocate: rsc2:2 allocation score on remote-rhel7-2: 0 -pcmk__clone_allocate: rsc2:2 allocation score on rhel7-1: 0 -pcmk__clone_allocate: rsc2:2 allocation score on rhel7-3: 0 -pcmk__clone_allocate: rsc2:2 allocation score on rhel7-4: 0 -pcmk__clone_allocate: rsc2:2 allocation score on rhel7-5: 6 -pcmk__clone_allocate: rsc2:3 allocation score on lxc1: 0 -pcmk__clone_allocate: rsc2:3 allocation score on lxc2: 0 -pcmk__clone_allocate: rsc2:3 allocation score on remote-rhel7-2: 0 -pcmk__clone_allocate: rsc2:3 allocation score on rhel7-1: 6 -pcmk__clone_allocate: rsc2:3 allocation score on rhel7-3: 0 -pcmk__clone_allocate: rsc2:3 allocation score on rhel7-4: 0 -pcmk__clone_allocate: rsc2:3 allocation score on rhel7-5: 0 -pcmk__clone_allocate: rsc2:4 allocation score on lxc1: 0 -pcmk__clone_allocate: rsc2:4 allocation score on lxc2: 0 -pcmk__clone_allocate: rsc2:4 allocation score on remote-rhel7-2: 1 -pcmk__clone_allocate: rsc2:4 allocation score on rhel7-1: 0 -pcmk__clone_allocate: rsc2:4 allocation score on rhel7-3: 0 -pcmk__clone_allocate: rsc2:4 allocation score on rhel7-4: 0 -pcmk__clone_allocate: rsc2:4 allocation score on rhel7-5: 0 -pcmk__clone_allocate: rsc2:5 allocation score on lxc1: 0 -pcmk__clone_allocate: rsc2:5 allocation score on lxc2: 6 -pcmk__clone_allocate: rsc2:5 allocation score on remote-rhel7-2: 0 -pcmk__clone_allocate: rsc2:5 allocation score on rhel7-1: 0 -pcmk__clone_allocate: rsc2:5 allocation score on rhel7-3: 0 -pcmk__clone_allocate: rsc2:5 allocation score on rhel7-4: 0 -pcmk__clone_allocate: rsc2:5 allocation score on rhel7-5: 0 -pcmk__clone_allocate: rsc2:6 allocation score on lxc1: 6 -pcmk__clone_allocate: rsc2:6 allocation score on lxc2: 0 -pcmk__clone_allocate: rsc2:6 allocation score on remote-rhel7-2: 0 -pcmk__clone_allocate: rsc2:6 allocation score on rhel7-1: 0 -pcmk__clone_allocate: rsc2:6 allocation score on rhel7-3: 0 -pcmk__clone_allocate: rsc2:6 allocation score on rhel7-4: 0 -pcmk__clone_allocate: rsc2:6 allocation score on rhel7-5: 0 -pcmk__clone_allocate: stateful-bundle-master allocation score on lxc1: -INFINITY -pcmk__clone_allocate: stateful-bundle-master allocation score on lxc2: -INFINITY -pcmk__clone_allocate: stateful-bundle-master allocation score on remote-rhel7-2: -INFINITY -pcmk__clone_allocate: stateful-bundle-master allocation score on rhel7-1: -INFINITY -pcmk__clone_allocate: stateful-bundle-master allocation score on rhel7-3: -INFINITY -pcmk__clone_allocate: stateful-bundle-master allocation score on rhel7-4: -INFINITY -pcmk__clone_allocate: stateful-bundle-master allocation score on rhel7-5: -INFINITY -pcmk__clone_allocate: stateful-bundle-master allocation score on stateful-bundle-0: 0 -pcmk__clone_allocate: stateful-bundle-master allocation 
score on stateful-bundle-1: 0 -pcmk__clone_allocate: stateful-bundle-master allocation score on stateful-bundle-2: 0 -pcmk__native_allocate: Fencing allocation score on lxc1: -INFINITY -pcmk__native_allocate: Fencing allocation score on lxc2: -INFINITY -pcmk__native_allocate: Fencing allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: Fencing allocation score on rhel7-1: 0 -pcmk__native_allocate: Fencing allocation score on rhel7-3: 0 -pcmk__native_allocate: Fencing allocation score on rhel7-4: 0 -pcmk__native_allocate: Fencing allocation score on rhel7-5: 0 -pcmk__native_allocate: bundled:0 allocation score on stateful-bundle-0: INFINITY -pcmk__native_allocate: bundled:1 allocation score on stateful-bundle-1: INFINITY -pcmk__native_allocate: bundled:2 allocation score on stateful-bundle-2: INFINITY -pcmk__native_allocate: container1 allocation score on lxc1: -INFINITY -pcmk__native_allocate: container1 allocation score on lxc2: -INFINITY -pcmk__native_allocate: container1 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: container1 allocation score on rhel7-1: 0 -pcmk__native_allocate: container1 allocation score on rhel7-3: INFINITY -pcmk__native_allocate: container1 allocation score on rhel7-4: -INFINITY -pcmk__native_allocate: container1 allocation score on rhel7-5: 0 -pcmk__native_allocate: container2 allocation score on lxc1: -INFINITY -pcmk__native_allocate: container2 allocation score on lxc2: -INFINITY -pcmk__native_allocate: container2 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: container2 allocation score on rhel7-1: 0 -pcmk__native_allocate: container2 allocation score on rhel7-3: INFINITY -pcmk__native_allocate: container2 allocation score on rhel7-4: -INFINITY -pcmk__native_allocate: container2 allocation score on rhel7-5: 0 -pcmk__native_allocate: lxc-ms:0 allocation score on lxc1: -INFINITY -pcmk__native_allocate: lxc-ms:0 allocation score on lxc2: INFINITY -pcmk__native_allocate: lxc-ms:0 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: lxc-ms:0 allocation score on rhel7-1: 0 -pcmk__native_allocate: lxc-ms:0 allocation score on rhel7-3: 0 -pcmk__native_allocate: lxc-ms:0 allocation score on rhel7-4: -INFINITY -pcmk__native_allocate: lxc-ms:0 allocation score on rhel7-5: 0 -pcmk__native_allocate: lxc-ms:1 allocation score on lxc1: INFINITY -pcmk__native_allocate: lxc-ms:1 allocation score on lxc2: INFINITY -pcmk__native_allocate: lxc-ms:1 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: lxc-ms:1 allocation score on rhel7-1: 0 -pcmk__native_allocate: lxc-ms:1 allocation score on rhel7-3: 0 -pcmk__native_allocate: lxc-ms:1 allocation score on rhel7-4: -INFINITY -pcmk__native_allocate: lxc-ms:1 allocation score on rhel7-5: 0 -pcmk__native_allocate: lxc1 allocation score on lxc1: -INFINITY -pcmk__native_allocate: lxc1 allocation score on lxc2: -INFINITY -pcmk__native_allocate: lxc1 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: lxc1 allocation score on rhel7-1: -INFINITY -pcmk__native_allocate: lxc1 allocation score on rhel7-3: 0 -pcmk__native_allocate: lxc1 allocation score on rhel7-4: -INFINITY -pcmk__native_allocate: lxc1 allocation score on rhel7-5: -INFINITY -pcmk__native_allocate: lxc2 allocation score on lxc1: -INFINITY -pcmk__native_allocate: lxc2 allocation score on lxc2: -INFINITY -pcmk__native_allocate: lxc2 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: lxc2 allocation score on rhel7-1: -INFINITY 
-pcmk__native_allocate: lxc2 allocation score on rhel7-3: 0 -pcmk__native_allocate: lxc2 allocation score on rhel7-4: -INFINITY -pcmk__native_allocate: lxc2 allocation score on rhel7-5: -INFINITY -pcmk__native_allocate: remote-rhel7-2 allocation score on lxc1: -INFINITY -pcmk__native_allocate: remote-rhel7-2 allocation score on lxc2: -INFINITY -pcmk__native_allocate: remote-rhel7-2 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: remote-rhel7-2 allocation score on rhel7-1: 0 -pcmk__native_allocate: remote-rhel7-2 allocation score on rhel7-3: 0 -pcmk__native_allocate: remote-rhel7-2 allocation score on rhel7-4: 0 -pcmk__native_allocate: remote-rhel7-2 allocation score on rhel7-5: 0 -pcmk__native_allocate: rsc1:0 allocation score on lxc1: -INFINITY -pcmk__native_allocate: rsc1:0 allocation score on lxc2: -INFINITY -pcmk__native_allocate: rsc1:0 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: rsc1:0 allocation score on rhel7-1: -INFINITY -pcmk__native_allocate: rsc1:0 allocation score on rhel7-3: -INFINITY -pcmk__native_allocate: rsc1:0 allocation score on rhel7-4: -INFINITY -pcmk__native_allocate: rsc1:0 allocation score on rhel7-5: -INFINITY -pcmk__native_allocate: rsc1:1 allocation score on lxc1: 0 -pcmk__native_allocate: rsc1:1 allocation score on lxc2: 0 -pcmk__native_allocate: rsc1:1 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: rsc1:1 allocation score on rhel7-1: 0 -pcmk__native_allocate: rsc1:1 allocation score on rhel7-3: 6 -pcmk__native_allocate: rsc1:1 allocation score on rhel7-4: -INFINITY -pcmk__native_allocate: rsc1:1 allocation score on rhel7-5: 0 -pcmk__native_allocate: rsc1:2 allocation score on lxc1: 0 -pcmk__native_allocate: rsc1:2 allocation score on lxc2: 0 -pcmk__native_allocate: rsc1:2 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: rsc1:2 allocation score on rhel7-1: 0 -pcmk__native_allocate: rsc1:2 allocation score on rhel7-3: -INFINITY -pcmk__native_allocate: rsc1:2 allocation score on rhel7-4: -INFINITY -pcmk__native_allocate: rsc1:2 allocation score on rhel7-5: 6 -pcmk__native_allocate: rsc1:3 allocation score on lxc1: 0 -pcmk__native_allocate: rsc1:3 allocation score on lxc2: 0 -pcmk__native_allocate: rsc1:3 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: rsc1:3 allocation score on rhel7-1: 6 -pcmk__native_allocate: rsc1:3 allocation score on rhel7-3: -INFINITY -pcmk__native_allocate: rsc1:3 allocation score on rhel7-4: -INFINITY -pcmk__native_allocate: rsc1:3 allocation score on rhel7-5: -INFINITY -pcmk__native_allocate: rsc1:4 allocation score on lxc1: -INFINITY -pcmk__native_allocate: rsc1:4 allocation score on lxc2: -INFINITY -pcmk__native_allocate: rsc1:4 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: rsc1:4 allocation score on rhel7-1: -INFINITY -pcmk__native_allocate: rsc1:4 allocation score on rhel7-3: -INFINITY -pcmk__native_allocate: rsc1:4 allocation score on rhel7-4: -INFINITY -pcmk__native_allocate: rsc1:4 allocation score on rhel7-5: -INFINITY -pcmk__native_allocate: rsc1:5 allocation score on lxc1: -INFINITY -pcmk__native_allocate: rsc1:5 allocation score on lxc2: 6 -pcmk__native_allocate: rsc1:5 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: rsc1:5 allocation score on rhel7-1: -INFINITY -pcmk__native_allocate: rsc1:5 allocation score on rhel7-3: -INFINITY -pcmk__native_allocate: rsc1:5 allocation score on rhel7-4: -INFINITY -pcmk__native_allocate: rsc1:5 allocation score on rhel7-5: -INFINITY 
-pcmk__native_allocate: rsc1:6 allocation score on lxc1: 6 -pcmk__native_allocate: rsc1:6 allocation score on lxc2: 0 -pcmk__native_allocate: rsc1:6 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: rsc1:6 allocation score on rhel7-1: -INFINITY -pcmk__native_allocate: rsc1:6 allocation score on rhel7-3: -INFINITY -pcmk__native_allocate: rsc1:6 allocation score on rhel7-4: -INFINITY -pcmk__native_allocate: rsc1:6 allocation score on rhel7-5: -INFINITY -pcmk__native_allocate: rsc2:0 allocation score on lxc1: -INFINITY -pcmk__native_allocate: rsc2:0 allocation score on lxc2: -INFINITY -pcmk__native_allocate: rsc2:0 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: rsc2:0 allocation score on rhel7-1: -INFINITY -pcmk__native_allocate: rsc2:0 allocation score on rhel7-3: -INFINITY -pcmk__native_allocate: rsc2:0 allocation score on rhel7-4: -INFINITY -pcmk__native_allocate: rsc2:0 allocation score on rhel7-5: -INFINITY -pcmk__native_allocate: rsc2:1 allocation score on lxc1: 0 -pcmk__native_allocate: rsc2:1 allocation score on lxc2: 0 -pcmk__native_allocate: rsc2:1 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: rsc2:1 allocation score on rhel7-1: 0 -pcmk__native_allocate: rsc2:1 allocation score on rhel7-3: 6 -pcmk__native_allocate: rsc2:1 allocation score on rhel7-4: -INFINITY -pcmk__native_allocate: rsc2:1 allocation score on rhel7-5: 0 -pcmk__native_allocate: rsc2:2 allocation score on lxc1: 0 -pcmk__native_allocate: rsc2:2 allocation score on lxc2: 0 -pcmk__native_allocate: rsc2:2 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: rsc2:2 allocation score on rhel7-1: 0 -pcmk__native_allocate: rsc2:2 allocation score on rhel7-3: -INFINITY -pcmk__native_allocate: rsc2:2 allocation score on rhel7-4: -INFINITY -pcmk__native_allocate: rsc2:2 allocation score on rhel7-5: 6 -pcmk__native_allocate: rsc2:3 allocation score on lxc1: 0 -pcmk__native_allocate: rsc2:3 allocation score on lxc2: 0 -pcmk__native_allocate: rsc2:3 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: rsc2:3 allocation score on rhel7-1: 6 -pcmk__native_allocate: rsc2:3 allocation score on rhel7-3: -INFINITY -pcmk__native_allocate: rsc2:3 allocation score on rhel7-4: -INFINITY -pcmk__native_allocate: rsc2:3 allocation score on rhel7-5: -INFINITY -pcmk__native_allocate: rsc2:4 allocation score on lxc1: -INFINITY -pcmk__native_allocate: rsc2:4 allocation score on lxc2: -INFINITY -pcmk__native_allocate: rsc2:4 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: rsc2:4 allocation score on rhel7-1: -INFINITY -pcmk__native_allocate: rsc2:4 allocation score on rhel7-3: -INFINITY -pcmk__native_allocate: rsc2:4 allocation score on rhel7-4: -INFINITY -pcmk__native_allocate: rsc2:4 allocation score on rhel7-5: -INFINITY -pcmk__native_allocate: rsc2:5 allocation score on lxc1: -INFINITY -pcmk__native_allocate: rsc2:5 allocation score on lxc2: 6 -pcmk__native_allocate: rsc2:5 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: rsc2:5 allocation score on rhel7-1: -INFINITY -pcmk__native_allocate: rsc2:5 allocation score on rhel7-3: -INFINITY -pcmk__native_allocate: rsc2:5 allocation score on rhel7-4: -INFINITY -pcmk__native_allocate: rsc2:5 allocation score on rhel7-5: -INFINITY -pcmk__native_allocate: rsc2:6 allocation score on lxc1: 6 -pcmk__native_allocate: rsc2:6 allocation score on lxc2: 0 -pcmk__native_allocate: rsc2:6 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: rsc2:6 allocation 
score on rhel7-1: -INFINITY -pcmk__native_allocate: rsc2:6 allocation score on rhel7-3: -INFINITY -pcmk__native_allocate: rsc2:6 allocation score on rhel7-4: -INFINITY -pcmk__native_allocate: rsc2:6 allocation score on rhel7-5: -INFINITY -pcmk__native_allocate: stateful-bundle-0 allocation score on lxc1: -INFINITY -pcmk__native_allocate: stateful-bundle-0 allocation score on lxc2: -INFINITY -pcmk__native_allocate: stateful-bundle-0 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: stateful-bundle-0 allocation score on rhel7-1: 0 -pcmk__native_allocate: stateful-bundle-0 allocation score on rhel7-3: 0 -pcmk__native_allocate: stateful-bundle-0 allocation score on rhel7-4: 0 -pcmk__native_allocate: stateful-bundle-0 allocation score on rhel7-5: 10000 -pcmk__native_allocate: stateful-bundle-1 allocation score on lxc1: -INFINITY -pcmk__native_allocate: stateful-bundle-1 allocation score on lxc2: -INFINITY -pcmk__native_allocate: stateful-bundle-1 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: stateful-bundle-1 allocation score on rhel7-1: 10000 -pcmk__native_allocate: stateful-bundle-1 allocation score on rhel7-3: 0 -pcmk__native_allocate: stateful-bundle-1 allocation score on rhel7-4: 0 -pcmk__native_allocate: stateful-bundle-1 allocation score on rhel7-5: 0 -pcmk__native_allocate: stateful-bundle-2 allocation score on lxc1: -INFINITY -pcmk__native_allocate: stateful-bundle-2 allocation score on lxc2: -INFINITY -pcmk__native_allocate: stateful-bundle-2 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: stateful-bundle-2 allocation score on rhel7-1: 0 -pcmk__native_allocate: stateful-bundle-2 allocation score on rhel7-3: 10000 -pcmk__native_allocate: stateful-bundle-2 allocation score on rhel7-4: 0 -pcmk__native_allocate: stateful-bundle-2 allocation score on rhel7-5: 0 -pcmk__native_allocate: stateful-bundle-docker-0 allocation score on lxc1: -10000 -pcmk__native_allocate: stateful-bundle-docker-0 allocation score on lxc2: -10000 -pcmk__native_allocate: stateful-bundle-docker-0 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: stateful-bundle-docker-0 allocation score on rhel7-1: -INFINITY -pcmk__native_allocate: stateful-bundle-docker-0 allocation score on rhel7-3: 0 -pcmk__native_allocate: stateful-bundle-docker-0 allocation score on rhel7-4: -INFINITY -pcmk__native_allocate: stateful-bundle-docker-0 allocation score on rhel7-5: 0 -pcmk__native_allocate: stateful-bundle-docker-1 allocation score on lxc1: -10000 -pcmk__native_allocate: stateful-bundle-docker-1 allocation score on lxc2: -10000 -pcmk__native_allocate: stateful-bundle-docker-1 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: stateful-bundle-docker-1 allocation score on rhel7-1: 0 -pcmk__native_allocate: stateful-bundle-docker-1 allocation score on rhel7-3: 0 -pcmk__native_allocate: stateful-bundle-docker-1 allocation score on rhel7-4: -INFINITY -pcmk__native_allocate: stateful-bundle-docker-1 allocation score on rhel7-5: 0 -pcmk__native_allocate: stateful-bundle-docker-2 allocation score on lxc1: -10000 -pcmk__native_allocate: stateful-bundle-docker-2 allocation score on lxc2: -10000 -pcmk__native_allocate: stateful-bundle-docker-2 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: stateful-bundle-docker-2 allocation score on rhel7-1: -INFINITY -pcmk__native_allocate: stateful-bundle-docker-2 allocation score on rhel7-3: 0 -pcmk__native_allocate: stateful-bundle-docker-2 allocation score on rhel7-4: -INFINITY 
-pcmk__native_allocate: stateful-bundle-docker-2 allocation score on rhel7-5: -INFINITY -pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc1: -INFINITY -pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc2: -INFINITY -pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-1: -INFINITY -pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-3: -INFINITY -pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-4: -INFINITY -pcmk__native_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-5: 0 -pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc1: -INFINITY -pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc2: -INFINITY -pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-1: 0 -pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-3: -INFINITY -pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-4: -INFINITY -pcmk__native_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-5: -INFINITY -pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc1: -INFINITY -pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc2: -INFINITY -pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on remote-rhel7-2: -INFINITY -pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-1: -INFINITY -pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-3: 0 -pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-4: -INFINITY -pcmk__native_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-5: -INFINITY -rsc1:0 promotion score on none: 0 -rsc1:1 promotion score on rhel7-3: 5 -rsc1:2 promotion score on rhel7-5: 5 -rsc1:3 promotion score on rhel7-1: 5 -rsc1:4 promotion score on none: 0 -rsc1:5 promotion score on lxc2: 5 -rsc1:6 promotion score on lxc1: 5 -rsc2:0 promotion score on none: 0 -rsc2:1 promotion score on rhel7-3: 5 -rsc2:2 promotion score on rhel7-5: 5 -rsc2:3 promotion score on rhel7-1: 5 -rsc2:4 promotion score on none: 0 -rsc2:5 promotion score on lxc2: 5 -rsc2:6 promotion score on lxc1: 5 diff --git a/cts/scheduler/on_fail_demote4.summary b/cts/scheduler/on_fail_demote4.summary deleted file mode 100644 index 20520ff..0000000 --- a/cts/scheduler/on_fail_demote4.summary +++ /dev/null @@ -1,187 +0,0 @@ -Using the original execution date of: 2020-06-16 19:23:21Z - -Current cluster status: -RemoteNode remote-rhel7-2: UNCLEAN (offline) -Node rhel7-4 (4): UNCLEAN (offline) -Online: [ rhel7-1 rhel7-3 rhel7-5 ] -GuestOnline: [ lxc1:container1 stateful-bundle-1:stateful-bundle-docker-1 ] - - Fencing (stonith:fence_xvm): Started rhel7-4 (UNCLEAN) - Clone Set: rsc1-clone [rsc1] (promotable) - rsc1 (ocf::pacemaker:Stateful): Master rhel7-4 (UNCLEAN) - rsc1 (ocf::pacemaker:Stateful): Slave remote-rhel7-2 (UNCLEAN) - Slaves: [ lxc1 rhel7-1 rhel7-3 rhel7-5 ] - Clone Set: rsc2-master [rsc2] (promotable) - rsc2 (ocf::pacemaker:Stateful): Slave rhel7-4 (UNCLEAN) - rsc2 (ocf::pacemaker:Stateful): Master 
remote-rhel7-2 (UNCLEAN) - Slaves: [ lxc1 rhel7-1 rhel7-3 rhel7-5 ] - remote-rhel7-2 (ocf::pacemaker:remote): FAILED rhel7-1 - container1 (ocf::heartbeat:VirtualDomain): Started rhel7-3 - container2 (ocf::heartbeat:VirtualDomain): FAILED rhel7-3 - Clone Set: lxc-ms-master [lxc-ms] (promotable) - Slaves: [ lxc1 ] - Stopped: [ remote-rhel7-2 rhel7-1 rhel7-3 rhel7-4 rhel7-5 ] - Container bundle set: stateful-bundle [pcmktest:http] - stateful-bundle-0 (192.168.122.131) (ocf::pacemaker:Stateful): FAILED Master rhel7-5 - stateful-bundle-1 (192.168.122.132) (ocf::pacemaker:Stateful): Slave rhel7-1 - stateful-bundle-2 (192.168.122.133) (ocf::pacemaker:Stateful): FAILED rhel7-4 (UNCLEAN) - -Transition Summary: - * Fence (reboot) stateful-bundle-2 (resource: stateful-bundle-docker-2) 'guest is unclean' - * Fence (reboot) stateful-bundle-0 (resource: stateful-bundle-docker-0) 'guest is unclean' - * Fence (reboot) lxc2 (resource: container2) 'guest is unclean' - * Fence (reboot) remote-rhel7-2 'remote connection is unrecoverable' - * Fence (reboot) rhel7-4 'peer is no longer part of the cluster' - * Move Fencing ( rhel7-4 -> rhel7-5 ) - * Stop rsc1:0 ( Master rhel7-4 ) due to node availability - * Promote rsc1:1 ( Slave -> Master rhel7-3 ) - * Stop rsc1:4 ( Slave remote-rhel7-2 ) due to node availability - * Recover rsc1:5 ( Slave lxc2 ) - * Stop rsc2:0 ( Slave rhel7-4 ) due to node availability - * Promote rsc2:1 ( Slave -> Master rhel7-3 ) - * Stop rsc2:4 ( Master remote-rhel7-2 ) due to node availability - * Recover rsc2:5 ( Slave lxc2 ) - * Recover remote-rhel7-2 ( rhel7-1 ) - * Recover container2 ( rhel7-3 ) - * Recover lxc-ms:0 ( Master lxc2 ) - * Recover stateful-bundle-docker-0 ( rhel7-5 ) - * Restart stateful-bundle-0 ( rhel7-5 ) due to required stateful-bundle-docker-0 start - * Recover bundled:0 ( Master stateful-bundle-0 ) - * Move stateful-bundle-ip-192.168.122.133 ( rhel7-4 -> rhel7-3 ) - * Recover stateful-bundle-docker-2 ( rhel7-4 -> rhel7-3 ) - * Move stateful-bundle-2 ( rhel7-4 -> rhel7-3 ) - * Recover bundled:2 ( Slave stateful-bundle-2 ) - * Restart lxc2 ( rhel7-3 ) due to required container2 start - -Executing cluster transition: - * Pseudo action: Fencing_stop_0 - * Resource action: rsc1 cancel=11000 on rhel7-3 - * Pseudo action: rsc1-clone_demote_0 - * Resource action: rsc2 cancel=11000 on rhel7-3 - * Pseudo action: rsc2-master_demote_0 - * Pseudo action: lxc-ms-master_demote_0 - * Resource action: stateful-bundle-0 stop on rhel7-5 - * Pseudo action: stateful-bundle-2_stop_0 - * Resource action: lxc2 stop on rhel7-3 - * Pseudo action: stateful-bundle_demote_0 - * Fencing remote-rhel7-2 (reboot) - * Fencing rhel7-4 (reboot) - * Pseudo action: rsc1_demote_0 - * Pseudo action: rsc1-clone_demoted_0 - * Pseudo action: rsc2_demote_0 - * Pseudo action: rsc2-master_demoted_0 - * Resource action: container2 stop on rhel7-3 - * Pseudo action: stateful-bundle-master_demote_0 - * Pseudo action: stonith-stateful-bundle-2-reboot on stateful-bundle-2 - * Pseudo action: stonith-lxc2-reboot on lxc2 - * Resource action: Fencing start on rhel7-5 - * Pseudo action: rsc1-clone_stop_0 - * Pseudo action: rsc2-master_stop_0 - * Pseudo action: lxc-ms_demote_0 - * Pseudo action: lxc-ms-master_demoted_0 - * Pseudo action: lxc-ms-master_stop_0 - * Pseudo action: bundled_demote_0 - * Pseudo action: stateful-bundle-master_demoted_0 - * Pseudo action: stateful-bundle_demoted_0 - * Pseudo action: stateful-bundle_stop_0 - * Resource action: Fencing monitor=120000 on rhel7-5 - * Pseudo action: rsc1_stop_0 - * 
Pseudo action: rsc1_stop_0 - * Pseudo action: rsc1_stop_0 - * Pseudo action: rsc1-clone_stopped_0 - * Pseudo action: rsc1-clone_start_0 - * Pseudo action: rsc2_stop_0 - * Pseudo action: rsc2_stop_0 - * Pseudo action: rsc2_stop_0 - * Pseudo action: rsc2-master_stopped_0 - * Pseudo action: rsc2-master_start_0 - * Resource action: remote-rhel7-2 stop on rhel7-1 - * Pseudo action: lxc-ms_stop_0 - * Pseudo action: lxc-ms-master_stopped_0 - * Pseudo action: lxc-ms-master_start_0 - * Resource action: stateful-bundle-docker-0 stop on rhel7-5 - * Pseudo action: stateful-bundle-docker-2_stop_0 - * Pseudo action: stonith-stateful-bundle-0-reboot on stateful-bundle-0 - * Resource action: remote-rhel7-2 start on rhel7-1 - * Resource action: remote-rhel7-2 monitor=60000 on rhel7-1 - * Resource action: container2 start on rhel7-3 - * Resource action: container2 monitor=20000 on rhel7-3 - * Pseudo action: stateful-bundle-master_stop_0 - * Pseudo action: stateful-bundle-ip-192.168.122.133_stop_0 - * Resource action: lxc2 start on rhel7-3 - * Resource action: lxc2 monitor=30000 on rhel7-3 - * Resource action: rsc1 start on lxc2 - * Pseudo action: rsc1-clone_running_0 - * Resource action: rsc2 start on lxc2 - * Pseudo action: rsc2-master_running_0 - * Resource action: lxc-ms start on lxc2 - * Pseudo action: lxc-ms-master_running_0 - * Pseudo action: bundled_stop_0 - * Resource action: stateful-bundle-ip-192.168.122.133 start on rhel7-3 - * Resource action: rsc1 monitor=11000 on lxc2 - * Pseudo action: rsc1-clone_promote_0 - * Resource action: rsc2 monitor=11000 on lxc2 - * Pseudo action: rsc2-master_promote_0 - * Pseudo action: lxc-ms-master_promote_0 - * Pseudo action: bundled_stop_0 - * Pseudo action: stateful-bundle-master_stopped_0 - * Resource action: stateful-bundle-ip-192.168.122.133 monitor=60000 on rhel7-3 - * Pseudo action: stateful-bundle_stopped_0 - * Pseudo action: stateful-bundle_start_0 - * Resource action: rsc1 promote on rhel7-3 - * Pseudo action: rsc1-clone_promoted_0 - * Resource action: rsc2 promote on rhel7-3 - * Pseudo action: rsc2-master_promoted_0 - * Resource action: lxc-ms promote on lxc2 - * Pseudo action: lxc-ms-master_promoted_0 - * Pseudo action: stateful-bundle-master_start_0 - * Resource action: stateful-bundle-docker-0 start on rhel7-5 - * Resource action: stateful-bundle-docker-0 monitor=60000 on rhel7-5 - * Resource action: stateful-bundle-0 start on rhel7-5 - * Resource action: stateful-bundle-0 monitor=30000 on rhel7-5 - * Resource action: stateful-bundle-docker-2 start on rhel7-3 - * Resource action: stateful-bundle-2 start on rhel7-3 - * Resource action: rsc1 monitor=10000 on rhel7-3 - * Resource action: rsc2 monitor=10000 on rhel7-3 - * Resource action: lxc-ms monitor=10000 on lxc2 - * Resource action: bundled start on stateful-bundle-0 - * Resource action: bundled start on stateful-bundle-2 - * Pseudo action: stateful-bundle-master_running_0 - * Resource action: stateful-bundle-docker-2 monitor=60000 on rhel7-3 - * Resource action: stateful-bundle-2 monitor=30000 on rhel7-3 - * Pseudo action: stateful-bundle_running_0 - * Resource action: bundled monitor=11000 on stateful-bundle-2 - * Pseudo action: stateful-bundle_promote_0 - * Pseudo action: stateful-bundle-master_promote_0 - * Resource action: bundled promote on stateful-bundle-0 - * Pseudo action: stateful-bundle-master_promoted_0 - * Pseudo action: stateful-bundle_promoted_0 - * Resource action: bundled monitor=10000 on stateful-bundle-0 -Using the original execution date of: 2020-06-16 19:23:21Z - -Revised 
cluster status: -Online: [ rhel7-1 rhel7-3 rhel7-5 ] -OFFLINE: [ rhel7-4 ] -RemoteOnline: [ remote-rhel7-2 ] -GuestOnline: [ lxc1:container1 lxc2:container2 stateful-bundle-0:stateful-bundle-docker-0 stateful-bundle-1:stateful-bundle-docker-1 stateful-bundle-2:stateful-bundle-docker-2 ] - - Fencing (stonith:fence_xvm): Started rhel7-5 - Clone Set: rsc1-clone [rsc1] (promotable) - Masters: [ rhel7-3 ] - Slaves: [ lxc1 lxc2 rhel7-1 rhel7-5 ] - Stopped: [ remote-rhel7-2 rhel7-4 ] - Clone Set: rsc2-master [rsc2] (promotable) - Masters: [ rhel7-3 ] - Slaves: [ lxc1 lxc2 rhel7-1 rhel7-5 ] - Stopped: [ remote-rhel7-2 rhel7-4 ] - remote-rhel7-2 (ocf::pacemaker:remote): Started rhel7-1 - container1 (ocf::heartbeat:VirtualDomain): Started rhel7-3 - container2 (ocf::heartbeat:VirtualDomain): Started rhel7-3 - Clone Set: lxc-ms-master [lxc-ms] (promotable) - Masters: [ lxc2 ] - Slaves: [ lxc1 ] - Container bundle set: stateful-bundle [pcmktest:http] - stateful-bundle-0 (192.168.122.131) (ocf::pacemaker:Stateful): Master rhel7-5 - stateful-bundle-1 (192.168.122.132) (ocf::pacemaker:Stateful): Slave rhel7-1 - stateful-bundle-2 (192.168.122.133) (ocf::pacemaker:Stateful): Slave rhel7-3 - diff --git a/cts/scheduler/on_fail_demote4.xml b/cts/scheduler/on_fail_demote4.xml deleted file mode 100644 index 1082266..0000000 --- a/cts/scheduler/on_fail_demote4.xml +++ /dev/null @@ -1,626 +0,0 @@
diff --git a/cts/scheduler/op-defaults-2.dot b/cts/scheduler/op-defaults-2.dot deleted file mode 100644 index 5c67bd8..0000000 --- a/cts/scheduler/op-defaults-2.dot +++ /dev/null @@ -1,33 +0,0 @@ - digraph "g" { -"dummy-rsc_monitor_0 cluster01" -> "dummy-rsc_start_0 cluster02" [ style = bold] -"dummy-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] -"dummy-rsc_monitor_0 cluster02" -> "dummy-rsc_start_0 cluster02" [ style = bold] -"dummy-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] -"dummy-rsc_monitor_10000 cluster02" [ style=bold color="green" fontcolor="black"] -"dummy-rsc_start_0 cluster02" -> "dummy-rsc_monitor_10000 cluster02" [ style = bold] -"dummy-rsc_start_0 cluster02" [ style=bold color="green" fontcolor="black"] -"fencing_monitor_0 cluster01" -> "fencing_start_0 cluster01" [ style = bold] -"fencing_monitor_0 cluster01" [ style=bold 
color="green" fontcolor="black"] -"fencing_monitor_0 cluster02" -> "fencing_start_0 cluster01" [ style = bold] -"fencing_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] -"fencing_start_0 cluster01" [ style=bold color="green" fontcolor="black"] -"ip-rsc_monitor_0 cluster01" -> "ip-rsc_start_0 cluster02" [ style = bold] -"ip-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] -"ip-rsc_monitor_0 cluster02" -> "ip-rsc_start_0 cluster02" [ style = bold] -"ip-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] -"ip-rsc_monitor_20000 cluster02" [ style=bold color="green" fontcolor="black"] -"ip-rsc_start_0 cluster02" -> "ip-rsc_monitor_20000 cluster02" [ style = bold] -"ip-rsc_start_0 cluster02" [ style=bold color="green" fontcolor="black"] -"ping-rsc-ping_monitor_0 cluster01" -> "ping-rsc-ping_start_0 cluster01" [ style = bold] -"ping-rsc-ping_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] -"ping-rsc-ping_monitor_0 cluster02" -> "ping-rsc-ping_start_0 cluster01" [ style = bold] -"ping-rsc-ping_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] -"ping-rsc-ping_start_0 cluster01" [ style=bold color="green" fontcolor="black"] -"rsc-passes_monitor_0 cluster01" -> "rsc-passes_start_0 cluster01" [ style = bold] -"rsc-passes_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] -"rsc-passes_monitor_0 cluster02" -> "rsc-passes_start_0 cluster01" [ style = bold] -"rsc-passes_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] -"rsc-passes_monitor_10000 cluster01" [ style=bold color="green" fontcolor="black"] -"rsc-passes_start_0 cluster01" -> "rsc-passes_monitor_10000 cluster01" [ style = bold] -"rsc-passes_start_0 cluster01" [ style=bold color="green" fontcolor="black"] -} diff --git a/cts/scheduler/op-defaults-2.exp b/cts/scheduler/op-defaults-2.exp deleted file mode 100644 index 4324fde..0000000 --- a/cts/scheduler/op-defaults-2.exp +++ /dev/null @@ -1,211 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/cts/scheduler/op-defaults-2.scores b/cts/scheduler/op-defaults-2.scores deleted file mode 100644 index 180c8b4..0000000 --- a/cts/scheduler/op-defaults-2.scores +++ /dev/null @@ -1,11 +0,0 @@ -Allocation scores: -pcmk__native_allocate: dummy-rsc allocation score on cluster01: 0 -pcmk__native_allocate: dummy-rsc allocation score on cluster02: 0 -pcmk__native_allocate: fencing allocation score on cluster01: 0 -pcmk__native_allocate: fencing allocation score on cluster02: 0 -pcmk__native_allocate: ip-rsc allocation score on cluster01: 0 -pcmk__native_allocate: ip-rsc allocation score on cluster02: 0 -pcmk__native_allocate: ping-rsc-ping allocation score on cluster01: 0 -pcmk__native_allocate: ping-rsc-ping allocation score on cluster02: 0 -pcmk__native_allocate: rsc-passes allocation score on cluster01: 0 -pcmk__native_allocate: rsc-passes allocation score on cluster02: 0 diff --git a/cts/scheduler/op-defaults-2.summary b/cts/scheduler/op-defaults-2.summary deleted file mode 100644 index 16a68be..0000000 --- a/cts/scheduler/op-defaults-2.summary +++ /dev/null @@ -1,46 +0,0 @@ - -Current cluster 
status: -Online: [ cluster01 cluster02 ] - - fencing (stonith:fence_xvm): Stopped - ip-rsc (ocf::heartbeat:IPaddr2): Stopped - rsc-passes (ocf::heartbeat:IPaddr2): Stopped - dummy-rsc (ocf::pacemaker:Dummy): Stopped - ping-rsc-ping (ocf::pacemaker:ping): Stopped - -Transition Summary: - * Start fencing ( cluster01 ) - * Start ip-rsc ( cluster02 ) - * Start rsc-passes ( cluster01 ) - * Start dummy-rsc ( cluster02 ) - * Start ping-rsc-ping ( cluster01 ) - -Executing cluster transition: - * Resource action: fencing monitor on cluster02 - * Resource action: fencing monitor on cluster01 - * Resource action: ip-rsc monitor on cluster02 - * Resource action: ip-rsc monitor on cluster01 - * Resource action: rsc-passes monitor on cluster02 - * Resource action: rsc-passes monitor on cluster01 - * Resource action: dummy-rsc monitor on cluster02 - * Resource action: dummy-rsc monitor on cluster01 - * Resource action: ping-rsc-ping monitor on cluster02 - * Resource action: ping-rsc-ping monitor on cluster01 - * Resource action: fencing start on cluster01 - * Resource action: ip-rsc start on cluster02 - * Resource action: rsc-passes start on cluster01 - * Resource action: dummy-rsc start on cluster02 - * Resource action: ping-rsc-ping start on cluster01 - * Resource action: ip-rsc monitor=20000 on cluster02 - * Resource action: rsc-passes monitor=10000 on cluster01 - * Resource action: dummy-rsc monitor=10000 on cluster02 - -Revised cluster status: -Online: [ cluster01 cluster02 ] - - fencing (stonith:fence_xvm): Started cluster01 - ip-rsc (ocf::heartbeat:IPaddr2): Started cluster02 - rsc-passes (ocf::heartbeat:IPaddr2): Started cluster01 - dummy-rsc (ocf::pacemaker:Dummy): Started cluster02 - ping-rsc-ping (ocf::pacemaker:ping): Started cluster01 - diff --git a/cts/scheduler/op-defaults-2.xml b/cts/scheduler/op-defaults-2.xml deleted file mode 100644 index 9f3c288..0000000 --- a/cts/scheduler/op-defaults-2.xml +++ /dev/null @@ -1,73 +0,0 @@
diff --git a/cts/scheduler/op-defaults-3.dot b/cts/scheduler/op-defaults-3.dot deleted file mode 100644 index 382f630..0000000 --- a/cts/scheduler/op-defaults-3.dot +++ /dev/null @@ -1,14 +0,0 @@ - digraph "g" { -"dummy-rsc_monitor_0 cluster01" -> "dummy-rsc_start_0 cluster02" [ style = bold] -"dummy-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] -"dummy-rsc_monitor_0 cluster02" -> "dummy-rsc_start_0 cluster02" [ style = bold] -"dummy-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] -"dummy-rsc_monitor_10000 cluster02" [ style=bold color="green" fontcolor="black"] -"dummy-rsc_start_0 cluster02" -> "dummy-rsc_monitor_10000 cluster02" [ style = bold] -"dummy-rsc_start_0 cluster02" [ style=bold color="green" fontcolor="black"] -"fencing_monitor_0 cluster01" -> "fencing_start_0 cluster01" [ style = bold] -"fencing_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] -"fencing_monitor_0 cluster02" -> "fencing_start_0 cluster01" [ style = bold] -"fencing_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] -"fencing_start_0 cluster01" [ style=bold color="green" fontcolor="black"] -} diff --git a/cts/scheduler/op-defaults-3.exp b/cts/scheduler/op-defaults-3.exp deleted file mode 100644 index 6d567dc..0000000 --- a/cts/scheduler/op-defaults-3.exp +++ /dev/null @@ -1,83 +0,0 @@
diff --git a/cts/scheduler/op-defaults-3.scores b/cts/scheduler/op-defaults-3.scores deleted file mode 100644 index 0a5190a..0000000 --- a/cts/scheduler/op-defaults-3.scores +++ /dev/null @@ -1,5 +0,0 @@ -Allocation scores: -pcmk__native_allocate: dummy-rsc allocation score on cluster01: 0 -pcmk__native_allocate: dummy-rsc allocation score on cluster02: 0 -pcmk__native_allocate: fencing allocation score on cluster01: 0 -pcmk__native_allocate: fencing allocation score on cluster02: 0 diff --git a/cts/scheduler/op-defaults-3.summary b/cts/scheduler/op-defaults-3.summary deleted file mode 100644 index a83eb15..0000000 --- a/cts/scheduler/op-defaults-3.summary +++ /dev/null @@ -1,26 +0,0 @@ - -Current cluster status: -Online: [ cluster01 cluster02 ] - - fencing (stonith:fence_xvm): Stopped - dummy-rsc (ocf::pacemaker:Dummy): Stopped - -Transition Summary: - * Start fencing ( cluster01 ) - * Start dummy-rsc ( cluster02 ) - -Executing cluster transition: - * Resource action: fencing monitor on cluster02 - * Resource action: fencing monitor on cluster01 - * Resource action: dummy-rsc monitor on cluster02 - * Resource action: dummy-rsc monitor on cluster01 - * Resource action: fencing start on cluster01 - * Resource action: dummy-rsc start on cluster02 - * Resource action: dummy-rsc monitor=10000 on cluster02 - -Revised cluster status: -Online: [ cluster01 cluster02 ] - - fencing (stonith:fence_xvm): Started cluster01 - dummy-rsc (ocf::pacemaker:Dummy): Started cluster02 - diff --git a/cts/scheduler/op-defaults-3.xml b/cts/scheduler/op-defaults-3.xml deleted file mode 100644 index 4a8912e..0000000 --- a/cts/scheduler/op-defaults-3.xml +++ /dev/null @@ -1,54 +0,0 @@
diff --git a/cts/scheduler/op-defaults.dot b/cts/scheduler/op-defaults.dot deleted file mode 100644 index 5536c15..0000000 --- a/cts/scheduler/op-defaults.dot +++ /dev/null @@ -1,33 +0,0 @@ - digraph "g" { -"dummy-rsc_monitor_0 cluster01" -> "dummy-rsc_start_0 cluster02" [ style = bold] -"dummy-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] -"dummy-rsc_monitor_0 cluster02" -> "dummy-rsc_start_0 cluster02" [ style = bold] -"dummy-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] -"dummy-rsc_monitor_60000 cluster02" [ style=bold color="green" fontcolor="black"] -"dummy-rsc_start_0 cluster02" -> "dummy-rsc_monitor_60000 cluster02" [ style = bold] -"dummy-rsc_start_0 cluster02" [ style=bold color="green" fontcolor="black"] -"fencing_monitor_0 cluster01" -> "fencing_start_0 cluster01" [ style = bold] -"fencing_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] -"fencing_monitor_0 cluster02" -> "fencing_start_0 cluster01" [ style = bold] -"fencing_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] -"fencing_start_0 cluster01" [ style=bold color="green" fontcolor="black"] -"ip-rsc2_monitor_0 cluster01" -> "ip-rsc2_start_0 cluster01" [ style = bold] -"ip-rsc2_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] -"ip-rsc2_monitor_0 cluster02" -> "ip-rsc2_start_0 cluster01" [ style = bold] -"ip-rsc2_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] -"ip-rsc2_monitor_10000 cluster01" [ style=bold color="green" fontcolor="black"] -"ip-rsc2_start_0 cluster01" -> "ip-rsc2_monitor_10000 cluster01" [ style = bold] -"ip-rsc2_start_0 cluster01" [ style=bold 
color="green" fontcolor="black"] -"ip-rsc_monitor_0 cluster01" -> "ip-rsc_start_0 cluster02" [ style = bold] -"ip-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] -"ip-rsc_monitor_0 cluster02" -> "ip-rsc_start_0 cluster02" [ style = bold] -"ip-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] -"ip-rsc_monitor_20000 cluster02" [ style=bold color="green" fontcolor="black"] -"ip-rsc_start_0 cluster02" -> "ip-rsc_monitor_20000 cluster02" [ style = bold] -"ip-rsc_start_0 cluster02" [ style=bold color="green" fontcolor="black"] -"ping-rsc-ping_monitor_0 cluster01" -> "ping-rsc-ping_start_0 cluster01" [ style = bold] -"ping-rsc-ping_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] -"ping-rsc-ping_monitor_0 cluster02" -> "ping-rsc-ping_start_0 cluster01" [ style = bold] -"ping-rsc-ping_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] -"ping-rsc-ping_start_0 cluster01" [ style=bold color="green" fontcolor="black"] -} diff --git a/cts/scheduler/op-defaults.exp b/cts/scheduler/op-defaults.exp deleted file mode 100644 index b81eacb..0000000 --- a/cts/scheduler/op-defaults.exp +++ /dev/null @@ -1,211 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/cts/scheduler/op-defaults.scores b/cts/scheduler/op-defaults.scores deleted file mode 100644 index 1c622f0..0000000 --- a/cts/scheduler/op-defaults.scores +++ /dev/null @@ -1,11 +0,0 @@ -Allocation scores: -pcmk__native_allocate: dummy-rsc allocation score on cluster01: 0 -pcmk__native_allocate: dummy-rsc allocation score on cluster02: 0 -pcmk__native_allocate: fencing allocation score on cluster01: 0 -pcmk__native_allocate: fencing allocation score on cluster02: 0 -pcmk__native_allocate: ip-rsc allocation score on cluster01: 0 -pcmk__native_allocate: ip-rsc allocation score on cluster02: 0 -pcmk__native_allocate: ip-rsc2 allocation score on cluster01: 0 -pcmk__native_allocate: ip-rsc2 allocation score on cluster02: 0 -pcmk__native_allocate: ping-rsc-ping allocation score on cluster01: 0 -pcmk__native_allocate: ping-rsc-ping allocation score on cluster02: 0 diff --git a/cts/scheduler/op-defaults.summary b/cts/scheduler/op-defaults.summary deleted file mode 100644 index b580939..0000000 --- a/cts/scheduler/op-defaults.summary +++ /dev/null @@ -1,46 +0,0 @@ - -Current cluster status: -Online: [ cluster01 cluster02 ] - - fencing (stonith:fence_xvm): Stopped - ip-rsc (ocf::heartbeat:IPaddr2): Stopped - ip-rsc2 (ocf::heartbeat:IPaddr2): Stopped - dummy-rsc (ocf::pacemaker:Dummy): Stopped - ping-rsc-ping (ocf::pacemaker:ping): Stopped - -Transition Summary: - * Start fencing ( cluster01 ) - * Start ip-rsc ( cluster02 ) - * Start ip-rsc2 ( cluster01 ) - * Start dummy-rsc ( cluster02 ) - * Start ping-rsc-ping ( cluster01 ) - -Executing cluster transition: - * Resource action: fencing monitor on cluster02 - * Resource action: fencing monitor on cluster01 - * Resource action: ip-rsc monitor on cluster02 - * Resource action: ip-rsc monitor on cluster01 - * Resource action: ip-rsc2 monitor on cluster02 - * Resource action: ip-rsc2 monitor on cluster01 - * Resource action: dummy-rsc monitor on cluster02 - * 
Resource action: dummy-rsc monitor on cluster01 - * Resource action: ping-rsc-ping monitor on cluster02 - * Resource action: ping-rsc-ping monitor on cluster01 - * Resource action: fencing start on cluster01 - * Resource action: ip-rsc start on cluster02 - * Resource action: ip-rsc2 start on cluster01 - * Resource action: dummy-rsc start on cluster02 - * Resource action: ping-rsc-ping start on cluster01 - * Resource action: ip-rsc monitor=20000 on cluster02 - * Resource action: ip-rsc2 monitor=10000 on cluster01 - * Resource action: dummy-rsc monitor=60000 on cluster02 - -Revised cluster status: -Online: [ cluster01 cluster02 ] - - fencing (stonith:fence_xvm): Started cluster01 - ip-rsc (ocf::heartbeat:IPaddr2): Started cluster02 - ip-rsc2 (ocf::heartbeat:IPaddr2): Started cluster01 - dummy-rsc (ocf::pacemaker:Dummy): Started cluster02 - ping-rsc-ping (ocf::pacemaker:ping): Started cluster01 - diff --git a/cts/scheduler/op-defaults.xml b/cts/scheduler/op-defaults.xml deleted file mode 100644 index ae3b248..0000000 --- a/cts/scheduler/op-defaults.xml +++ /dev/null @@ -1,87 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/cts/scheduler/params-3.dot b/cts/scheduler/params-3.dot deleted file mode 100644 index d681ee5..0000000 --- a/cts/scheduler/params-3.dot +++ /dev/null @@ -1,28 +0,0 @@ - digraph "g" { -"Cancel rsc_c001n02_monitor_5000 c001n02" [ style=bold color="green" fontcolor="black"] -"DcIPaddr_monitor_0 c001n01" -> "DcIPaddr_start_0 c001n02" [ style = bold] -"DcIPaddr_monitor_0 c001n01" [ style=bold color="green" fontcolor="black"] -"DcIPaddr_monitor_0 c001n03" -> "DcIPaddr_start_0 c001n02" [ style = bold] -"DcIPaddr_monitor_0 c001n03" [ style=bold color="green" fontcolor="black"] -"DcIPaddr_monitor_0 c001n08" -> "DcIPaddr_start_0 c001n02" [ style = bold] -"DcIPaddr_monitor_0 c001n08" [ style=bold color="green" fontcolor="black"] -"DcIPaddr_monitor_5000 c001n02" [ style=bold color="green" fontcolor="black"] -"DcIPaddr_start_0 c001n02" -> "DcIPaddr_monitor_5000 c001n02" [ style = bold] -"DcIPaddr_start_0 c001n02" [ style=bold color="green" fontcolor="black"] -"DcIPaddr_stop_0 c001n02" -> "DcIPaddr_start_0 c001n02" [ style = bold] -"DcIPaddr_stop_0 c001n02" [ style=bold color="green" fontcolor="black"] -"rsc_c001n01_monitor_0 c001n02" [ style=bold color="green" fontcolor="black"] -"rsc_c001n01_monitor_0 c001n03" [ style=bold color="green" fontcolor="black"] -"rsc_c001n01_monitor_0 c001n08" [ style=bold color="green" fontcolor="black"] -"rsc_c001n02_monitor_0 c001n01" [ style=bold color="green" fontcolor="black"] -"rsc_c001n02_monitor_0 c001n03" [ style=bold color="green" fontcolor="black"] -"rsc_c001n02_monitor_0 c001n08" [ style=bold color="green" fontcolor="black"] -"rsc_c001n02_monitor_6000 c001n02" [ style=bold color="green" fontcolor="black"] -"rsc_c001n03_monitor_0 c001n01" [ style=bold color="green" fontcolor="black"] -"rsc_c001n03_monitor_0 c001n02" [ style=bold color="green" fontcolor="black"] -"rsc_c001n03_monitor_0 c001n08" [ style=bold color="green" fontcolor="black"] -"rsc_c001n08_monitor_0 c001n01" [ style=bold color="green" fontcolor="black"] -"rsc_c001n08_monitor_0 c001n02" [ style=bold color="green" fontcolor="black"] -"rsc_c001n08_monitor_0 c001n03" [ style=bold color="green" fontcolor="black"] -"rsc_c001n08_monitor_5000 c001n08" [ style=bold color="green" fontcolor="black"] -} diff --git a/cts/scheduler/params-3.exp 
b/cts/scheduler/params-3.exp deleted file mode 100644 index 5cccdec..0000000 --- a/cts/scheduler/params-3.exp +++ /dev/null @@ -1,208 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/cts/scheduler/params-3.scores b/cts/scheduler/params-3.scores deleted file mode 100644 index 00417ea..0000000 --- a/cts/scheduler/params-3.scores +++ /dev/null @@ -1,21 +0,0 @@ -Allocation scores: -pcmk__native_allocate: DcIPaddr allocation score on c001n01: -INFINITY -pcmk__native_allocate: DcIPaddr allocation score on c001n02: 0 -pcmk__native_allocate: DcIPaddr allocation score on c001n03: -INFINITY -pcmk__native_allocate: DcIPaddr allocation score on c001n08: -INFINITY -pcmk__native_allocate: rsc_c001n01 allocation score on c001n01: 100 -pcmk__native_allocate: rsc_c001n01 allocation score on c001n02: 0 -pcmk__native_allocate: rsc_c001n01 allocation score on c001n03: 0 -pcmk__native_allocate: rsc_c001n01 allocation score on c001n08: 0 -pcmk__native_allocate: rsc_c001n02 allocation score on c001n01: 0 -pcmk__native_allocate: rsc_c001n02 allocation score on c001n02: 100 -pcmk__native_allocate: rsc_c001n02 allocation score on c001n03: 0 -pcmk__native_allocate: rsc_c001n02 allocation score on c001n08: 0 -pcmk__native_allocate: rsc_c001n03 allocation score on c001n01: 0 -pcmk__native_allocate: rsc_c001n03 allocation score on c001n02: 0 -pcmk__native_allocate: rsc_c001n03 allocation score on c001n03: 100 -pcmk__native_allocate: rsc_c001n03 allocation score on c001n08: 0 -pcmk__native_allocate: rsc_c001n08 allocation score on c001n01: 0 -pcmk__native_allocate: rsc_c001n08 allocation score on c001n02: 0 -pcmk__native_allocate: rsc_c001n08 allocation score on c001n03: 0 -pcmk__native_allocate: rsc_c001n08 allocation score on c001n08: 100 diff --git a/cts/scheduler/params-3.summary b/cts/scheduler/params-3.summary deleted file mode 100644 index 257f8ba..0000000 --- a/cts/scheduler/params-3.summary +++ /dev/null @@ -1,45 +0,0 @@ - -Current cluster status: -Online: [ c001n01 c001n02 c001n03 c001n08 ] - - DcIPaddr (ocf::heartbeat:IPaddr): Starting c001n02 - rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 - rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 - rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 - rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 - -Transition Summary: - * Restart DcIPaddr ( c001n02 ) - -Executing cluster transition: - * Resource action: DcIPaddr monitor on c001n08 - * Resource action: DcIPaddr monitor on c001n03 - * Resource action: DcIPaddr monitor on c001n01 - * Resource action: DcIPaddr stop on c001n02 - * Resource action: rsc_c001n08 monitor on c001n03 - * Resource action: rsc_c001n08 monitor on c001n02 - * Resource action: rsc_c001n08 monitor on c001n01 - * Resource action: rsc_c001n08 monitor=5000 on c001n08 - * Resource action: rsc_c001n02 monitor=6000 on c001n02 - * Resource action: rsc_c001n02 monitor on c001n08 - * Resource action: rsc_c001n02 monitor on c001n03 - * Resource action: rsc_c001n02 monitor on c001n01 - * Resource action: rsc_c001n02 cancel=5000 on c001n02 - * Resource action: rsc_c001n03 monitor on c001n08 - * Resource action: rsc_c001n03 monitor on c001n02 - * Resource 
action: rsc_c001n03 monitor on c001n01 - * Resource action: rsc_c001n01 monitor on c001n08 - * Resource action: rsc_c001n01 monitor on c001n03 - * Resource action: rsc_c001n01 monitor on c001n02 - * Resource action: DcIPaddr start on c001n02 - * Resource action: DcIPaddr monitor=5000 on c001n02 - -Revised cluster status: -Online: [ c001n01 c001n02 c001n03 c001n08 ] - - DcIPaddr (ocf::heartbeat:IPaddr): Started c001n02 - rsc_c001n08 (ocf::heartbeat:IPaddr): Started c001n08 - rsc_c001n02 (ocf::heartbeat:IPaddr): Started c001n02 - rsc_c001n03 (ocf::heartbeat:IPaddr): Started c001n03 - rsc_c001n01 (ocf::heartbeat:IPaddr): Started c001n01 - diff --git a/cts/scheduler/params-3.xml b/cts/scheduler/params-3.xml deleted file mode 100644 index ee6e157..0000000 --- a/cts/scheduler/params-3.xml +++ /dev/null @@ -1,154 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/cts/scheduler/rsc-defaults-2.dot b/cts/scheduler/rsc-defaults-2.dot deleted file mode 100644 index b43c5e6..0000000 --- a/cts/scheduler/rsc-defaults-2.dot +++ /dev/null @@ -1,11 +0,0 @@ - digraph "g" { -"dummy-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] -"dummy-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] -"fencing_monitor_0 cluster01" -> "fencing_start_0 cluster01" [ style = bold] -"fencing_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] -"fencing_monitor_0 cluster02" -> "fencing_start_0 cluster01" [ style = bold] -"fencing_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] -"fencing_start_0 cluster01" [ style=bold color="green" fontcolor="black"] -"ping-rsc-ping_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] -"ping-rsc-ping_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] -} diff --git a/cts/scheduler/rsc-defaults-2.exp b/cts/scheduler/rsc-defaults-2.exp deleted file mode 100644 index e9e1b5f..0000000 --- a/cts/scheduler/rsc-defaults-2.exp +++ /dev/null @@ -1,72 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/cts/scheduler/rsc-defaults-2.scores b/cts/scheduler/rsc-defaults-2.scores deleted file mode 100644 index 4b70f54..0000000 --- a/cts/scheduler/rsc-defaults-2.scores +++ /dev/null @@ -1,7 +0,0 @@ -Allocation scores: -pcmk__native_allocate: dummy-rsc allocation score on cluster01: 0 -pcmk__native_allocate: dummy-rsc allocation score on cluster02: 0 -pcmk__native_allocate: fencing allocation score on cluster01: 0 -pcmk__native_allocate: fencing allocation score on cluster02: 0 -pcmk__native_allocate: ping-rsc-ping allocation score on cluster01: 0 -pcmk__native_allocate: ping-rsc-ping allocation score on cluster02: 0 diff --git a/cts/scheduler/rsc-defaults-2.summary b/cts/scheduler/rsc-defaults-2.summary deleted file mode 100644 index 46a2a2d..0000000 --- a/cts/scheduler/rsc-defaults-2.summary +++ /dev/null @@ -1,27 +0,0 @@ - -Current cluster status: -Online: [ cluster01 cluster02 ] - - fencing (stonith:fence_xvm): Stopped - dummy-rsc (ocf::pacemaker:Dummy): Stopped (unmanaged) - ping-rsc-ping (ocf::pacemaker:ping): Stopped (unmanaged) - -Transition Summary: - * Start fencing ( cluster01 ) - -Executing cluster transition: - 
* Resource action: fencing monitor on cluster02 - * Resource action: fencing monitor on cluster01 - * Resource action: dummy-rsc monitor on cluster02 - * Resource action: dummy-rsc monitor on cluster01 - * Resource action: ping-rsc-ping monitor on cluster02 - * Resource action: ping-rsc-ping monitor on cluster01 - * Resource action: fencing start on cluster01 - -Revised cluster status: -Online: [ cluster01 cluster02 ] - - fencing (stonith:fence_xvm): Started cluster01 - dummy-rsc (ocf::pacemaker:Dummy): Stopped (unmanaged) - ping-rsc-ping (ocf::pacemaker:ping): Stopped (unmanaged) - diff --git a/cts/scheduler/rsc-defaults-2.xml b/cts/scheduler/rsc-defaults-2.xml deleted file mode 100644 index a160fae..0000000 --- a/cts/scheduler/rsc-defaults-2.xml +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/cts/scheduler/rsc-defaults.dot b/cts/scheduler/rsc-defaults.dot deleted file mode 100644 index d776614..0000000 --- a/cts/scheduler/rsc-defaults.dot +++ /dev/null @@ -1,18 +0,0 @@ - digraph "g" { -"dummy-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] -"dummy-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] -"fencing_monitor_0 cluster01" -> "fencing_start_0 cluster01" [ style = bold] -"fencing_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] -"fencing_monitor_0 cluster02" -> "fencing_start_0 cluster01" [ style = bold] -"fencing_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] -"fencing_start_0 cluster01" [ style=bold color="green" fontcolor="black"] -"ip-rsc2_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] -"ip-rsc2_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] -"ip-rsc_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] -"ip-rsc_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] -"ping-rsc-ping_monitor_0 cluster01" -> "ping-rsc-ping_start_0 cluster02" [ style = bold] -"ping-rsc-ping_monitor_0 cluster01" [ style=bold color="green" fontcolor="black"] -"ping-rsc-ping_monitor_0 cluster02" -> "ping-rsc-ping_start_0 cluster02" [ style = bold] -"ping-rsc-ping_monitor_0 cluster02" [ style=bold color="green" fontcolor="black"] -"ping-rsc-ping_start_0 cluster02" [ style=bold color="green" fontcolor="black"] -} diff --git a/cts/scheduler/rsc-defaults.exp b/cts/scheduler/rsc-defaults.exp deleted file mode 100644 index 4aec360..0000000 --- a/cts/scheduler/rsc-defaults.exp +++ /dev/null @@ -1,124 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/cts/scheduler/rsc-defaults.scores b/cts/scheduler/rsc-defaults.scores deleted file mode 100644 index e7f1bab..0000000 --- a/cts/scheduler/rsc-defaults.scores +++ /dev/null @@ -1,11 +0,0 @@ -Allocation scores: -pcmk__native_allocate: dummy-rsc allocation score on cluster01: 0 -pcmk__native_allocate: dummy-rsc allocation score on cluster02: 0 -pcmk__native_allocate: fencing allocation score on cluster01: 0 -pcmk__native_allocate: fencing allocation score on cluster02: 0 -pcmk__native_allocate: ip-rsc allocation score on cluster01: -INFINITY -pcmk__native_allocate: ip-rsc allocation score on cluster02: -INFINITY -pcmk__native_allocate: ip-rsc2 allocation score on cluster01: -INFINITY -pcmk__native_allocate: 
ip-rsc2 allocation score on cluster02: -INFINITY -pcmk__native_allocate: ping-rsc-ping allocation score on cluster01: 0 -pcmk__native_allocate: ping-rsc-ping allocation score on cluster02: 0 diff --git a/cts/scheduler/rsc-defaults.summary b/cts/scheduler/rsc-defaults.summary deleted file mode 100644 index 0066f2e..0000000 --- a/cts/scheduler/rsc-defaults.summary +++ /dev/null @@ -1,38 +0,0 @@ -2 of 5 resource instances DISABLED and 0 BLOCKED from further action due to failure - -Current cluster status: -Online: [ cluster01 cluster02 ] - - fencing (stonith:fence_xvm): Stopped - ip-rsc (ocf::heartbeat:IPaddr2): Stopped (disabled) - ip-rsc2 (ocf::heartbeat:IPaddr2): Stopped (disabled) - dummy-rsc (ocf::pacemaker:Dummy): Stopped (unmanaged) - ping-rsc-ping (ocf::pacemaker:ping): Stopped - -Transition Summary: - * Start fencing ( cluster01 ) - * Start ping-rsc-ping ( cluster02 ) - -Executing cluster transition: - * Resource action: fencing monitor on cluster02 - * Resource action: fencing monitor on cluster01 - * Resource action: ip-rsc monitor on cluster02 - * Resource action: ip-rsc monitor on cluster01 - * Resource action: ip-rsc2 monitor on cluster02 - * Resource action: ip-rsc2 monitor on cluster01 - * Resource action: dummy-rsc monitor on cluster02 - * Resource action: dummy-rsc monitor on cluster01 - * Resource action: ping-rsc-ping monitor on cluster02 - * Resource action: ping-rsc-ping monitor on cluster01 - * Resource action: fencing start on cluster01 - * Resource action: ping-rsc-ping start on cluster02 - -Revised cluster status: -Online: [ cluster01 cluster02 ] - - fencing (stonith:fence_xvm): Started cluster01 - ip-rsc (ocf::heartbeat:IPaddr2): Stopped (disabled) - ip-rsc2 (ocf::heartbeat:IPaddr2): Stopped (disabled) - dummy-rsc (ocf::pacemaker:Dummy): Stopped (unmanaged) - ping-rsc-ping (ocf::pacemaker:ping): Started cluster02 - diff --git a/cts/scheduler/rsc-defaults.xml b/cts/scheduler/rsc-defaults.xml deleted file mode 100644 index 38cae8b..0000000 --- a/cts/scheduler/rsc-defaults.xml +++ /dev/null @@ -1,78 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/daemons/attrd/attrd_utils.c b/daemons/attrd/attrd_utils.c index c311ddc..b60b452 100644 --- a/daemons/attrd/attrd_utils.c +++ b/daemons/attrd/attrd_utils.c @@ -17,7 +17,7 @@ #include #include -#include +#include #include #include "pacemaker-attrd.h" diff --git a/daemons/attrd/pacemaker-attrd.c b/daemons/attrd/pacemaker-attrd.c index 0e944ed..61e5493 100644 --- a/daemons/attrd/pacemaker-attrd.c +++ b/daemons/attrd/pacemaker-attrd.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include #include diff --git a/daemons/based/based_messages.c b/daemons/based/based_messages.c index af0a3a2..4438e28 100644 --- a/daemons/based/based_messages.c +++ b/daemons/based/based_messages.c @@ -24,7 +24,7 @@ #include #include -#include +#include #include #include diff --git a/daemons/based/based_remote.c b/daemons/based/based_remote.c index 70261c3..ca75b73 100644 --- a/daemons/based/based_remote.c +++ b/daemons/based/based_remote.c @@ -26,7 +26,7 @@ #include #include -#include +#include #include #include #include diff --git a/daemons/based/pacemaker-based.h b/daemons/based/pacemaker-based.h index c88ce7c..0d7a5b9 100644 --- a/daemons/based/pacemaker-based.h +++ b/daemons/based/pacemaker-based.h @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include diff --git 
diff --git a/daemons/controld/controld_control.c b/daemons/controld/controld_control.c
index 059eb7b..1ddcada 100644
--- a/daemons/controld/controld_control.c
+++ b/daemons/controld/controld_control.c
@@ -18,7 +18,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
@@ -626,7 +626,7 @@ static pcmk__cluster_option_t crmd_opts[] = {
     // Already documented in libpe_status (other values must be kept identical)
     {
-        "no-quorum-policy", NULL, "enum", "stop, freeze, ignore, demote, suicide",
+        "no-quorum-policy", NULL, "enum", "stop, freeze, ignore, suicide",
         "stop", pcmk__valid_quorum, NULL, NULL
     },
     {
diff --git a/daemons/controld/controld_fsa.h b/daemons/controld/controld_fsa.h
index 28eea56..b76a7d2 100644
--- a/daemons/controld/controld_fsa.h
+++ b/daemons/controld/controld_fsa.h
@@ -16,7 +16,7 @@
 # include
 # include
 # include
-# include
+# include
 
 /*! States the controller can be in */
 enum crmd_fsa_state {
diff --git a/daemons/controld/controld_messages.c b/daemons/controld/controld_messages.c
index a443369..0b063bc 100644
--- a/daemons/controld/controld_messages.c
+++ b/daemons/controld/controld_messages.c
@@ -18,7 +18,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
@@ -375,11 +375,10 @@ relay_message(xmlNode * msg, gboolean originated_locally)
         is_local = 0;
 
     } else if (is_for_crm) {
-        if (safe_str_eq(task, CRM_OP_NODE_INFO)
-            || safe_str_eq(task, PCMK__CONTROLD_CMD_NODES)) {
+        if (safe_str_eq(task, CRM_OP_NODE_INFO)) {
             /* Node info requests do not specify a host, which is normally
              * treated as "all hosts", because the whole point is that the
-             * client may not know the local node name. Always handle these
+             * client doesn't know the local node name. Always handle these
              * requests locally.
              */
             is_local = 1;
@@ -785,42 +784,6 @@ handle_ping(xmlNode *msg)
 }
 
 /*!
- * \brief Handle a PCMK__CONTROLD_CMD_NODES message
- *
- * \return Next FSA input
- */
-static enum crmd_fsa_input
-handle_node_list(xmlNode *request)
-{
-    GHashTableIter iter;
-    crm_node_t *node = NULL;
-    xmlNode *reply = NULL;
-    xmlNode *reply_data = NULL;
-
-    // Create message data for reply
-    reply_data = create_xml_node(NULL, XML_CIB_TAG_NODES);
-    g_hash_table_iter_init(&iter, crm_peer_cache);
-    while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) {
-        xmlNode *xml = create_xml_node(reply_data, XML_CIB_TAG_NODE);
-
-        crm_xml_add_ll(xml, XML_ATTR_ID, (long long) node->id); // uint32_t
-        crm_xml_add(xml, XML_ATTR_UNAME, node->uname);
-        crm_xml_add(xml, XML_NODE_IN_CLUSTER, node->state);
-    }
-
-    // Create and send reply
-    reply = create_reply(request, reply_data);
-    free_xml(reply_data);
-    if (reply) {
-        (void) relay_message(reply, TRUE);
-        free_xml(reply);
-    }
-
-    // Nothing further to do
-    return I_NULL;
-}
-
-/*!
  * \brief Handle a CRM_OP_NODE_INFO request
  *
  * \param[in] msg Message XML
@@ -1117,9 +1080,6 @@ handle_request(xmlNode *stored_msg, enum crmd_fsa_cause cause)
         remote_ra_process_maintenance_nodes(xml);
 
-    } else if (strcmp(op, PCMK__CONTROLD_CMD_NODES) == 0) {
-        return handle_node_list(stored_msg);
-
         /*========== (NOT_DC)-Only Actions ==========*/
     } else if (!AM_I_DC) {
diff --git a/daemons/controld/controld_messages.h b/daemons/controld/controld_messages.h
index 4018deb..db3ade3 100644
--- a/daemons/controld/controld_messages.h
+++ b/daemons/controld/controld_messages.h
@@ -11,7 +11,7 @@
 # define XML_CRM_MESSAGES__H
 # include
-# include
+# include
 # include
 # include
 # include
diff --git a/daemons/controld/controld_te_callbacks.c b/daemons/controld/controld_te_callbacks.c
index 6ddaffe..8506f26 100644
--- a/daemons/controld/controld_te_callbacks.c
+++ b/daemons/controld/controld_te_callbacks.c
@@ -697,8 +697,7 @@ action_timer_callback(gpointer data)
     crm_err("Node %s did not send %s result (via %s) within %dms "
             "(action timeout plus cluster-delay)",
             (on_node? on_node : ""), (task? task : "unknown action"),
-            (via_node? via_node : "controller"),
-            timer->timeout + transition_graph->network_delay);
+            (via_node? via_node : "controller"), timer->timeout);
     print_action(LOG_ERR, "Aborting transition, action lost: ", timer->action);
     timer->action->failed = TRUE;
diff --git a/daemons/execd/execd_alerts.c b/daemons/execd/execd_alerts.c
index a905d1a..2db8a6a 100644
--- a/daemons/execd/execd_alerts.c
+++ b/daemons/execd/execd_alerts.c
@@ -14,7 +14,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
diff --git a/daemons/execd/execd_commands.c b/daemons/execd/execd_commands.c
index 795ee01..4d0e457 100644
--- a/daemons/execd/execd_commands.c
+++ b/daemons/execd/execd_commands.c
@@ -25,7 +25,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include "pacemaker-execd.h"
@@ -507,15 +507,6 @@ send_client_notify(gpointer key, gpointer value, gpointer user_data)
         crm_trace("Skipping notification to client without name");
         return;
     }
-    if (is_set(client->flags, pcmk__client_to_proxy)) {
-        /* We only want to notify clients of the executor IPC API. If we are
-         * running as Pacemaker Remote, we may have clients proxied to other
-         * IPC services in the cluster, so skip those.
-         */
-        crm_trace("Skipping executor API notification to %s IPC client",
-                  client->name);
-        return;
-    }
 
     rc = lrmd_server_send_notify(client, update_msg);
     if (rc == pcmk_rc_ok) {
diff --git a/daemons/execd/pacemaker-execd.c b/daemons/execd/pacemaker-execd.c
index c06da7a..df27d1a 100644
--- a/daemons/execd/pacemaker-execd.c
+++ b/daemons/execd/pacemaker-execd.c
@@ -18,7 +18,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
diff --git a/daemons/execd/pacemaker-execd.h b/daemons/execd/pacemaker-execd.h
index d86894b..7ba3e78 100644
--- a/daemons/execd/pacemaker-execd.h
+++ b/daemons/execd/pacemaker-execd.h
@@ -11,7 +11,7 @@
 # define PACEMAKER_EXECD__H
 # include
-# include
+# include
 # include
 # include
diff --git a/daemons/execd/remoted_proxy.c b/daemons/execd/remoted_proxy.c
index 5c58de4..ef0d108 100644
--- a/daemons/execd/remoted_proxy.c
+++ b/daemons/execd/remoted_proxy.c
@@ -18,7 +18,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
@@ -88,11 +88,6 @@ ipc_proxy_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid, const char *ipc
     client->userdata = strdup(ipc_proxy->id);
     client->name = crm_strdup_printf("proxy-%s-%d-%.8s", ipc_channel, client->pid, client->id);
-
-    /* Allow remote executor to distinguish between proxied local clients and
-     * actual executor API clients
-     */
-    set_bit(client->flags, pcmk__client_to_proxy);
-
     g_hash_table_insert(ipc_clients, client->id, client);
 
     msg = create_xml_node(NULL, T_LRMD_IPC_PROXY);
diff --git a/daemons/fenced/fenced_commands.c b/daemons/fenced/fenced_commands.c
index 8f70dab..859e7b7 100644
--- a/daemons/fenced/fenced_commands.c
+++ b/daemons/fenced/fenced_commands.c
@@ -25,7 +25,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
@@ -2336,8 +2336,22 @@ stonith_fence(xmlNode * msg)
 xmlNode *
 stonith_construct_reply(xmlNode * request, const char *output, xmlNode * data, int rc)
 {
+    int lpc = 0;
     xmlNode *reply = NULL;
+    const char *name = NULL;
+    const char *value = NULL;
+
+    const char *names[] = {
+        F_STONITH_OPERATION,
+        F_STONITH_CALLID,
+        F_STONITH_CLIENTID,
+        F_STONITH_CLIENTNAME,
+        F_STONITH_REMOTE_OP_ID,
+        F_STONITH_CALLOPTS
+    };
+
+    crm_trace("Creating a basic reply");
     reply = create_xml_node(NULL, T_STONITH_REPLY);
     crm_xml_add(reply, "st_origin", __FUNCTION__);
@@ -2345,39 +2359,16 @@ stonith_construct_reply(xmlNode * request, const char *output, xmlNode * data, i
     crm_xml_add(reply, "st_output", output);
     crm_xml_add_int(reply, F_STONITH_RC, rc);
 
-    if (request == NULL) {
-        /* Most likely, this is the result of a stonith operation that was
-         * initiated before we came up. Unfortunately that means we lack enough
-         * information to provide clients with a full result.
-         *
-         * @TODO Maybe synchronize this information at start-up?
-         */
-        crm_warn("Missing request information for client notifications for "
-                 "operation with result %d (initiated before we came up?)", rc);
-
-    } else {
-        const char *name = NULL;
-        const char *value = NULL;
+    CRM_CHECK(request != NULL, crm_warn("Can't create a sane reply"); return reply);
+    for (lpc = 0; lpc < DIMOF(names); lpc++) {
+        name = names[lpc];
+        value = crm_element_value(request, name);
+        crm_xml_add(reply, name, value);
+    }
 
-        const char *names[] = {
-            F_STONITH_OPERATION,
-            F_STONITH_CALLID,
-            F_STONITH_CLIENTID,
-            F_STONITH_CLIENTNAME,
-            F_STONITH_REMOTE_OP_ID,
-            F_STONITH_CALLOPTS
-        };
-
-        crm_trace("Creating a result reply with%s reply output (rc=%d)",
-                  (data? "" : "out"), rc);
-        for (int lpc = 0; lpc < DIMOF(names); lpc++) {
-            name = names[lpc];
-            value = crm_element_value(request, name);
-            crm_xml_add(reply, name, value);
-        }
-        if (data != NULL) {
-            add_message_xml(reply, F_STONITH_CALLDATA, data);
-        }
+    if (data != NULL) {
+        crm_trace("Attaching reply output");
+        add_message_xml(reply, F_STONITH_CALLDATA, data);
     }
     return reply;
 }
diff --git a/daemons/fenced/fenced_history.c b/daemons/fenced/fenced_history.c
index b48662c..710d6fe 100644
--- a/daemons/fenced/fenced_history.c
+++ b/daemons/fenced/fenced_history.c
@@ -16,7 +16,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
diff --git a/daemons/fenced/fenced_remote.c b/daemons/fenced/fenced_remote.c
index 1a449c5..b9501be 100644
--- a/daemons/fenced/fenced_remote.c
+++ b/daemons/fenced/fenced_remote.c
@@ -26,7 +26,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
diff --git a/daemons/fenced/pacemaker-fenced.c b/daemons/fenced/pacemaker-fenced.c
index 6a2935a..450814c 100644
--- a/daemons/fenced/pacemaker-fenced.c
+++ b/daemons/fenced/pacemaker-fenced.c
@@ -24,7 +24,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
diff --git a/daemons/pacemakerd/Makefile.am b/daemons/pacemakerd/Makefile.am
index 4cc8a7c..b01d8ef 100644
--- a/daemons/pacemakerd/Makefile.am
+++ b/daemons/pacemakerd/Makefile.am
@@ -1,5 +1,5 @@
 #
-# Copyright 2004-2020 the Pacemaker project contributors
+# Copyright 2004-2019 the Pacemaker project contributors
 #
 # The version control history for this file may have further details.
 #
@@ -9,6 +9,8 @@
 include $(top_srcdir)/mk/common.mk
 
+if BUILD_CS_SUPPORT
+
 initdir = $(INITDIR)
 init_SCRIPTS = pacemaker
 sbin_PROGRAMS = pacemakerd
@@ -28,9 +30,8 @@ pacemakerd_LDFLAGS = $(LDFLAGS_HARDENED_EXE)
 pacemakerd_LDADD = $(top_builddir)/lib/cluster/libcrmcluster.la $(top_builddir)/lib/common/libcrmcommon.la
 pacemakerd_LDADD += $(CLUSTERLIBS)
-pacemakerd_SOURCES = pacemakerd.c
-if BUILD_CS_SUPPORT
-pacemakerd_SOURCES += pcmkd_corosync.c
+pacemakerd_SOURCES = pacemakerd.c pcmkd_corosync.c
+
 endif
 
 CLEANFILES = $(man8_MANS)
diff --git a/daemons/pacemakerd/pacemaker.sysconfig b/daemons/pacemakerd/pacemaker.sysconfig
index e4a5c4d..c7745d8 100644
--- a/daemons/pacemakerd/pacemaker.sysconfig
+++ b/daemons/pacemakerd/pacemaker.sysconfig
@@ -34,8 +34,9 @@
 # Log all messages from a comma-separated list of functions.
 # PCMK_trace_functions=function1,function2,function3
 
-# Log all messages from a comma-separated list of file names (without path).
-# PCMK_trace_files=file1.c,file2.c
+# Log all messages from a comma-separated list of files (no path).
+# Wildcards are supported, e.g. PCMK_trace_files=prefix*.c
+# PCMK_trace_files=file.c,other.h
 
 # Log all messages matching comma-separated list of formats.
 # PCMK_trace_formats="Sent delete %d"
diff --git a/daemons/pacemakerd/pacemakerd.c b/daemons/pacemakerd/pacemakerd.c
index c888b73..5ed4626 100644
--- a/daemons/pacemakerd/pacemakerd.c
+++ b/daemons/pacemakerd/pacemakerd.c
@@ -13,7 +13,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -25,40 +24,32 @@
 #include /* indirectly: CRM_EX_* */
 #include /* cib_channel_ro */
 #include
-#include
+#include
 #include
 #include
 #include
+#include /* PCMK__SPECIAL_PID*, ... */
+
+#ifdef SUPPORT_COROSYNC
+#include
+#endif
+
 #include
 #include
 
+static gboolean pcmk_quorate = FALSE;
 static gboolean fatal_error = FALSE;
 static GMainLoop *mainloop = NULL;
 static bool global_keep_tracking = false;
 
 #define PCMK_PROCESS_CHECK_INTERVAL 5
 
+static const char *local_name = NULL;
+static uint32_t local_nodeid = 0;
 static crm_trigger_t *shutdown_trigger = NULL;
-static crm_trigger_t *startup_trigger = NULL;
 static const char *pid_file = PCMK_RUN_DIR "/pacemaker.pid";
 
-/* state we report when asked via pacemakerd-api status-ping */
-static const char *pacemakerd_state = XML_PING_ATTR_PACEMAKERDSTATE_INIT;
-static gboolean running_with_sbd = FALSE; /* local copy */
-/* When contacted via pacemakerd-api by a client having sbd in
- * the name we assume it is sbd-daemon which wants to know
- * if pacemakerd shutdown gracefully.
- * Thus when everything is shutdown properly pacemakerd
- * waits till it has reported the graceful completion of
- * shutdown to sbd and just when sbd-client closes the
- * connection we can assume that the report has arrived
- * properly so that pacemakerd can finally exit.
- * Following two variables are used to track that handshake.
- */
-static unsigned int shutdown_complete_state_reported_to = 0;
-static gboolean shutdown_complete_state_reported_client_closed = FALSE;
-
 typedef struct pcmk_child_s {
     pid_t pid;
     long flag;
@@ -116,6 +107,23 @@ static pcmk_child_t pcmk_children[] = {
 static gboolean check_active_before_startup_processes(gpointer user_data);
 static int child_liveness(pcmk_child_t *child);
 static gboolean start_child(pcmk_child_t * child);
+static gboolean update_node_processes(uint32_t id, const char *uname,
+                                      uint32_t procs);
+void update_process_clients(pcmk__client_t *client);
+
+static uint32_t
+get_process_list(void)
+{
+    int lpc = 0;
+    uint32_t procs = crm_get_cluster_proc();
+
+    for (lpc = 0; lpc < SIZEOF(pcmk_children); lpc++) {
+        if (pcmk_children[lpc].pid != 0) {
+            procs |= pcmk_children[lpc].flag;
+        }
+    }
+    return procs;
+}
 
 static void
 pcmk_process_exit(pcmk_child_t * child)
@@ -123,6 +131,16 @@ pcmk_process_exit(pcmk_child_t * child)
     child->pid = 0;
     child->active_before_startup = FALSE;
 
+    /* Broadcast the fact that one of our processes died ASAP
+     *
+     * Try to get some logging of the cause out first though
+     * because we're probably about to get fenced
+     *
+     * Potentially do this only if respawn_count > N
+     * to allow for local recovery
+     */
+    update_node_processes(local_nodeid, NULL, get_process_list());
+
     child->respawn_count += 1;
     if (child->respawn_count > MAX_RESPAWN) {
         crm_err("Child respawn count exceeded by %s", child->name);
@@ -132,6 +150,8 @@ pcmk_process_exit(pcmk_child_t * child)
     if (shutdown_trigger) {
         /* resume step-wise shutdown (returned TRUE yields no parallelizing) */
         mainloop_set_trigger(shutdown_trigger);
+        /* intended to speed up propagating expected lay-off of the daemons? */
+        update_node_processes(local_nodeid, NULL, get_process_list());
 
     } else if (!child->respawn) {
         /* nothing to do */
@@ -158,6 +178,28 @@ pcmk_process_exit(pcmk_child_t * child)
     }
 }
 
+static void pcmk_exit_with_cluster(int exitcode)
+{
+#ifdef SUPPORT_COROSYNC
+    corosync_cfg_handle_t cfg_handle;
+    cs_error_t err;
+
+    if (exitcode == CRM_EX_FATAL) {
+        crm_info("Asking Corosync to shut down");
+        err = corosync_cfg_initialize(&cfg_handle, NULL);
+        if (err != CS_OK) {
+            crm_warn("Unable to open handle to corosync to close it down. err=%d", err);
+        }
+        err = corosync_cfg_try_shutdown(cfg_handle, COROSYNC_CFG_SHUTDOWN_FLAG_IMMEDIATE);
+        if (err != CS_OK) {
+            crm_warn("Corosync shutdown failed. err=%d", err);
+        }
+        corosync_cfg_finalize(cfg_handle);
+    }
+#endif
+    crm_exit(exitcode);
+}
+
 static void
 pcmk_child_exit(mainloop_child_t * p, pid_t pid, int core, int signo, int exitcode)
 {
@@ -301,6 +343,7 @@ start_child(pcmk_child_t * child)
         crm_info("Forked child %lld for process %s%s",
                  (long long) child->pid, child->name,
                  use_valgrind ? " (valgrind enabled: " VALGRIND_BIN ")" : "");
+        update_node_processes(local_nodeid, NULL, get_process_list());
         return TRUE;
 
     } else {
@@ -336,7 +379,7 @@ start_child(pcmk_child_t * child)
         // Drop root group access if not needed
         if (!need_root_group && (setgid(gid) < 0)) {
-            crm_warn("Could not set group to %d: %s", gid, strerror(errno));
+            crm_perror(LOG_ERR, "Could not set group to %d", gid);
         }
 
         /* Initialize supplementary groups to only those always granted to
@@ -348,8 +391,7 @@ start_child(pcmk_child_t * child)
         }
 
         if (uid && setuid(uid) < 0) {
-            crm_warn("Could not set user to %s (id %d): %s",
-                     child->uid, uid, strerror(errno));
+            crm_perror(LOG_ERR, "Could not set user to %d (%s)", uid, child->uid);
         }
 
         pcmk__close_fds_in_child(true);
@@ -363,7 +405,7 @@ start_child(pcmk_child_t * child)
         } else {
             (void)execvp(child->command, opts_default);
         }
-        crm_crit("Could not execute %s: %s", child->command, strerror(errno));
+        crm_perror(LOG_ERR, "FATAL: Cannot exec %s", child->command);
         crm_exit(CRM_EX_FATAL);
     }
     return TRUE;                /* never reached */
@@ -391,20 +433,21 @@ escalate_shutdown(gpointer data)
 static gboolean
 pcmk_shutdown_worker(gpointer user_data)
 {
-    static int phase = SIZEOF(pcmk_children);
+    static int phase = 0;
     static time_t next_log = 0;
+    static int max = SIZEOF(pcmk_children);
     int lpc = 0;
 
-    if (phase == SIZEOF(pcmk_children)) {
+    if (phase == 0) {
         crm_notice("Shutting down Pacemaker");
-        pacemakerd_state = XML_PING_ATTR_PACEMAKERDSTATE_SHUTTINGDOWN;
+        phase = max;
     }
 
     for (; phase > 0; phase--) {
         /* Don't stop anything with start_seq < 1 */
-        for (lpc = SIZEOF(pcmk_children) - 1; lpc >= 0; lpc--) {
+        for (lpc = max - 1; lpc >= 0; lpc--) {
             pcmk_child_t *child = &(pcmk_children[lpc]);
 
             if (phase != child->start_seq) {
@@ -451,14 +494,8 @@ pcmk_shutdown_worker(gpointer user_data)
         }
     }
 
+    /* send_cluster_id(); */
     crm_notice("Shutdown complete");
-    pacemakerd_state = XML_PING_ATTR_PACEMAKERDSTATE_SHUTDOWNCOMPLETE;
-    if (!fatal_error && running_with_sbd &&
-        pcmk__get_sbd_sync_resource_startup() &&
-        !shutdown_complete_state_reported_client_closed) {
-        crm_notice("Waiting for SBD to pick up shutdown-complete-state.");
-        return TRUE;
-    }
 
     {
         const char *delay = pcmk__env_option("shutdown_delay");
@@ -472,10 +509,7 @@ pcmk_shutdown_worker(gpointer user_data)
 
     if (fatal_error) {
         crm_notice("Shutting down and staying down after fatal error");
-#ifdef SUPPORT_COROSYNC
-        pcmkd_shutdown_corosync();
-#endif
-        crm_exit(CRM_EX_FATAL);
+        pcmk_exit_with_cluster(CRM_EX_FATAL);
     }
 
     return TRUE;
@@ -512,55 +546,6 @@ pcmk_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid)
     return 0;
 }
 
-static void
-pcmk_handle_ping_request(pcmk__client_t *c, xmlNode *msg, uint32_t id)
-{
-    const char *value = NULL;
-    xmlNode *ping = NULL;
-    xmlNode *reply = NULL;
-    time_t pinged = time(NULL);
-    const char *from = crm_element_value(msg, F_CRM_SYS_FROM);
-
-    /* Pinged for status */
-    crm_trace("Pinged from %s.%s",
-              crm_str(crm_element_value(msg, F_CRM_ORIGIN)),
-              from?from:"unknown");
-    ping = create_xml_node(NULL, XML_CRM_TAG_PING);
-    value = crm_element_value(msg, F_CRM_SYS_TO);
-    crm_xml_add(ping, XML_PING_ATTR_SYSFROM, value);
-    crm_xml_add(ping, XML_PING_ATTR_PACEMAKERDSTATE, pacemakerd_state);
-    crm_xml_add_ll(ping, XML_ATTR_TSTAMP, (long long) pinged);
-    crm_xml_add(ping, XML_PING_ATTR_STATUS, "ok");
-    reply = create_reply(msg, ping);
-    free_xml(ping);
-    if (reply) {
-        if (pcmk__ipc_send_xml(c, id, reply, crm_ipc_server_event) !=
-                pcmk_rc_ok) {
-            crm_err("Failed sending ping-reply");
-        }
-        free_xml(reply);
-    } else {
-        crm_err("Failed building ping-reply");
-    }
-    /* just proceed state on sbd pinging us */
-    if (from && strstr(from, "sbd")) {
-        if (crm_str_eq(pacemakerd_state,
-                       XML_PING_ATTR_PACEMAKERDSTATE_SHUTDOWNCOMPLETE,
-                       TRUE)) {
-            if (pcmk__get_sbd_sync_resource_startup()) {
-                crm_notice("Shutdown-complete-state passed to SBD.");
-            }
-            shutdown_complete_state_reported_to = c->pid;
-        } else if (crm_str_eq(pacemakerd_state,
-                              XML_PING_ATTR_PACEMAKERDSTATE_WAITPING,
-                              TRUE)) {
-            crm_notice("Received startup-trigger from SBD.");
-            pacemakerd_state = XML_PING_ATTR_PACEMAKERDSTATE_STARTINGDAEMONS;
-            mainloop_set_trigger(startup_trigger);
-        }
-    }
-}
-
 /* Exit code means? */
 static int32_t
 pcmk_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size)
@@ -578,20 +563,28 @@ pcmk_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size)
     task = crm_element_value(msg, F_CRM_TASK);
     if (crm_str_eq(task, CRM_OP_QUIT, TRUE)) {
-        crm_notice("Shutting down in response to IPC request %s from %s",
+        /* Time to quit */
+        crm_notice("Shutting down in response to ticket %s (%s)",
                    crm_element_value(msg, F_CRM_REFERENCE),
                    crm_element_value(msg, F_CRM_ORIGIN));
         pcmk_shutdown(15);
 
     } else if (crm_str_eq(task, CRM_OP_RM_NODE_CACHE, TRUE)) {
-        crm_trace("Ignoring IPC request to purge node "
-                  "because peer cache is not used");
+        /* Send to everyone */
+        struct iovec *iov;
+        int id = 0;
+        const char *name = NULL;
+
+        crm_element_value_int(msg, XML_ATTR_ID, &id);
+        name = crm_element_value(msg, XML_ATTR_UNAME);
+        crm_notice("Instructing peers to remove references to node %s/%u", name, id);
 
-    } else if (crm_str_eq(task, CRM_OP_PING, TRUE)) {
-        pcmk_handle_ping_request(c, msg, id);
+        iov = calloc(1, sizeof(struct iovec));
+        iov->iov_base = dump_xml_unformatted(msg);
+        iov->iov_len = 1 + strlen(iov->iov_base);
+        send_cpg_iov(iov);
 
     } else {
-        crm_debug("Unrecognized IPC command '%s' sent to pacemakerd",
-                  crm_str(task));
+        update_process_clients(c);
     }
 
     free_xml(msg);
@@ -608,12 +601,6 @@ pcmk_ipc_closed(qb_ipcs_connection_t * c)
         return 0;
     }
     crm_trace("Connection %p", c);
-    if (shutdown_complete_state_reported_to == client->pid) {
-        shutdown_complete_state_reported_client_closed = TRUE;
-        if (shutdown_trigger) {
-            mainloop_set_trigger(shutdown_trigger);
-        }
-    }
     pcmk__free_client(client);
     return 0;
 }
@@ -633,6 +620,113 @@ struct qb_ipcs_service_handlers mcp_ipc_callbacks = {
     .connection_destroyed = pcmk_ipc_destroy
 };
 
+static void
+send_xml_to_client(gpointer key, gpointer value, gpointer user_data)
+{
+    pcmk__ipc_send_xml((pcmk__client_t *) value, 0, (xmlNode *) user_data,
+                       crm_ipc_server_event);
+}
+
+/*!
+ * \internal
+ * \brief Send an XML message with process list of all known peers to client(s)
+ *
+ * \param[in] client Send message to this client, or all clients if NULL
+ */
+void
+update_process_clients(pcmk__client_t *client)
+{
+    GHashTableIter iter;
+    crm_node_t *node = NULL;
+    xmlNode *update = create_xml_node(NULL, "nodes");
+
+    if (is_corosync_cluster()) {
+        crm_xml_add_int(update, "quorate", pcmk_quorate);
+    }
+
+    g_hash_table_iter_init(&iter, crm_peer_cache);
+    while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) {
+        xmlNode *xml = create_xml_node(update, "node");
+
+        crm_xml_add_int(xml, "id", node->id);
+        crm_xml_add(xml, "uname", node->uname);
+        crm_xml_add(xml, "state", node->state);
+        crm_xml_add_int(xml, "processes", node->processes);
+    }
+
+    if(client) {
+        crm_trace("Sending process list to client %s", client->id);
+        send_xml_to_client(NULL, client, update);
+
+    } else {
+        crm_trace("Sending process list to %d clients",
+                  pcmk__ipc_client_count());
+        pcmk__foreach_ipc_client(send_xml_to_client, update);
+    }
+    free_xml(update);
+}
+
+/*!
+ * \internal
+ * \brief Send a CPG message with local node's process list to all peers
+ */
+static void
+update_process_peers(void)
+{
+    /* Do nothing for corosync-2 based clusters */
+
+    struct iovec *iov = calloc(1, sizeof(struct iovec));
+
+    CRM_ASSERT(iov);
+    if (local_name) {
+        iov->iov_base = crm_strdup_printf("",
+                                          local_name, get_process_list());
+    } else {
+        iov->iov_base = crm_strdup_printf("",
+                                          get_process_list());
+    }
+    iov->iov_len = strlen(iov->iov_base) + 1;
+    crm_trace("Sending %s", (char*) iov->iov_base);
+    send_cpg_iov(iov);
+}
+
+/*!
+ * \internal
+ * \brief Update a node's process list, notifying clients and peers if needed
+ *
+ * \param[in] id Node ID of affected node
+ * \param[in] uname Uname of affected node
+ * \param[in] procs Affected node's process list mask
+ *
+ * \return TRUE if the process list changed, FALSE otherwise
+ */
+static gboolean
+update_node_processes(uint32_t id, const char *uname, uint32_t procs)
+{
+    gboolean changed = FALSE;
+    crm_node_t *node = crm_get_peer(id, uname);
+
+    if (procs != 0) {
+        if (procs != node->processes) {
+            crm_debug("Node %s now has process list: %.32x (was %.32x)",
+                      node->uname, procs, node->processes);
+            node->processes = procs;
+            changed = TRUE;
+
+            /* If local node's processes have changed, notify clients/peers */
+            if (id == local_nodeid) {
+                update_process_clients(NULL);
+                update_process_peers();
+            }
+
+        } else {
+            crm_trace("Node %s still has process list: %.32x", node->uname, procs);
+        }
+    }
+    return changed;
+}
+
+
 static pcmk__cli_option_t long_options[] = {
     // long option, argument type, storage, short option, description, flags
     {
@@ -1005,8 +1099,8 @@ find_and_track_existing_processes(void)
     return pcmk_rc_ok;
 }
 
-static gboolean
-init_children_processes(void *user_data)
+static void
+init_children_processes(void)
 {
     int start_seq = 1, lpc = 0;
     static int max = SIZEOF(pcmk_children);
@@ -1032,40 +1126,97 @@ init_children_processes(void)
      * This may be useful for the daemons to know
      */
     setenv("PCMK_respawned", "true", 1);
-    pacemakerd_state = XML_PING_ATTR_PACEMAKERDSTATE_RUNNING;
-    return TRUE;
 }
 
 static void
-remove_core_file_limit(void)
+mcp_cpg_destroy(gpointer user_data)
 {
-    struct rlimit cores;
-    int rc = getrlimit(RLIMIT_CORE, &cores);
+    crm_crit("Lost connection to cluster layer, shutting down");
+    crm_exit(CRM_EX_DISCONNECT);
+}
 
-    if (rc < 0) {
-        crm_warn("Cannot determine current maximum core file size: %s",
-                 strerror(errno));
-        return;
-    }
+/*!
+ * \internal
+ * \brief Process a CPG message (process list or manual peer cache removal)
+ *
+ * \param[in] handle CPG connection (ignored)
+ * \param[in] groupName CPG group name (ignored)
+ * \param[in] nodeid ID of affected node
+ * \param[in] pid Process ID (ignored)
+ * \param[in] msg CPG XML message
+ * \param[in] msg_len Length of msg in bytes (ignored)
+ */
+static void
+mcp_cpg_deliver(cpg_handle_t handle,
+                const struct cpg_name *groupName,
+                uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len)
+{
+    xmlNode *xml = string2xml(msg);
+    const char *task = crm_element_value(xml, F_CRM_TASK);
 
-    if ((cores.rlim_max == 0) && (geteuid() == 0)) {
-        cores.rlim_max = RLIM_INFINITY;
-    } else {
-        crm_info("Maximum core file size is %llu bytes",
-                 (unsigned long long) cores.rlim_max);
+    crm_trace("Received CPG message (%s): %.200s",
+              (task? task : "process list"), (char*)msg);
+
+    if (task == NULL) {
+        if (nodeid == local_nodeid) {
+            crm_debug("Ignoring message with local node's process list");
+        } else {
+            uint32_t procs = 0;
+            const char *uname = crm_element_value(xml, "uname");
+
+            crm_element_value_int(xml, "proclist", (int *)&procs);
+            if (update_node_processes(nodeid, uname, procs)) {
+                update_process_clients(NULL);
+            }
+        }
+
+    } else if (crm_str_eq(task, CRM_OP_RM_NODE_CACHE, TRUE)) {
+        int id = 0;
+        const char *name = NULL;
+
+        crm_element_value_int(xml, XML_ATTR_ID, &id);
+        name = crm_element_value(xml, XML_ATTR_UNAME);
+        reap_crm_member(id, name);
     }
 
-    cores.rlim_cur = cores.rlim_max;
-    rc = setrlimit(RLIMIT_CORE, &cores);
-    if (rc < 0) {
-        crm_warn("Cannot raise system limit on core file size "
-                 "(consider doing so manually)");
+    if (xml != NULL) {
+        free_xml(xml);
     }
 }
 
+static void
+mcp_cpg_membership(cpg_handle_t handle,
+                   const struct cpg_name *groupName,
+                   const struct cpg_address *member_list, size_t member_list_entries,
+                   const struct cpg_address *left_list, size_t left_list_entries,
+                   const struct cpg_address *joined_list, size_t joined_list_entries)
+{
+    /* Update peer cache if needed */
+    pcmk_cpg_membership(handle, groupName, member_list, member_list_entries,
+                        left_list, left_list_entries,
+                        joined_list, joined_list_entries);
+
+    /* Always broadcast our own presence after any membership change */
+    update_process_peers();
+}
+
+static gboolean
+mcp_quorum_callback(unsigned long long seq, gboolean quorate)
+{
+    pcmk_quorate = quorate;
+    return TRUE;
+}
+
+static void
+mcp_quorum_destroy(gpointer user_data)
+{
+    crm_info("connection lost");
+}
+
 int
 main(int argc, char **argv)
 {
+    int rc;
     int flag;
     int argerr = 0;
 
@@ -1074,8 +1225,10 @@ main(int argc, char **argv)
     uid_t pcmk_uid = 0;
     gid_t pcmk_gid = 0;
+    struct rlimit cores;
     crm_ipc_t *old_instance = NULL;
     qb_ipcs_service_t *ipcs = NULL;
+    static crm_cluster_t cluster;
 
     crm_log_preinit(NULL, argc, argv);
     pcmk__set_cli_options(NULL, "[options]", long_options,
@@ -1167,11 +1320,10 @@ main(int argc, char **argv)
     crm_ipc_close(old_instance);
     crm_ipc_destroy(old_instance);
 
-#ifdef SUPPORT_COROSYNC
     if (mcp_read_config() == FALSE) {
+        crm_notice("Could not obtain corosync config data, exiting");
        crm_exit(CRM_EX_UNAVAILABLE);
     }
-#endif
 
     // OCF shell functions and cluster-glue need facility under different name
     {
@@ -1186,7 +1338,25 @@ main(int argc, char **argv)
               PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURES);
     mainloop = g_main_loop_new(NULL, FALSE);
 
-    remove_core_file_limit();
+    rc = getrlimit(RLIMIT_CORE, &cores);
+    if (rc < 0) {
+        crm_perror(LOG_ERR, "Cannot determine current maximum core size.");
+    } else {
+        if (cores.rlim_max == 0 && geteuid() == 0) {
+            cores.rlim_max = RLIM_INFINITY;
+        } else {
+            crm_info("Maximum core file size is: %lu", (unsigned long)cores.rlim_max);
+        }
+        cores.rlim_cur = cores.rlim_max;
+
+        rc = setrlimit(RLIMIT_CORE, &cores);
+        if (rc < 0) {
+            crm_perror(LOG_ERR,
+                       "Core file generation will remain disabled."
+                       " Core files are an important diagnostic tool, so"
+                       " please consider enabling them by default.");
+        }
+    }
 
     if (pcmk_daemon_user(&pcmk_uid, &pcmk_gid) < 0) {
         crm_err("Cluster user %s does not exist, aborting Pacemaker startup",
                 CRM_DAEMON_USER);
@@ -1228,16 +1398,14 @@ main(int argc, char **argv)
         crm_exit(CRM_EX_OSERR);
     }
 
-#ifdef SUPPORT_COROSYNC
     /* Allows us to block shutdown */
-    if (!cluster_connect_cfg()) {
+    if (cluster_connect_cfg(&local_nodeid) == FALSE) {
+        crm_err("Couldn't connect to Corosync's CFG service");
         crm_exit(CRM_EX_PROTOCOL);
     }
-#endif
 
     if(pcmk_locate_sbd() > 0) {
         setenv("PCMK_watchdog", "true", 1);
-        running_with_sbd = TRUE;
     } else {
         setenv("PCMK_watchdog", "false", 1);
     }
@@ -1251,25 +1419,34 @@ main(int argc, char **argv)
         crm_exit(CRM_EX_FATAL);
     };
 
-    mainloop_add_signal(SIGTERM, pcmk_shutdown);
-    mainloop_add_signal(SIGINT, pcmk_shutdown);
+    cluster.destroy = mcp_cpg_destroy;
+    cluster.cpg.cpg_deliver_fn = mcp_cpg_deliver;
+    cluster.cpg.cpg_confchg_fn = mcp_cpg_membership;
+
+    crm_set_autoreap(FALSE);
+
+    rc = pcmk_ok;
+
+    if (cluster_connect_cpg(&cluster) == FALSE) {
+        crm_err("Couldn't connect to Corosync's CPG service");
+        rc = -ENOPROTOOPT;
+
+    } else if (cluster_connect_quorum(mcp_quorum_callback, mcp_quorum_destroy)
+               == FALSE) {
+        rc = -ENOTCONN;
 
-    if ((running_with_sbd) && pcmk__get_sbd_sync_resource_startup()) {
-        crm_notice("Waiting for startup-trigger from SBD.");
-        pacemakerd_state = XML_PING_ATTR_PACEMAKERDSTATE_WAITPING;
-        startup_trigger = mainloop_add_trigger(G_PRIORITY_HIGH, init_children_processes, NULL);
     } else {
-        if (running_with_sbd) {
-            crm_warn("Enabling SBD_SYNC_RESOURCE_STARTUP would (if supported "
-                     "by your SBD version) improve reliability of "
-                     "interworking between SBD & pacemaker.");
-        }
-        pacemakerd_state = XML_PING_ATTR_PACEMAKERDSTATE_STARTINGDAEMONS;
-        init_children_processes(NULL);
-    }
+        local_name = get_local_node_name();
+        update_node_processes(local_nodeid, local_name, get_process_list());
+
+        mainloop_add_signal(SIGTERM, pcmk_shutdown);
+        mainloop_add_signal(SIGINT, pcmk_shutdown);
 
-    crm_notice("Pacemaker daemon successfully started and accepting connections");
-    g_main_loop_run(mainloop);
+        init_children_processes();
+
+        crm_notice("Pacemaker daemon successfully started and accepting connections");
+        g_main_loop_run(mainloop);
+    }
 
     if (ipcs) {
         crm_trace("Closing IPC server");
@@ -1278,8 +1455,9 @@ main(int argc, char **argv)
     }
 
     g_main_loop_unref(mainloop);
-#ifdef SUPPORT_COROSYNC
+
+    cluster_disconnect_cpg(&cluster);
     cluster_disconnect_cfg();
-#endif
-    crm_exit(CRM_EX_OK);
+
+    crm_exit(crm_errno2exit(rc));
 }
diff --git a/daemons/pacemakerd/pacemakerd.h b/daemons/pacemakerd/pacemakerd.h
index 5f475fd..d66ab10 100644
--- a/daemons/pacemakerd/pacemakerd.h
+++ b/daemons/pacemakerd/pacemakerd.h
@@ -22,8 +22,7 @@
 gboolean mcp_read_config(void);
 
-gboolean cluster_connect_cfg(void);
+gboolean cluster_connect_cfg(uint32_t * nodeid);
 gboolean cluster_disconnect_cfg(void);
 
-void pcmkd_shutdown_corosync(void);
 void pcmk_shutdown(int nsig);
diff --git a/daemons/pacemakerd/pcmkd_corosync.c b/daemons/pacemakerd/pcmkd_corosync.c
index 82bd257..ec74908 100644
--- a/daemons/pacemakerd/pcmkd_corosync.c
+++ b/daemons/pacemakerd/pcmkd_corosync.c
@@ -28,7 +28,8 @@
 #include /* PCMK__SPECIAL_PID* */
 
-static corosync_cfg_handle_t cfg_handle = 0;
+enum cluster_type_e stack = pcmk_cluster_unknown;
+static corosync_cfg_handle_t cfg_handle;
 
 /* =::=::=::= CFG - Shutdown stuff =::=::=::= */
 
@@ -62,8 +63,9 @@ pcmk_cfg_dispatch(gpointer user_data)
 static void
 cfg_connection_destroy(gpointer user_data)
 {
-    crm_err("Lost connection to Corosync");
+    crm_err("Connection destroyed");
     cfg_handle = 0;
+
     pcmk_shutdown(SIGTERM);
 }
 
@@ -83,7 +85,7 @@ cluster_disconnect_cfg(void)
         code;                                   \
         if(rc == CS_ERR_TRY_AGAIN || rc == CS_ERR_QUEUE_FULL) {  \
             counter++;                          \
-            crm_debug("Retrying Corosync operation after %ds", counter);    \
+            crm_debug("Retrying operation after %ds", counter);    \
             sleep(counter);                     \
         } else {                                \
             break;                              \
@@ -91,14 +93,13 @@ cluster_disconnect_cfg(void)
     } while(counter < max)
 
 gboolean
-cluster_connect_cfg(void)
+cluster_connect_cfg(uint32_t * nodeid)
 {
     cs_error_t rc;
     int fd = -1, retries = 0, rv;
     uid_t found_uid = 0;
     gid_t found_gid = 0;
     pid_t found_pid = 0;
-    uint32_t nodeid;
 
     static struct mainloop_fd_callbacks cfg_fd_callbacks = {
         .dispatch = pcmk_cfg_dispatch,
@@ -108,42 +109,41 @@ cluster_connect_cfg(void)
     cs_repeat(retries, 30, rc = corosync_cfg_initialize(&cfg_handle, &cfg_callbacks));
 
     if (rc != CS_OK) {
-        crm_crit("Could not connect to Corosync CFG: %s " CRM_XS " rc=%d",
-                 cs_strerror(rc), rc);
+        crm_err("corosync cfg init: %s (%d)", cs_strerror(rc), rc);
         return FALSE;
     }
 
     rc = corosync_cfg_fd_get(cfg_handle, &fd);
     if (rc != CS_OK) {
-        crm_crit("Could not get Corosync CFG descriptor: %s " CRM_XS " rc=%d",
-                 cs_strerror(rc), rc);
+        crm_err("corosync cfg fd_get: %s (%d)", cs_strerror(rc), rc);
         goto bail;
     }
 
    /* CFG provider run as root (in given user namespace, anyway)? */
    if (!(rv = crm_ipc_is_authentic_process(fd, (uid_t) 0,(gid_t) 0, &found_pid,
                                            &found_uid, &found_gid))) {
-        crm_crit("Rejecting Corosync CFG provider because process %lld "
-                 "is running as uid %lld gid %lld, not root",
-                 (long long) PCMK__SPECIAL_PID_AS_0(found_pid),
-                 (long long) found_uid, (long long) found_gid);
+        crm_err("CFG provider is not authentic:"
+                " process %lld (uid: %lld, gid: %lld)",
+                (long long) PCMK__SPECIAL_PID_AS_0(found_pid),
+                (long long) found_uid, (long long) found_gid);
         goto bail;
     } else if (rv < 0) {
-        crm_crit("Could not authenticate Corosync CFG provider: %s "
-                 CRM_XS " rc=%d", strerror(-rv), -rv);
+        crm_err("Could not verify authenticity of CFG provider: %s (%d)",
+                strerror(-rv), -rv);
         goto bail;
     }
 
     retries = 0;
-    cs_repeat(retries, 30, rc = corosync_cfg_local_get(cfg_handle, &nodeid));
+    cs_repeat(retries, 30, rc = corosync_cfg_local_get(cfg_handle, nodeid));
+
     if (rc != CS_OK) {
-        crm_crit("Could not get local node ID from Corosync: %s "
-                 CRM_XS " rc=%d", cs_strerror(rc), rc);
+        crm_err("corosync cfg local_get error %d", rc);
         goto bail;
     }
-    crm_debug("Corosync reports local node ID is %lu", (unsigned long) nodeid);
+    crm_debug("Our nodeid: %d", *nodeid);
 
     mainloop_add_fd("corosync-cfg", G_PRIORITY_DEFAULT, fd, &cfg_handle, &cfg_fd_callbacks);
+
     return TRUE;
 
   bail:
@@ -151,28 +151,6 @@ cluster_connect_cfg(void)
     return FALSE;
 }
 
-void
-pcmkd_shutdown_corosync(void)
-{
-    cs_error_t rc;
-
-    if (cfg_handle == 0) {
-        crm_warn("Unable to shut down Corosync: No connection");
-        return;
-    }
-    crm_info("Asking Corosync to shut down");
-    rc = corosync_cfg_try_shutdown(cfg_handle,
-                                   COROSYNC_CFG_SHUTDOWN_FLAG_IMMEDIATE);
-    if (rc == CS_OK) {
-        corosync_cfg_finalize(cfg_handle);
-        cfg_handle = 0;
-    } else {
-        crm_warn("Corosync shutdown failed: %s " CRM_XS " rc=%d",
-                 cs_strerror(rc), rc);
-    }
-}
-
-
 /* =::=::=::= Configuration =::=::=::= */
 static int
 get_config_opt(uint64_t unused, cmap_handle_t object_handle, const char *key, char **value,
@@ -205,15 +183,14 @@ mcp_read_config(void)
     gid_t found_gid = 0;
     pid_t found_pid = 0;
     int rv;
-    enum cluster_type_e stack;
 
     // There can be only one possibility
     do {
         rc = cmap_initialize(&local_handle);
        if (rc != CS_OK) {
            retries++;
-            crm_info("Could not connect to Corosync CMAP: %s (retrying in %ds) "
-                     CRM_XS " rc=%d", cs_strerror(rc), retries, rc);
+            printf("cmap connection setup failed: %s. Retrying in %ds\n", cs_strerror(rc), retries);
+            crm_info("cmap connection setup failed: %s. Retrying in %ds", cs_strerror(rc), retries);
            sleep(retries);
 
        } else {
@@ -223,15 +200,15 @@ mcp_read_config(void)
     } while (retries < 5);
 
     if (rc != CS_OK) {
-        crm_crit("Could not connect to Corosync CMAP: %s "
-                 CRM_XS " rc=%d", cs_strerror(rc), rc);
+        printf("Could not connect to Cluster Configuration Database API, error %d\n", rc);
+        crm_warn("Could not connect to Cluster Configuration Database API, error %d", rc);
         return FALSE;
     }
 
     rc = cmap_fd_get(local_handle, &fd);
     if (rc != CS_OK) {
-        crm_crit("Could not get Corosync CMAP descriptor: %s " CRM_XS " rc=%d",
-                 cs_strerror(rc), rc);
+        crm_err("Could not obtain the CMAP API connection: %s (%d)",
+                cs_strerror(rc), rc);
         cmap_finalize(local_handle);
         return FALSE;
     }
 
    /* CMAP provider run as root (in given user namespace, anyway)? */
    if (!(rv = crm_ipc_is_authentic_process(fd, (uid_t) 0,(gid_t) 0, &found_pid,
                                            &found_uid, &found_gid))) {
-        crm_crit("Rejecting Corosync CMAP provider because process %lld "
-                 "is running as uid %lld gid %lld, not root",
-                 (long long) PCMK__SPECIAL_PID_AS_0(found_pid),
-                 (long long) found_uid, (long long) found_gid);
+        crm_err("CMAP provider is not authentic:"
+                " process %lld (uid: %lld, gid: %lld)",
+                (long long) PCMK__SPECIAL_PID_AS_0(found_pid),
+                (long long) found_uid, (long long) found_gid);
         cmap_finalize(local_handle);
         return FALSE;
     } else if (rv < 0) {
-        crm_crit("Could not authenticate Corosync CMAP provider: %s "
-                 CRM_XS " rc=%d", strerror(-rv), -rv);
+        crm_err("Could not verify authenticity of CMAP provider: %s (%d)",
+                strerror(-rv), -rv);
         cmap_finalize(local_handle);
         return FALSE;
     }
 
     stack = get_cluster_type();
-    if (stack != pcmk_cluster_corosync) {
-        crm_crit("Expected corosync stack but detected %s " CRM_XS " stack=%d",
-                 name_for_cluster_type(stack), stack);
+    crm_info("Reading configure for stack: %s", name_for_cluster_type(stack));
+
+    /* =::=::= Should we be here =::=::= */
+    if (stack == pcmk_cluster_corosync) {
+        pcmk__set_env_option("cluster_type", "corosync");
+        pcmk__set_env_option("quorum_type", "corosync");
+
+    } else {
+        crm_err("Unsupported stack type: %s", name_for_cluster_type(stack));
         return FALSE;
     }
 
-    crm_info("Reading configuration for %s stack",
-             name_for_cluster_type(stack));
-    pcmk__set_env_option("cluster_type", "corosync");
-    pcmk__set_env_option("quorum_type", "corosync");
+    /* =::=::= Logging =::=::= */
+    if (pcmk__env_option("debug")) {
+        /* Syslog logging is already setup by crm_log_init() */
 
-    // If debug logging is not configured, check whether corosync has it
-    if (pcmk__env_option("debug") == NULL) {
+    } else {
+        /* Check corosync */
         char *debug_enabled = NULL;
 
         get_config_opt(config, local_handle, "logging.debug", &debug_enabled, "off");
@@ -286,7 +268,7 @@ mcp_read_config(void)
     if(local_handle){
         gid_t gid = 0;
         if (pcmk_daemon_user(NULL, &gid) < 0) {
-            crm_warn("Could not authorize group with Corosync " CRM_XS
+            crm_warn("Could not authorize group with corosync " CRM_XS
                      " No group found for user %s", CRM_DAEMON_USER);
 
         } else {
@@ -294,8 +276,8 @@ mcp_read_config(void)
             snprintf(key, PATH_MAX, "uidgid.gid.%u", gid);
             rc = cmap_set_uint8(local_handle, key, 1);
             if (rc != CS_OK) {
-                crm_warn("Could not authorize group with Corosync: %s " CRM_XS
-                         " group=%u rc=%d", ais_error2text(rc), gid, rc);
+                crm_warn("Could not authorize group with corosync "CRM_XS
+                         " group=%u rc=%d (%s)", gid, rc, ais_error2text(rc));
             }
         }
     }
diff --git a/daemons/schedulerd/pacemaker-schedulerd.c b/daemons/schedulerd/pacemaker-schedulerd.c
index 885386d..0146ca2 100644
--- a/daemons/schedulerd/pacemaker-schedulerd.c
+++ b/daemons/schedulerd/pacemaker-schedulerd.c
@@ -21,7 +21,7 @@
 #include
 
-#include
+#include
 #include
 #include
 #include
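
Editor's note: the Ch-Options.txt hunk that follows drops +demote+ from the documented
no-quorum-policy values. For orientation only, here is a minimal sketch of where this
option is set in a CIB; the id values are illustrative assumptions and do not come
from this patch:

[source,XML]
----
<crm_config>
  <cluster_property_set id="cib-bootstrap-options">
    <!-- with this revert applied, the documented choices are
         stop, freeze, ignore, and suicide -->
    <nvpair id="opt-no-quorum-policy"
            name="no-quorum-policy" value="stop"/>
  </cluster_property_set>
</crm_config>
----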
Allowed values: * +ignore:+ continue all resource management * +freeze:+ continue resource management, but don't recover resources from nodes not in the affected partition * +stop:+ stop all resources in the affected cluster partition -* +demote:+ demote promotable resources and stop all other resources in the - affected cluster partition * +suicide:+ fence all nodes in the affected cluster partition | batch-limit | 0 | diff --git a/doc/Pacemaker_Explained/en-US/Ch-Resources.txt b/doc/Pacemaker_Explained/en-US/Ch-Resources.txt index 88892db..d8e7115 100644 --- a/doc/Pacemaker_Explained/en-US/Ch-Resources.txt +++ b/doc/Pacemaker_Explained/en-US/Ch-Resources.txt @@ -669,22 +669,13 @@ XML attributes take precedence over +nvpair+ elements if both are specified. indexterm:[Action,Property,timeout] |on-fail -a|Varies by action: - -* +stop+: +fence+ if +stonith-enabled+ is true or +block+ otherwise -* +demote+: +on-fail+ of the +monitor+ action with +role+ set to +Master+, if - present, enabled, and configured to a value other than +demote+, or +restart+ - otherwise -* all other actions: +restart+ +|restart '(except for +stop+ operations, which default to' fence 'when + STONITH is enabled and' block 'otherwise)' a|The action to take if this action ever fails. Allowed values: * +ignore:+ Pretend the resource did not fail. * +block:+ Don't perform any further operations on the resource. * +stop:+ Stop the resource and do not start it elsewhere. -* +demote:+ Demote the resource, without a full restart. This is valid only for - +promote+ actions, and for +monitor+ actions with both a nonzero +interval+ - and +role+ set to +Master+; for any other action, a configuration error will - be logged, and the default behavior will be used. * +restart:+ Stop the resource and start it again (possibly on a different node). * +fence:+ STONITH the node on which the resource failed. * +standby:+ Move _all_ resources away from the node on which the resource failed. @@ -723,38 +714,6 @@ indexterm:[Action,Property,on-fail] |========================================================= -[NOTE] -==== -When +on-fail+ is set to +demote+, recovery from failure by a successful demote -causes the cluster to recalculate whether and where a new instance should be -promoted. The node with the failure is eligible, so if master scores have not -changed, it will be promoted again. - -There is no direct equivalent of +migration-threshold+ for the master role, but -the same effect can be achieved with a location constraint using a -<> with a node attribute expression for the resource's fail -count. - -For example, to immediately ban the master role from a node with any failed -promote or master monitor: -[source,XML] ----- - - - - - - ----- - -This example assumes that there is a promotable clone of the +my_primitive+ -resource (note that the primitive name, not the clone name, is used in the -rule), and that there is a recurring 10-second-interval monitor configured for -the master role (fail count attributes specify the interval in milliseconds). -==== - [[s-resource-monitoring]] === Monitoring Resources for Failure === diff --git a/doc/Pacemaker_Explained/en-US/Ch-Rules.txt b/doc/Pacemaker_Explained/en-US/Ch-Rules.txt index 5df5f82..9d617f6 100644 --- a/doc/Pacemaker_Explained/en-US/Ch-Rules.txt +++ b/doc/Pacemaker_Explained/en-US/Ch-Rules.txt @@ -522,124 +522,6 @@ You may wish to write +end="2005-03-31T23:59:59"+ to avoid confusion. 
------- ===== -== Resource Expressions == - -An +rsc_expression+ is a rule condition based on a resource agent's properties. -This rule is only valid within an +rsc_defaults+ or +op_defaults+ context. None -of the matching attributes of +class+, +provider+, and +type+ are required. If -one is omitted, all values of that attribute will match. For instance, omitting -+type+ means every type will match. - -.Attributes of an rsc_expression Element -[width="95%",cols="2m,<5",options="header",align="center"] -|========================================================= - -|Field -|Description - -|id -|A unique name for the expression (required) - indexterm:[XML attribute,id attribute,rsc_expression element] - indexterm:[XML element,rsc_expression element,id attribute] - -|class -|The standard name to be matched against resource agents - indexterm:[XML attribute,class attribute,rsc_expression element] - indexterm:[XML element,rsc_expression element,class attribute] - -|provider -|If given, the vendor to be matched against resource agents. This - only makes sense for agents using the OCF spec. - indexterm:[XML attribute,provider attribute,rsc_expression element] - indexterm:[XML element,rsc_expression element,provider attribute] - -|type -|The name of the resource agent to be matched - indexterm:[XML attribute,type attribute,rsc_expression element] - indexterm:[XML element,rsc_expression element,type attribute] - -|========================================================= - -=== Example Resource-Based Expressions === - -A small sample of how resource-based expressions can be used: - -.True for all ocf:heartbeat:IPaddr2 resources -==== -[source,XML] ----- - - - ----- -==== - -.Provider doesn't apply to non-OCF resources -==== -[source,XML] ----- - - - ----- -==== - -== Operation Expressions == - -An +op_expression+ is a rule condition based on an action of some resource -agent. This rule is only valid within an +op_defaults+ context. - -.Attributes of an op_expression Element -[width="95%",cols="2m,<5",options="header",align="center"] -|========================================================= - -|Field -|Description - -|id -|A unique name for the expression (required) - indexterm:[XML attribute,id attribute,op_expression element] - indexterm:[XML element,op_expression element,id attribute] - -|name -|The action name to match against. This can be any action supported by - the resource agent; common values include +monitor+, +start+, and +stop+ - (required). - indexterm:[XML attribute,name attribute,op_expression element] - indexterm:[XML element,op_expression element,name attribute] - -|interval -|The interval of the action to match against. If not given, only - the name attribute will be used to match. - indexterm:[XML attribute,interval attribute,op_expression element] - indexterm:[XML element,op_expression element,interval attribute] - -|========================================================= - -=== Example Operation-Based Expressions === - -A small sample of how operation-based expressions can be used: - -.True for all monitor actions -==== -[source,XML] ----- - - - ----- -==== - -.True for all monitor actions with a 10 second interval -==== -[source,XML] ----- - - - ----- -==== - == Using Rules to Determine Resource Location == indexterm:[Rule,Determine Resource Location] indexterm:[Resource,Location,Determine by Rules] @@ -828,62 +710,6 @@ Rules may be used similarly in +instance_attributes+ or +utilization+ blocks. 
Any single block may directly contain only a single rule, but that rule may itself contain any number of rules. -+rsc_expression+ and +op_expression+ blocks may additionally be used to set defaults -on either a single resource or across an entire class of resources with a single -rule. +rsc_expression+ may be used to select resource agents within both +rsc_defaults+ -and +op_defaults+, while +op_expression+ may only be used within +op_defaults+. If -multiple rules succeed for a given resource agent, the last one specified will be -the one that takes effect. As with any other rule, boolean operations may be used -to make more complicated expressions. - -.Set all IPaddr2 resources to stopped -===== -[source,XML] -------- - - - - - - - - -------- -===== - -.Set all monitor action timeouts to 7 seconds -===== -[source,XML] -------- - - - - - - - - -------- -===== - -.Set the monitor action timeout on all IPaddr2 resources with a given monitor interval to 8 seconds -===== -[source,XML] -------- - - - - - - - - - -------- -===== - === Using Rules to Control Cluster Options === indexterm:[Rule,Controlling Cluster Options] indexterm:[Cluster,Setting Options with Rules] diff --git a/include/crm/cib/internal.h b/include/crm/cib/internal.h index b43cf08..df16280 100644 --- a/include/crm/cib/internal.h +++ b/include/crm/cib/internal.h @@ -10,7 +10,7 @@ #ifndef CIB_INTERNAL__H # define CIB_INTERNAL__H # include -# include +# include # define CIB_OP_SLAVE "cib_slave" # define CIB_OP_SLAVEALL "cib_slave_all" diff --git a/include/crm/common/Makefile.am b/include/crm/common/Makefile.am index 1b5730a..b38a5c5 100644 --- a/include/crm/common/Makefile.am +++ b/include/crm/common/Makefile.am @@ -12,8 +12,8 @@ MAINTAINERCLEANFILES = Makefile.in headerdir=$(pkgincludedir)/crm/common header_HEADERS = xml.h ipc.h util.h iso8601.h mainloop.h logging.h results.h \ - nvpair.h acl.h ipc_controld.h ipc_pacemakerd.h -noinst_HEADERS = internal.h alerts_internal.h \ + nvpair.h acl.h +noinst_HEADERS = ipcs_internal.h internal.h alerts_internal.h \ iso8601_internal.h remote_internal.h xml_internal.h \ ipc_internal.h output.h cmdline_internal.h curses_internal.h \ attrd_internal.h options_internal.h diff --git a/include/crm/common/internal.h b/include/crm/common/internal.h index ef1e3df..4f4c66f 100644 --- a/include/crm/common/internal.h +++ b/include/crm/common/internal.h @@ -19,7 +19,6 @@ #include // xmlNode #include // crm_strdup_printf() -#include // mainloop_io_t, struct ipc_client_callbacks // Internal ACL-related utilities (from acl.c) @@ -104,13 +103,6 @@ pcmk__open_devnull(int flags) } while (0) -/* internal main loop utilities (from mainloop.c) */ - -int pcmk__add_mainloop_ipc(crm_ipc_t *ipc, int priority, void *userdata, - struct ipc_client_callbacks *callbacks, - mainloop_io_t **source); - - /* internal procfs utilities (from procfs.c) */ pid_t pcmk__procfs_pid_of(const char *name); diff --git a/include/crm/common/ipc.h b/include/crm/common/ipc.h index c67aaea..79763f6 100644 --- a/include/crm/common/ipc.h +++ b/include/crm/common/ipc.h @@ -16,8 +16,7 @@ extern "C" { /** * \file - * \brief IPC interface to Pacemaker daemons - * + * \brief Wrappers for and extensions to libqb IPC * \ingroup core */ @@ -25,120 +24,17 @@ extern "C" { #include #include -/* - * Message creation utilities - * - * These are used for both IPC messages and cluster layer messages. However, - * since this is public API, they stay in this header for backward - * compatibility. 
- */ - -#define create_reply(request, xml_response_data) \ - create_reply_adv(request, xml_response_data, __FUNCTION__) +/* clplumbing based IPC */ -xmlNode *create_reply_adv(xmlNode *request, xmlNode *xml_response_data, - const char *origin); +# define create_reply(request, xml_response_data) create_reply_adv(request, xml_response_data, __FUNCTION__); +xmlNode *create_reply_adv(xmlNode * request, xmlNode * xml_response_data, const char *origin); -#define create_request(task, xml_data, host_to, sys_to, sys_from, uuid_from) \ - create_request_adv(task, xml_data, host_to, sys_to, sys_from, uuid_from, \ - __FUNCTION__) +# define create_request(task, xml_data, host_to, sys_to, sys_from, uuid_from) create_request_adv(task, xml_data, host_to, sys_to, sys_from, uuid_from, __FUNCTION__) -xmlNode *create_request_adv(const char *task, xmlNode *xml_data, - const char *host_to, const char *sys_to, - const char *sys_from, const char *uuid_from, +xmlNode *create_request_adv(const char *task, xmlNode * xml_data, const char *host_to, + const char *sys_to, const char *sys_from, const char *uuid_from, const char *origin); - -/* - * The library supports two methods of creating IPC connections. The older code - * allows connecting to any arbitrary IPC name. The newer code only allows - * connecting to one of the Pacemaker daemons. - * - * As daemons are converted to use the new model, the old functions should be - * considered deprecated for use with those daemons. Once all daemons are - * converted, the old functions should be officially deprecated as public API - * and eventually made internal API. - */ - -/* - * Pacemaker daemon IPC - */ - -//! Available IPC interfaces -enum pcmk_ipc_server { - pcmk_ipc_attrd, //!< Attribute manager - pcmk_ipc_based, //!< CIB manager - pcmk_ipc_controld, //!< Controller - pcmk_ipc_execd, //!< Executor - pcmk_ipc_fenced, //!< Fencer - pcmk_ipc_pacemakerd, //!< Launcher - pcmk_ipc_schedulerd, //!< Scheduler -}; - -//! Possible event types that an IPC event callback can be called for -enum pcmk_ipc_event { - pcmk_ipc_event_connect, //!< Result of asynchronous connection attempt - pcmk_ipc_event_disconnect, //!< Termination of IPC connection - pcmk_ipc_event_reply, //!< Daemon's reply to client IPC request - pcmk_ipc_event_notify, //!< Notification from daemon -}; - -//! How IPC replies should be dispatched -enum pcmk_ipc_dispatch { - pcmk_ipc_dispatch_main, //!< Attach IPC to GMainLoop for dispatch - pcmk_ipc_dispatch_poll, //!< Caller will poll and dispatch IPC - pcmk_ipc_dispatch_sync, //!< Sending a command will wait for any reply -}; - -//! Client connection to Pacemaker IPC -typedef struct pcmk_ipc_api_s pcmk_ipc_api_t; - -/*! - * \brief Callback function type for Pacemaker daemon IPC APIs - * - * \param[in] api IPC API connection - * \param[in] event_type The type of event that occurred - * \param[in] status Event status - * \param[in] event_data Event-specific data - * \param[in] user_data Caller data provided when callback was registered - * - * \note For connection and disconnection events, event_data may be NULL (for - * local IPC) or the name of the connected node (for remote IPC, for - * daemons that support that). For reply and notify events, event_data is - * defined by the specific daemon API. 
- */ -typedef void (*pcmk_ipc_callback_t)(pcmk_ipc_api_t *api, - enum pcmk_ipc_event event_type, - crm_exit_t status, - void *event_data, void *user_data); - -int pcmk_new_ipc_api(pcmk_ipc_api_t **api, enum pcmk_ipc_server server); - -void pcmk_free_ipc_api(pcmk_ipc_api_t *api); - -int pcmk_connect_ipc(pcmk_ipc_api_t *api, enum pcmk_ipc_dispatch dispatch_type); - -void pcmk_disconnect_ipc(pcmk_ipc_api_t *api); - -int pcmk_poll_ipc(pcmk_ipc_api_t *api, int timeout_ms); - -void pcmk_dispatch_ipc(pcmk_ipc_api_t *api); - -void pcmk_register_ipc_callback(pcmk_ipc_api_t *api, pcmk_ipc_callback_t cb, - void *user_data); - -const char *pcmk_ipc_name(pcmk_ipc_api_t *api, bool for_log); - -bool pcmk_ipc_is_connected(pcmk_ipc_api_t *api); - -int pcmk_ipc_purge_node(pcmk_ipc_api_t *api, const char *node_name, - uint32_t nodeid); - - -/* - * Generic IPC API (to eventually be deprecated as public API and made internal) - */ - /* *INDENT-OFF* */ enum crm_ipc_flags { @@ -149,7 +45,7 @@ enum crm_ipc_flags crm_ipc_proxied = 0x00000100, /* _ALL_ replies to proxied connections need to be sent as events */ crm_ipc_client_response = 0x00000200, /* A Response is expected in reply */ - // These are options for Pacemaker's internal use only (pcmk__ipc_send_*()) + // These are options only for pcmk__ipc_send_iov() crm_ipc_server_event = 0x00010000, /* Send an Event instead of a Response */ crm_ipc_server_free = 0x00020000, /* Free the iovec after sending */ crm_ipc_proxied_relay_response = 0x00040000, /* all replies to proxied connections are sent as events, this flag preserves whether the event should be treated as an actual event, or a response.*/ @@ -217,9 +113,7 @@ unsigned int crm_ipc_default_buffer_size(void); int crm_ipc_is_authentic_process(int sock, uid_t refuid, gid_t refgid, pid_t *gotpid, uid_t *gotuid, gid_t *gotgid); -/* This is controller-specific but is declared in this header for C API - * backward compatibility. - */ +/* Utils */ xmlNode *create_hello_message(const char *uuid, const char *client_name, const char *major_version, const char *minor_version); diff --git a/include/crm/common/ipc_controld.h b/include/crm/common/ipc_controld.h deleted file mode 100644 index b817357..0000000 --- a/include/crm/common/ipc_controld.h +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Copyright 2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * - * This source code is licensed under the GNU Lesser General Public License - * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. - */ - -#ifndef PCMK__IPC_CONTROLD__H -# define PCMK__IPC_CONTROLD__H - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \file - * \brief IPC commands for Pacemaker controller - * - * \ingroup core - */ - -#include // bool -#include // GList -#include // xmlNode -#include // pcmk_ipc_api_t - -//! Possible types of controller replies -enum pcmk_controld_api_reply { - pcmk_controld_reply_unknown, - pcmk_controld_reply_reprobe, - pcmk_controld_reply_info, - pcmk_controld_reply_resource, - pcmk_controld_reply_ping, - pcmk_controld_reply_nodes, -}; - -// Node information passed with pcmk_controld_reply_nodes -typedef struct { - uint32_t id; - const char *uname; - const char *state; -} pcmk_controld_api_node_t; - -/*! - * Controller reply passed to event callback - * - * \note Shutdown and election calls have no reply. Reprobe calls are - * acknowledged but contain no data (reply_type will be the only item - * set). Node info and ping calls have their own reply data. 
Fail and - * refresh calls use the resource reply type and reply data. - * \note The pointers in the reply are only guaranteed to be meaningful for the - * execution of the callback; if the values are needed for later, the - * callback should copy them. - */ -typedef struct { - enum pcmk_controld_api_reply reply_type; - const char *feature_set; //!< CRM feature set advertised by controller - const char *host_from; //!< Name of node that sent reply - - union { - // pcmk_controld_reply_info - struct { - bool have_quorum; - bool is_remote; - int id; - const char *uuid; - const char *uname; - const char *state; - } node_info; - - // pcmk_controld_reply_resource - struct { - xmlNode *node_state; // // bool -#include // uint32_t -#include // struct iovec -#include // uid_t, gid_t, pid_t, size_t +#include -#ifdef HAVE_GNUTLS_GNUTLS_H -# include // gnutls_session_t -#endif +#include /* US_AUTH_GETPEEREID */ -#include // guint, gpointer, GQueue, ... -#include // xmlNode -#include // qb_ipcs_connection_t, ... - -#include // US_AUTH_GETPEEREID -#include -#include // mainloop_io_t /* denotes "non yieldable PID" on FreeBSD, or actual PID1 in scenarios that require a delicate handling anyway (socket-based activation with systemd); @@ -85,132 +69,4 @@ extern "C" { int pcmk__ipc_is_authentic_process_active(const char *name, uid_t refuid, gid_t refgid, pid_t *gotpid); - -/* - * Server-related - */ - -typedef struct pcmk__client_s pcmk__client_t; - -enum pcmk__client_type { - PCMK__CLIENT_IPC = 1, - PCMK__CLIENT_TCP = 2, -# ifdef HAVE_GNUTLS_GNUTLS_H - PCMK__CLIENT_TLS = 3, -# endif -}; - -struct pcmk__remote_s { - /* Shared */ - char *buffer; - size_t buffer_size; - size_t buffer_offset; - int auth_timeout; - int tcp_socket; - mainloop_io_t *source; - - /* CIB-only */ - bool authenticated; - char *token; - - /* TLS only */ -# ifdef HAVE_GNUTLS_GNUTLS_H - gnutls_session_t *tls_session; - bool tls_handshake_complete; -# endif -}; - -enum pcmk__client_flags { - pcmk__client_proxied = (1 << 0), // Remote client behind proxy - pcmk__client_privileged = (1 << 1), // root or cluster user - pcmk__client_to_proxy = (1 << 2), // Local client to be proxied -}; - -struct pcmk__client_s { - unsigned int pid; - - uid_t uid; - gid_t gid; - - char *id; - char *name; - char *user; - - /* Provided for server use (not used by library) */ - /* @TODO merge options, flags, and kind (reserving lower bits for server) */ - long long options; - - int request_id; - uint32_t flags; - void *userdata; - - int event_timer; - GQueue *event_queue; - - /* Depending on the value of kind, only some of the following - * will be populated/valid - */ - enum pcmk__client_type kind; - - qb_ipcs_connection_t *ipcs; /* IPC */ - - struct pcmk__remote_s *remote; /* TCP/TLS */ - - unsigned int queue_backlog; /* IPC queue length after last flush */ - unsigned int queue_max; /* Evict client whose queue grows this big */ -}; - -guint pcmk__ipc_client_count(void); -void pcmk__foreach_ipc_client(GHFunc func, gpointer user_data); -void pcmk__foreach_ipc_client_remove(GHRFunc func, gpointer user_data); - -void pcmk__client_cleanup(void); - -pcmk__client_t *pcmk__find_client(qb_ipcs_connection_t *c); -pcmk__client_t *pcmk__find_client_by_id(const char *id); -const char *pcmk__client_name(pcmk__client_t *c); -const char *pcmk__client_type_str(enum pcmk__client_type client_type); - -pcmk__client_t *pcmk__new_unauth_client(void *key); -pcmk__client_t *pcmk__new_client(qb_ipcs_connection_t *c, uid_t uid, gid_t gid); -void pcmk__free_client(pcmk__client_t *c); 
-void pcmk__drop_all_clients(qb_ipcs_service_t *s); -bool pcmk__set_client_queue_max(pcmk__client_t *client, const char *qmax); - -void pcmk__ipc_send_ack_as(const char *function, int line, pcmk__client_t *c, - uint32_t request, uint32_t flags, const char *tag); -#define pcmk__ipc_send_ack(c, req, flags, tag) \ - pcmk__ipc_send_ack_as(__FUNCTION__, __LINE__, (c), (req), (flags), (tag)) - -int pcmk__ipc_prepare_iov(uint32_t request, xmlNode *message, - uint32_t max_send_size, - struct iovec **result, ssize_t *bytes); -int pcmk__ipc_send_xml(pcmk__client_t *c, uint32_t request, xmlNode *message, - uint32_t flags); -int pcmk__ipc_send_iov(pcmk__client_t *c, struct iovec *iov, uint32_t flags); -xmlNode *pcmk__client_data2xml(pcmk__client_t *c, void *data, - uint32_t *id, uint32_t *flags); - -int pcmk__client_pid(qb_ipcs_connection_t *c); - -void pcmk__serve_attrd_ipc(qb_ipcs_service_t **ipcs, - struct qb_ipcs_service_handlers *cb); -void pcmk__serve_fenced_ipc(qb_ipcs_service_t **ipcs, - struct qb_ipcs_service_handlers *cb); -qb_ipcs_service_t *pcmk__serve_controld_ipc(struct qb_ipcs_service_handlers *cb); - -void pcmk__serve_based_ipc(qb_ipcs_service_t **ipcs_ro, - qb_ipcs_service_t **ipcs_rw, - qb_ipcs_service_t **ipcs_shm, - struct qb_ipcs_service_handlers *ro_cb, - struct qb_ipcs_service_handlers *rw_cb); - -void pcmk__stop_based_ipc(qb_ipcs_service_t *ipcs_ro, - qb_ipcs_service_t *ipcs_rw, - qb_ipcs_service_t *ipcs_shm); - -#ifdef __cplusplus -} -#endif - #endif diff --git a/include/crm/common/ipc_pacemakerd.h b/include/crm/common/ipc_pacemakerd.h deleted file mode 100644 index 00e3edd..0000000 --- a/include/crm/common/ipc_pacemakerd.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright 2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * - * This source code is licensed under the GNU Lesser General Public License - * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. - */ - -#ifndef PCMK__IPC_PACEMAKERD__H -# define PCMK__IPC_PACEMAKERD__H - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * \file - * \brief IPC commands for Pacemakerd - * - * \ingroup core - */ - -#include // time_t -#include // pcmk_ipc_api_t - -enum pcmk_pacemakerd_state { - pcmk_pacemakerd_state_invalid = -1, - pcmk_pacemakerd_state_init = 0, - pcmk_pacemakerd_state_starting_daemons, - pcmk_pacemakerd_state_wait_for_ping, - pcmk_pacemakerd_state_running, - pcmk_pacemakerd_state_shutting_down, - pcmk_pacemakerd_state_shutdown_complete, - pcmk_pacemakerd_state_max = pcmk_pacemakerd_state_shutdown_complete, -}; - -//! Possible types of pacemakerd replies -enum pcmk_pacemakerd_api_reply { - pcmk_pacemakerd_reply_unknown, - pcmk_pacemakerd_reply_ping, -}; - -/*! 
- * Pacemakerd reply passed to event callback - */ -typedef struct { - enum pcmk_pacemakerd_api_reply reply_type; - - union { - // pcmk_pacemakerd_reply_ping - struct { - const char *sys_from; - enum pcmk_pacemakerd_state state; - time_t last_good; - int status; - } ping; - } data; -} pcmk_pacemakerd_api_reply_t; - -int pcmk_pacemakerd_api_ping(pcmk_ipc_api_t *api, const char *ipc_name); -enum pcmk_pacemakerd_state - pcmk_pacemakerd_api_daemon_state_text2enum(const char *state); -const char - *pcmk_pacemakerd_api_daemon_state_enum2text(enum pcmk_pacemakerd_state state); - -#ifdef __cplusplus -} -#endif - -#endif // PCMK__IPC_PACEMAKERD__H diff --git a/include/crm/common/ipcs_internal.h b/include/crm/common/ipcs_internal.h new file mode 100644 index 0000000..c631dfc --- /dev/null +++ b/include/crm/common/ipcs_internal.h @@ -0,0 +1,149 @@ +/* + * Copyright 2013-2020 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU Lesser General Public License + * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. + */ + +#ifndef CRM_COMMON_IPCS__H +# define CRM_COMMON_IPCS__H + +#ifdef __cplusplus +extern "C" { +#endif + +# include +# include +# ifdef HAVE_GNUTLS_GNUTLS_H +# undef KEYFILE +# include +# endif + +# include +# include + +typedef struct pcmk__client_s pcmk__client_t; + +enum pcmk__client_type { + PCMK__CLIENT_IPC = 1, + PCMK__CLIENT_TCP = 2, +# ifdef HAVE_GNUTLS_GNUTLS_H + PCMK__CLIENT_TLS = 3, +# endif +}; + +struct pcmk__remote_s { + /* Shared */ + char *buffer; + size_t buffer_size; + size_t buffer_offset; + int auth_timeout; + int tcp_socket; + mainloop_io_t *source; + + /* CIB-only */ + bool authenticated; + char *token; + + /* TLS only */ +# ifdef HAVE_GNUTLS_GNUTLS_H + gnutls_session_t *tls_session; + bool tls_handshake_complete; +# endif +}; + +enum pcmk__client_flags { + pcmk__client_proxied = 0x00001, /* ipc_proxy code only */ + pcmk__client_privileged = 0x00002, /* root or cluster user */ +}; + +struct pcmk__client_s { + uint pid; + + uid_t uid; + gid_t gid; + + char *id; + char *name; + char *user; + + /* Provided for server use (not used by library) */ + /* @TODO merge options, flags, and kind (reserving lower bits for server) */ + long long options; + + int request_id; + uint32_t flags; + void *userdata; + + int event_timer; + GQueue *event_queue; + + /* Depending on the value of kind, only some of the following + * will be populated/valid + */ + enum pcmk__client_type kind; + + qb_ipcs_connection_t *ipcs; /* IPC */ + + struct pcmk__remote_s *remote; /* TCP/TLS */ + + unsigned int queue_backlog; /* IPC queue length after last flush */ + unsigned int queue_max; /* Evict client whose queue grows this big */ +}; + +guint pcmk__ipc_client_count(void); +void pcmk__foreach_ipc_client(GHFunc func, gpointer user_data); +void pcmk__foreach_ipc_client_remove(GHRFunc func, gpointer user_data); + +void pcmk__client_cleanup(void); + +pcmk__client_t *pcmk__find_client(qb_ipcs_connection_t *c); +pcmk__client_t *pcmk__find_client_by_id(const char *id); +const char *pcmk__client_name(pcmk__client_t *c); +const char *pcmk__client_type_str(enum pcmk__client_type client_type); + +pcmk__client_t *pcmk__new_unauth_client(void *key); +pcmk__client_t *pcmk__new_client(qb_ipcs_connection_t *c, uid_t uid, gid_t gid); +void pcmk__free_client(pcmk__client_t *c); +void pcmk__drop_all_clients(qb_ipcs_service_t *s); +bool pcmk__set_client_queue_max(pcmk__client_t *client, const char *qmax); + 
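+/* Typical server-side flow (an illustrative summary, not part of the
+ * original header): create per-connection state with pcmk__new_client()
+ * when a connection is accepted, decode incoming requests with
+ * pcmk__client_data2xml(), answer with pcmk__ipc_send_xml() or
+ * pcmk__ipc_send_ack(), and release the state with pcmk__free_client()
+ * when the connection closes.
+ */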
+void pcmk__ipc_send_ack_as(const char *function, int line, pcmk__client_t *c, + uint32_t request, uint32_t flags, const char *tag); +#define pcmk__ipc_send_ack(c, req, flags, tag) \ + pcmk__ipc_send_ack_as(__FUNCTION__, __LINE__, (c), (req), (flags), (tag)) + +int pcmk__ipc_prepare_iov(uint32_t request, xmlNode *message, + uint32_t max_send_size, + struct iovec **result, ssize_t *bytes); +int pcmk__ipc_send_xml(pcmk__client_t *c, uint32_t request, xmlNode *message, + uint32_t flags); +int pcmk__ipc_send_iov(pcmk__client_t *c, struct iovec *iov, uint32_t flags); +xmlNode *pcmk__client_data2xml(pcmk__client_t *c, void *data, + uint32_t *id, uint32_t *flags); + +int pcmk__client_pid(qb_ipcs_connection_t *c); + +void pcmk__serve_attrd_ipc(qb_ipcs_service_t **ipcs, + struct qb_ipcs_service_handlers *cb); +void pcmk__serve_fenced_ipc(qb_ipcs_service_t **ipcs, + struct qb_ipcs_service_handlers *cb); +qb_ipcs_service_t *pcmk__serve_controld_ipc(struct qb_ipcs_service_handlers *cb); + +void pcmk__serve_based_ipc(qb_ipcs_service_t **ipcs_ro, + qb_ipcs_service_t **ipcs_rw, + qb_ipcs_service_t **ipcs_shm, + struct qb_ipcs_service_handlers *ro_cb, + struct qb_ipcs_service_handlers *rw_cb); + +void pcmk__stop_based_ipc(qb_ipcs_service_t *ipcs_ro, + qb_ipcs_service_t *ipcs_rw, + qb_ipcs_service_t *ipcs_shm); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/crm/common/mainloop.h b/include/crm/common/mainloop.h index 9957b25..b443b4e 100644 --- a/include/crm/common/mainloop.h +++ b/include/crm/common/mainloop.h @@ -1,5 +1,5 @@ /* - * Copyright 2009-2020 the Pacemaker project contributors + * Copyright 2009-2019 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -146,7 +146,6 @@ pid_t mainloop_child_pid(mainloop_child_t * child); void mainloop_clear_child_userdata(mainloop_child_t * child); gboolean mainloop_child_kill(pid_t pid); -void pcmk_quit_main_loop(GMainLoop *mloop, unsigned int n); void pcmk_drain_main_loop(GMainLoop *mloop, guint timer_ms, bool (*check)(guint)); diff --git a/include/crm/common/options_internal.h b/include/crm/common/options_internal.h index d0429c9..db54da4 100644 --- a/include/crm/common/options_internal.h +++ b/include/crm/common/options_internal.h @@ -111,7 +111,6 @@ bool pcmk__valid_utilization(const char *value); // from watchdog.c long pcmk__get_sbd_timeout(void); -bool pcmk__get_sbd_sync_resource_startup(void); long pcmk__auto_watchdog_timeout(void); bool pcmk__valid_sbd_timeout(const char *value); diff --git a/include/crm/common/output.h b/include/crm/common/output.h index 939972c..6188f19 100644 --- a/include/crm/common/output.h +++ b/include/crm/common/output.h @@ -703,14 +703,15 @@ pcmk__output_create_html_node(pcmk__output_t *out, const char *element_name, con * the following code would generate the tag "": * * \code - * pcmk__html_add_header("meta", "http-equiv", "refresh", "content", "19", NULL); + * pcmk__html_add_header(parent, "meta", "http-equiv", "refresh", "content", "19", NULL); * \endcode * + * \param[in,out] parent The node that will be the parent of the new node. * \param[in] name The HTML tag for the new node. * \param[in] ... A NULL-terminated key/value list of attributes. */ void -pcmk__html_add_header(const char *name, ...) +pcmk__html_add_header(xmlNodePtr parent, const char *name, ...) 
G_GNUC_NULL_TERMINATED; #ifdef __cplusplus diff --git a/include/crm/common/util.h b/include/crm/common/util.h index f2ea944..22ac8eb 100644 --- a/include/crm/common/util.h +++ b/include/crm/common/util.h @@ -61,7 +61,6 @@ guint g_str_hash_traditional(gconstpointer v); char *crm_strdup_printf(char const *format, ...) __attribute__ ((__format__ (__printf__, 1, 2))); int pcmk__parse_ll_range(const char *srcstring, long long *start, long long *end); gboolean pcmk__str_in_list(GList *lst, const gchar *s); -int pcmk_numeric_strcasecmp(const char *s1, const char *s2); # define safe_str_eq(a, b) crm_str_eq(a, b, FALSE) # define crm_str_hash g_str_hash_traditional diff --git a/include/crm/crm.h b/include/crm/crm.h index ce2074b..d2ffb61 100644 --- a/include/crm/crm.h +++ b/include/crm/crm.h @@ -51,7 +51,7 @@ extern "C" { * >=3.0.13: Fail counts include operation name and interval * >=3.2.0: DC supports PCMK_LRM_OP_INVALID and PCMK_LRM_OP_NOT_CONNECTED */ -# define CRM_FEATURE_SET "3.4.1" +# define CRM_FEATURE_SET "3.3.0" # define EOS '\0' # define DIMOF(a) ((int) (sizeof(a)/sizeof(a[0])) ) diff --git a/include/crm/msg_xml.h b/include/crm/msg_xml.h index 1fcb72d..af3f33e 100644 --- a/include/crm/msg_xml.h +++ b/include/crm/msg_xml.h @@ -123,13 +123,6 @@ extern "C" { # define XML_PING_ATTR_STATUS "result" # define XML_PING_ATTR_SYSFROM "crm_subsystem" # define XML_PING_ATTR_CRMDSTATE "crmd_state" -# define XML_PING_ATTR_PACEMAKERDSTATE "pacemakerd_state" -# define XML_PING_ATTR_PACEMAKERDSTATE_INIT "init" -# define XML_PING_ATTR_PACEMAKERDSTATE_STARTINGDAEMONS "starting_daemons" -# define XML_PING_ATTR_PACEMAKERDSTATE_WAITPING "wait_for_ping" -# define XML_PING_ATTR_PACEMAKERDSTATE_RUNNING "running" -# define XML_PING_ATTR_PACEMAKERDSTATE_SHUTTINGDOWN "shutting_down" -# define XML_PING_ATTR_PACEMAKERDSTATE_SHUTDOWNCOMPLETE "shutdown_complete" # define XML_TAG_FRAGMENT "cib_fragment" diff --git a/include/crm/pengine/common.h b/include/crm/pengine/common.h index 2737b2e..48c2b66 100644 --- a/include/crm/pengine/common.h +++ b/include/crm/pengine/common.h @@ -1,5 +1,5 @@ /* - * Copyright 2004-2020 the Pacemaker project contributors + * Copyright 2004-2019 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -15,36 +15,22 @@ extern "C" { #endif # include -# include - -# include extern gboolean was_processing_error; extern gboolean was_processing_warning; -/* The order is (partially) significant here; the values from action_fail_ignore - * through action_fail_fence are in order of increasing severity. - * - * @COMPAT The values should be ordered and numbered per the "TODO" comments - * below, so all values are in order of severity and there is room for - * future additions, but that would break API compatibility. - * @TODO For now, we just use a function to compare the values specially, but - * at the next compatibility break, we should arrange things properly. 
+/* order is significant here
+ * items listed in order of ascending severity
+ * more severe actions take precedence over lower ones */
 enum action_fail_response {
-    action_fail_ignore,     // @TODO = 10
-    // @TODO action_fail_demote = 20,
-    action_fail_recover,    // @TODO = 30
-    // @TODO action_fail_reset_remote = 40,
-    // @TODO action_fail_restart_container = 50,
-    action_fail_migrate,    // @TODO = 60
-    action_fail_block,      // @TODO = 70
-    action_fail_stop,       // @TODO = 80
-    action_fail_standby,    // @TODO = 90
-    action_fail_fence,      // @TODO = 100
-
-    // @COMPAT Values below here are out of order for API compatibility
-
+    action_fail_ignore,
+    action_fail_recover,
+    action_fail_migrate,    /* recover by moving it somewhere else */
+    action_fail_block,
+    action_fail_stop,
+    action_fail_standby,
+    action_fail_fence,
     action_fail_restart_container,

     /* This is reserved for internal use for remote node connection resources.
@@ -55,7 +41,6 @@ enum action_fail_response {
      */
     action_fail_reset_remote,
-    action_fail_demote,
 };

 /* the "done" action must be the "pre" action +1 */
@@ -146,38 +131,6 @@ recovery2text(enum rsc_recovery_type type)
     return "Unknown";
 }

-typedef struct pe_re_match_data {
-    char *string;
-    int nregs;
-    regmatch_t *pmatch;
-} pe_re_match_data_t;
-
-typedef struct pe_match_data {
-    pe_re_match_data_t *re;
-    GHashTable *params;
-    GHashTable *meta;
-} pe_match_data_t;
-
-typedef struct pe_rsc_eval_data {
-    const char *standard;
-    const char *provider;
-    const char *agent;
-} pe_rsc_eval_data_t;
-
-typedef struct pe_op_eval_data {
-    const char *op_name;
-    guint interval;
-} pe_op_eval_data_t;
-
-typedef struct pe_rule_eval_data {
-    GHashTable *node_hash;
-    enum rsc_role_e role;
-    crm_time_t *now;
-    pe_match_data_t *match_data;
-    pe_rsc_eval_data_t *rsc_data;
-    pe_op_eval_data_t *op_data;
-} pe_rule_eval_data_t;
-
 #ifdef __cplusplus
 }
 #endif
diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h
index 25554cf..d29b161 100644
--- a/include/crm/pengine/internal.h
+++ b/include/crm/pengine/internal.h
@@ -460,7 +460,7 @@ void pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set);
 void pe__register_messages(pcmk__output_t *out);
 void pe__unpack_dataset_nvpairs(xmlNode *xml_obj, const char *set_name,
-                                pe_rule_eval_data_t *rule_data, GHashTable *hash,
+                                GHashTable *node_hash, GHashTable *hash,
                                 const char *always_first, gboolean overwrite,
                                 pe_working_set_t *data_set);
diff --git a/include/crm/pengine/pe_types.h b/include/crm/pengine/pe_types.h
index f3cb4ef..ba88491 100644
--- a/include/crm/pengine/pe_types.h
+++ b/include/crm/pengine/pe_types.h
@@ -61,8 +61,7 @@ enum pe_quorum_policy {
     no_quorum_freeze,
     no_quorum_stop,
     no_quorum_ignore,
-    no_quorum_suicide,
-    no_quorum_demote
+    no_quorum_suicide
 };

 enum node_type {
@@ -247,7 +246,6 @@ struct pe_node_s {
 # define pe_rsc_allocating 0x00000200ULL
 # define pe_rsc_merging 0x00000400ULL
-# define pe_rsc_stop 0x00001000ULL
 # define pe_rsc_reload 0x00002000ULL
 # define pe_rsc_allow_remote_remotes 0x00004000ULL
diff --git a/include/crm/pengine/rules.h b/include/crm/pengine/rules.h
index cbae8ed..ebd3148 100644
--- a/include/crm/pengine/rules.h
+++ b/include/crm/pengine/rules.h
@@ -15,6 +15,7 @@ extern "C" {
 #endif
 # include
+# include
 # include
 # include
@@ -27,11 +28,21 @@ enum expression_type {
     loc_expr,
     role_expr,
     time_expr,
-    version_expr,
-    rsc_expr,
-    op_expr
+    version_expr
 };

+typedef struct pe_re_match_data {
+    char *string;
+    int nregs;
+    regmatch_t *pmatch;
+} pe_re_match_data_t;
+
+typedef struct pe_match_data {
+
pe_re_match_data_t *re; + GHashTable *params; + GHashTable *meta; +} pe_match_data_t; + enum expression_type find_expression_type(xmlNode * expr); gboolean pe_evaluate_rules(xmlNode *ruleset, GHashTable *node_hash, @@ -46,21 +57,12 @@ gboolean pe_test_expression(xmlNode *expr, GHashTable *node_hash, crm_time_t *next_change, pe_match_data_t *match_data); -void pe_eval_nvpairs(xmlNode *top, xmlNode *xml_obj, const char *set_name, - pe_rule_eval_data_t *rule_data, GHashTable *hash, - const char *always_first, gboolean overwrite, - crm_time_t *next_change); - void pe_unpack_nvpairs(xmlNode *top, xmlNode *xml_obj, const char *set_name, GHashTable *node_hash, GHashTable *hash, const char *always_first, gboolean overwrite, crm_time_t *now, crm_time_t *next_change); #if ENABLE_VERSIONED_ATTRS -void pe_eval_versioned_attributes(xmlNode *top, xmlNode *xml_obj, - const char *set_name, pe_rule_eval_data_t *rule_data, - xmlNode *hash, crm_time_t *next_change); - void pe_unpack_versioned_attributes(xmlNode *top, xmlNode *xml_obj, const char *set_name, GHashTable *node_hash, xmlNode *hash, crm_time_t *now, @@ -70,13 +72,6 @@ GHashTable *pe_unpack_versioned_parameters(xmlNode *versioned_params, const char char *pe_expand_re_matches(const char *string, pe_re_match_data_t * match_data); -gboolean pe_eval_rules(xmlNode *ruleset, pe_rule_eval_data_t *rule_data, - crm_time_t *next_change); -gboolean pe_eval_expr(xmlNode *rule, pe_rule_eval_data_t *rule_data, - crm_time_t *next_change); -gboolean pe_eval_subexpr(xmlNode *expr, pe_rule_eval_data_t *rule_data, - crm_time_t *next_change); - #ifndef PCMK__NO_COMPAT /* Everything here is deprecated and kept only for public API backward * compatibility. It will be moved to compatibility.h when 2.1.0 is released. diff --git a/include/crm/pengine/rules_internal.h b/include/crm/pengine/rules_internal.h index f60263a..fd65c1e 100644 --- a/include/crm/pengine/rules_internal.h +++ b/include/crm/pengine/rules_internal.h @@ -21,13 +21,6 @@ void pe_free_alert_list(GListPtr alert_list); crm_time_t *pe_parse_xml_duration(crm_time_t * start, xmlNode * duration_spec); -gboolean pe__eval_attr_expr(xmlNode *expr, pe_rule_eval_data_t *rule_data); -int pe__eval_date_expr(xmlNode *expr, pe_rule_eval_data_t *rule_data, - crm_time_t *next_change); -gboolean pe__eval_op_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data); -gboolean pe__eval_role_expr(xmlNode *expr, pe_rule_eval_data_t *rule_data); -gboolean pe__eval_rsc_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data); - int pe_eval_date_expression(xmlNode *time_expr, crm_time_t *now, crm_time_t *next_change); diff --git a/include/crm_internal.h b/include/crm_internal.h index cf8999f..882cad8 100644 --- a/include/crm_internal.h +++ b/include/crm_internal.h @@ -26,7 +26,7 @@ # include # include -# include +# include # include # include @@ -68,6 +68,8 @@ crm_set_bit(const char *function, int line, const char *target, long long word, # define set_bit(word, bit) word = crm_set_bit(__FUNCTION__, __LINE__, NULL, word, bit) # define clear_bit(word, bit) word = crm_clear_bit(__FUNCTION__, __LINE__, NULL, word, bit) +char *generate_hash_key(const char *crm_msg_reference, const char *sys); + void strip_text_nodes(xmlNode * xml); void pcmk_panic(const char *origin); pid_t pcmk_locate_sbd(void); @@ -122,7 +124,6 @@ pid_t pcmk_locate_sbd(void); #define PCMK__ATTRD_CMD_SYNC_RESPONSE "sync-response" #define PCMK__ATTRD_CMD_CLEAR_FAILURE "clear-failure" -#define PCMK__CONTROLD_CMD_NODES "list-nodes" /* * Environment variables used by Pacemaker 
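The set_bit()/clear_bit() macros restored in the crm_internal.h hunk above expand to an assignment through crm_set_bit()/crm_clear_bit(), passing the calling function and line so the flag change can be traced. A minimal usage sketch follows (illustrative only, not part of this patch; it assumes crm_internal.h and the restored pcmk__client_t flag bits are in scope):

[source,C]
----
#include <crm_internal.h>   /* set_bit(), clear_bit() (assumed in scope) */

/* Record or revoke a client's privileged status. Because set_bit() expands
 * to an assignment, it can be applied to any integer lvalue, such as
 * client->flags here.
 */
static void
set_client_privilege(pcmk__client_t *client, gboolean privileged)
{
    if (privileged) {
        set_bit(client->flags, pcmk__client_privileged);
    } else {
        clear_bit(client->flags, pcmk__client_privileged);
    }
}
----

pcmk__new_client() in the restored lib/common/ipc.c below uses the same pattern to remember connections from root or the cluster user.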
diff --git a/lib/cib/cib_remote.c b/lib/cib/cib_remote.c index a011810..ed93700 100644 --- a/lib/cib/cib_remote.c +++ b/lib/cib/cib_remote.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include diff --git a/lib/common/Makefile.am b/lib/common/Makefile.am index e0249b9..fae17f5 100644 --- a/lib/common/Makefile.am +++ b/lib/common/Makefile.am @@ -47,15 +47,10 @@ endif libcrmcommon_la_SOURCES += cmdline.c libcrmcommon_la_SOURCES += digest.c libcrmcommon_la_SOURCES += io.c -libcrmcommon_la_SOURCES += ipc_client.c -libcrmcommon_la_SOURCES += ipc_common.c -libcrmcommon_la_SOURCES += ipc_controld.c -libcrmcommon_la_SOURCES += ipc_pacemakerd.c -libcrmcommon_la_SOURCES += ipc_server.c +libcrmcommon_la_SOURCES += ipc.c libcrmcommon_la_SOURCES += iso8601.c libcrmcommon_la_SOURCES += logging.c libcrmcommon_la_SOURCES += mainloop.c -libcrmcommon_la_SOURCES += messages.c libcrmcommon_la_SOURCES += nvpair.c libcrmcommon_la_SOURCES += operations.c libcrmcommon_la_SOURCES += options.c diff --git a/lib/common/crmcommon_private.h b/lib/common/crmcommon_private.h index 68e3390..dfb1e54 100644 --- a/lib/common/crmcommon_private.h +++ b/lib/common/crmcommon_private.h @@ -1,5 +1,5 @@ /* - * Copyright 2018-2020 the Pacemaker project contributors + * Copyright 2018-2019 the Pacemaker project contributors * * The version control history for this file may have further details. * @@ -14,17 +14,6 @@ * declared with G_GNUC_INTERNAL for efficiency. */ -#include // uint8_t, uint32_t -#include // bool -#include // size_t -#include // GList -#include // xmlNode, xmlAttr -#include // struct qb_ipc_response_header - -/* - * XML and ACLs - */ - enum xml_private_flags { xpf_none = 0x0000, xpf_dirty = 0x0001, @@ -51,8 +40,8 @@ typedef struct xml_private_s { long check; uint32_t flags; char *user; - GList *acls; - GList *deleted_objs; + GListPtr acls; + GListPtr deleted_objs; } xml_private_t; G_GNUC_INTERNAL @@ -97,120 +86,4 @@ pcmk__xml_attr_value(const xmlAttr *attr) : (const char *) attr->children->content; } -/* - * IPC - */ - -#define PCMK__IPC_VERSION 1 - -#define PCMK__CONTROLD_API_MAJOR "1" -#define PCMK__CONTROLD_API_MINOR "0" - -// IPC behavior that varies by daemon -typedef struct pcmk__ipc_methods_s { - /*! - * \internal - * \brief Allocate any private data needed by daemon IPC - * - * \param[in] api IPC API connection - * - * \return Standard Pacemaker return code - */ - int (*new_data)(pcmk_ipc_api_t *api); - - /*! - * \internal - * \brief Free any private data used by daemon IPC - * - * \param[in] api_data Data allocated by new_data() method - */ - void (*free_data)(void *api_data); - - /*! - * \internal - * \brief Perform daemon-specific handling after successful connection - * - * Some daemons require clients to register before sending any other - * commands. The controller requires a CRM_OP_HELLO (with no reply), and - * the CIB manager, executor, and fencer require a CRM_OP_REGISTER (with a - * reply). Ideally this would be consistent across all daemons, but for now - * this allows each to do its own authorization. - * - * \param[in] api IPC API connection - * - * \return Standard Pacemaker return code - */ - int (*post_connect)(pcmk_ipc_api_t *api); - - /*! - * \internal - * \brief Check whether an IPC request results in a reply - * - * \parma[in] api IPC API connection - * \param[in] request IPC request XML - * - * \return true if request would result in an IPC reply, false otherwise - */ - bool (*reply_expected)(pcmk_ipc_api_t *api, xmlNode *request); - - /*! 
- * \internal - * \brief Perform daemon-specific handling of an IPC message - * - * \param[in] api IPC API connection - * \param[in] msg Message read from IPC connection - */ - void (*dispatch)(pcmk_ipc_api_t *api, xmlNode *msg); - - /*! - * \internal - * \brief Perform daemon-specific handling of an IPC disconnect - * - * \param[in] api IPC API connection - */ - void (*post_disconnect)(pcmk_ipc_api_t *api); -} pcmk__ipc_methods_t; - -// Implementation of pcmk_ipc_api_t -struct pcmk_ipc_api_s { - enum pcmk_ipc_server server; // Daemon this IPC API instance is for - enum pcmk_ipc_dispatch dispatch_type; // How replies should be dispatched - size_t ipc_size_max; // maximum IPC buffer size - crm_ipc_t *ipc; // IPC connection - mainloop_io_t *mainloop_io; // If using mainloop, I/O source for IPC - bool free_on_disconnect; // Whether disconnect should free object - pcmk_ipc_callback_t cb; // Caller-registered callback (if any) - void *user_data; // Caller-registered data (if any) - void *api_data; // For daemon-specific use - pcmk__ipc_methods_t *cmds; // Behavior that varies by daemon -}; - -typedef struct pcmk__ipc_header_s { - struct qb_ipc_response_header qb; - uint32_t size_uncompressed; - uint32_t size_compressed; - uint32_t flags; - uint8_t version; -} pcmk__ipc_header_t; - -G_GNUC_INTERNAL -int pcmk__send_ipc_request(pcmk_ipc_api_t *api, xmlNode *request); - -G_GNUC_INTERNAL -void pcmk__call_ipc_callback(pcmk_ipc_api_t *api, - enum pcmk_ipc_event event_type, - crm_exit_t status, void *event_data); - -G_GNUC_INTERNAL -unsigned int pcmk__ipc_buffer_size(unsigned int max); - -G_GNUC_INTERNAL -bool pcmk__valid_ipc_header(const pcmk__ipc_header_t *header); - -G_GNUC_INTERNAL -pcmk__ipc_methods_t *pcmk__controld_api_methods(void); - -G_GNUC_INTERNAL -pcmk__ipc_methods_t *pcmk__pacemakerd_api_methods(void); - #endif // CRMCOMMON_PRIVATE__H diff --git a/lib/common/ipc.c b/lib/common/ipc.c new file mode 100644 index 0000000..331a76f --- /dev/null +++ b/lib/common/ipc.c @@ -0,0 +1,1848 @@ +/* + * Copyright 2004-2020 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU Lesser General Public License + * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
+ */ + +#include + +#if defined(US_AUTH_PEERCRED_UCRED) || defined(US_AUTH_PEERCRED_SOCKPEERCRED) +# ifdef US_AUTH_PEERCRED_UCRED +# ifndef _GNU_SOURCE +# define _GNU_SOURCE +# endif +# endif +# include +#elif defined(US_AUTH_GETPEERUCRED) +# include +#endif + +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include /* indirectly: pcmk_err_generic */ +#include +#include +#include + +#include /* PCMK__SPECIAL_PID* */ + +#define PCMK_IPC_VERSION 1 + +/* Evict clients whose event queue grows this large (by default) */ +#define PCMK_IPC_DEFAULT_QUEUE_MAX 500 + +struct crm_ipc_response_header { + struct qb_ipc_response_header qb; + uint32_t size_uncompressed; + uint32_t size_compressed; + uint32_t flags; + uint8_t version; /* Protect against version changes for anyone that might bother to statically link us */ +}; + +static int hdr_offset = 0; +static unsigned int ipc_buffer_max = 0; +static unsigned int pick_ipc_buffer(unsigned int max); + +static inline void +crm_ipc_init(void) +{ + if (hdr_offset == 0) { + hdr_offset = sizeof(struct crm_ipc_response_header); + } + if (ipc_buffer_max == 0) { + ipc_buffer_max = pick_ipc_buffer(0); + } +} + +unsigned int +crm_ipc_default_buffer_size(void) +{ + return pick_ipc_buffer(0); +} + +static char * +generateReference(const char *custom1, const char *custom2) +{ + static uint ref_counter = 0; + + return crm_strdup_printf("%s-%s-%lld-%u", + (custom1? custom1 : "_empty_"), + (custom2? custom2 : "_empty_"), + (long long) time(NULL), ref_counter++); +} + +xmlNode * +create_request_adv(const char *task, xmlNode * msg_data, + const char *host_to, const char *sys_to, + const char *sys_from, const char *uuid_from, const char *origin) +{ + char *true_from = NULL; + xmlNode *request = NULL; + char *reference = generateReference(task, sys_from); + + if (uuid_from != NULL) { + true_from = generate_hash_key(sys_from, uuid_from); + } else if (sys_from != NULL) { + true_from = strdup(sys_from); + } else { + crm_err("No sys from specified"); + } + + // host_from will get set for us if necessary by the controller when routed + request = create_xml_node(NULL, __FUNCTION__); + crm_xml_add(request, F_CRM_ORIGIN, origin); + crm_xml_add(request, F_TYPE, T_CRM); + crm_xml_add(request, F_CRM_VERSION, CRM_FEATURE_SET); + crm_xml_add(request, F_CRM_MSG_TYPE, XML_ATTR_REQUEST); + crm_xml_add(request, F_CRM_REFERENCE, reference); + crm_xml_add(request, F_CRM_TASK, task); + crm_xml_add(request, F_CRM_SYS_TO, sys_to); + crm_xml_add(request, F_CRM_SYS_FROM, true_from); + + /* HOSTTO will be ignored if it is to the DC anyway. 
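+     * (Illustration, not part of the original comment: a caller such as
+     * create_request(CRM_OP_PING, NULL, "node1", CRM_SYSTEM_CRMD,
+     * CRM_SYSTEM_CRMD, NULL) would set F_CRM_HOST_TO to "node1", while a
+     * NULL or empty host_to leaves the field unset.)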
*/
+    if (host_to != NULL && strlen(host_to) > 0) {
+        crm_xml_add(request, F_CRM_HOST_TO, host_to);
+    }
+
+    if (msg_data != NULL) {
+        add_message_xml(request, F_CRM_DATA, msg_data);
+    }
+    free(reference);
+    free(true_from);
+
+    return request;
+}
+
+/*
+ * This method adds a copy of xml_response_data
+ */
+xmlNode *
+create_reply_adv(xmlNode * original_request, xmlNode * xml_response_data, const char *origin)
+{
+    xmlNode *reply = NULL;
+
+    const char *host_from = crm_element_value(original_request, F_CRM_HOST_FROM);
+    const char *sys_from = crm_element_value(original_request, F_CRM_SYS_FROM);
+    const char *sys_to = crm_element_value(original_request, F_CRM_SYS_TO);
+    const char *type = crm_element_value(original_request, F_CRM_MSG_TYPE);
+    const char *operation = crm_element_value(original_request, F_CRM_TASK);
+    const char *crm_msg_reference = crm_element_value(original_request, F_CRM_REFERENCE);
+
+    if (type == NULL) {
+        crm_err("Cannot create new_message, no message type in original message");
+        CRM_ASSERT(type != NULL);
+        return NULL;
+#if 0
+    } else if (strcasecmp(XML_ATTR_REQUEST, type) != 0) {
+        crm_err("Cannot create new_message, original message was not a request");
+        return NULL;
+#endif
+    }
+    reply = create_xml_node(NULL, __FUNCTION__);
+    if (reply == NULL) {
+        crm_err("Cannot create new_message, malloc failed");
+        return NULL;
+    }
+
+    crm_xml_add(reply, F_CRM_ORIGIN, origin);
+    crm_xml_add(reply, F_TYPE, T_CRM);
+    crm_xml_add(reply, F_CRM_VERSION, CRM_FEATURE_SET);
+    crm_xml_add(reply, F_CRM_MSG_TYPE, XML_ATTR_RESPONSE);
+    crm_xml_add(reply, F_CRM_REFERENCE, crm_msg_reference);
+    crm_xml_add(reply, F_CRM_TASK, operation);
+
+    /* since this is a reply, we reverse the from and to */
+    crm_xml_add(reply, F_CRM_SYS_TO, sys_from);
+    crm_xml_add(reply, F_CRM_SYS_FROM, sys_to);
+
+    /* HOSTTO will be ignored if it is to the DC anyway. */
+    if (host_from != NULL && strlen(host_from) > 0) {
+        crm_xml_add(reply, F_CRM_HOST_TO, host_from);
+    }
+
+    if (xml_response_data != NULL) {
+        add_message_xml(reply, F_CRM_DATA, xml_response_data);
+    }
+
+    return reply;
+}
+
+/* Libqb based IPC */
+
+/* Server... */
+
+static GHashTable *client_connections = NULL;
+
+/*!
+ * \internal
+ * \brief Count IPC clients
+ *
+ * \return Number of active IPC client connections
+ */
+guint
+pcmk__ipc_client_count()
+{
+    return client_connections? g_hash_table_size(client_connections) : 0;
+}
+
+/*!
+ * \internal
+ * \brief Execute a function for each active IPC client connection
+ *
+ * \param[in] func       Function to call
+ * \param[in] user_data  Pointer to pass to function
+ *
+ * \note The parameters are the same as for g_hash_table_foreach().
+ */
+void
+pcmk__foreach_ipc_client(GHFunc func, gpointer user_data)
+{
+    if ((func != NULL) && (client_connections != NULL)) {
+        g_hash_table_foreach(client_connections, func, user_data);
+    }
+}
+
+/*!
+ * \internal
+ * \brief Remove IPC clients based on iterative function result
+ *
+ * \param[in] func       Function to call for each active IPC client
+ * \param[in] user_data  Pointer to pass to function
+ *
+ * \note The parameters are the same as for g_hash_table_foreach_remove().
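+ * (For illustration: returning TRUE from the GHRFunc removes that client
+ * from the connection table, per GLib's g_hash_table_foreach_remove()
+ * semantics.)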
+ */ +void +pcmk__foreach_ipc_client_remove(GHRFunc func, gpointer user_data) +{ + if ((func != NULL) && (client_connections != NULL)) { + g_hash_table_foreach_remove(client_connections, func, user_data); + } +} + +pcmk__client_t * +pcmk__find_client(qb_ipcs_connection_t *c) +{ + if (client_connections) { + return g_hash_table_lookup(client_connections, c); + } + + crm_trace("No client found for %p", c); + return NULL; +} + +pcmk__client_t * +pcmk__find_client_by_id(const char *id) +{ + gpointer key; + pcmk__client_t *client; + GHashTableIter iter; + + if (client_connections && id) { + g_hash_table_iter_init(&iter, client_connections); + while (g_hash_table_iter_next(&iter, &key, (gpointer *) & client)) { + if (strcmp(client->id, id) == 0) { + return client; + } + } + } + + crm_trace("No client found with id=%s", id); + return NULL; +} + +const char * +pcmk__client_name(pcmk__client_t *c) +{ + if (c == NULL) { + return "null"; + } else if (c->name == NULL && c->id == NULL) { + return "unknown"; + } else if (c->name == NULL) { + return c->id; + } else { + return c->name; + } +} + +const char * +pcmk__client_type_str(enum pcmk__client_type client_type) +{ + switch (client_type) { + case PCMK__CLIENT_IPC: + return "IPC"; + case PCMK__CLIENT_TCP: + return "TCP"; +#ifdef HAVE_GNUTLS_GNUTLS_H + case PCMK__CLIENT_TLS: + return "TLS"; +#endif + default: + return "unknown"; + } +} + +void +pcmk__client_cleanup(void) +{ + if (client_connections != NULL) { + int active = g_hash_table_size(client_connections); + + if (active) { + crm_err("Exiting with %d active IPC client%s", + active, pcmk__plural_s(active)); + } + g_hash_table_destroy(client_connections); client_connections = NULL; + } +} + +void +pcmk__drop_all_clients(qb_ipcs_service_t *service) +{ + qb_ipcs_connection_t *c = NULL; + + if (service == NULL) { + return; + } + + c = qb_ipcs_connection_first_get(service); + + while (c != NULL) { + qb_ipcs_connection_t *last = c; + + c = qb_ipcs_connection_next_get(service, last); + + /* There really shouldn't be anyone connected at this point */ + crm_notice("Disconnecting client %p, pid=%d...", + last, pcmk__client_pid(last)); + qb_ipcs_disconnect(last); + qb_ipcs_connection_unref(last); + } +} + +/*! 
+ * \internal
+ * \brief Allocate a new pcmk__client_t object based on an IPC connection
+ *
+ * \param[in] c           IPC connection (or NULL to allocate generic client)
+ * \param[in] key         Connection table key (or NULL to use sane default)
+ * \param[in] uid_client  UID corresponding to c (ignored if c is NULL)
+ *
+ * \return Pointer to new pcmk__client_t (or NULL on error)
+ */
+static pcmk__client_t *
+client_from_connection(qb_ipcs_connection_t *c, void *key, uid_t uid_client)
+{
+    pcmk__client_t *client = calloc(1, sizeof(pcmk__client_t));
+
+    if (client == NULL) {
+        crm_perror(LOG_ERR, "Allocating client");
+        return NULL;
+    }
+
+    if (c) {
+#if ENABLE_ACL
+        client->user = pcmk__uid2username(uid_client);
+        if (client->user == NULL) {
+            client->user = strdup("#unprivileged");
+            CRM_CHECK(client->user != NULL, free(client); return NULL);
+            crm_err("Unable to enforce ACLs for user ID %d, assuming unprivileged",
+                    uid_client);
+        }
+#endif
+        client->ipcs = c;
+        client->kind = PCMK__CLIENT_IPC;
+        client->pid = pcmk__client_pid(c);
+        if (key == NULL) {
+            key = c;
+        }
+    }
+
+    client->id = crm_generate_uuid();
+    if (client->id == NULL) {
+        crm_err("Could not generate UUID for client");
+        free(client->user);
+        free(client);
+        return NULL;
+    }
+    if (key == NULL) {
+        key = client->id;
+    }
+    if (client_connections == NULL) {
+        crm_trace("Creating IPC client table");
+        client_connections = g_hash_table_new(g_direct_hash, g_direct_equal);
+    }
+    g_hash_table_insert(client_connections, key, client);
+    return client;
+}
+
+/*!
+ * \brief Allocate a new pcmk__client_t object and generate its ID
+ *
+ * \param[in] key  What to use as connections hash table key (NULL to use ID)
+ *
+ * \return Pointer to new pcmk__client_t (asserts on failure)
+ */
+pcmk__client_t *
+pcmk__new_unauth_client(void *key)
+{
+    pcmk__client_t *client = client_from_connection(NULL, key, 0);
+
+    CRM_ASSERT(client != NULL);
+    return client;
+}
+
+pcmk__client_t *
+pcmk__new_client(qb_ipcs_connection_t *c, uid_t uid_client, gid_t gid_client)
+{
+    uid_t uid_cluster = 0;
+    gid_t gid_cluster = 0;
+
+    pcmk__client_t *client = NULL;
+
+    CRM_CHECK(c != NULL, return NULL);
+
+    if (pcmk_daemon_user(&uid_cluster, &gid_cluster) < 0) {
+        static bool need_log = TRUE;
+
+        if (need_log) {
+            crm_warn("Could not find user and group IDs for user %s",
+                     CRM_DAEMON_USER);
+            need_log = FALSE;
+        }
+    }
+
+    if (uid_client != 0) {
+        crm_trace("Giving group %u access to new IPC connection", gid_cluster);
+        /* Passing -1 to chown(2) means don't change */
+        qb_ipcs_connection_auth_set(c, -1, gid_cluster, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
+    }
+
+    /* TODO: Do our own auth checking, return NULL if unauthorized */
+    client = client_from_connection(c, NULL, uid_client);
+    if (client == NULL) {
+        return NULL;
+    }
+
+    if ((uid_client == 0) || (uid_client == uid_cluster)) {
+        /* Remember when a connection came from root or hacluster */
+        set_bit(client->flags, pcmk__client_privileged);
+    }
+
+    crm_debug("New IPC client %s for PID %u with uid %d and gid %d",
+              client->id, client->pid, uid_client, gid_client);
+    return client;
+}
+
+static struct iovec *
+pcmk__new_ipc_event(void)
+{
+    struct iovec *iov = calloc(2, sizeof(struct iovec));
+
+    CRM_ASSERT(iov != NULL);
+    return iov;
+}
+
+/*!
+ * \brief Free an I/O vector created by pcmk__ipc_prepare_iov() + * + * \param[in] event I/O vector to free + */ +void +pcmk_free_ipc_event(struct iovec *event) +{ + if (event != NULL) { + free(event[0].iov_base); + free(event[1].iov_base); + free(event); + } +} + +static void +free_event(gpointer data) +{ + pcmk_free_ipc_event((struct iovec *) data); +} + +static void +add_event(pcmk__client_t *c, struct iovec *iov) +{ + if (c->event_queue == NULL) { + c->event_queue = g_queue_new(); + } + g_queue_push_tail(c->event_queue, iov); +} + +void +pcmk__free_client(pcmk__client_t *c) +{ + if (c == NULL) { + return; + } + + if (client_connections) { + if (c->ipcs) { + crm_trace("Destroying %p/%p (%d remaining)", + c, c->ipcs, g_hash_table_size(client_connections) - 1); + g_hash_table_remove(client_connections, c->ipcs); + + } else { + crm_trace("Destroying remote connection %p (%d remaining)", + c, g_hash_table_size(client_connections) - 1); + g_hash_table_remove(client_connections, c->id); + } + } + + if (c->event_timer) { + g_source_remove(c->event_timer); + } + + if (c->event_queue) { + crm_debug("Destroying %d events", g_queue_get_length(c->event_queue)); + g_queue_free_full(c->event_queue, free_event); + } + + free(c->id); + free(c->name); + free(c->user); + if (c->remote) { + if (c->remote->auth_timeout) { + g_source_remove(c->remote->auth_timeout); + } + free(c->remote->buffer); + free(c->remote); + } + free(c); +} + +/*! + * \internal + * \brief Raise IPC eviction threshold for a client, if allowed + * + * \param[in,out] client Client to modify + * \param[in] qmax New threshold (as non-NULL string) + * + * \return TRUE if change was allowed, FALSE otherwise + */ +bool +pcmk__set_client_queue_max(pcmk__client_t *client, const char *qmax) +{ + if (is_set(client->flags, pcmk__client_privileged)) { + long long qmax_int; + + errno = 0; + qmax_int = crm_parse_ll(qmax, NULL); + if ((errno == 0) && (qmax_int > 0)) { + client->queue_max = (unsigned int) qmax_int; + return TRUE; + } + } + return FALSE; +} + +int +pcmk__client_pid(qb_ipcs_connection_t *c) +{ + struct qb_ipcs_connection_stats stats; + + stats.client_pid = 0; + qb_ipcs_connection_stats_get(c, &stats, 0); + return stats.client_pid; +} + +/*! + * \internal + * \brief Retrieve message XML from data read from client IPC + * + * \param[in] c IPC client connection + * \param[in] data Data read from client connection + * \param[out] id Where to store message ID from libqb header + * \param[out] flags Where to store flags from libqb header + * + * \return Message XML on success, NULL otherwise + */ +xmlNode * +pcmk__client_data2xml(pcmk__client_t *c, void *data, uint32_t *id, + uint32_t *flags) +{ + xmlNode *xml = NULL; + char *uncompressed = NULL; + char *text = ((char *)data) + sizeof(struct crm_ipc_response_header); + struct crm_ipc_response_header *header = data; + + if (id) { + *id = ((struct qb_ipc_response_header *)data)->id; + } + if (flags) { + *flags = header->flags; + } + + if (is_set(header->flags, crm_ipc_proxied)) { + /* Mark this client as being the endpoint of a proxy connection. + * Proxy connections responses are sent on the event channel, to avoid + * blocking the controller serving as proxy. 
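+         *
+         * The effect is visible in pcmk__ipc_send_iov() below: for a client
+         * carrying this flag, a reply that would normally go out as a
+         * synchronous response is re-flagged before queuing, roughly:
+         *
+         *     if (c->flags & pcmk__client_proxied) {
+         *         flags |= crm_ipc_server_event;           // reroute via events
+         *         flags |= crm_ipc_proxied_relay_response; // but mark as a reply
+         *     }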
+ */ + c->flags |= pcmk__client_proxied; + } + + if(header->version > PCMK_IPC_VERSION) { + crm_err("Filtering incompatible v%d IPC message, we only support versions <= %d", + header->version, PCMK_IPC_VERSION); + return NULL; + } + + if (header->size_compressed) { + int rc = 0; + unsigned int size_u = 1 + header->size_uncompressed; + uncompressed = calloc(1, size_u); + + crm_trace("Decompressing message data %u bytes into %u bytes", + header->size_compressed, size_u); + + rc = BZ2_bzBuffToBuffDecompress(uncompressed, &size_u, text, header->size_compressed, 1, 0); + text = uncompressed; + + if (rc != BZ_OK) { + crm_err("Decompression failed: %s " CRM_XS " bzerror=%d", + bz2_strerror(rc), rc); + free(uncompressed); + return NULL; + } + } + + CRM_ASSERT(text[header->size_uncompressed - 1] == 0); + + xml = string2xml(text); + crm_log_xml_trace(xml, "[IPC received]"); + + free(uncompressed); + return xml; +} + +static int crm_ipcs_flush_events(pcmk__client_t *c); + +static gboolean +crm_ipcs_flush_events_cb(gpointer data) +{ + pcmk__client_t *c = data; + + c->event_timer = 0; + crm_ipcs_flush_events(c); + return FALSE; +} + +/*! + * \internal + * \brief Add progressive delay before next event queue flush + * + * \param[in,out] c Client connection to add delay to + * \param[in] queue_len Current event queue length + */ +static inline void +delay_next_flush(pcmk__client_t *c, unsigned int queue_len) +{ + /* Delay a maximum of 1.5 seconds */ + guint delay = (queue_len < 5)? (1000 + 100 * queue_len) : 1500; + + c->event_timer = g_timeout_add(delay, crm_ipcs_flush_events_cb, c); +} + +/*! + * \internal + * \brief Send client any messages in its queue + * + * \param[in] c Client to flush + * + * \return Standard Pacemaker return value + */ +static int +crm_ipcs_flush_events(pcmk__client_t *c) +{ + int rc = pcmk_rc_ok; + ssize_t qb_rc = 0; + unsigned int sent = 0; + unsigned int queue_len = 0; + + if (c == NULL) { + return rc; + + } else if (c->event_timer) { + /* There is already a timer, wait until it goes off */ + crm_trace("Timer active for %p - %d", c->ipcs, c->event_timer); + return rc; + } + + if (c->event_queue) { + queue_len = g_queue_get_length(c->event_queue); + } + while (sent < 100) { + struct crm_ipc_response_header *header = NULL; + struct iovec *event = NULL; + + if (c->event_queue) { + // We don't pop unless send is successful + event = g_queue_peek_head(c->event_queue); + } + if (event == NULL) { // Queue is empty + break; + } + + qb_rc = qb_ipcs_event_sendv(c->ipcs, event, 2); + if (qb_rc < 0) { + rc = (int) -qb_rc; + break; + } + event = g_queue_pop_head(c->event_queue); + + sent++; + header = event[0].iov_base; + if (header->size_compressed) { + crm_trace("Event %d to %p[%d] (%lld compressed bytes) sent", + header->qb.id, c->ipcs, c->pid, (long long) qb_rc); + } else { + crm_trace("Event %d to %p[%d] (%lld bytes) sent: %.120s", + header->qb.id, c->ipcs, c->pid, (long long) qb_rc, + (char *) (event[1].iov_base)); + } + pcmk_free_ipc_event(event); + } + + queue_len -= sent; + if (sent > 0 || queue_len) { + crm_trace("Sent %d events (%d remaining) for %p[%d]: %s (%lld)", + sent, queue_len, c->ipcs, c->pid, + pcmk_rc_str(rc), (long long) qb_rc); + } + + if (queue_len) { + + /* Allow clients to briefly fall behind on processing incoming messages, + * but drop completely unresponsive clients so the connection doesn't + * consume resources indefinitely. 
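+         *
+         * Concretely, given the logic below and delay_next_flush() above: a
+         * backlog is tolerated while it stays at or below
+         * QB_MAX(queue_max, PCMK_IPC_DEFAULT_QUEUE_MAX), or while it is new
+         * or shrinking, and the flush is retried after 1000ms plus 100ms per
+         * queued event (capped at 1500ms). Only a backlog that exceeds the
+         * threshold and is still growing at the next flush gets the client
+         * evicted via qb_ipcs_disconnect().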
+ */ + if (queue_len > QB_MAX(c->queue_max, PCMK_IPC_DEFAULT_QUEUE_MAX)) { + if ((c->queue_backlog <= 1) || (queue_len < c->queue_backlog)) { + /* Don't evict for a new or shrinking backlog */ + crm_warn("Client with process ID %u has a backlog of %u messages " + CRM_XS " %p", c->pid, queue_len, c->ipcs); + } else { + crm_err("Evicting client with process ID %u due to backlog of %u messages " + CRM_XS " %p", c->pid, queue_len, c->ipcs); + c->queue_backlog = 0; + qb_ipcs_disconnect(c->ipcs); + return rc; + } + } + + c->queue_backlog = queue_len; + delay_next_flush(c, queue_len); + + } else { + /* Event queue is empty, there is no backlog */ + c->queue_backlog = 0; + } + + return rc; +} + +/*! + * \internal + * \brief Create an I/O vector for sending an IPC XML message + * + * \param[in] request Identifier for libqb response header + * \param[in] message XML message to send + * \param[in] max_send_size If 0, default IPC buffer size is used + * \param[out] result Where to store prepared I/O vector + * \param[out] bytes Size of prepared data in bytes + * + * \return Standard Pacemaker return code + */ +int +pcmk__ipc_prepare_iov(uint32_t request, xmlNode *message, + uint32_t max_send_size, struct iovec **result, + ssize_t *bytes) +{ + static unsigned int biggest = 0; + struct iovec *iov; + unsigned int total = 0; + char *compressed = NULL; + char *buffer = NULL; + struct crm_ipc_response_header *header = NULL; + + if ((message == NULL) || (result == NULL)) { + return EINVAL; + } + + header = calloc(1, sizeof(struct crm_ipc_response_header)); + if (header == NULL) { + return ENOMEM; /* errno mightn't be set by allocator */ + } + + buffer = dump_xml_unformatted(message); + crm_ipc_init(); + + if (max_send_size == 0) { + max_send_size = ipc_buffer_max; + } + CRM_LOG_ASSERT(max_send_size != 0); + + *result = NULL; + iov = pcmk__new_ipc_event(); + iov[0].iov_len = hdr_offset; + iov[0].iov_base = header; + + header->version = PCMK_IPC_VERSION; + header->size_uncompressed = 1 + strlen(buffer); + total = iov[0].iov_len + header->size_uncompressed; + + if (total < max_send_size) { + iov[1].iov_base = buffer; + iov[1].iov_len = header->size_uncompressed; + + } else { + unsigned int new_size = 0; + + if (pcmk__compress(buffer, (unsigned int) header->size_uncompressed, + (unsigned int) max_send_size, &compressed, + &new_size) == pcmk_rc_ok) { + + header->flags |= crm_ipc_compressed; + header->size_compressed = new_size; + + iov[1].iov_len = header->size_compressed; + iov[1].iov_base = compressed; + + free(buffer); + + biggest = QB_MAX(header->size_compressed, biggest); + + } else { + crm_log_xml_trace(message, "EMSGSIZE"); + biggest = QB_MAX(header->size_uncompressed, biggest); + + crm_err("Could not compress %u-byte message into less than IPC " + "limit of %u bytes; set PCMK_ipc_buffer to higher value " + "(%u bytes suggested)", + header->size_uncompressed, max_send_size, 4 * biggest); + + free(compressed); + free(buffer); + pcmk_free_ipc_event(iov); + return EMSGSIZE; + } + } + + header->qb.size = iov[0].iov_len + iov[1].iov_len; + header->qb.id = (int32_t)request; /* Replying to a specific request */ + + *result = iov; + CRM_ASSERT(header->qb.size > 0); + if (bytes != NULL) { + *bytes = header->qb.size; + } + return pcmk_rc_ok; +} + +int +pcmk__ipc_send_iov(pcmk__client_t *c, struct iovec *iov, uint32_t flags) +{ + int rc = pcmk_rc_ok; + static uint32_t id = 1; + struct crm_ipc_response_header *header = iov[0].iov_base; + + if (c->flags & pcmk__client_proxied) { + /* _ALL_ replies to proxied 
connections need to be sent as events */
+        if (is_not_set(flags, crm_ipc_server_event)) {
+            flags |= crm_ipc_server_event;
+            /* This flag lets us know this was originally meant to be a
+             * response, even though we're sending it over the event channel.
+             */
+            flags |= crm_ipc_proxied_relay_response;
+        }
+    }
+
+    header->flags |= flags;
+    if (flags & crm_ipc_server_event) {
+        header->qb.id = id++;   /* We don't really use it, but doesn't hurt to set one */
+
+        if (flags & crm_ipc_server_free) {
+            crm_trace("Sending the original to %p[%d]", c->ipcs, c->pid);
+            add_event(c, iov);
+
+        } else {
+            struct iovec *iov_copy = pcmk__new_ipc_event();
+
+            crm_trace("Sending a copy to %p[%d]", c->ipcs, c->pid);
+            iov_copy[0].iov_len = iov[0].iov_len;
+            iov_copy[0].iov_base = malloc(iov[0].iov_len);
+            memcpy(iov_copy[0].iov_base, iov[0].iov_base, iov[0].iov_len);
+
+            iov_copy[1].iov_len = iov[1].iov_len;
+            iov_copy[1].iov_base = malloc(iov[1].iov_len);
+            memcpy(iov_copy[1].iov_base, iov[1].iov_base, iov[1].iov_len);
+
+            add_event(c, iov_copy);
+        }
+
+    } else {
+        ssize_t qb_rc;
+
+        CRM_LOG_ASSERT(header->qb.id != 0);     /* Replying to a specific request */
+
+        qb_rc = qb_ipcs_response_sendv(c->ipcs, iov, 2);
+        if (qb_rc < header->qb.size) {
+            if (qb_rc < 0) {
+                rc = (int) -qb_rc;
+            }
+            crm_notice("Response %d to pid %d failed: %s "
+                       CRM_XS " bytes=%u rc=%lld ipcs=%p",
+                       header->qb.id, c->pid, pcmk_rc_str(rc),
+                       header->qb.size, (long long) qb_rc, c->ipcs);
+
+        } else {
+            crm_trace("Response %d sent, %lld bytes to %p[%d]",
+                      header->qb.id, (long long) qb_rc, c->ipcs, c->pid);
+        }
+
+        if (flags & crm_ipc_server_free) {
+            pcmk_free_ipc_event(iov);
+        }
+    }
+
+    if (flags & crm_ipc_server_event) {
+        rc = crm_ipcs_flush_events(c);
+    } else {
+        crm_ipcs_flush_events(c);
+    }
+
+    if ((rc == EPIPE) || (rc == ENOTCONN)) {
+        crm_trace("Client %p disconnected", c->ipcs);
+    }
+    return rc;
+}
+
+int
+pcmk__ipc_send_xml(pcmk__client_t *c, uint32_t request, xmlNode *message,
+                   uint32_t flags)
+{
+    struct iovec *iov = NULL;
+    int rc = pcmk_rc_ok;
+
+    if (c == NULL) {
+        return EINVAL;
+    }
+    crm_ipc_init();
+    rc = pcmk__ipc_prepare_iov(request, message, ipc_buffer_max, &iov, NULL);
+    if (rc == pcmk_rc_ok) {
+        rc = pcmk__ipc_send_iov(c, iov, flags | crm_ipc_server_free);
+    } else {
+        pcmk_free_ipc_event(iov);
+        crm_notice("IPC message to pid %d failed: %s " CRM_XS " rc=%d",
+                   c->pid, pcmk_rc_str(rc), rc);
+    }
+    return rc;
+}
+
+void
+pcmk__ipc_send_ack_as(const char *function, int line, pcmk__client_t *c,
+                      uint32_t request, uint32_t flags, const char *tag)
+{
+    if (flags & crm_ipc_client_response) {
+        xmlNode *ack = create_xml_node(NULL, tag);
+
+        crm_trace("Ack'ing IPC message from %s", pcmk__client_name(c));
+        c->request_id = 0;
+        crm_xml_add(ack, "function", function);
+        crm_xml_add_int(ack, "line", line);
+        pcmk__ipc_send_xml(c, request, ack, flags);
+        free_xml(ack);
+    }
+}
+
+/*!
+ * \internal
+ * \brief Add an IPC server to the main loop for the pacemaker-based API
+ *
+ * \param[out] ipcs_ro   New IPC server for read-only pacemaker-based API
+ * \param[out] ipcs_rw   New IPC server for read/write pacemaker-based API
+ * \param[out] ipcs_shm  New IPC server for shared-memory pacemaker-based API
+ * \param[in]  ro_cb     IPC callbacks for read-only API
+ * \param[in]  rw_cb     IPC callbacks for read/write and shared-memory APIs
+ *
+ * \note This function exits fatally if unable to create the servers.
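+ *
+ * \note As a usage sketch (assuming the caller's qb_ipcs_service_handlers
+ *       live in ro_callbacks and rw_callbacks, and that a main loop runs in
+ *       between), creation is paired with pcmk__stop_based_ipc():
+ *
+ *           qb_ipcs_service_t *ro = NULL, *rw = NULL, *shm = NULL;
+ *
+ *           pcmk__serve_based_ipc(&ro, &rw, &shm, &ro_callbacks, &rw_callbacks);
+ *           // ... dispatch requests from the main loop ...
+ *           pcmk__stop_based_ipc(ro, rw, shm);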
+ */ +void pcmk__serve_based_ipc(qb_ipcs_service_t **ipcs_ro, + qb_ipcs_service_t **ipcs_rw, + qb_ipcs_service_t **ipcs_shm, + struct qb_ipcs_service_handlers *ro_cb, + struct qb_ipcs_service_handlers *rw_cb) +{ + *ipcs_ro = mainloop_add_ipc_server(PCMK__SERVER_BASED_RO, + QB_IPC_NATIVE, ro_cb); + + *ipcs_rw = mainloop_add_ipc_server(PCMK__SERVER_BASED_RW, + QB_IPC_NATIVE, rw_cb); + + *ipcs_shm = mainloop_add_ipc_server(PCMK__SERVER_BASED_SHM, + QB_IPC_SHM, rw_cb); + + if (*ipcs_ro == NULL || *ipcs_rw == NULL || *ipcs_shm == NULL) { + crm_err("Failed to create the CIB manager: exiting and inhibiting respawn"); + crm_warn("Verify pacemaker and pacemaker_remote are not both enabled"); + crm_exit(CRM_EX_FATAL); + } +} + +/*! + * \internal + * \brief Destroy IPC servers for pacemaker-based API + * + * \param[out] ipcs_ro IPC server for read-only pacemaker-based API + * \param[out] ipcs_rw IPC server for read/write pacemaker-based API + * \param[out] ipcs_shm IPC server for shared-memory pacemaker-based API + * + * \note This is a convenience function for calling qb_ipcs_destroy() for each + * argument. + */ +void +pcmk__stop_based_ipc(qb_ipcs_service_t *ipcs_ro, + qb_ipcs_service_t *ipcs_rw, + qb_ipcs_service_t *ipcs_shm) +{ + qb_ipcs_destroy(ipcs_ro); + qb_ipcs_destroy(ipcs_rw); + qb_ipcs_destroy(ipcs_shm); +} + +/*! + * \internal + * \brief Add an IPC server to the main loop for the pacemaker-controld API + * + * \param[in] cb IPC callbacks + * + * \return Newly created IPC server + */ +qb_ipcs_service_t * +pcmk__serve_controld_ipc(struct qb_ipcs_service_handlers *cb) +{ + return mainloop_add_ipc_server(CRM_SYSTEM_CRMD, QB_IPC_NATIVE, cb); +} + +/*! + * \internal + * \brief Add an IPC server to the main loop for the pacemaker-attrd API + * + * \param[in] cb IPC callbacks + * + * \note This function exits fatally if unable to create the servers. + */ +void +pcmk__serve_attrd_ipc(qb_ipcs_service_t **ipcs, + struct qb_ipcs_service_handlers *cb) +{ + *ipcs = mainloop_add_ipc_server(T_ATTRD, QB_IPC_NATIVE, cb); + + if (*ipcs == NULL) { + crm_err("Failed to create pacemaker-attrd server: exiting and inhibiting respawn"); + crm_warn("Verify pacemaker and pacemaker_remote are not both enabled."); + crm_exit(CRM_EX_FATAL); + } +} + +/*! + * \internal + * \brief Add an IPC server to the main loop for the pacemaker-fenced API + * + * \param[in] cb IPC callbacks + * + * \note This function exits fatally if unable to create the servers. + */ +void +pcmk__serve_fenced_ipc(qb_ipcs_service_t **ipcs, + struct qb_ipcs_service_handlers *cb) +{ + *ipcs = mainloop_add_ipc_server_with_prio("stonith-ng", QB_IPC_NATIVE, cb, + QB_LOOP_HIGH); + + if (*ipcs == NULL) { + crm_err("Failed to create fencer: exiting and inhibiting respawn."); + crm_warn("Verify pacemaker and pacemaker_remote are not both enabled."); + crm_exit(CRM_EX_FATAL); + } +} + +/* Client... 
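+ *
+ * The rest of this file implements the client side of the IPC layer. A
+ * minimal consumer might look like the following sketch (error handling
+ * omitted; "request" stands for an xmlNode built by the caller, and
+ * CRM_SYSTEM_CRMD is the controller's IPC name):
+ *
+ *     crm_ipc_t *ipc = crm_ipc_new(CRM_SYSTEM_CRMD, 0);
+ *     xmlNode *reply = NULL;
+ *
+ *     if (crm_ipc_connect(ipc)) {
+ *         crm_ipc_send(ipc, request, crm_ipc_client_response, 0, &reply);
+ *         free_xml(reply);
+ *         crm_ipc_close(ipc);
+ *     }
+ *     crm_ipc_destroy(ipc);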
*/ + +#define MIN_MSG_SIZE 12336 /* sizeof(struct qb_ipc_connection_response) */ +#define MAX_MSG_SIZE 128*1024 /* 128k default */ + +struct crm_ipc_s { + struct pollfd pfd; + + /* the max size we can send/receive over ipc */ + unsigned int max_buf_size; + /* Size of the allocated 'buffer' */ + unsigned int buf_size; + int msg_size; + int need_reply; + char *buffer; + char *name; + + qb_ipcc_connection_t *ipc; + +}; + +static unsigned int +pick_ipc_buffer(unsigned int max) +{ + static unsigned int global_max = 0; + + if (global_max == 0) { + const char *env = getenv("PCMK_ipc_buffer"); + + if (env) { + int env_max = crm_parse_int(env, "0"); + + global_max = (env_max > 0)? QB_MAX(MIN_MSG_SIZE, env_max) : MAX_MSG_SIZE; + + } else { + global_max = MAX_MSG_SIZE; + } + } + + return QB_MAX(max, global_max); +} + +crm_ipc_t * +crm_ipc_new(const char *name, size_t max_size) +{ + crm_ipc_t *client = NULL; + + client = calloc(1, sizeof(crm_ipc_t)); + + client->name = strdup(name); + client->buf_size = pick_ipc_buffer(max_size); + client->buffer = malloc(client->buf_size); + + /* Clients initiating connection pick the max buf size */ + client->max_buf_size = client->buf_size; + + client->pfd.fd = -1; + client->pfd.events = POLLIN; + client->pfd.revents = 0; + + return client; +} + +/*! + * \brief Establish an IPC connection to a Pacemaker component + * + * \param[in] client Connection instance obtained from crm_ipc_new() + * + * \return TRUE on success, FALSE otherwise (in which case errno will be set; + * specifically, in case of discovering the remote side is not + * authentic, its value is set to ECONNABORTED). + */ +bool +crm_ipc_connect(crm_ipc_t * client) +{ + uid_t cl_uid = 0; + gid_t cl_gid = 0; + pid_t found_pid = 0; uid_t found_uid = 0; gid_t found_gid = 0; + int rv; + + client->need_reply = FALSE; + client->ipc = qb_ipcc_connect(client->name, client->buf_size); + + if (client->ipc == NULL) { + crm_debug("Could not establish %s connection: %s (%d)", client->name, pcmk_strerror(errno), errno); + return FALSE; + } + + client->pfd.fd = crm_ipc_get_fd(client); + if (client->pfd.fd < 0) { + rv = errno; + /* message already omitted */ + crm_ipc_close(client); + errno = rv; + return FALSE; + } + + rv = pcmk_daemon_user(&cl_uid, &cl_gid); + if (rv < 0) { + /* message already omitted */ + crm_ipc_close(client); + errno = -rv; + return FALSE; + } + + if (!(rv = crm_ipc_is_authentic_process(client->pfd.fd, cl_uid, cl_gid, + &found_pid, &found_uid, + &found_gid))) { + crm_err("Daemon (IPC %s) is not authentic:" + " process %lld (uid: %lld, gid: %lld)", + client->name, (long long) PCMK__SPECIAL_PID_AS_0(found_pid), + (long long) found_uid, (long long) found_gid); + crm_ipc_close(client); + errno = ECONNABORTED; + return FALSE; + + } else if (rv < 0) { + errno = -rv; + crm_perror(LOG_ERR, "Could not verify authenticity of daemon (IPC %s)", + client->name); + crm_ipc_close(client); + errno = -rv; + return FALSE; + } + + qb_ipcc_context_set(client->ipc, client); + +#ifdef HAVE_IPCS_GET_BUFFER_SIZE + client->max_buf_size = qb_ipcc_get_buffer_size(client->ipc); + if (client->max_buf_size > client->buf_size) { + free(client->buffer); + client->buffer = calloc(1, client->max_buf_size); + client->buf_size = client->max_buf_size; + } +#endif + + return TRUE; +} + +void +crm_ipc_close(crm_ipc_t * client) +{ + if (client) { + crm_trace("Disconnecting %s IPC connection %p (%p)", client->name, client, client->ipc); + + if (client->ipc) { + qb_ipcc_connection_t *ipc = client->ipc; + + client->ipc = NULL; + 
qb_ipcc_disconnect(ipc); + } + } +} + +void +crm_ipc_destroy(crm_ipc_t * client) +{ + if (client) { + if (client->ipc && qb_ipcc_is_connected(client->ipc)) { + crm_notice("Destroying an active IPC connection to %s", client->name); + /* The next line is basically unsafe + * + * If this connection was attached to mainloop and mainloop is active, + * the 'disconnected' callback will end up back here and we'll end + * up free'ing the memory twice - something that can still happen + * even without this if we destroy a connection and it closes before + * we call exit + */ + /* crm_ipc_close(client); */ + } + crm_trace("Destroying IPC connection to %s: %p", client->name, client); + free(client->buffer); + free(client->name); + free(client); + } +} + +int +crm_ipc_get_fd(crm_ipc_t * client) +{ + int fd = 0; + + if (client && client->ipc && (qb_ipcc_fd_get(client->ipc, &fd) == 0)) { + return fd; + } + errno = EINVAL; + crm_perror(LOG_ERR, "Could not obtain file IPC descriptor for %s", + (client? client->name : "unspecified client")); + return -errno; +} + +bool +crm_ipc_connected(crm_ipc_t * client) +{ + bool rc = FALSE; + + if (client == NULL) { + crm_trace("No client"); + return FALSE; + + } else if (client->ipc == NULL) { + crm_trace("No connection"); + return FALSE; + + } else if (client->pfd.fd < 0) { + crm_trace("Bad descriptor"); + return FALSE; + } + + rc = qb_ipcc_is_connected(client->ipc); + if (rc == FALSE) { + client->pfd.fd = -EINVAL; + } + return rc; +} + +/*! + * \brief Check whether an IPC connection is ready to be read + * + * \param[in] client Connection to check + * + * \return Positive value if ready to be read, 0 if not ready, -errno on error + */ +int +crm_ipc_ready(crm_ipc_t *client) +{ + int rc; + + CRM_ASSERT(client != NULL); + + if (crm_ipc_connected(client) == FALSE) { + return -ENOTCONN; + } + + client->pfd.revents = 0; + rc = poll(&(client->pfd), 1, 0); + return (rc < 0)? -errno : rc; +} + +// \return Standard Pacemaker return code +static int +crm_ipc_decompress(crm_ipc_t * client) +{ + struct crm_ipc_response_header *header = (struct crm_ipc_response_header *)(void*)client->buffer; + + if (header->size_compressed) { + int rc = 0; + unsigned int size_u = 1 + header->size_uncompressed; + /* never let buf size fall below our max size required for ipc reads. */ + unsigned int new_buf_size = QB_MAX((hdr_offset + size_u), client->max_buf_size); + char *uncompressed = calloc(1, new_buf_size); + + crm_trace("Decompressing message data %u bytes into %u bytes", + header->size_compressed, size_u); + + rc = BZ2_bzBuffToBuffDecompress(uncompressed + hdr_offset, &size_u, + client->buffer + hdr_offset, header->size_compressed, 1, 0); + + if (rc != BZ_OK) { + crm_err("Decompression failed: %s " CRM_XS " bzerror=%d", + bz2_strerror(rc), rc); + free(uncompressed); + return EILSEQ; + } + + /* + * This assert no longer holds true. For an identical msg, some clients may + * require compression, and others may not. If that same msg (event) is sent + * to multiple clients, it could result in some clients receiving a compressed + * msg even though compression was not explicitly required for them. 
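+         *
+         * Whether a message arrives compressed is decided on the sending
+         * side (pcmk__ipc_prepare_iov() compresses only when the message
+         * would exceed the sender's buffer limit, which PCMK_ipc_buffer can
+         * change per process), not by what this particular reader requires,
+         * so a reader must be prepared for either form.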
+ * + * CRM_ASSERT((header->size_uncompressed + hdr_offset) >= ipc_buffer_max); + */ + CRM_ASSERT(size_u == header->size_uncompressed); + + memcpy(uncompressed, client->buffer, hdr_offset); /* Preserve the header */ + header = (struct crm_ipc_response_header *)(void*)uncompressed; + + free(client->buffer); + client->buf_size = new_buf_size; + client->buffer = uncompressed; + } + + CRM_ASSERT(client->buffer[hdr_offset + header->size_uncompressed - 1] == 0); + return pcmk_rc_ok; +} + +long +crm_ipc_read(crm_ipc_t * client) +{ + struct crm_ipc_response_header *header = NULL; + + CRM_ASSERT(client != NULL); + CRM_ASSERT(client->ipc != NULL); + CRM_ASSERT(client->buffer != NULL); + + crm_ipc_init(); + + client->buffer[0] = 0; + client->msg_size = qb_ipcc_event_recv(client->ipc, client->buffer, + client->buf_size, 0); + if (client->msg_size >= 0) { + int rc = crm_ipc_decompress(client); + + if (rc != pcmk_rc_ok) { + return pcmk_rc2legacy(rc); + } + + header = (struct crm_ipc_response_header *)(void*)client->buffer; + if(header->version > PCMK_IPC_VERSION) { + crm_err("Filtering incompatible v%d IPC message, we only support versions <= %d", + header->version, PCMK_IPC_VERSION); + return -EBADMSG; + } + + crm_trace("Received %s event %d, size=%u, rc=%d, text: %.100s", + client->name, header->qb.id, header->qb.size, client->msg_size, + client->buffer + hdr_offset); + + } else { + crm_trace("No message from %s received: %s", client->name, pcmk_strerror(client->msg_size)); + } + + if (crm_ipc_connected(client) == FALSE || client->msg_size == -ENOTCONN) { + crm_err("Connection to %s failed", client->name); + } + + if (header) { + /* Data excluding the header */ + return header->size_uncompressed; + } + return -ENOMSG; +} + +const char * +crm_ipc_buffer(crm_ipc_t * client) +{ + CRM_ASSERT(client != NULL); + return client->buffer + sizeof(struct crm_ipc_response_header); +} + +uint32_t +crm_ipc_buffer_flags(crm_ipc_t * client) +{ + struct crm_ipc_response_header *header = NULL; + + CRM_ASSERT(client != NULL); + if (client->buffer == NULL) { + return 0; + } + + header = (struct crm_ipc_response_header *)(void*)client->buffer; + return header->flags; +} + +const char * +crm_ipc_name(crm_ipc_t * client) +{ + CRM_ASSERT(client != NULL); + return client->name; +} + +// \return Standard Pacemaker return code +static int +internal_ipc_get_reply(crm_ipc_t *client, int request_id, int ms_timeout, + ssize_t *bytes) +{ + time_t timeout = time(NULL) + 1 + (ms_timeout / 1000); + int rc = pcmk_rc_ok; + + crm_ipc_init(); + + /* get the reply */ + crm_trace("client %s waiting on reply to msg id %d", client->name, request_id); + do { + + *bytes = qb_ipcc_recv(client->ipc, client->buffer, client->buf_size, 1000); + if (*bytes > 0) { + struct crm_ipc_response_header *hdr = NULL; + + rc = crm_ipc_decompress(client); + if (rc != pcmk_rc_ok) { + return rc; + } + + hdr = (struct crm_ipc_response_header *)(void*)client->buffer; + if (hdr->qb.id == request_id) { + /* Got it */ + break; + } else if (hdr->qb.id < request_id) { + xmlNode *bad = string2xml(crm_ipc_buffer(client)); + + crm_err("Discarding old reply %d (need %d)", hdr->qb.id, request_id); + crm_log_xml_notice(bad, "OldIpcReply"); + + } else { + xmlNode *bad = string2xml(crm_ipc_buffer(client)); + + crm_err("Discarding newer reply %d (need %d)", hdr->qb.id, request_id); + crm_log_xml_notice(bad, "ImpossibleReply"); + CRM_ASSERT(hdr->qb.id <= request_id); + } + } else if (crm_ipc_connected(client) == FALSE) { + crm_err("Server disconnected client %s while waiting for 
msg id %d", client->name, + request_id); + break; + } + + } while (time(NULL) < timeout); + + if (*bytes < 0) { + rc = (int) -*bytes; // System errno + } + return rc; +} + +/*! + * \brief Send an IPC XML message + * + * \param[in] client Connection to IPC server + * \param[in] message XML message to send + * \param[in] flags Bitmask of crm_ipc_flags + * \param[in] ms_timeout Give up if not sent within this much time + * (5 seconds if 0, or no timeout if negative) + * \param[out] reply Reply from server (or NULL if none) + * + * \return Negative errno on error, otherwise size of reply received in bytes + * if reply was needed, otherwise number of bytes sent + */ +int +crm_ipc_send(crm_ipc_t * client, xmlNode * message, enum crm_ipc_flags flags, int32_t ms_timeout, + xmlNode ** reply) +{ + int rc = 0; + ssize_t qb_rc = 0; + ssize_t bytes = 0; + struct iovec *iov; + static uint32_t id = 0; + static int factor = 8; + struct crm_ipc_response_header *header; + + crm_ipc_init(); + + if (client == NULL) { + crm_notice("Can't send IPC request without connection (bug?): %.100s", + message); + return -ENOTCONN; + + } else if (crm_ipc_connected(client) == FALSE) { + /* Don't even bother */ + crm_notice("Can't send IPC request to %s: Connection closed", + client->name); + return -ENOTCONN; + } + + if (ms_timeout == 0) { + ms_timeout = 5000; + } + + if (client->need_reply) { + qb_rc = qb_ipcc_recv(client->ipc, client->buffer, client->buf_size, ms_timeout); + if (qb_rc < 0) { + crm_warn("Sending IPC to %s disabled until pending reply received", + client->name); + return -EALREADY; + + } else { + crm_notice("Sending IPC to %s re-enabled after pending reply received", + client->name); + client->need_reply = FALSE; + } + } + + id++; + CRM_LOG_ASSERT(id != 0); /* Crude wrap-around detection */ + rc = pcmk__ipc_prepare_iov(id, message, client->max_buf_size, &iov, &bytes); + if (rc != pcmk_rc_ok) { + crm_warn("Couldn't prepare IPC request to %s: %s " CRM_XS " rc=%d", + client->name, pcmk_rc_str(rc), rc); + return pcmk_rc2legacy(rc); + } + + header = iov[0].iov_base; + header->flags |= flags; + + if(is_set(flags, crm_ipc_proxied)) { + /* Don't look for a synchronous response */ + clear_bit(flags, crm_ipc_client_response); + } + + if(header->size_compressed) { + if(factor < 10 && (client->max_buf_size / 10) < (bytes / factor)) { + crm_notice("Compressed message exceeds %d0%% of configured IPC " + "limit (%u bytes); consider setting PCMK_ipc_buffer to " + "%u or higher", + factor, client->max_buf_size, 2 * client->max_buf_size); + factor++; + } + } + + crm_trace("Sending %s IPC request %d of %u bytes using %dms timeout", + client->name, header->qb.id, header->qb.size, ms_timeout); + + if (ms_timeout > 0 || is_not_set(flags, crm_ipc_client_response)) { + + time_t timeout = time(NULL) + 1 + (ms_timeout / 1000); + + do { + /* @TODO Is this check really needed? Won't qb_ipcc_sendv() return + * an error if it's not connected? 
+             */
+            if (!crm_ipc_connected(client)) {
+                goto send_cleanup;
+            }
+
+            qb_rc = qb_ipcc_sendv(client->ipc, iov, 2);
+        } while ((qb_rc == -EAGAIN) && (time(NULL) < timeout));
+
+        rc = (int) qb_rc; // Negative of system errno, or bytes sent
+        if (qb_rc <= 0) {
+            goto send_cleanup;
+
+        } else if (is_not_set(flags, crm_ipc_client_response)) {
+            crm_trace("Not waiting for reply to %s IPC request %d",
+                      client->name, header->qb.id);
+            goto send_cleanup;
+        }
+
+        rc = internal_ipc_get_reply(client, header->qb.id, ms_timeout, &bytes);
+        if (rc != pcmk_rc_ok) {
+            /* We didn't get the reply in time, so disable future sends for now.
+             * The only alternative would be to close the connection since we
+             * don't know how to detect and discard out-of-sequence replies.
+             *
+             * @TODO Implement out-of-sequence detection
+             */
+            client->need_reply = TRUE;
+        }
+        rc = (int) bytes; // Negative system errno, or size of reply received
+
+    } else {
+        // No timeout, and client response needed
+        do {
+            qb_rc = qb_ipcc_sendv_recv(client->ipc, iov, 2, client->buffer,
+                                       client->buf_size, -1);
+        } while ((qb_rc == -EAGAIN) && crm_ipc_connected(client));
+        rc = (int) qb_rc; // Negative system errno, or size of reply received
+    }
+
+    if (rc > 0) {
+        struct crm_ipc_response_header *hdr = (struct crm_ipc_response_header *)(void*)client->buffer;
+
+        crm_trace("Received %d-byte reply %d to %s IPC %d: %.100s",
+                  rc, hdr->qb.id, client->name, header->qb.id,
+                  crm_ipc_buffer(client));
+
+        if (reply) {
+            *reply = string2xml(crm_ipc_buffer(client));
+        }
+
+    } else {
+        crm_trace("No reply to %s IPC %d: rc=%d",
+                  client->name, header->qb.id, rc);
+    }
+
+  send_cleanup:
+    if (crm_ipc_connected(client) == FALSE) {
+        crm_notice("Couldn't send %s IPC request %d: Connection closed "
+                   CRM_XS " rc=%d", client->name, header->qb.id, rc);
+
+    } else if (rc == -ETIMEDOUT) {
+        crm_warn("%s IPC request %d failed: %s after %dms " CRM_XS " rc=%d",
+                 client->name, header->qb.id, pcmk_strerror(rc), ms_timeout,
+                 rc);
+        crm_write_blackbox(0, NULL);
+
+    } else if (rc <= 0) {
+        crm_warn("%s IPC request %d failed: %s " CRM_XS " rc=%d",
+                 client->name, header->qb.id,
+                 ((rc == 0)? "No bytes sent" : pcmk_strerror(rc)), rc);
+    }
+
+    pcmk_free_ipc_event(iov);
+    return rc;
+}
+
+int
+crm_ipc_is_authentic_process(int sock, uid_t refuid, gid_t refgid,
+                             pid_t *gotpid, uid_t *gotuid, gid_t *gotgid) {
+    int ret = 0;
+    pid_t found_pid = 0; uid_t found_uid = 0; gid_t found_gid = 0;
+#if defined(US_AUTH_PEERCRED_UCRED)
+    struct ucred ucred;
+    socklen_t ucred_len = sizeof(ucred);
+
+    if (!getsockopt(sock, SOL_SOCKET, SO_PEERCRED,
+                    &ucred, &ucred_len)
+            && ucred_len == sizeof(ucred)) {
+        found_pid = ucred.pid; found_uid = ucred.uid; found_gid = ucred.gid;
+
+#elif defined(US_AUTH_PEERCRED_SOCKPEERCRED)
+    struct sockpeercred sockpeercred;
+    socklen_t sockpeercred_len = sizeof(sockpeercred);
+
+    if (!getsockopt(sock, SOL_SOCKET, SO_PEERCRED,
+                    &sockpeercred, &sockpeercred_len)
+            && sockpeercred_len == sizeof(sockpeercred)) {
+        found_pid = sockpeercred.pid;
+        found_uid = sockpeercred.uid; found_gid = sockpeercred.gid;
+
+#elif defined(US_AUTH_GETPEEREID)
+    if (!getpeereid(sock, &found_uid, &found_gid)) {
+        found_pid = PCMK__SPECIAL_PID;  /* cannot obtain PID (FreeBSD) */
+
+#elif defined(US_AUTH_GETPEERUCRED)
+    ucred_t *ucred;
+    if (!getpeerucred(sock, &ucred)) {
+        errno = 0;
+        found_pid = ucred_getpid(ucred);
+        found_uid = ucred_geteuid(ucred); found_gid = ucred_getegid(ucred);
+        ret = -errno;
+        ucred_free(ucred);
+        if (ret) {
+            return (ret < 0) ?
ret : -pcmk_err_generic; + } + +#else +# error "No way to authenticate a Unix socket peer" + errno = 0; + if (0) { +#endif + if (gotpid != NULL) { + *gotpid = found_pid; + } + if (gotuid != NULL) { + *gotuid = found_uid; + } + if (gotgid != NULL) { + *gotgid = found_gid; + } + ret = (found_uid == 0 || found_uid == refuid || found_gid == refgid); + } else { + ret = (errno > 0) ? -errno : -pcmk_err_generic; + } + + return ret; +} + +int +pcmk__ipc_is_authentic_process_active(const char *name, uid_t refuid, + gid_t refgid, pid_t *gotpid) +{ + static char last_asked_name[PATH_MAX / 2] = ""; /* log spam prevention */ + int fd; + int rc = pcmk_rc_ipc_unresponsive; + int auth_rc = 0; + int32_t qb_rc; + pid_t found_pid = 0; uid_t found_uid = 0; gid_t found_gid = 0; + qb_ipcc_connection_t *c; + + c = qb_ipcc_connect(name, 0); + if (c == NULL) { + crm_info("Could not connect to %s IPC: %s", name, strerror(errno)); + rc = pcmk_rc_ipc_unresponsive; + goto bail; + } + + qb_rc = qb_ipcc_fd_get(c, &fd); + if (qb_rc != 0) { + rc = (int) -qb_rc; // System errno + crm_err("Could not get fd from %s IPC: %s " CRM_XS " rc=%d", + name, pcmk_rc_str(rc), rc); + goto bail; + } + + auth_rc = crm_ipc_is_authentic_process(fd, refuid, refgid, &found_pid, + &found_uid, &found_gid); + if (auth_rc < 0) { + rc = pcmk_legacy2rc(auth_rc); + crm_err("Could not get peer credentials from %s IPC: %s " + CRM_XS " rc=%d", name, pcmk_rc_str(rc), rc); + goto bail; + } + + if (gotpid != NULL) { + *gotpid = found_pid; + } + + if (auth_rc == 0) { + crm_err("Daemon (IPC %s) effectively blocked with unauthorized" + " process %lld (uid: %lld, gid: %lld)", + name, (long long) PCMK__SPECIAL_PID_AS_0(found_pid), + (long long) found_uid, (long long) found_gid); + rc = pcmk_rc_ipc_unauthorized; + goto bail; + } + + rc = pcmk_rc_ok; + if ((found_uid != refuid || found_gid != refgid) + && strncmp(last_asked_name, name, sizeof(last_asked_name))) { + if ((found_uid == 0) && (refuid != 0)) { + crm_warn("Daemon (IPC %s) runs as root, whereas the expected" + " credentials are %lld:%lld, hazard of violating" + " the least privilege principle", + name, (long long) refuid, (long long) refgid); + } else { + crm_notice("Daemon (IPC %s) runs as %lld:%lld, whereas the" + " expected credentials are %lld:%lld, which may" + " mean a different set of privileges than expected", + name, (long long) found_uid, (long long) found_gid, + (long long) refuid, (long long) refgid); + } + memccpy(last_asked_name, name, '\0', sizeof(last_asked_name)); + } + +bail: + if (c != NULL) { + qb_ipcc_disconnect(c); + } + return rc; +} + + +/* Utils */ + +xmlNode * +create_hello_message(const char *uuid, + const char *client_name, const char *major_version, const char *minor_version) +{ + xmlNode *hello_node = NULL; + xmlNode *hello = NULL; + + if (pcmk__str_empty(uuid) || pcmk__str_empty(client_name) + || pcmk__str_empty(major_version) || pcmk__str_empty(minor_version)) { + crm_err("Could not create IPC hello message from %s (UUID %s): " + "missing information", + client_name? client_name : "unknown client", + uuid? 
uuid : "unknown");
+        return NULL;
+    }
+
+    hello_node = create_xml_node(NULL, XML_TAG_OPTIONS);
+    if (hello_node == NULL) {
+        crm_err("Could not create IPC hello message from %s (UUID %s): "
+                "Message data creation failed", client_name, uuid);
+        return NULL;
+    }
+
+    crm_xml_add(hello_node, "major_version", major_version);
+    crm_xml_add(hello_node, "minor_version", minor_version);
+    crm_xml_add(hello_node, "client_name", client_name);
+    crm_xml_add(hello_node, "client_uuid", uuid);
+
+    hello = create_request(CRM_OP_HELLO, hello_node, NULL, NULL, client_name, uuid);
+    if (hello == NULL) {
+        crm_err("Could not create IPC hello message from %s (UUID %s): "
+                "Request creation failed", client_name, uuid);
+        return NULL;
+    }
+    free_xml(hello_node);
+
+    crm_trace("Created hello message from %s (UUID %s)", client_name, uuid);
+    return hello;
+}
diff --git a/lib/common/ipc_client.c b/lib/common/ipc_client.c
deleted file mode 100644
index 033199d..0000000
--- a/lib/common/ipc_client.c
+++ /dev/null
@@ -1,1427 +0,0 @@
-/*
- * Copyright 2004-2020 the Pacemaker project contributors
- *
- * The version control history for this file may have further details.
- *
- * This source code is licensed under the GNU Lesser General Public License
- * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
- */
-
-#include <crm_internal.h>
-
-#if defined(US_AUTH_PEERCRED_UCRED) || defined(US_AUTH_PEERCRED_SOCKPEERCRED)
-#  ifdef US_AUTH_PEERCRED_UCRED
-#    ifndef _GNU_SOURCE
-#      define _GNU_SOURCE
-#    endif
-#  endif
-#  include <sys/socket.h>
-#elif defined(US_AUTH_GETPEERUCRED)
-#  include <ucred.h>
-#endif
-
-#include <stdio.h>
-#include <sys/types.h>
-#include <errno.h>
-#include <bzlib.h>
-
-#include <crm/crm.h>   /* indirectly: pcmk_err_generic */
-#include <crm/msg_xml.h>
-#include <crm/common/ipc.h>
-#include <crm/common/ipc_internal.h>
-#include "crmcommon_private.h"
-
-/*!
- * \brief Create a new object for using Pacemaker daemon IPC
- *
- * \param[out] api     Where to store new IPC object
- * \param[in]  server  Which Pacemaker daemon the object is for
- *
- * \return Standard Pacemaker result code
- *
- * \note The caller is responsible for freeing *api using pcmk_free_ipc_api().
- * \note This is intended to supersede crm_ipc_new() but currently only
- *       supports the controller & pacemakerd IPC API.
- */
-int
-pcmk_new_ipc_api(pcmk_ipc_api_t **api, enum pcmk_ipc_server server)
-{
-    if (api == NULL) {
-        return EINVAL;
-    }
-
-    *api = calloc(1, sizeof(pcmk_ipc_api_t));
-    if (*api == NULL) {
-        return errno;
-    }
-
-    (*api)->server = server;
-    if (pcmk_ipc_name(*api, false) == NULL) {
-        pcmk_free_ipc_api(*api);
-        *api = NULL;
-        return EOPNOTSUPP;
-    }
-
-    (*api)->ipc_size_max = 0;
-
-    // Set server methods and max_size (if not default)
-    switch (server) {
-        case pcmk_ipc_attrd:
-            break;
-
-        case pcmk_ipc_based:
-            (*api)->ipc_size_max = 512 * 1024; // 512KB
-            break;
-
-        case pcmk_ipc_controld:
-            (*api)->cmds = pcmk__controld_api_methods();
-            break;
-
-        case pcmk_ipc_execd:
-            break;
-
-        case pcmk_ipc_fenced:
-            break;
-
-        case pcmk_ipc_pacemakerd:
-            (*api)->cmds = pcmk__pacemakerd_api_methods();
-            break;
-
-        case pcmk_ipc_schedulerd:
-            // @TODO max_size could vary by client, maybe take as argument?
-            (*api)->ipc_size_max = 5 * 1024 * 1024; // 5MB
-            break;
-    }
-    if ((*api)->cmds == NULL) {
-        pcmk_free_ipc_api(*api);
-        *api = NULL;
-        return ENOMEM;
-    }
-
-    (*api)->ipc = crm_ipc_new(pcmk_ipc_name(*api, false),
-                              (*api)->ipc_size_max);
-    if ((*api)->ipc == NULL) {
-        pcmk_free_ipc_api(*api);
-        *api = NULL;
-        return ENOMEM;
-    }
-
-    // If daemon API has its own data to track, allocate it
-    if ((*api)->cmds->new_data != NULL) {
-        if ((*api)->cmds->new_data(*api) != pcmk_rc_ok) {
-            pcmk_free_ipc_api(*api);
-            *api = NULL;
-            return ENOMEM;
-        }
-    }
-    crm_trace("Created %s API IPC object", pcmk_ipc_name(*api, true));
-    return pcmk_rc_ok;
-}
-
-static void
-free_daemon_specific_data(pcmk_ipc_api_t *api)
-{
-    if ((api != NULL) && (api->cmds != NULL)) {
-        if ((api->cmds->free_data != NULL) && (api->api_data != NULL)) {
-            api->cmds->free_data(api->api_data);
-            api->api_data = NULL;
-        }
-        free(api->cmds);
-        api->cmds = NULL;
-    }
-}
-
-/*!
- * \internal
- * \brief Call an IPC API event callback, if one is registered
- *
- * \param[in] api         IPC API connection
- * \param[in] event_type  The type of event that occurred
- * \param[in] status      Event status
- * \param[in] event_data  Event-specific data
- */
-void
-pcmk__call_ipc_callback(pcmk_ipc_api_t *api, enum pcmk_ipc_event event_type,
-                        crm_exit_t status, void *event_data)
-{
-    if ((api != NULL) && (api->cb != NULL)) {
-        api->cb(api, event_type, status, event_data, api->user_data);
-    }
-}
-
-/*!
- * \internal
- * \brief Clean up after an IPC disconnect
- *
- * \param[in] user_data  IPC API connection that disconnected
- *
- * \note This function can be used as a main loop IPC destroy callback.
- */
-static void
-ipc_post_disconnect(gpointer user_data)
-{
-    pcmk_ipc_api_t *api = user_data;
-
-    crm_info("Disconnected from %s IPC API", pcmk_ipc_name(api, true));
-
-    // Perform any daemon-specific handling needed
-    if ((api->cmds != NULL) && (api->cmds->post_disconnect != NULL)) {
-        api->cmds->post_disconnect(api);
-    }
-
-    // Call client's registered event callback
-    pcmk__call_ipc_callback(api, pcmk_ipc_event_disconnect, CRM_EX_DISCONNECT,
-                            NULL);
-
-    /* If this is being called from a running main loop, mainloop_gio_destroy()
-     * will free ipc and mainloop_io immediately after calling this function.
-     * If this is called from a stopped main loop, these will leak, so the best
-     * practice is to close the connection before stopping the main loop.
-     */
-    api->ipc = NULL;
-    api->mainloop_io = NULL;
-
-    if (api->free_on_disconnect) {
-        /* pcmk_free_ipc_api() has already been called, but did not free api
-         * or api->cmds because this function needed them. Do that now.
-         */
-        free_daemon_specific_data(api);
-        crm_trace("Freeing IPC API object after disconnect");
-        free(api);
-    }
-}
-
-/*!
- * \brief Free the contents of an IPC API object
- *
- * \param[in] api  IPC API object to free
- */
-void
-pcmk_free_ipc_api(pcmk_ipc_api_t *api)
-{
-    bool free_on_disconnect = false;
-
-    if (api == NULL) {
-        return;
-    }
-    crm_debug("Releasing %s IPC API", pcmk_ipc_name(api, true));
-
-    if (api->ipc != NULL) {
-        if (api->mainloop_io != NULL) {
-            /* We need to keep the api pointer itself around, because it is the
-             * user data for the IPC client destroy callback. That will be
-             * triggered by the pcmk_disconnect_ipc() call below, but it might
-             * happen later in the main loop (if still running).
-             *
-             * This flag tells the destroy callback to free the object.
It can't - * do that unconditionally, because the application might call this - * function after a disconnect that happened by other means. - */ - free_on_disconnect = api->free_on_disconnect = true; - } - pcmk_disconnect_ipc(api); // Frees api if free_on_disconnect is true - } - if (!free_on_disconnect) { - free_daemon_specific_data(api); - crm_trace("Freeing IPC API object"); - free(api); - } -} - -/*! - * \brief Get the IPC name used with an IPC API connection - * - * \param[in] api IPC API connection - * \param[in] for_log If true, return human-friendly name instead of IPC name - * - * \return IPC API's human-friendly or connection name, or if none is available, - * "Pacemaker" if for_log is true and NULL if for_log is false - */ -const char * -pcmk_ipc_name(pcmk_ipc_api_t *api, bool for_log) -{ - if (api == NULL) { - return for_log? "Pacemaker" : NULL; - } - switch (api->server) { - case pcmk_ipc_attrd: - return for_log? "attribute manager" : NULL /* T_ATTRD */; - - case pcmk_ipc_based: - return for_log? "CIB manager" : NULL /* PCMK__SERVER_BASED_RW */; - - case pcmk_ipc_controld: - return for_log? "controller" : CRM_SYSTEM_CRMD; - - case pcmk_ipc_execd: - return for_log? "executor" : NULL /* CRM_SYSTEM_LRMD */; - - case pcmk_ipc_fenced: - return for_log? "fencer" : NULL /* "stonith-ng" */; - - case pcmk_ipc_pacemakerd: - return for_log? "launcher" : CRM_SYSTEM_MCP; - - case pcmk_ipc_schedulerd: - return for_log? "scheduler" : NULL /* CRM_SYSTEM_PENGINE */; - - default: - return for_log? "Pacemaker" : NULL; - } -} - -/*! - * \brief Check whether an IPC API connection is active - * - * \param[in] api IPC API connection - * - * \return true if IPC is connected, false otherwise - */ -bool -pcmk_ipc_is_connected(pcmk_ipc_api_t *api) -{ - return (api != NULL) && crm_ipc_connected(api->ipc); -} - -/*! - * \internal - * \brief Call the daemon-specific API's dispatch function - * - * Perform daemon-specific handling of IPC reply dispatch. It is the daemon - * method's responsibility to call the client's registered event callback, as - * well as allocate and free any event data. - * - * \param[in] api IPC API connection - */ -static void -call_api_dispatch(pcmk_ipc_api_t *api, xmlNode *message) -{ - crm_log_xml_trace(message, "ipc-received"); - if ((api->cmds != NULL) && (api->cmds->dispatch != NULL)) { - api->cmds->dispatch(api, message); - } -} - -/*! - * \internal - * \brief Dispatch data read from IPC source - * - * \param[in] buffer Data read from IPC - * \param[in] length Number of bytes of data in buffer (ignored) - * \param[in] user_data IPC object - * - * \return Always 0 (meaning connection is still required) - * - * \note This function can be used as a main loop IPC dispatch callback. - */ -static int -dispatch_ipc_data(const char *buffer, ssize_t length, gpointer user_data) -{ - pcmk_ipc_api_t *api = user_data; - xmlNode *msg; - - CRM_CHECK(api != NULL, return 0); - - if (buffer == NULL) { - crm_warn("Empty message received from %s IPC", - pcmk_ipc_name(api, true)); - return 0; - } - - msg = string2xml(buffer); - if (msg == NULL) { - crm_warn("Malformed message received from %s IPC", - pcmk_ipc_name(api, true)); - return 0; - } - call_api_dispatch(api, msg); - free_xml(msg); - return 0; -} - -/*! 
- * \brief Check whether an IPC connection has data available (without main loop)
- *
- * \param[in]  api         IPC API connection
- * \param[in]  timeout_ms  If less than 0, poll indefinitely; if 0, poll once
- *                         and return immediately; otherwise, poll for up to
- *                         this many milliseconds
- *
- * \return Standard Pacemaker return code
- *
- * \note Callers of pcmk_connect_ipc() using pcmk_ipc_dispatch_poll should call
- *       this function to check whether IPC data is available. Return values of
- *       interest include pcmk_rc_ok meaning data is available, and EAGAIN
- *       meaning no data is available; all other values indicate errors.
- * \todo This does not allow the caller to poll multiple file descriptors at
- *       once. If there is demand for that, we could add a wrapper for
- *       crm_ipc_get_fd(api->ipc), so the caller can call poll() themselves.
- */
-int
-pcmk_poll_ipc(pcmk_ipc_api_t *api, int timeout_ms)
-{
-    int rc;
-    struct pollfd pollfd = { 0, };
-
-    if ((api == NULL) || (api->dispatch_type != pcmk_ipc_dispatch_poll)) {
-        return EINVAL;
-    }
-    pollfd.fd = crm_ipc_get_fd(api->ipc);
-    pollfd.events = POLLIN;
-    rc = poll(&pollfd, 1, timeout_ms);
-    if (rc < 0) {
-        return errno;
-    } else if (rc == 0) {
-        return EAGAIN;
-    }
-    return pcmk_rc_ok;
-}
-
-/*!
- * \brief Dispatch available messages on an IPC connection (without main loop)
- *
- * \param[in] api  IPC API connection
- *
- * \note Callers of pcmk_connect_ipc() using pcmk_ipc_dispatch_poll should call
- *       this function when IPC data is available.
- */
-void
-pcmk_dispatch_ipc(pcmk_ipc_api_t *api)
-{
-    if (api == NULL) {
-        return;
-    }
-    while (crm_ipc_ready(api->ipc) > 0) {
-        if (crm_ipc_read(api->ipc) > 0) {
-            dispatch_ipc_data(crm_ipc_buffer(api->ipc), 0, api);
-        }
-    }
-}
-
-// \return Standard Pacemaker return code
-static int
-connect_with_main_loop(pcmk_ipc_api_t *api)
-{
-    int rc;
-
-    struct ipc_client_callbacks callbacks = {
-        .dispatch = dispatch_ipc_data,
-        .destroy = ipc_post_disconnect,
-    };
-
-    rc = pcmk__add_mainloop_ipc(api->ipc, G_PRIORITY_DEFAULT, api,
-                                &callbacks, &(api->mainloop_io));
-    if (rc != pcmk_rc_ok) {
-        return rc;
-    }
-    crm_debug("Connected to %s IPC (attached to main loop)",
-              pcmk_ipc_name(api, true));
-    /* After this point, api->mainloop_io owns api->ipc, so api->ipc
-     * should not be explicitly freed.
-     */
-    return pcmk_rc_ok;
-}
-
-// \return Standard Pacemaker return code
-static int
-connect_without_main_loop(pcmk_ipc_api_t *api)
-{
-    int rc;
-
-    if (!crm_ipc_connect(api->ipc)) {
-        rc = errno;
-        crm_ipc_close(api->ipc);
-        return rc;
-    }
-    crm_debug("Connected to %s IPC (without main loop)",
-              pcmk_ipc_name(api, true));
-    return pcmk_rc_ok;
-}
-
-/*!
- * \brief Connect to a Pacemaker daemon via IPC
- *
- * \param[in] api            IPC API instance
- * \param[in] dispatch_type  How IPC replies should be dispatched
- *
- * \return Standard Pacemaker return code
- */
-int
-pcmk_connect_ipc(pcmk_ipc_api_t *api, enum pcmk_ipc_dispatch dispatch_type)
-{
-    int rc = pcmk_rc_ok;
-
-    if (api == NULL) {
-        crm_err("Cannot connect to uninitialized API object");
-        return EINVAL;
-    }
-
-    if (api->ipc == NULL) {
-        api->ipc = crm_ipc_new(pcmk_ipc_name(api, false),
-                               api->ipc_size_max);
-        if (api->ipc == NULL) {
-            crm_err("Failed to re-create IPC API");
-            return ENOMEM;
-        }
-    }
-
-    if (crm_ipc_connected(api->ipc)) {
-        crm_trace("Already connected to %s IPC API", pcmk_ipc_name(api, true));
-        return pcmk_rc_ok;
-    }
-
-    api->dispatch_type = dispatch_type;
-    switch (dispatch_type) {
-        case pcmk_ipc_dispatch_main:
-            rc = connect_with_main_loop(api);
-            break;
-
-        case pcmk_ipc_dispatch_sync:
-        case pcmk_ipc_dispatch_poll:
-            rc = connect_without_main_loop(api);
-            break;
-    }
-    if (rc != pcmk_rc_ok) {
-        return rc;
-    }
-
-    if ((api->cmds != NULL) && (api->cmds->post_connect != NULL)) {
-        rc = api->cmds->post_connect(api);
-        if (rc != pcmk_rc_ok) {
-            crm_ipc_close(api->ipc);
-        }
-    }
-    return rc;
-}
-
-/*!
- * \brief Disconnect an IPC API instance
- *
- * \param[in] api  IPC API connection
- *
- * \note If the connection is attached to a main loop, this function should be
- *       called before quitting the main loop, to ensure that all memory is
- *       freed.
- */
-void
-pcmk_disconnect_ipc(pcmk_ipc_api_t *api)
-{
-    if ((api == NULL) || (api->ipc == NULL)) {
-        return;
-    }
-    switch (api->dispatch_type) {
-        case pcmk_ipc_dispatch_main:
-            {
-                mainloop_io_t *mainloop_io = api->mainloop_io;
-
-                // Make sure no code with access to api can use these again
-                api->mainloop_io = NULL;
-                api->ipc = NULL;
-
-                mainloop_del_ipc_client(mainloop_io);
-                // After this point api might have already been freed
-            }
-            break;
-
-        case pcmk_ipc_dispatch_poll:
-        case pcmk_ipc_dispatch_sync:
-            {
-                crm_ipc_t *ipc = api->ipc;
-
-                // Make sure no code with access to api can use ipc again
-                api->ipc = NULL;
-
-                // This should always be the case already, but to be safe
-                api->free_on_disconnect = false;
-
-                crm_ipc_destroy(ipc);
-                ipc_post_disconnect(api);
-            }
-            break;
-    }
-}
-
-/*!
- * \brief Register a callback for IPC API events
- *
- * \param[in] api        IPC API connection
- * \param[in] cb         Callback to register
- * \param[in] user_data  Caller data to pass to callback
- *
- * \note This function may be called multiple times to update the callback
- *       and/or user data. The caller remains responsible for freeing
- *       user_data in any case (after the IPC is disconnected, if the
- *       user data is still registered with the IPC).
- */
-void
-pcmk_register_ipc_callback(pcmk_ipc_api_t *api, pcmk_ipc_callback_t cb,
-                           void *user_data)
-{
-    if (api == NULL) {
-        return;
-    }
-    api->cb = cb;
-    api->user_data = user_data;
-}
-
-/*!
- * \internal
- * \brief Send an XML request across an IPC API connection
- *
- * \param[in] api      IPC API connection
- * \param[in] request  XML request to send
- *
- * \return Standard Pacemaker return code
- *
- * \note Daemon-specific IPC API functions should call this function to send
- *       requests, because it handles different dispatch types appropriately.
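- *
- *       With pcmk_ipc_dispatch_sync, the reply (if one is expected) is
- *       received here and fed to the registered event callback before this
- *       function returns; with the main-loop and polling dispatch types, the
- *       send returns once the request is written, and any reply arrives
- *       through the normal dispatch path instead.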
- */ -int -pcmk__send_ipc_request(pcmk_ipc_api_t *api, xmlNode *request) -{ - int rc; - xmlNode *reply = NULL; - enum crm_ipc_flags flags = crm_ipc_flags_none; - - if ((api == NULL) || (api->ipc == NULL) || (request == NULL)) { - return EINVAL; - } - crm_log_xml_trace(request, "ipc-sent"); - - // Synchronous dispatch requires waiting for a reply - if ((api->dispatch_type == pcmk_ipc_dispatch_sync) - && (api->cmds != NULL) - && (api->cmds->reply_expected != NULL) - && (api->cmds->reply_expected(api, request))) { - flags = crm_ipc_client_response; - } - - // The 0 here means a default timeout of 5 seconds - rc = crm_ipc_send(api->ipc, request, flags, 0, &reply); - - if (rc < 0) { - return pcmk_legacy2rc(rc); - } else if (rc == 0) { - return ENODATA; - } - - // With synchronous dispatch, we dispatch any reply now - if (reply != NULL) { - call_api_dispatch(api, reply); - free_xml(reply); - } - return pcmk_rc_ok; -} - -/*! - * \internal - * \brief Create the XML for an IPC request to purge a node from the peer cache - * - * \param[in] api IPC API connection - * \param[in] node_name If not NULL, name of node to purge - * \param[in] nodeid If not 0, node ID of node to purge - * - * \return Newly allocated IPC request XML - * - * \note The controller, fencer, and pacemakerd use the same request syntax, but - * the attribute manager uses a different one. The CIB manager doesn't - * have any syntax for it. The executor and scheduler don't connect to the - * cluster layer and thus don't have or need any syntax for it. - * - * \todo Modify the attribute manager to accept the common syntax (as well - * as its current one, for compatibility with older clients). Modify - * the CIB manager to accept and honor the common syntax. Modify the - * executor and scheduler to accept the syntax (immediately returning - * success), just for consistency. Modify this function to use the - * common syntax with all daemons if their version supports it. - */ -static xmlNode * -create_purge_node_request(pcmk_ipc_api_t *api, const char *node_name, - uint32_t nodeid) -{ - xmlNode *request = NULL; - const char *client = crm_system_name? crm_system_name : "client"; - - switch (api->server) { - case pcmk_ipc_attrd: - request = create_xml_node(NULL, __FUNCTION__); - crm_xml_add(request, F_TYPE, T_ATTRD); - crm_xml_add(request, F_ORIG, crm_system_name); - crm_xml_add(request, PCMK__XA_TASK, PCMK__ATTRD_CMD_PEER_REMOVE); - crm_xml_add(request, PCMK__XA_ATTR_NODE_NAME, node_name); - if (nodeid > 0) { - crm_xml_add_int(request, PCMK__XA_ATTR_NODE_ID, (int) nodeid); - } - break; - - case pcmk_ipc_controld: - case pcmk_ipc_fenced: - case pcmk_ipc_pacemakerd: - request = create_request(CRM_OP_RM_NODE_CACHE, NULL, NULL, - pcmk_ipc_name(api, false), client, NULL); - if (nodeid > 0) { - crm_xml_set_id(request, "%lu", (unsigned long) nodeid); - } - crm_xml_add(request, XML_ATTR_UNAME, node_name); - break; - - case pcmk_ipc_based: - case pcmk_ipc_execd: - case pcmk_ipc_schedulerd: - break; - } - return request; -} - -/*! - * \brief Ask a Pacemaker daemon to purge a node from its peer cache - * - * \param[in] api IPC API connection - * \param[in] node_name If not NULL, name of node to purge - * \param[in] nodeid If not 0, node ID of node to purge - * - * \return Standard Pacemaker return code - * - * \note At least one of node_name or nodeid must be specified. 
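- *
- * \note Usage sketch (hypothetical node name, error checks omitted):
- *
- *           pcmk_ipc_api_t *api = NULL;
- *
- *           pcmk_new_ipc_api(&api, pcmk_ipc_controld);
- *           pcmk_connect_ipc(api, pcmk_ipc_dispatch_sync);
- *           pcmk_ipc_purge_node(api, "node1", 0);
- *           pcmk_free_ipc_api(api);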
- */ -int -pcmk_ipc_purge_node(pcmk_ipc_api_t *api, const char *node_name, uint32_t nodeid) -{ - int rc = 0; - xmlNode *request = NULL; - - if (api == NULL) { - return EINVAL; - } - if ((node_name == NULL) && (nodeid == 0)) { - return EINVAL; - } - - request = create_purge_node_request(api, node_name, nodeid); - if (request == NULL) { - return EOPNOTSUPP; - } - rc = pcmk__send_ipc_request(api, request); - free_xml(request); - - crm_debug("%s peer cache purge of node %s[%lu]: rc=%d", - pcmk_ipc_name(api, true), node_name, (unsigned long) nodeid, rc); - return rc; -} - -/* - * Generic IPC API (to eventually be deprecated as public API and made internal) - */ - -struct crm_ipc_s { - struct pollfd pfd; - unsigned int max_buf_size; // maximum bytes we can send or receive over IPC - unsigned int buf_size; // size of allocated buffer - int msg_size; - int need_reply; - char *buffer; - char *name; - qb_ipcc_connection_t *ipc; -}; - -/*! - * \brief Create a new (legacy) object for using Pacemaker daemon IPC - * - * \param[in] name IPC system name to connect to - * \param[in] max_size Use a maximum IPC buffer size of at least this size - * - * \return Newly allocated IPC object on success, NULL otherwise - * - * \note The caller is responsible for freeing the result using - * crm_ipc_destroy(). - * \note This should be considered deprecated for use with daemons supported by - * pcmk_new_ipc_api(). - */ -crm_ipc_t * -crm_ipc_new(const char *name, size_t max_size) -{ - crm_ipc_t *client = NULL; - - client = calloc(1, sizeof(crm_ipc_t)); - if (client == NULL) { - crm_err("Could not create IPC connection: %s", strerror(errno)); - return NULL; - } - - client->name = strdup(name); - if (client->name == NULL) { - crm_err("Could not create IPC connection: %s", strerror(errno)); - free(client); - return NULL; - } - client->buf_size = pcmk__ipc_buffer_size(max_size); - client->buffer = malloc(client->buf_size); - if (client->buffer == NULL) { - crm_err("Could not create IPC connection: %s", strerror(errno)); - free(client->name); - free(client); - return NULL; - } - - /* Clients initiating connection pick the max buf size */ - client->max_buf_size = client->buf_size; - - client->pfd.fd = -1; - client->pfd.events = POLLIN; - client->pfd.revents = 0; - - return client; -} - -/*! - * \brief Establish an IPC connection to a Pacemaker component - * - * \param[in] client Connection instance obtained from crm_ipc_new() - * - * \return TRUE on success, FALSE otherwise (in which case errno will be set; - * specifically, in case of discovering the remote side is not - * authentic, its value is set to ECONNABORTED). 
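- *
- *         This lets a caller distinguish an inauthentic peer from an
- *         ordinary connection failure, e.g.:
- *
- *             if (!crm_ipc_connect(client) && (errno == ECONNABORTED)) {
- *                 // peer failed the authenticity check
- *             }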
- */ -bool -crm_ipc_connect(crm_ipc_t * client) -{ - uid_t cl_uid = 0; - gid_t cl_gid = 0; - pid_t found_pid = 0; uid_t found_uid = 0; gid_t found_gid = 0; - int rv; - - client->need_reply = FALSE; - client->ipc = qb_ipcc_connect(client->name, client->buf_size); - - if (client->ipc == NULL) { - crm_debug("Could not establish %s connection: %s (%d)", client->name, pcmk_strerror(errno), errno); - return FALSE; - } - - client->pfd.fd = crm_ipc_get_fd(client); - if (client->pfd.fd < 0) { - rv = errno; - /* error message already logged */ - crm_ipc_close(client); - errno = rv; - return FALSE; - } - - rv = pcmk_daemon_user(&cl_uid, &cl_gid); - if (rv < 0) { - /* error message already logged */ - crm_ipc_close(client); - errno = -rv; - return FALSE; - } - - if (!(rv = crm_ipc_is_authentic_process(client->pfd.fd, cl_uid, cl_gid, - &found_pid, &found_uid, - &found_gid))) { - crm_err("Daemon (IPC %s) is not authentic:" - " process %lld (uid: %lld, gid: %lld)", - client->name, (long long) PCMK__SPECIAL_PID_AS_0(found_pid), - (long long) found_uid, (long long) found_gid); - crm_ipc_close(client); - errno = ECONNABORTED; - return FALSE; - - } else if (rv < 0) { - errno = -rv; - crm_perror(LOG_ERR, "Could not verify authenticity of daemon (IPC %s)", - client->name); - crm_ipc_close(client); - errno = -rv; - return FALSE; - } - - qb_ipcc_context_set(client->ipc, client); - -#ifdef HAVE_IPCS_GET_BUFFER_SIZE - client->max_buf_size = qb_ipcc_get_buffer_size(client->ipc); - if (client->max_buf_size > client->buf_size) { - free(client->buffer); - client->buffer = calloc(1, client->max_buf_size); - client->buf_size = client->max_buf_size; - } -#endif - - return TRUE; -} - -void -crm_ipc_close(crm_ipc_t * client) -{ - if (client) { - if (client->ipc) { - qb_ipcc_connection_t *ipc = client->ipc; - - client->ipc = NULL; - qb_ipcc_disconnect(ipc); - } - } -} - -void -crm_ipc_destroy(crm_ipc_t * client) -{ - if (client) { - if (client->ipc && qb_ipcc_is_connected(client->ipc)) { - crm_notice("Destroying an active IPC connection to %s", client->name); - /* The next line is basically unsafe - * - * If this connection was attached to mainloop and mainloop is active, - * the 'disconnected' callback will end up back here and we'll end - * up free'ing the memory twice - something that can still happen - * even without this if we destroy a connection and it closes before - * we call exit - */ - /* crm_ipc_close(client); */ - } - crm_trace("Destroying IPC connection to %s: %p", client->name, client); - free(client->buffer); - free(client->name); - free(client); - } -} - -int -crm_ipc_get_fd(crm_ipc_t * client) -{ - int fd = 0; - - if (client && client->ipc && (qb_ipcc_fd_get(client->ipc, &fd) == 0)) { - return fd; - } - errno = EINVAL; - crm_perror(LOG_ERR, "Could not obtain IPC file descriptor for %s", - (client? client->name : "unspecified client")); - return -errno; -} - -bool -crm_ipc_connected(crm_ipc_t * client) -{ - bool rc = FALSE; - - if (client == NULL) { - crm_trace("No client"); - return FALSE; - - } else if (client->ipc == NULL) { - crm_trace("No connection"); - return FALSE; - - } else if (client->pfd.fd < 0) { - crm_trace("Bad descriptor"); - return FALSE; - } - - rc = qb_ipcc_is_connected(client->ipc); - if (rc == FALSE) { - client->pfd.fd = -EINVAL; - } - return rc; -} - -/*!
- * \brief Check whether an IPC connection is ready to be read - * - * \param[in] client Connection to check - * - * \return Positive value if ready to be read, 0 if not ready, -errno on error - */ -int -crm_ipc_ready(crm_ipc_t *client) -{ - int rc; - - CRM_ASSERT(client != NULL); - - if (crm_ipc_connected(client) == FALSE) { - return -ENOTCONN; - } - - client->pfd.revents = 0; - rc = poll(&(client->pfd), 1, 0); - return (rc < 0)? -errno : rc; -} - -// \return Standard Pacemaker return code -static int -crm_ipc_decompress(crm_ipc_t * client) -{ - pcmk__ipc_header_t *header = (pcmk__ipc_header_t *)(void*)client->buffer; - - if (header->size_compressed) { - int rc = 0; - unsigned int size_u = 1 + header->size_uncompressed; - /* never let buf size fall below our max size required for ipc reads. */ - unsigned int new_buf_size = QB_MAX((sizeof(pcmk__ipc_header_t) + size_u), client->max_buf_size); - char *uncompressed = calloc(1, new_buf_size); - - crm_trace("Decompressing message data %u bytes into %u bytes", - header->size_compressed, size_u); - - rc = BZ2_bzBuffToBuffDecompress(uncompressed + sizeof(pcmk__ipc_header_t), &size_u, - client->buffer + sizeof(pcmk__ipc_header_t), header->size_compressed, 1, 0); - - if (rc != BZ_OK) { - crm_err("Decompression failed: %s " CRM_XS " bzerror=%d", - bz2_strerror(rc), rc); - free(uncompressed); - return EILSEQ; - } - - /* - * This assert no longer holds true. For an identical msg, some clients may - * require compression, and others may not. If that same msg (event) is sent - * to multiple clients, it could result in some clients receiving a compressed - * msg even though compression was not explicitly required for them. - * - * CRM_ASSERT((header->size_uncompressed + sizeof(pcmk__ipc_header_t)) >= ipc_buffer_max); - */ - CRM_ASSERT(size_u == header->size_uncompressed); - - memcpy(uncompressed, client->buffer, sizeof(pcmk__ipc_header_t)); /* Preserve the header */ - header = (pcmk__ipc_header_t *)(void*)uncompressed; - - free(client->buffer); - client->buf_size = new_buf_size; - client->buffer = uncompressed; - } - - CRM_ASSERT(client->buffer[sizeof(pcmk__ipc_header_t) + header->size_uncompressed - 1] == 0); - return pcmk_rc_ok; -} - -long -crm_ipc_read(crm_ipc_t * client) -{ - pcmk__ipc_header_t *header = NULL; - - CRM_ASSERT(client != NULL); - CRM_ASSERT(client->ipc != NULL); - CRM_ASSERT(client->buffer != NULL); - - client->buffer[0] = 0; - client->msg_size = qb_ipcc_event_recv(client->ipc, client->buffer, - client->buf_size, 0); - if (client->msg_size >= 0) { - int rc = crm_ipc_decompress(client); - - if (rc != pcmk_rc_ok) { - return pcmk_rc2legacy(rc); - } - - header = (pcmk__ipc_header_t *)(void*)client->buffer; - if (!pcmk__valid_ipc_header(header)) { - return -EBADMSG; - } - - crm_trace("Received %s event %d, size=%u, rc=%d, text: %.100s", - client->name, header->qb.id, header->qb.size, client->msg_size, - client->buffer + sizeof(pcmk__ipc_header_t)); - - } else { - crm_trace("No message from %s received: %s", client->name, pcmk_strerror(client->msg_size)); - } - - if (crm_ipc_connected(client) == FALSE || client->msg_size == -ENOTCONN) { - crm_err("Connection to %s failed", client->name); - } - - if (header) { - /* Data excluding the header */ - return header->size_uncompressed; - } - return -ENOMSG; -} - -const char * -crm_ipc_buffer(crm_ipc_t * client) -{ - CRM_ASSERT(client != NULL); - return client->buffer + sizeof(pcmk__ipc_header_t); -} - -uint32_t -crm_ipc_buffer_flags(crm_ipc_t * client) -{ - pcmk__ipc_header_t *header = NULL; - - 
CRM_ASSERT(client != NULL); - if (client->buffer == NULL) { - return 0; - } - - header = (pcmk__ipc_header_t *)(void*)client->buffer; - return header->flags; -} - -const char * -crm_ipc_name(crm_ipc_t * client) -{ - CRM_ASSERT(client != NULL); - return client->name; -} - -// \return Standard Pacemaker return code -static int -internal_ipc_get_reply(crm_ipc_t *client, int request_id, int ms_timeout, - ssize_t *bytes) -{ - time_t timeout = time(NULL) + 1 + (ms_timeout / 1000); - int rc = pcmk_rc_ok; - - /* get the reply */ - crm_trace("client %s waiting on reply to msg id %d", client->name, request_id); - do { - - *bytes = qb_ipcc_recv(client->ipc, client->buffer, client->buf_size, 1000); - if (*bytes > 0) { - pcmk__ipc_header_t *hdr = NULL; - - rc = crm_ipc_decompress(client); - if (rc != pcmk_rc_ok) { - return rc; - } - - hdr = (pcmk__ipc_header_t *)(void*)client->buffer; - if (hdr->qb.id == request_id) { - /* Got it */ - break; - } else if (hdr->qb.id < request_id) { - xmlNode *bad = string2xml(crm_ipc_buffer(client)); - - crm_err("Discarding old reply %d (need %d)", hdr->qb.id, request_id); - crm_log_xml_notice(bad, "OldIpcReply"); - - } else { - xmlNode *bad = string2xml(crm_ipc_buffer(client)); - - crm_err("Discarding newer reply %d (need %d)", hdr->qb.id, request_id); - crm_log_xml_notice(bad, "ImpossibleReply"); - CRM_ASSERT(hdr->qb.id <= request_id); - } - } else if (crm_ipc_connected(client) == FALSE) { - crm_err("Server disconnected client %s while waiting for msg id %d", client->name, - request_id); - break; - } - - } while (time(NULL) < timeout); - - if (*bytes < 0) { - rc = (int) -*bytes; // System errno - } - return rc; -} - -/*! - * \brief Send an IPC XML message - * - * \param[in] client Connection to IPC server - * \param[in] message XML message to send - * \param[in] flags Bitmask of crm_ipc_flags - * \param[in] ms_timeout Give up if not sent within this much time - * (5 seconds if 0, or no timeout if negative) - * \param[out] reply Reply from server (or NULL if none) - * - * \return Negative errno on error, otherwise size of reply received in bytes - * if reply was needed, otherwise number of bytes sent - */ -int -crm_ipc_send(crm_ipc_t * client, xmlNode * message, enum crm_ipc_flags flags, int32_t ms_timeout, - xmlNode ** reply) -{ - int rc = 0; - ssize_t qb_rc = 0; - ssize_t bytes = 0; - struct iovec *iov; - static uint32_t id = 0; - static int factor = 8; - pcmk__ipc_header_t *header; - - if (client == NULL) { - crm_notice("Can't send IPC request without connection (bug?)"); - return -ENOTCONN; - - } else if (crm_ipc_connected(client) == FALSE) { - /* Don't even bother */ - crm_notice("Can't send IPC request to %s: Connection closed", - client->name); - return -ENOTCONN; - } - - if (ms_timeout == 0) { - ms_timeout = 5000; - } - - if (client->need_reply) { - qb_rc = qb_ipcc_recv(client->ipc, client->buffer, client->buf_size, ms_timeout); - if (qb_rc < 0) { - crm_warn("Sending IPC to %s disabled until pending reply received", - client->name); - return -EALREADY; - - } else { - crm_notice("Sending IPC to %s re-enabled after pending reply received", - client->name); - client->need_reply = FALSE; - } - } - - id++; - CRM_LOG_ASSERT(id != 0); /* Crude wrap-around detection */ - rc = pcmk__ipc_prepare_iov(id, message, client->max_buf_size, &iov, &bytes); - if (rc != pcmk_rc_ok) { - crm_warn("Couldn't prepare IPC request to %s: %s " CRM_XS " rc=%d", - client->name, pcmk_rc_str(rc), rc); - return pcmk_rc2legacy(rc); - } - - header = iov[0].iov_base; -
header->flags |= flags; - - if(is_set(flags, crm_ipc_proxied)) { - /* Don't look for a synchronous response */ - clear_bit(flags, crm_ipc_client_response); - } - - if(header->size_compressed) { - if(factor < 10 && (client->max_buf_size / 10) < (bytes / factor)) { - crm_notice("Compressed message exceeds %d0%% of configured IPC " - "limit (%u bytes); consider setting PCMK_ipc_buffer to " - "%u or higher", - factor, client->max_buf_size, 2 * client->max_buf_size); - factor++; - } - } - - crm_trace("Sending %s IPC request %d of %u bytes using %dms timeout", - client->name, header->qb.id, header->qb.size, ms_timeout); - - if (ms_timeout > 0 || is_not_set(flags, crm_ipc_client_response)) { - - time_t timeout = time(NULL) + 1 + (ms_timeout / 1000); - - do { - /* @TODO Is this check really needed? Won't qb_ipcc_sendv() return - * an error if it's not connected? - */ - if (!crm_ipc_connected(client)) { - goto send_cleanup; - } - - qb_rc = qb_ipcc_sendv(client->ipc, iov, 2); - } while ((qb_rc == -EAGAIN) && (time(NULL) < timeout)); - - rc = (int) qb_rc; // Negative of system errno, or bytes sent - if (qb_rc <= 0) { - goto send_cleanup; - - } else if (is_not_set(flags, crm_ipc_client_response)) { - crm_trace("Not waiting for reply to %s IPC request %d", - client->name, header->qb.id); - goto send_cleanup; - } - - rc = internal_ipc_get_reply(client, header->qb.id, ms_timeout, &bytes); - if (rc != pcmk_rc_ok) { - /* We didn't get the reply in time, so disable future sends for now. - * The only alternative would be to close the connection since we - * don't know how to detect and discard out-of-sequence replies. - * - * @TODO Implement out-of-sequence detection - */ - client->need_reply = TRUE; - } - rc = (int) bytes; // Negative system errno, or size of reply received - - } else { - // No timeout, and client response needed - do { - qb_rc = qb_ipcc_sendv_recv(client->ipc, iov, 2, client->buffer, - client->buf_size, -1); - } while ((qb_rc == -EAGAIN) && crm_ipc_connected(client)); - rc = (int) qb_rc; // Negative system errno, or size of reply received - } - - if (rc > 0) { - pcmk__ipc_header_t *hdr = (pcmk__ipc_header_t *)(void*)client->buffer; - - crm_trace("Received %d-byte reply %d to %s IPC %d: %.100s", - rc, hdr->qb.id, client->name, header->qb.id, - crm_ipc_buffer(client)); - - if (reply) { - *reply = string2xml(crm_ipc_buffer(client)); - } - - } else { - crm_trace("No reply to %s IPC %d: rc=%d", - client->name, header->qb.id, rc); - } - - send_cleanup: - if (crm_ipc_connected(client) == FALSE) { - crm_notice("Couldn't send %s IPC request %d: Connection closed " - CRM_XS " rc=%d", client->name, header->qb.id, rc); - - } else if (rc == -ETIMEDOUT) { - crm_warn("%s IPC request %d failed: %s after %dms " CRM_XS " rc=%d", - client->name, header->qb.id, pcmk_strerror(rc), ms_timeout, - rc); - crm_write_blackbox(0, NULL); - - } else if (rc <= 0) { - crm_warn("%s IPC request %d failed: %s " CRM_XS " rc=%d", - client->name, header->qb.id, - ((rc == 0)? 
"No bytes sent" : pcmk_strerror(rc)), rc); - } - - pcmk_free_ipc_event(iov); - return rc; -} - -int -crm_ipc_is_authentic_process(int sock, uid_t refuid, gid_t refgid, - pid_t *gotpid, uid_t *gotuid, gid_t *gotgid) { - int ret = 0; - pid_t found_pid = 0; uid_t found_uid = 0; gid_t found_gid = 0; -#if defined(US_AUTH_PEERCRED_UCRED) - struct ucred ucred; - socklen_t ucred_len = sizeof(ucred); - - if (!getsockopt(sock, SOL_SOCKET, SO_PEERCRED, - &ucred, &ucred_len) - && ucred_len == sizeof(ucred)) { - found_pid = ucred.pid; found_uid = ucred.uid; found_gid = ucred.gid; - -#elif defined(US_AUTH_PEERCRED_SOCKPEERCRED) - struct sockpeercred sockpeercred; - socklen_t sockpeercred_len = sizeof(sockpeercred); - - if (!getsockopt(sock, SOL_SOCKET, SO_PEERCRED, - &sockpeercred, &sockpeercred_len) - && sockpeercred_len == sizeof(sockpeercred_len)) { - found_pid = sockpeercred.pid; - found_uid = sockpeercred.uid; found_gid = sockpeercred.gid; - -#elif defined(US_AUTH_GETPEEREID) - if (!getpeereid(sock, &found_uid, &found_gid)) { - found_pid = PCMK__SPECIAL_PID; /* cannot obtain PID (FreeBSD) */ - -#elif defined(US_AUTH_GETPEERUCRED) - ucred_t *ucred; - if (!getpeerucred(sock, &ucred)) { - errno = 0; - found_pid = ucred_getpid(ucred); - found_uid = ucred_geteuid(ucred); found_gid = ucred_getegid(ucred); - ret = -errno; - ucred_free(ucred); - if (ret) { - return (ret < 0) ? ret : -pcmk_err_generic; - } - -#else -# error "No way to authenticate a Unix socket peer" - errno = 0; - if (0) { -#endif - if (gotpid != NULL) { - *gotpid = found_pid; - } - if (gotuid != NULL) { - *gotuid = found_uid; - } - if (gotgid != NULL) { - *gotgid = found_gid; - } - ret = (found_uid == 0 || found_uid == refuid || found_gid == refgid); - } else { - ret = (errno > 0) ? -errno : -pcmk_err_generic; - } - - return ret; -} - -int -pcmk__ipc_is_authentic_process_active(const char *name, uid_t refuid, - gid_t refgid, pid_t *gotpid) -{ - static char last_asked_name[PATH_MAX / 2] = ""; /* log spam prevention */ - int fd; - int rc = pcmk_rc_ipc_unresponsive; - int auth_rc = 0; - int32_t qb_rc; - pid_t found_pid = 0; uid_t found_uid = 0; gid_t found_gid = 0; - qb_ipcc_connection_t *c; - - c = qb_ipcc_connect(name, 0); - if (c == NULL) { - crm_info("Could not connect to %s IPC: %s", name, strerror(errno)); - rc = pcmk_rc_ipc_unresponsive; - goto bail; - } - - qb_rc = qb_ipcc_fd_get(c, &fd); - if (qb_rc != 0) { - rc = (int) -qb_rc; // System errno - crm_err("Could not get fd from %s IPC: %s " CRM_XS " rc=%d", - name, pcmk_rc_str(rc), rc); - goto bail; - } - - auth_rc = crm_ipc_is_authentic_process(fd, refuid, refgid, &found_pid, - &found_uid, &found_gid); - if (auth_rc < 0) { - rc = pcmk_legacy2rc(auth_rc); - crm_err("Could not get peer credentials from %s IPC: %s " - CRM_XS " rc=%d", name, pcmk_rc_str(rc), rc); - goto bail; - } - - if (gotpid != NULL) { - *gotpid = found_pid; - } - - if (auth_rc == 0) { - crm_err("Daemon (IPC %s) effectively blocked with unauthorized" - " process %lld (uid: %lld, gid: %lld)", - name, (long long) PCMK__SPECIAL_PID_AS_0(found_pid), - (long long) found_uid, (long long) found_gid); - rc = pcmk_rc_ipc_unauthorized; - goto bail; - } - - rc = pcmk_rc_ok; - if ((found_uid != refuid || found_gid != refgid) - && strncmp(last_asked_name, name, sizeof(last_asked_name))) { - if ((found_uid == 0) && (refuid != 0)) { - crm_warn("Daemon (IPC %s) runs as root, whereas the expected" - " credentials are %lld:%lld, hazard of violating" - " the least privilege principle", - name, (long long) refuid, (long long) refgid); - 
} else { - crm_notice("Daemon (IPC %s) runs as %lld:%lld, whereas the" - " expected credentials are %lld:%lld, which may" - " mean a different set of privileges than expected", - name, (long long) found_uid, (long long) found_gid, - (long long) refuid, (long long) refgid); - } - memccpy(last_asked_name, name, '\0', sizeof(last_asked_name)); - } - -bail: - if (c != NULL) { - qb_ipcc_disconnect(c); - } - return rc; -} diff --git a/lib/common/ipc_common.c b/lib/common/ipc_common.c deleted file mode 100644 index 78360aa..0000000 --- a/lib/common/ipc_common.c +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright 2004-2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * - * This source code is licensed under the GNU Lesser General Public License - * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. - */ - -#include - -#include -#include - -#include -#include "crmcommon_private.h" - -#define MIN_MSG_SIZE 12336 // sizeof(struct qb_ipc_connection_response) -#define MAX_MSG_SIZE 128*1024 // 128k default - -/*! - * \internal - * \brief Choose an IPC buffer size in bytes - * - * \param[in] max Use this value if environment/default is lower - * - * \return Maximum of max and value of PCMK_ipc_buffer (default 128KB) - */ -unsigned int -pcmk__ipc_buffer_size(unsigned int max) -{ - static unsigned int global_max = 0; - - if (global_max == 0) { - const char *env = getenv("PCMK_ipc_buffer"); - - if (env) { - int env_max = crm_parse_int(env, "0"); - - global_max = (env_max > 0)? QB_MAX(MIN_MSG_SIZE, env_max) : MAX_MSG_SIZE; - - } else { - global_max = MAX_MSG_SIZE; - } - } - return QB_MAX(max, global_max); -} - -/*! - * \brief Return pacemaker's default IPC buffer size - * - * \return IPC buffer size in bytes - */ -unsigned int -crm_ipc_default_buffer_size(void) -{ - static unsigned int default_size = 0; - - if (default_size == 0) { - default_size = pcmk__ipc_buffer_size(0); - } - return default_size; -} - -/*! - * \internal - * \brief Check whether an IPC header is valid - * - * \param[in] header IPC header to check - * - * \return true if IPC header has a supported version, false otherwise - */ -bool -pcmk__valid_ipc_header(const pcmk__ipc_header_t *header) -{ - if (header == NULL) { - crm_err("IPC message without header"); - return false; - - } else if (header->version > PCMK__IPC_VERSION) { - crm_err("Filtering incompatible v%d IPC message (only versions <= %d supported)", - header->version, PCMK__IPC_VERSION); - return false; - } - return true; -} - -const char * -pcmk__client_type_str(enum pcmk__client_type client_type) -{ - switch (client_type) { - case PCMK__CLIENT_IPC: - return "IPC"; - case PCMK__CLIENT_TCP: - return "TCP"; -#ifdef HAVE_GNUTLS_GNUTLS_H - case PCMK__CLIENT_TLS: - return "TLS"; -#endif - default: - return "unknown"; - } -} diff --git a/lib/common/ipc_controld.c b/lib/common/ipc_controld.c deleted file mode 100644 index 2caea21..0000000 --- a/lib/common/ipc_controld.c +++ /dev/null @@ -1,660 +0,0 @@ -/* - * Copyright 2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * - * This source code is licensed under the GNU Lesser General Public License - * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
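(Illustration, not part of the original patch: the sizing rule implemented by pcmk__ipc_buffer_size() above can be restated as a standalone sketch. MIN_MSG_SIZE and MAX_MSG_SIZE mirror the constants defined in ipc_common.c; strtol() stands in for crm_parse_int().)

    #include <stdlib.h>   // getenv(), strtol()

    #define MIN_MSG_SIZE 12336          // sizeof(struct qb_ipc_connection_response)
    #define MAX_MSG_SIZE (128 * 1024)   // 128KB default

    // PCMK_ipc_buffer overrides the default but is clamped to the minimum,
    // and a caller-supplied maximum can only raise the result, never lower it.
    static unsigned int
    choose_ipc_buffer_size(unsigned int max)
    {
        unsigned int global_max = MAX_MSG_SIZE;
        const char *env = getenv("PCMK_ipc_buffer");

        if (env != NULL) {
            long env_max = strtol(env, NULL, 10);

            if (env_max > 0) {
                global_max = (env_max > MIN_MSG_SIZE)? (unsigned int) env_max
                                                     : MIN_MSG_SIZE;
            }
        }
        return (max > global_max)? max : global_max;
    }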
- */ - -#include - -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include "crmcommon_private.h" - -struct controld_api_private_s { - char *client_uuid; - unsigned int replies_expected; -}; - -// \return Standard Pacemaker return code -static int -new_data(pcmk_ipc_api_t *api) -{ - struct controld_api_private_s *private = NULL; - - api->api_data = calloc(1, sizeof(struct controld_api_private_s)); - - if (api->api_data == NULL) { - return errno; - } - - private = api->api_data; - - /* This is set to the PID because that's how it was always done, but PIDs - * are not unique because clients can be remote. The value appears to be - * unused other than as part of F_CRM_SYS_FROM in IPC requests, which is - * only compared against the internal system names (CRM_SYSTEM_TENGINE, - * etc.), so it shouldn't be a problem. - */ - private->client_uuid = pcmk__getpid_s(); - - /* @TODO Implement a call ID model similar to the CIB, executor, and fencer - * IPC APIs, so that requests and replies can be matched, and - * duplicate replies can be discarded. - */ - return pcmk_rc_ok; -} - -static void -free_data(void *data) -{ - free(((struct controld_api_private_s *) data)->client_uuid); - free(data); -} - -// \return Standard Pacemaker return code -static int -post_connect(pcmk_ipc_api_t *api) -{ - /* The controller currently requires clients to register via a hello - * request, but does not reply back. - */ - struct controld_api_private_s *private = api->api_data; - const char *client_name = crm_system_name? crm_system_name : "client"; - xmlNode *hello; - int rc; - - hello = create_hello_message(private->client_uuid, client_name, - PCMK__CONTROLD_API_MAJOR, - PCMK__CONTROLD_API_MINOR); - rc = pcmk__send_ipc_request(api, hello); - free_xml(hello); - if (rc != pcmk_rc_ok) { - crm_info("Could not send IPC hello to %s: %s " CRM_XS " rc=%d", - pcmk_ipc_name(api, true), pcmk_rc_str(rc), rc); - } else { - crm_debug("Sent IPC hello to %s", pcmk_ipc_name(api, true)); - } - return rc; -} - -#define xml_true(xml, field) crm_is_true(crm_element_value(xml, field)) - -static void -set_node_info_data(pcmk_controld_api_reply_t *data, xmlNode *msg_data) -{ - data->reply_type = pcmk_controld_reply_info; - if (msg_data == NULL) { - return; - } - data->data.node_info.have_quorum = xml_true(msg_data, XML_ATTR_HAVE_QUORUM); - data->data.node_info.is_remote = xml_true(msg_data, XML_NODE_IS_REMOTE); - crm_element_value_int(msg_data, XML_ATTR_ID, &(data->data.node_info.id)); - data->data.node_info.uuid = crm_element_value(msg_data, XML_ATTR_UUID); - data->data.node_info.uname = crm_element_value(msg_data, XML_ATTR_UNAME); - data->data.node_info.state = crm_element_value(msg_data, XML_NODE_IS_PEER); -} - -static void -set_ping_data(pcmk_controld_api_reply_t *data, xmlNode *msg_data) -{ - data->reply_type = pcmk_controld_reply_ping; - if (msg_data == NULL) { - return; - } - data->data.ping.sys_from = crm_element_value(msg_data, - XML_PING_ATTR_SYSFROM); - data->data.ping.fsa_state = crm_element_value(msg_data, - XML_PING_ATTR_CRMDSTATE); - data->data.ping.result = crm_element_value(msg_data, XML_PING_ATTR_STATUS); -} - -static void -set_nodes_data(pcmk_controld_api_reply_t *data, xmlNode *msg_data) -{ - pcmk_controld_api_node_t *node_info; - - data->reply_type = pcmk_controld_reply_nodes; - for (xmlNode *node = first_named_child(msg_data, XML_CIB_TAG_NODE); - node != NULL; node = crm_next_same_xml(node)) { - - long long id_ll = 0; - - node_info = calloc(1, 
sizeof(pcmk_controld_api_node_t)); - crm_element_value_ll(node, XML_ATTR_ID, &id_ll); - if (id_ll > 0) { - node_info->id = id_ll; - } - node_info->uname = crm_element_value(node, XML_ATTR_UNAME); - node_info->state = crm_element_value(node, XML_NODE_IN_CLUSTER); - data->data.nodes = g_list_prepend(data->data.nodes, node_info); - } -} - -static bool -reply_expected(pcmk_ipc_api_t *api, xmlNode *request) -{ - const char *command = crm_element_value(request, F_CRM_TASK); - - if (command == NULL) { - return false; - } - - // We only need to handle commands that functions in this file can send - return !strcmp(command, CRM_OP_REPROBE) - || !strcmp(command, CRM_OP_NODE_INFO) - || !strcmp(command, CRM_OP_PING) - || !strcmp(command, CRM_OP_LRM_FAIL) - || !strcmp(command, CRM_OP_LRM_DELETE); -} - -static void -dispatch(pcmk_ipc_api_t *api, xmlNode *reply) -{ - struct controld_api_private_s *private = api->api_data; - crm_exit_t status = CRM_EX_OK; - xmlNode *msg_data = NULL; - const char *value = NULL; - pcmk_controld_api_reply_t reply_data = { - pcmk_controld_reply_unknown, NULL, NULL, - }; - - if (private->replies_expected > 0) { - private->replies_expected--; - } - - // Do some basic validation of the reply - - /* @TODO We should be able to verify that value is always a response, but - * currently the controller doesn't always properly set the type. Even - * if we fix the controller, we'll still need to handle replies from - * old versions (feature set could be used to differentiate). - */ - value = crm_element_value(reply, F_CRM_MSG_TYPE); - if ((value == NULL) || (strcmp(value, XML_ATTR_REQUEST) - && strcmp(value, XML_ATTR_RESPONSE))) { - crm_debug("Unrecognizable controller message: invalid message type '%s'", - crm_str(value)); - status = CRM_EX_PROTOCOL; - goto done; - } - - if (crm_element_value(reply, XML_ATTR_REFERENCE) == NULL) { - crm_debug("Unrecognizable controller message: no reference"); - status = CRM_EX_PROTOCOL; - goto done; - } - - value = crm_element_value(reply, F_CRM_TASK); - if (value == NULL) { - crm_debug("Unrecognizable controller message: no command name"); - status = CRM_EX_PROTOCOL; - goto done; - } - - // Parse useful info from reply - - reply_data.feature_set = crm_element_value(reply, XML_ATTR_VERSION); - reply_data.host_from = crm_element_value(reply, F_CRM_HOST_FROM); - msg_data = get_message_xml(reply, F_CRM_DATA); - - if (!strcmp(value, CRM_OP_REPROBE)) { - reply_data.reply_type = pcmk_controld_reply_reprobe; - - } else if (!strcmp(value, CRM_OP_NODE_INFO)) { - set_node_info_data(&reply_data, msg_data); - - } else if (!strcmp(value, CRM_OP_INVOKE_LRM)) { - reply_data.reply_type = pcmk_controld_reply_resource; - reply_data.data.resource.node_state = msg_data; - - } else if (!strcmp(value, CRM_OP_PING)) { - set_ping_data(&reply_data, msg_data); - - } else if (!strcmp(value, PCMK__CONTROLD_CMD_NODES)) { - set_nodes_data(&reply_data, msg_data); - - } else { - crm_debug("Unrecognizable controller message: unknown command '%s'", - value); - status = CRM_EX_PROTOCOL; - } - -done: - pcmk__call_ipc_callback(api, pcmk_ipc_event_reply, status, &reply_data); - - // Free any reply data that was allocated - if (safe_str_eq(value, PCMK__CONTROLD_CMD_NODES)) { - g_list_free_full(reply_data.data.nodes, free); - } -} - -pcmk__ipc_methods_t * -pcmk__controld_api_methods() -{ - pcmk__ipc_methods_t *cmds = calloc(1, sizeof(pcmk__ipc_methods_t)); - - if (cmds != NULL) { - cmds->new_data = new_data; - cmds->free_data = free_data; - cmds->post_connect = post_connect; - 
cmds->reply_expected = reply_expected; - cmds->dispatch = dispatch; - } - return cmds; -} - -/*! - * \internal - * \brief Create XML for a controller IPC request - * - * \param[in] api Controller connection - * \param[in] op Controller IPC command name - * \param[in] node Node name to set as destination host - * \param[in] msg_data XML to attach to request as message data - * - * \return Newly allocated XML for request - */ -static xmlNode * -create_controller_request(pcmk_ipc_api_t *api, const char *op, - const char *node, xmlNode *msg_data) -{ - struct controld_api_private_s *private = api->api_data; - const char *sys_to = NULL; - - if ((node == NULL) && !strcmp(op, CRM_OP_PING)) { - sys_to = CRM_SYSTEM_DC; - } else { - sys_to = CRM_SYSTEM_CRMD; - } - return create_request(op, msg_data, node, sys_to, - (crm_system_name? crm_system_name : "client"), - private->client_uuid); -} - -// \return Standard Pacemaker return code -static int -send_controller_request(pcmk_ipc_api_t *api, xmlNode *request, - bool reply_is_expected) -{ - int rc; - - if (crm_element_value(request, XML_ATTR_REFERENCE) == NULL) { - return EINVAL; - } - rc = pcmk__send_ipc_request(api, request); - if ((rc == pcmk_rc_ok) && reply_is_expected) { - struct controld_api_private_s *private = api->api_data; - - private->replies_expected++; - } - return rc; -} - -static xmlNode * -create_reprobe_message_data(const char *target_node, const char *router_node) -{ - xmlNode *msg_data; - - msg_data = create_xml_node(NULL, "data_for_" CRM_OP_REPROBE); - crm_xml_add(msg_data, XML_LRM_ATTR_TARGET, target_node); - if ((router_node != NULL) && safe_str_neq(router_node, target_node)) { - crm_xml_add(msg_data, XML_LRM_ATTR_ROUTER_NODE, router_node); - } - return msg_data; -} - -/*! - * \brief Send a reprobe controller operation - * - * \param[in] api Controller connection - * \param[in] target_node Name of node to reprobe - * \param[in] router_node Router node for host - * - * \return Standard Pacemaker return code - * \note Event callback will get a reply of type pcmk_controld_reply_reprobe. - */ -int -pcmk_controld_api_reprobe(pcmk_ipc_api_t *api, const char *target_node, - const char *router_node) -{ - xmlNode *request; - xmlNode *msg_data; - int rc = pcmk_rc_ok; - - if (api == NULL) { - return EINVAL; - } - if (router_node == NULL) { - router_node = target_node; - } - crm_debug("Sending %s IPC request to reprobe %s via %s", - pcmk_ipc_name(api, true), crm_str(target_node), - crm_str(router_node)); - msg_data = create_reprobe_message_data(target_node, router_node); - request = create_controller_request(api, CRM_OP_REPROBE, router_node, - msg_data); - rc = send_controller_request(api, request, true); - free_xml(msg_data); - free_xml(request); - return rc; -} - -/*! - * \brief Send a "node info" controller operation - * - * \param[in] api Controller connection - * \param[in] nodeid ID of node to get info for (or 0 for local node) - * - * \return Standard Pacemaker return code - * \note Event callback will get a reply of type pcmk_controld_reply_info. - */ -int -pcmk_controld_api_node_info(pcmk_ipc_api_t *api, uint32_t nodeid) -{ - xmlNode *request; - int rc = pcmk_rc_ok; - - request = create_controller_request(api, CRM_OP_NODE_INFO, NULL, NULL); - if (request == NULL) { - return EINVAL; - } - if (nodeid > 0) { - crm_xml_set_id(request, "%lu", (unsigned long) nodeid); - } - - rc = send_controller_request(api, request, true); - free_xml(request); - return rc; -} - -/*! 
- * \brief Ask the controller for status - * - * \param[in] api Controller connection - * \param[in] node_name Name of node whose status is desired (or NULL for DC) - * - * \return Standard Pacemaker return code - * \note Event callback will get a reply of type pcmk_controld_reply_ping. - */ -int -pcmk_controld_api_ping(pcmk_ipc_api_t *api, const char *node_name) -{ - xmlNode *request; - int rc = pcmk_rc_ok; - - request = create_controller_request(api, CRM_OP_PING, node_name, NULL); - if (request == NULL) { - return EINVAL; - } - rc = send_controller_request(api, request, true); - free_xml(request); - return rc; -} - -/*! - * \brief Ask the controller for cluster information - * - * \param[in] api Controller connection - * - * \return Standard Pacemaker return code - * \note Event callback will get a reply of type pcmk_controld_reply_nodes. - */ -int -pcmk_controld_api_list_nodes(pcmk_ipc_api_t *api) -{ - xmlNode *request; - int rc = EINVAL; - - request = create_controller_request(api, PCMK__CONTROLD_CMD_NODES, NULL, - NULL); - if (request != NULL) { - rc = send_controller_request(api, request, true); - free_xml(request); - } - return rc; -} - -/*! - * \internal - * \brief Ask the controller to shut down - * - * \param[in] api Controller connection - * \param[in] node_name Name of node whose controller should shut down - * - * \return Standard Pacemaker return code - * - * \note This capability currently does not work, so the function is considered - * internal. It will likely be removed. - * \note Event callback will not get a reply. - */ -int -pcmk_controld_api_shutdown(pcmk_ipc_api_t *api, const char *node_name) -{ - xmlNode *request; - int rc = pcmk_rc_ok; - - request = create_controller_request(api, CRM_OP_SHUTDOWN, NULL, NULL); - if (request == NULL) { - return EINVAL; - } - rc = send_controller_request(api, request, false); - free_xml(request); - return rc; -} - -/*! - * \internal - * \brief Ask the controller to start a DC election - * - * \param[in] api Controller connection - * - * \return Standard Pacemaker return code - * - * \note This capability currently does not work, so the function is considered - * internal. It will likely be removed. - * \note Event callback will not get a reply. - */ -int -pcmk_controld_api_start_election(pcmk_ipc_api_t *api) -{ - xmlNode *request; - int rc = pcmk_rc_ok; - - request = create_controller_request(api, CRM_OP_VOTE, NULL, NULL); - if (request == NULL) { - return EINVAL; - } - rc = send_controller_request(api, request, false); - free_xml(request); - return rc; -} - -// \return Standard Pacemaker return code -static int -controller_resource_op(pcmk_ipc_api_t *api, const char *op, - const char *target_node, const char *router_node, - bool cib_only, const char *rsc_id, - const char *rsc_long_id, const char *standard, - const char *provider, const char *type) -{ - int rc = pcmk_rc_ok; - char *key; - xmlNode *request, *msg_data, *xml_rsc, *params; - - if (api == NULL) { - return EINVAL; - } - if (router_node == NULL) { - router_node = target_node; - } - - msg_data = create_xml_node(NULL, XML_GRAPH_TAG_RSC_OP); - - /* The controller logs the transition key from resource op requests, so we - * need to have *something* for it. 
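(Usage illustration, not part of the original patch: replies to the requests above arrive through the registered event callback. The sketch assumes pcmk_register_ipc_callback() from the public API and the pcmk_controld_api_reply_t layout populated by set_ping_data() earlier; the header names are assumptions.)

    #include <stdio.h>
    #include <crm/common/ipc.h>           // assumed public API header
    #include <crm/common/ipc_controld.h>  // assumed: pcmk_controld_api_reply_t

    // Sketch of an event callback for pcmk_controld_api_ping()
    static void
    ping_cb(pcmk_ipc_api_t *api, enum pcmk_ipc_event event_type,
            crm_exit_t status, void *event_data, void *user_data)
    {
        pcmk_controld_api_reply_t *reply = event_data;

        if ((event_type == pcmk_ipc_event_reply) && (status == CRM_EX_OK)
            && (reply->reply_type == pcmk_controld_reply_ping)) {
            printf("%s is in state %s (%s)\n", reply->data.ping.sys_from,
                   reply->data.ping.fsa_state, reply->data.ping.result);
        }
    }

    // Typical wiring (error handling omitted): register the callback before
    // connecting, then ping; with synchronous dispatch the reply is
    // delivered to ping_cb() before pcmk_controld_api_ping() returns.
    //
    //     pcmk_register_ipc_callback(api, ping_cb, NULL);
    //     pcmk_connect_ipc(api, pcmk_ipc_dispatch_sync);
    //     pcmk_controld_api_ping(api, NULL);   // NULL targets the DC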
- * @TODO don't use "crm-resource" - */ - key = pcmk__transition_key(0, getpid(), 0, - "xxxxxxxx-xrsc-opxx-xcrm-resourcexxxx"); - crm_xml_add(msg_data, XML_ATTR_TRANSITION_KEY, key); - free(key); - - crm_xml_add(msg_data, XML_LRM_ATTR_TARGET, target_node); - if (safe_str_neq(router_node, target_node)) { - crm_xml_add(msg_data, XML_LRM_ATTR_ROUTER_NODE, router_node); - } - - if (cib_only) { - // Indicate that only the CIB needs to be cleaned - crm_xml_add(msg_data, PCMK__XA_MODE, XML_TAG_CIB); - } - - xml_rsc = create_xml_node(msg_data, XML_CIB_TAG_RESOURCE); - crm_xml_add(xml_rsc, XML_ATTR_ID, rsc_id); - crm_xml_add(xml_rsc, XML_ATTR_ID_LONG, rsc_long_id); - crm_xml_add(xml_rsc, XML_AGENT_ATTR_CLASS, standard); - crm_xml_add(xml_rsc, XML_AGENT_ATTR_PROVIDER, provider); - crm_xml_add(xml_rsc, XML_ATTR_TYPE, type); - - params = create_xml_node(msg_data, XML_TAG_ATTRS); - crm_xml_add(params, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET); - - // The controller parses the timeout from the request - key = crm_meta_name(XML_ATTR_TIMEOUT); - crm_xml_add(params, key, "60000"); /* 1 minute */ //@TODO pass as arg - free(key); - - request = create_controller_request(api, op, router_node, msg_data); - rc = send_controller_request(api, request, true); - free_xml(msg_data); - free_xml(request); - return rc; -} - -/*! - * \brief Ask the controller to fail a resource - * - * \param[in] api Controller connection - * \param[in] target_node Name of node resource is on - * \param[in] router_node Router node for target - * \param[in] rsc_id ID of resource to fail - * \param[in] rsc_long_id Long ID of resource (if any) - * \param[in] standard Standard of resource - * \param[in] provider Provider of resource (if any) - * \param[in] type Type of resource to fail - * - * \return Standard Pacemaker return code - * \note Event callback will get a reply of type pcmk_controld_reply_resource. - */ -int -pcmk_controld_api_fail(pcmk_ipc_api_t *api, - const char *target_node, const char *router_node, - const char *rsc_id, const char *rsc_long_id, - const char *standard, const char *provider, - const char *type) -{ - crm_debug("Sending %s IPC request to fail %s (a.k.a. %s) on %s via %s", - pcmk_ipc_name(api, true), crm_str(rsc_id), crm_str(rsc_long_id), - crm_str(target_node), crm_str(router_node)); - return controller_resource_op(api, CRM_OP_LRM_FAIL, target_node, - router_node, false, rsc_id, rsc_long_id, - standard, provider, type); -} - -/*! - * \brief Ask the controller to refresh a resource - * - * \param[in] api Controller connection - * \param[in] target_node Name of node resource is on - * \param[in] router_node Router node for target - * \param[in] rsc_id ID of resource to refresh - * \param[in] rsc_long_id Long ID of resource (if any) - * \param[in] standard Standard of resource - * \param[in] provider Provider of resource (if any) - * \param[in] type Type of resource - * \param[in] cib_only If true, clean resource from CIB only - * - * \return Standard Pacemaker return code - * \note Event callback will get a reply of type pcmk_controld_reply_resource. - */ -int -pcmk_controld_api_refresh(pcmk_ipc_api_t *api, const char *target_node, - const char *router_node, - const char *rsc_id, const char *rsc_long_id, - const char *standard, const char *provider, - const char *type, bool cib_only) -{ - crm_debug("Sending %s IPC request to refresh %s (a.k.a. 
%s) on %s via %s", - pcmk_ipc_name(api, true), crm_str(rsc_id), crm_str(rsc_long_id), - crm_str(target_node), crm_str(router_node)); - return controller_resource_op(api, CRM_OP_LRM_DELETE, target_node, - router_node, cib_only, rsc_id, rsc_long_id, - standard, provider, type); -} - -/*! - * \brief Get the number of IPC replies currently expected from the controller - * - * \param[in] api Controller IPC API connection - * - * \return Number of replies expected - */ -unsigned int -pcmk_controld_api_replies_expected(pcmk_ipc_api_t *api) -{ - struct controld_api_private_s *private = api->api_data; - - return private->replies_expected; -} - -xmlNode * -create_hello_message(const char *uuid, const char *client_name, - const char *major_version, const char *minor_version) -{ - xmlNode *hello_node = NULL; - xmlNode *hello = NULL; - - if (pcmk__str_empty(uuid) || pcmk__str_empty(client_name) - || pcmk__str_empty(major_version) || pcmk__str_empty(minor_version)) { - crm_err("Could not create IPC hello message from %s (UUID %s): " - "missing information", - client_name? client_name : "unknown client", - uuid? uuid : "unknown"); - return NULL; - } - - hello_node = create_xml_node(NULL, XML_TAG_OPTIONS); - if (hello_node == NULL) { - crm_err("Could not create IPC hello message from %s (UUID %s): " - "Message data creation failed", client_name, uuid); - return NULL; - } - - crm_xml_add(hello_node, "major_version", major_version); - crm_xml_add(hello_node, "minor_version", minor_version); - crm_xml_add(hello_node, "client_name", client_name); - crm_xml_add(hello_node, "client_uuid", uuid); - - hello = create_request(CRM_OP_HELLO, hello_node, NULL, NULL, client_name, uuid); - if (hello == NULL) { - crm_err("Could not create IPC hello message from %s (UUID %s): " - "Request creation failed", client_name, uuid); - return NULL; - } - free_xml(hello_node); - - crm_trace("Created hello message from %s (UUID %s)", client_name, uuid); - return hello; -} diff --git a/lib/common/ipc_pacemakerd.c b/lib/common/ipc_pacemakerd.c deleted file mode 100644 index 241722e..0000000 --- a/lib/common/ipc_pacemakerd.c +++ /dev/null @@ -1,232 +0,0 @@ -/* - * Copyright 2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * - * This source code is licensed under the GNU Lesser General Public License - * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. 
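(Usage illustration, not part of the original patch: the replies_expected counter above enables a simple drain loop on the caller's side. The sketch assumes pcmk_poll_ipc() and pcmk_dispatch_ipc() from the public API, and a connection made with pcmk_ipc_dispatch_poll.)

    // Process outstanding controller replies after queuing requests
    static void
    drain_replies(pcmk_ipc_api_t *api)
    {
        while (pcmk_controld_api_replies_expected(api) > 0) {
            if (pcmk_poll_ipc(api, 1000) != pcmk_rc_ok) {   // wait up to 1s
                break;              // timeout or error; stop waiting
            }
            pcmk_dispatch_ipc(api); // invokes the registered event callback
        }
    }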
- */ - -#include - -#include -#include - -#include -#include -#include -#include -#include -#include -#include "crmcommon_private.h" - -typedef struct pacemakerd_api_private_s { - enum pcmk_pacemakerd_state state; - char *client_uuid; -} pacemakerd_api_private_t; - -static const char *pacemakerd_state_str[] = { - XML_PING_ATTR_PACEMAKERDSTATE_INIT, - XML_PING_ATTR_PACEMAKERDSTATE_STARTINGDAEMONS, - XML_PING_ATTR_PACEMAKERDSTATE_WAITPING, - XML_PING_ATTR_PACEMAKERDSTATE_RUNNING, - XML_PING_ATTR_PACEMAKERDSTATE_SHUTTINGDOWN, - XML_PING_ATTR_PACEMAKERDSTATE_SHUTDOWNCOMPLETE -}; - -enum pcmk_pacemakerd_state -pcmk_pacemakerd_api_daemon_state_text2enum(const char *state) -{ - int i; - - if (state == NULL) { - return pcmk_pacemakerd_state_invalid; - } - for (i=pcmk_pacemakerd_state_init; i <= pcmk_pacemakerd_state_max; - i++) { - if (crm_str_eq(state, pacemakerd_state_str[i], TRUE)) { - return i; - } - } - return pcmk_pacemakerd_state_invalid; -} - -const char * -pcmk_pacemakerd_api_daemon_state_enum2text( - enum pcmk_pacemakerd_state state) -{ - if ((state >= pcmk_pacemakerd_state_init) && - (state <= pcmk_pacemakerd_state_max)) { - return pacemakerd_state_str[state]; - } - return "invalid"; -} - -// \return Standard Pacemaker return code -static int -new_data(pcmk_ipc_api_t *api) -{ - struct pacemakerd_api_private_s *private = NULL; - - api->api_data = calloc(1, sizeof(struct pacemakerd_api_private_s)); - - if (api->api_data == NULL) { - return errno; - } - - private = api->api_data; - private->state = pcmk_pacemakerd_state_invalid; - /* other as with cib, controld, ... we are addressing pacemakerd just - from the local node -> pid is unique and thus sufficient as an ID - */ - private->client_uuid = pcmk__getpid_s(); - - return pcmk_rc_ok; -} - -static void -free_data(void *data) -{ - free(((struct pacemakerd_api_private_s *) data)->client_uuid); - free(data); -} - -// \return Standard Pacemaker return code -static int -post_connect(pcmk_ipc_api_t *api) -{ - struct pacemakerd_api_private_s *private = NULL; - - if (api->api_data == NULL) { - return EINVAL; - } - private = api->api_data; - private->state = pcmk_pacemakerd_state_invalid; - - return pcmk_rc_ok; -} - -static void -post_disconnect(pcmk_ipc_api_t *api) -{ - struct pacemakerd_api_private_s *private = NULL; - - if (api->api_data == NULL) { - return; - } - private = api->api_data; - private->state = pcmk_pacemakerd_state_invalid; - - return; -} - -static bool -reply_expected(pcmk_ipc_api_t *api, xmlNode *request) -{ - const char *command = crm_element_value(request, F_CRM_TASK); - - if (command == NULL) { - return false; - } - - // We only need to handle commands that functions in this file can send - return !strcmp(command, CRM_OP_PING); -} - -static void -dispatch(pcmk_ipc_api_t *api, xmlNode *reply) -{ - crm_exit_t status = CRM_EX_OK; - xmlNode *msg_data = NULL; - pcmk_pacemakerd_api_reply_t reply_data = { - pcmk_pacemakerd_reply_unknown - }; - const char *value = NULL; - long long value_ll = 0; - - value = crm_element_value(reply, F_CRM_MSG_TYPE); - if ((value == NULL) || (strcmp(value, XML_ATTR_RESPONSE))) { - crm_debug("Unrecognizable pacemakerd message: invalid message type '%s'", - crm_str(value)); - status = CRM_EX_PROTOCOL; - goto done; - } - - if (crm_element_value(reply, XML_ATTR_REFERENCE) == NULL) { - crm_debug("Unrecognizable pacemakerd message: no reference"); - status = CRM_EX_PROTOCOL; - goto done; - } - - value = crm_element_value(reply, F_CRM_TASK); - if ((value == NULL) || strcmp(value, CRM_OP_PING)) { - 
crm_debug("Unrecognizable pacemakerd message: '%s'", crm_str(value)); - status = CRM_EX_PROTOCOL; - goto done; - } - - // Parse useful info from reply - - msg_data = get_message_xml(reply, F_CRM_DATA); - crm_element_value_ll(msg_data, XML_ATTR_TSTAMP, &value_ll); - - reply_data.reply_type = pcmk_pacemakerd_reply_ping; - reply_data.data.ping.state = - pcmk_pacemakerd_api_daemon_state_text2enum( - crm_element_value(msg_data, XML_PING_ATTR_PACEMAKERDSTATE)); - reply_data.data.ping.status = - crm_str_eq(crm_element_value(msg_data, XML_PING_ATTR_STATUS), - "ok", FALSE)?pcmk_rc_ok:pcmk_rc_error; - reply_data.data.ping.last_good = (time_t) value_ll; - reply_data.data.ping.sys_from = crm_element_value(msg_data, - XML_PING_ATTR_SYSFROM); - -done: - pcmk__call_ipc_callback(api, pcmk_ipc_event_reply, status, &reply_data); -} - -pcmk__ipc_methods_t * -pcmk__pacemakerd_api_methods() -{ - pcmk__ipc_methods_t *cmds = calloc(1, sizeof(pcmk__ipc_methods_t)); - - if (cmds != NULL) { - cmds->new_data = new_data; - cmds->free_data = free_data; - cmds->post_connect = post_connect; - cmds->reply_expected = reply_expected; - cmds->dispatch = dispatch; - cmds->post_disconnect = post_disconnect; - } - return cmds; -} - -int -pcmk_pacemakerd_api_ping(pcmk_ipc_api_t *api, const char *ipc_name) -{ - pacemakerd_api_private_t *private; - xmlNode *cmd; - int rc; - - CRM_CHECK(api != NULL, return -EINVAL); - private = api->api_data; - CRM_ASSERT(private != NULL); - - cmd = create_request(CRM_OP_PING, NULL, NULL, CRM_SYSTEM_MCP, - ipc_name?ipc_name:((crm_system_name? crm_system_name : "client")), - private->client_uuid); - - if (cmd) { - rc = pcmk__send_ipc_request(api, cmd); - if (rc != pcmk_rc_ok) { - crm_debug("Couldn't ping pacemakerd: %s rc=%d", - pcmk_rc_str(rc), rc); - rc = ECOMM; - } - free_xml(cmd); - } else { - rc = ENOMSG; - } - - return rc; -} diff --git a/lib/common/ipc_server.c b/lib/common/ipc_server.c deleted file mode 100644 index 87305dd..0000000 --- a/lib/common/ipc_server.c +++ /dev/null @@ -1,903 +0,0 @@ -/* - * Copyright 2004-2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * - * This source code is licensed under the GNU Lesser General Public License - * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. - */ - -#include - -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include "crmcommon_private.h" - -/* Evict clients whose event queue grows this large (by default) */ -#define PCMK_IPC_DEFAULT_QUEUE_MAX 500 - -static GHashTable *client_connections = NULL; - -/*! - * \internal - * \brief Count IPC clients - * - * \return Number of active IPC client connections - */ -guint -pcmk__ipc_client_count() -{ - return client_connections? g_hash_table_size(client_connections) : 0; -} - -/*! - * \internal - * \brief Execute a function for each active IPC client connection - * - * \param[in] func Function to call - * \param[in] user_data Pointer to pass to function - * - * \note The parameters are the same as for g_hash_table_foreach(). - */ -void -pcmk__foreach_ipc_client(GHFunc func, gpointer user_data) -{ - if ((func != NULL) && (client_connections != NULL)) { - g_hash_table_foreach(client_connections, func, user_data); - } -} - -/*! 
- * \internal - * \brief Remove IPC clients based on iterative function result - * - * \param[in] func Function to call for each active IPC client - * \param[in] user_data Pointer to pass to function - * - * \note The parameters are the same as for g_hash_table_foreach_remove(). - */ -void -pcmk__foreach_ipc_client_remove(GHRFunc func, gpointer user_data) -{ - if ((func != NULL) && (client_connections != NULL)) { - g_hash_table_foreach_remove(client_connections, func, user_data); - } -} - -pcmk__client_t * -pcmk__find_client(qb_ipcs_connection_t *c) -{ - if (client_connections) { - return g_hash_table_lookup(client_connections, c); - } - - crm_trace("No client found for %p", c); - return NULL; -} - -pcmk__client_t * -pcmk__find_client_by_id(const char *id) -{ - gpointer key; - pcmk__client_t *client; - GHashTableIter iter; - - if (client_connections && id) { - g_hash_table_iter_init(&iter, client_connections); - while (g_hash_table_iter_next(&iter, &key, (gpointer *) & client)) { - if (strcmp(client->id, id) == 0) { - return client; - } - } - } - - crm_trace("No client found with id=%s", id); - return NULL; -} - -const char * -pcmk__client_name(pcmk__client_t *c) -{ - if (c == NULL) { - return "null"; - } else if (c->name == NULL && c->id == NULL) { - return "unknown"; - } else if (c->name == NULL) { - return c->id; - } else { - return c->name; - } -} - -void -pcmk__client_cleanup(void) -{ - if (client_connections != NULL) { - int active = g_hash_table_size(client_connections); - - if (active) { - crm_err("Exiting with %d active IPC client%s", - active, pcmk__plural_s(active)); - } - g_hash_table_destroy(client_connections); client_connections = NULL; - } -} - -void -pcmk__drop_all_clients(qb_ipcs_service_t *service) -{ - qb_ipcs_connection_t *c = NULL; - - if (service == NULL) { - return; - } - - c = qb_ipcs_connection_first_get(service); - - while (c != NULL) { - qb_ipcs_connection_t *last = c; - - c = qb_ipcs_connection_next_get(service, last); - - /* There really shouldn't be anyone connected at this point */ - crm_notice("Disconnecting client %p, pid=%d...", - last, pcmk__client_pid(last)); - qb_ipcs_disconnect(last); - qb_ipcs_connection_unref(last); - } -} - -/*!
- * \internal - * \brief Allocate a new pcmk__client_t object based on an IPC connection - * - * \param[in] c IPC connection (or NULL to allocate generic client) - * \param[in] key Connection table key (or NULL to use sane default) - * \param[in] uid_client UID corresponding to c (ignored if c is NULL) - * - * \return Pointer to new pcmk__client_t (or NULL on error) - */ -static pcmk__client_t * -client_from_connection(qb_ipcs_connection_t *c, void *key, uid_t uid_client) -{ - pcmk__client_t *client = calloc(1, sizeof(pcmk__client_t)); - - if (client == NULL) { - crm_perror(LOG_ERR, "Allocating client"); - return NULL; - } - - if (c) { -#if ENABLE_ACL - client->user = pcmk__uid2username(uid_client); - if (client->user == NULL) { - client->user = strdup("#unprivileged"); - CRM_CHECK(client->user != NULL, free(client); return NULL); - crm_err("Unable to enforce ACLs for user ID %d, assuming unprivileged", - uid_client); - } -#endif - client->ipcs = c; - client->kind = PCMK__CLIENT_IPC; - client->pid = pcmk__client_pid(c); - if (key == NULL) { - key = c; - } - } - - client->id = crm_generate_uuid(); - if (client->id == NULL) { - crm_err("Could not generate UUID for client"); - free(client->user); - free(client); - return NULL; - } - if (key == NULL) { - key = client->id; - } - if (client_connections == NULL) { - crm_trace("Creating IPC client table"); - client_connections = g_hash_table_new(g_direct_hash, g_direct_equal); - } - g_hash_table_insert(client_connections, key, client); - return client; -} - -/*! - * \brief Allocate a new pcmk__client_t object and generate its ID - * - * \param[in] key What to use as connections hash table key (NULL to use ID) - * - * \return Pointer to new pcmk__client_t (asserts on failure) - */ -pcmk__client_t * -pcmk__new_unauth_client(void *key) -{ - pcmk__client_t *client = client_from_connection(NULL, key, 0); - - CRM_ASSERT(client != NULL); - return client; -} - -pcmk__client_t * -pcmk__new_client(qb_ipcs_connection_t *c, uid_t uid_client, gid_t gid_client) -{ - uid_t uid_cluster = 0; - gid_t gid_cluster = 0; - - pcmk__client_t *client = NULL; - - CRM_CHECK(c != NULL, return NULL); - - if (pcmk_daemon_user(&uid_cluster, &gid_cluster) < 0) { - static bool need_log = TRUE; - - if (need_log) { - crm_warn("Could not find user and group IDs for user %s", - CRM_DAEMON_USER); - need_log = FALSE; - } - } - - if (uid_client != 0) { - crm_trace("Giving group %u access to new IPC connection", gid_cluster); - /* Passing -1 to chown(2) means don't change */ - qb_ipcs_connection_auth_set(c, -1, gid_cluster, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP); - } - - /* TODO: Do our own auth checking, return NULL if unauthorized */ - client = client_from_connection(c, NULL, uid_client); - if (client == NULL) { - return NULL; - } - - if ((uid_client == 0) || (uid_client == uid_cluster)) { - /* Remember when a connection came from root or hacluster */ - set_bit(client->flags, pcmk__client_privileged); - } - - crm_debug("New IPC client %s for PID %u with uid %d and gid %d", - client->id, client->pid, uid_client, gid_client); - return client; -} - -static struct iovec * -pcmk__new_ipc_event(void) -{ - struct iovec *iov = calloc(2, sizeof(struct iovec)); - - CRM_ASSERT(iov != NULL); - return iov; -} - -/*!
- * \brief Free an I/O vector created by pcmk__ipc_prepare_iov() - * - * \param[in] event I/O vector to free - */ -void -pcmk_free_ipc_event(struct iovec *event) -{ - if (event != NULL) { - free(event[0].iov_base); - free(event[1].iov_base); - free(event); - } -} - -static void -free_event(gpointer data) -{ - pcmk_free_ipc_event((struct iovec *) data); -} - -static void -add_event(pcmk__client_t *c, struct iovec *iov) -{ - if (c->event_queue == NULL) { - c->event_queue = g_queue_new(); - } - g_queue_push_tail(c->event_queue, iov); -} - -void -pcmk__free_client(pcmk__client_t *c) -{ - if (c == NULL) { - return; - } - - if (client_connections) { - if (c->ipcs) { - crm_trace("Destroying %p/%p (%d remaining)", - c, c->ipcs, g_hash_table_size(client_connections) - 1); - g_hash_table_remove(client_connections, c->ipcs); - - } else { - crm_trace("Destroying remote connection %p (%d remaining)", - c, g_hash_table_size(client_connections) - 1); - g_hash_table_remove(client_connections, c->id); - } - } - - if (c->event_timer) { - g_source_remove(c->event_timer); - } - - if (c->event_queue) { - crm_debug("Destroying %d events", g_queue_get_length(c->event_queue)); - g_queue_free_full(c->event_queue, free_event); - } - - free(c->id); - free(c->name); - free(c->user); - if (c->remote) { - if (c->remote->auth_timeout) { - g_source_remove(c->remote->auth_timeout); - } - free(c->remote->buffer); - free(c->remote); - } - free(c); -} - -/*! - * \internal - * \brief Raise IPC eviction threshold for a client, if allowed - * - * \param[in,out] client Client to modify - * \param[in] qmax New threshold (as non-NULL string) - * - * \return TRUE if change was allowed, FALSE otherwise - */ -bool -pcmk__set_client_queue_max(pcmk__client_t *client, const char *qmax) -{ - if (is_set(client->flags, pcmk__client_privileged)) { - long long qmax_int; - - errno = 0; - qmax_int = crm_parse_ll(qmax, NULL); - if ((errno == 0) && (qmax_int > 0)) { - client->queue_max = (unsigned int) qmax_int; - return TRUE; - } - } - return FALSE; -} - -int -pcmk__client_pid(qb_ipcs_connection_t *c) -{ - struct qb_ipcs_connection_stats stats; - - stats.client_pid = 0; - qb_ipcs_connection_stats_get(c, &stats, 0); - return stats.client_pid; -} - -/*! - * \internal - * \brief Retrieve message XML from data read from client IPC - * - * \param[in] c IPC client connection - * \param[in] data Data read from client connection - * \param[out] id Where to store message ID from libqb header - * \param[out] flags Where to store flags from libqb header - * - * \return Message XML on success, NULL otherwise - */ -xmlNode * -pcmk__client_data2xml(pcmk__client_t *c, void *data, uint32_t *id, - uint32_t *flags) -{ - xmlNode *xml = NULL; - char *uncompressed = NULL; - char *text = ((char *)data) + sizeof(pcmk__ipc_header_t); - pcmk__ipc_header_t *header = data; - - if (!pcmk__valid_ipc_header(header)) { - return NULL; - } - - if (id) { - *id = ((struct qb_ipc_response_header *)data)->id; - } - if (flags) { - *flags = header->flags; - } - - if (is_set(header->flags, crm_ipc_proxied)) { - /* Mark this client as being the endpoint of a proxy connection. - * Proxy connections responses are sent on the event channel, to avoid - * blocking the controller serving as proxy. 
- */ - c->flags |= pcmk__client_proxied; - } - - if (header->size_compressed) { - int rc = 0; - unsigned int size_u = 1 + header->size_uncompressed; - uncompressed = calloc(1, size_u); - - crm_trace("Decompressing message data %u bytes into %u bytes", - header->size_compressed, size_u); - - rc = BZ2_bzBuffToBuffDecompress(uncompressed, &size_u, text, header->size_compressed, 1, 0); - text = uncompressed; - - if (rc != BZ_OK) { - crm_err("Decompression failed: %s " CRM_XS " bzerror=%d", - bz2_strerror(rc), rc); - free(uncompressed); - return NULL; - } - } - - CRM_ASSERT(text[header->size_uncompressed - 1] == 0); - - xml = string2xml(text); - crm_log_xml_trace(xml, "[IPC received]"); - - free(uncompressed); - return xml; -} - -static int crm_ipcs_flush_events(pcmk__client_t *c); - -static gboolean -crm_ipcs_flush_events_cb(gpointer data) -{ - pcmk__client_t *c = data; - - c->event_timer = 0; - crm_ipcs_flush_events(c); - return FALSE; -} - -/*! - * \internal - * \brief Add progressive delay before next event queue flush - * - * \param[in,out] c Client connection to add delay to - * \param[in] queue_len Current event queue length - */ -static inline void -delay_next_flush(pcmk__client_t *c, unsigned int queue_len) -{ - /* Delay a maximum of 1.5 seconds */ - guint delay = (queue_len < 5)? (1000 + 100 * queue_len) : 1500; - - c->event_timer = g_timeout_add(delay, crm_ipcs_flush_events_cb, c); -} - -/*! - * \internal - * \brief Send client any messages in its queue - * - * \param[in] c Client to flush - * - * \return Standard Pacemaker return value - */ -static int -crm_ipcs_flush_events(pcmk__client_t *c) -{ - int rc = pcmk_rc_ok; - ssize_t qb_rc = 0; - unsigned int sent = 0; - unsigned int queue_len = 0; - - if (c == NULL) { - return rc; - - } else if (c->event_timer) { - /* There is already a timer, wait until it goes off */ - crm_trace("Timer active for %p - %d", c->ipcs, c->event_timer); - return rc; - } - - if (c->event_queue) { - queue_len = g_queue_get_length(c->event_queue); - } - while (sent < 100) { - pcmk__ipc_header_t *header = NULL; - struct iovec *event = NULL; - - if (c->event_queue) { - // We don't pop unless send is successful - event = g_queue_peek_head(c->event_queue); - } - if (event == NULL) { // Queue is empty - break; - } - - qb_rc = qb_ipcs_event_sendv(c->ipcs, event, 2); - if (qb_rc < 0) { - rc = (int) -qb_rc; - break; - } - event = g_queue_pop_head(c->event_queue); - - sent++; - header = event[0].iov_base; - if (header->size_compressed) { - crm_trace("Event %d to %p[%d] (%lld compressed bytes) sent", - header->qb.id, c->ipcs, c->pid, (long long) qb_rc); - } else { - crm_trace("Event %d to %p[%d] (%lld bytes) sent: %.120s", - header->qb.id, c->ipcs, c->pid, (long long) qb_rc, - (char *) (event[1].iov_base)); - } - pcmk_free_ipc_event(event); - } - - queue_len -= sent; - if (sent > 0 || queue_len) { - crm_trace("Sent %d events (%d remaining) for %p[%d]: %s (%lld)", - sent, queue_len, c->ipcs, c->pid, - pcmk_rc_str(rc), (long long) qb_rc); - } - - if (queue_len) { - - /* Allow clients to briefly fall behind on processing incoming messages, - * but drop completely unresponsive clients so the connection doesn't - * consume resources indefinitely. 
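(For reference, not part of the original patch: the progressive backoff computed by delay_next_flush() above follows this rule, restated as a standalone sketch.)

    // Retry delay grows with the queue: 1.1s, 1.2s, ..., capped at 1.5s
    // once five or more events are still queued.
    static unsigned int
    flush_delay_ms(unsigned int queue_len)
    {
        return (queue_len < 5)? (1000 + (100 * queue_len)) : 1500;
    }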
- */ - if (queue_len > QB_MAX(c->queue_max, PCMK_IPC_DEFAULT_QUEUE_MAX)) { - if ((c->queue_backlog <= 1) || (queue_len < c->queue_backlog)) { - /* Don't evict for a new or shrinking backlog */ - crm_warn("Client with process ID %u has a backlog of %u messages " - CRM_XS " %p", c->pid, queue_len, c->ipcs); - } else { - crm_err("Evicting client with process ID %u due to backlog of %u messages " - CRM_XS " %p", c->pid, queue_len, c->ipcs); - c->queue_backlog = 0; - qb_ipcs_disconnect(c->ipcs); - return rc; - } - } - - c->queue_backlog = queue_len; - delay_next_flush(c, queue_len); - - } else { - /* Event queue is empty, there is no backlog */ - c->queue_backlog = 0; - } - - return rc; -} - -/*! - * \internal - * \brief Create an I/O vector for sending an IPC XML message - * - * \param[in] request Identifier for libqb response header - * \param[in] message XML message to send - * \param[in] max_send_size If 0, default IPC buffer size is used - * \param[out] result Where to store prepared I/O vector - * \param[out] bytes Size of prepared data in bytes - * - * \return Standard Pacemaker return code - */ -int -pcmk__ipc_prepare_iov(uint32_t request, xmlNode *message, - uint32_t max_send_size, struct iovec **result, - ssize_t *bytes) -{ - static unsigned int biggest = 0; - struct iovec *iov; - unsigned int total = 0; - char *compressed = NULL; - char *buffer = NULL; - pcmk__ipc_header_t *header = NULL; - - if ((message == NULL) || (result == NULL)) { - return EINVAL; - } - - header = calloc(1, sizeof(pcmk__ipc_header_t)); - if (header == NULL) { - return ENOMEM; /* errno mightn't be set by allocator */ - } - - buffer = dump_xml_unformatted(message); - - if (max_send_size == 0) { - max_send_size = crm_ipc_default_buffer_size(); - } - CRM_LOG_ASSERT(max_send_size != 0); - - *result = NULL; - iov = pcmk__new_ipc_event(); - iov[0].iov_len = sizeof(pcmk__ipc_header_t); - iov[0].iov_base = header; - - header->version = PCMK__IPC_VERSION; - header->size_uncompressed = 1 + strlen(buffer); - total = iov[0].iov_len + header->size_uncompressed; - - if (total < max_send_size) { - iov[1].iov_base = buffer; - iov[1].iov_len = header->size_uncompressed; - - } else { - unsigned int new_size = 0; - - if (pcmk__compress(buffer, (unsigned int) header->size_uncompressed, - (unsigned int) max_send_size, &compressed, - &new_size) == pcmk_rc_ok) { - - header->flags |= crm_ipc_compressed; - header->size_compressed = new_size; - - iov[1].iov_len = header->size_compressed; - iov[1].iov_base = compressed; - - free(buffer); - - biggest = QB_MAX(header->size_compressed, biggest); - - } else { - crm_log_xml_trace(message, "EMSGSIZE"); - biggest = QB_MAX(header->size_uncompressed, biggest); - - crm_err("Could not compress %u-byte message into less than IPC " - "limit of %u bytes; set PCMK_ipc_buffer to higher value " - "(%u bytes suggested)", - header->size_uncompressed, max_send_size, 4 * biggest); - - free(compressed); - free(buffer); - pcmk_free_ipc_event(iov); - return EMSGSIZE; - } - } - - header->qb.size = iov[0].iov_len + iov[1].iov_len; - header->qb.id = (int32_t)request; /* Replying to a specific request */ - - *result = iov; - CRM_ASSERT(header->qb.size > 0); - if (bytes != NULL) { - *bytes = header->qb.size; - } - return pcmk_rc_ok; -} - -int -pcmk__ipc_send_iov(pcmk__client_t *c, struct iovec *iov, uint32_t flags) -{ - int rc = pcmk_rc_ok; - static uint32_t id = 1; - pcmk__ipc_header_t *header = iov[0].iov_base; - - if (c->flags & pcmk__client_proxied) { - /* _ALL_ replies to proxied connections need to be sent as 
events */ - if (is_not_set(flags, crm_ipc_server_event)) { - flags |= crm_ipc_server_event; - /* this flag lets us know this was originally meant to be a response. - * even though we're sending it over the event channel. */ - flags |= crm_ipc_proxied_relay_response; - } - } - - header->flags |= flags; - if (flags & crm_ipc_server_event) { - header->qb.id = id++; /* We don't really use it, but doesn't hurt to set one */ - - if (flags & crm_ipc_server_free) { - crm_trace("Sending the original to %p[%d]", c->ipcs, c->pid); - add_event(c, iov); - - } else { - struct iovec *iov_copy = pcmk__new_ipc_event(); - - crm_trace("Sending a copy to %p[%d]", c->ipcs, c->pid); - iov_copy[0].iov_len = iov[0].iov_len; - iov_copy[0].iov_base = malloc(iov[0].iov_len); - memcpy(iov_copy[0].iov_base, iov[0].iov_base, iov[0].iov_len); - - iov_copy[1].iov_len = iov[1].iov_len; - iov_copy[1].iov_base = malloc(iov[1].iov_len); - memcpy(iov_copy[1].iov_base, iov[1].iov_base, iov[1].iov_len); - - add_event(c, iov_copy); - } - - } else { - ssize_t qb_rc; - - CRM_LOG_ASSERT(header->qb.id != 0); /* Replying to a specific request */ - - qb_rc = qb_ipcs_response_sendv(c->ipcs, iov, 2); - if (qb_rc < header->qb.size) { - if (qb_rc < 0) { - rc = (int) -qb_rc; - } - crm_notice("Response %d to pid %d failed: %s " - CRM_XS " bytes=%u rc=%lld ipcs=%p", - header->qb.id, c->pid, pcmk_rc_str(rc), - header->qb.size, (long long) qb_rc, c->ipcs); - - } else { - crm_trace("Response %d sent, %lld bytes to %p[%d]", - header->qb.id, (long long) qb_rc, c->ipcs, c->pid); - } - - if (flags & crm_ipc_server_free) { - pcmk_free_ipc_event(iov); - } - } - - if (flags & crm_ipc_server_event) { - rc = crm_ipcs_flush_events(c); - } else { - crm_ipcs_flush_events(c); - } - - if ((rc == EPIPE) || (rc == ENOTCONN)) { - crm_trace("Client %p disconnected", c->ipcs); - } - return rc; -} - -int -pcmk__ipc_send_xml(pcmk__client_t *c, uint32_t request, xmlNode *message, - uint32_t flags) -{ - struct iovec *iov = NULL; - int rc = pcmk_rc_ok; - - if (c == NULL) { - return EINVAL; - } - rc = pcmk__ipc_prepare_iov(request, message, crm_ipc_default_buffer_size(), - &iov, NULL); - if (rc == pcmk_rc_ok) { - rc = pcmk__ipc_send_iov(c, iov, flags | crm_ipc_server_free); - } else { - pcmk_free_ipc_event(iov); - crm_notice("IPC message to pid %d failed: %s " CRM_XS " rc=%d", - c->pid, pcmk_rc_str(rc), rc); - } - return rc; -} - -void -pcmk__ipc_send_ack_as(const char *function, int line, pcmk__client_t *c, - uint32_t request, uint32_t flags, const char *tag) -{ - if (flags & crm_ipc_client_response) { - xmlNode *ack = create_xml_node(NULL, tag); - - crm_trace("Ack'ing IPC message from %s", pcmk__client_name(c)); - c->request_id = 0; - crm_xml_add(ack, "function", function); - crm_xml_add_int(ack, "line", line); - pcmk__ipc_send_xml(c, request, ack, flags); - free_xml(ack); - } -} - -/*! - * \internal - * \brief Add an IPC server to the main loop for the pacemaker-based API - * - * \param[out] ipcs_ro New IPC server for read-only pacemaker-based API - * \param[out] ipcs_rw New IPC server for read/write pacemaker-based API - * \param[out] ipcs_shm New IPC server for shared-memory pacemaker-based API - * \param[in] ro_cb IPC callbacks for read-only API - * \param[in] rw_cb IPC callbacks for read/write and shared-memory APIs - * - * \note This function exits fatally if unable to create the servers. 
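Putting the send path above together, a daemon typically answers a request with pcmk__ipc_send_xml(), which prepares the I/O vector, sends or queues it, and flushes pending events as a side effect. A hedged usage sketch; the reply tag is a placeholder and the client/request id are assumed to come from the dispatch layer:

    /* Sketch: answering a request (placeholder content) */
    static void
    example_send_reply(pcmk__client_t *client, uint32_t request_id)
    {
        xmlNode *reply = create_xml_node(NULL, "reply"); /* placeholder tag */
        int rc = pcmk__ipc_send_xml(client, request_id, reply,
                                    crm_ipc_client_response);

        if (rc != pcmk_rc_ok) {
            crm_warn("Reply to %s failed: %s",
                     pcmk__client_name(client), pcmk_rc_str(rc));
        }
        free_xml(reply);
    }
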
- */ -void pcmk__serve_based_ipc(qb_ipcs_service_t **ipcs_ro, - qb_ipcs_service_t **ipcs_rw, - qb_ipcs_service_t **ipcs_shm, - struct qb_ipcs_service_handlers *ro_cb, - struct qb_ipcs_service_handlers *rw_cb) -{ - *ipcs_ro = mainloop_add_ipc_server(PCMK__SERVER_BASED_RO, - QB_IPC_NATIVE, ro_cb); - - *ipcs_rw = mainloop_add_ipc_server(PCMK__SERVER_BASED_RW, - QB_IPC_NATIVE, rw_cb); - - *ipcs_shm = mainloop_add_ipc_server(PCMK__SERVER_BASED_SHM, - QB_IPC_SHM, rw_cb); - - if (*ipcs_ro == NULL || *ipcs_rw == NULL || *ipcs_shm == NULL) { - crm_err("Failed to create the CIB manager: exiting and inhibiting respawn"); - crm_warn("Verify pacemaker and pacemaker_remote are not both enabled"); - crm_exit(CRM_EX_FATAL); - } -} - -/*! - * \internal - * \brief Destroy IPC servers for pacemaker-based API - * - * \param[out] ipcs_ro IPC server for read-only pacemaker-based API - * \param[out] ipcs_rw IPC server for read/write pacemaker-based API - * \param[out] ipcs_shm IPC server for shared-memory pacemaker-based API - * - * \note This is a convenience function for calling qb_ipcs_destroy() for each - * argument. - */ -void -pcmk__stop_based_ipc(qb_ipcs_service_t *ipcs_ro, - qb_ipcs_service_t *ipcs_rw, - qb_ipcs_service_t *ipcs_shm) -{ - qb_ipcs_destroy(ipcs_ro); - qb_ipcs_destroy(ipcs_rw); - qb_ipcs_destroy(ipcs_shm); -} - -/*! - * \internal - * \brief Add an IPC server to the main loop for the pacemaker-controld API - * - * \param[in] cb IPC callbacks - * - * \return Newly created IPC server - */ -qb_ipcs_service_t * -pcmk__serve_controld_ipc(struct qb_ipcs_service_handlers *cb) -{ - return mainloop_add_ipc_server(CRM_SYSTEM_CRMD, QB_IPC_NATIVE, cb); -} - -/*! - * \internal - * \brief Add an IPC server to the main loop for the pacemaker-attrd API - * - * \param[in] cb IPC callbacks - * - * \note This function exits fatally if unable to create the servers. - */ -void -pcmk__serve_attrd_ipc(qb_ipcs_service_t **ipcs, - struct qb_ipcs_service_handlers *cb) -{ - *ipcs = mainloop_add_ipc_server(T_ATTRD, QB_IPC_NATIVE, cb); - - if (*ipcs == NULL) { - crm_err("Failed to create pacemaker-attrd server: exiting and inhibiting respawn"); - crm_warn("Verify pacemaker and pacemaker_remote are not both enabled."); - crm_exit(CRM_EX_FATAL); - } -} - -/*! - * \internal - * \brief Add an IPC server to the main loop for the pacemaker-fenced API - * - * \param[in] cb IPC callbacks - * - * \note This function exits fatally if unable to create the servers. - */ -void -pcmk__serve_fenced_ipc(qb_ipcs_service_t **ipcs, - struct qb_ipcs_service_handlers *cb) -{ - *ipcs = mainloop_add_ipc_server_with_prio("stonith-ng", QB_IPC_NATIVE, cb, - QB_LOOP_HIGH); - - if (*ipcs == NULL) { - crm_err("Failed to create fencer: exiting and inhibiting respawn."); - crm_warn("Verify pacemaker and pacemaker_remote are not both enabled."); - crm_exit(CRM_EX_FATAL); - } -} diff --git a/lib/common/mainloop.c b/lib/common/mainloop.c index e942e57..e3640f5 100644 --- a/lib/common/mainloop.c +++ b/lib/common/mainloop.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include @@ -834,66 +834,32 @@ mainloop_gio_destroy(gpointer c) free(c_name); } -/*! 
- * \brief Connect to IPC and add it as a main loop source - * - * \param[in] ipc IPC connection to add - * \param[in] priority Event source priority to use for connection - * \param[in] userdata Data to register with callbacks - * \param[in] callbacks Dispatch and destroy callbacks for connection - * \param[out] source Newly allocated event source - * - * \return Standard Pacemaker return code - * - * \note On failure, the caller is still responsible for ipc. On success, the - * caller should call mainloop_del_ipc_client() when source is no longer - * needed, which will lead to the disconnection of the IPC later in the - * main loop if it is connected. However the IPC disconnects, - * mainloop_gio_destroy() will free ipc and source after calling the - * destroy callback. - */ -int -pcmk__add_mainloop_ipc(crm_ipc_t *ipc, int priority, void *userdata, - struct ipc_client_callbacks *callbacks, - mainloop_io_t **source) +mainloop_io_t * +mainloop_add_ipc_client(const char *name, int priority, size_t max_size, void *userdata, + struct ipc_client_callbacks *callbacks) { - CRM_CHECK((ipc != NULL) && (callbacks != NULL), return EINVAL); + mainloop_io_t *client = NULL; + crm_ipc_t *conn = crm_ipc_new(name, max_size); - if (!crm_ipc_connect(ipc)) { - return ENOTCONN; - } - *source = mainloop_add_fd(crm_ipc_name(ipc), priority, crm_ipc_get_fd(ipc), - userdata, NULL); - if (*source == NULL) { - int rc = errno; + if (conn && crm_ipc_connect(conn)) { + int32_t fd = crm_ipc_get_fd(conn); - crm_ipc_close(ipc); - return rc; + client = mainloop_add_fd(name, priority, fd, userdata, NULL); } - (*source)->ipc = ipc; - (*source)->destroy_fn = callbacks->destroy; - (*source)->dispatch_fn_ipc = callbacks->dispatch; - return pcmk_rc_ok; -} -mainloop_io_t * -mainloop_add_ipc_client(const char *name, int priority, size_t max_size, - void *userdata, struct ipc_client_callbacks *callbacks) -{ - crm_ipc_t *ipc = crm_ipc_new(name, max_size); - mainloop_io_t *source = NULL; - int rc = pcmk__add_mainloop_ipc(ipc, priority, userdata, callbacks, - &source); - - if (rc != pcmk_rc_ok) { - if (crm_log_level == LOG_STDOUT) { - fprintf(stderr, "Connection to %s failed: %s", - name, pcmk_rc_str(rc)); + if (client == NULL) { + crm_perror(LOG_TRACE, "Connection to %s failed", name); + if (conn) { + crm_ipc_close(conn); + crm_ipc_destroy(conn); } - crm_ipc_destroy(ipc); return NULL; } - return source; + + client->ipc = conn; + client->destroy_fn = callbacks->destroy; + client->dispatch_fn_ipc = callbacks->dispatch; + return client; } void @@ -1379,28 +1345,6 @@ drain_timeout_cb(gpointer user_data) } /*! - * \brief Drain some remaining main loop events then quit it - * - * \param[in] mloop Main loop to drain and quit - * \param[in] n Drain up to this many pending events - */ -void -pcmk_quit_main_loop(GMainLoop *mloop, unsigned int n) -{ - if ((mloop != NULL) && g_main_loop_is_running(mloop)) { - GMainContext *ctx = g_main_loop_get_context(mloop); - - /* Drain up to n events in case some memory clean-up is pending - * (helpful to reduce noise in valgrind output). - */ - for (int i = 0; (i < n) && g_main_context_pending(ctx); ++i) { - g_main_context_dispatch(ctx); - } - g_main_loop_quit(mloop); - } -} - -/*! 
* \brief Process main loop events while a certain condition is met * * \param[in] mloop Main loop to process diff --git a/lib/common/messages.c b/lib/common/messages.c deleted file mode 100644 index d3fa894..0000000 --- a/lib/common/messages.c +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Copyright 2004-2020 the Pacemaker project contributors - * - * The version control history for this file may have further details. - * - * This source code is licensed under the GNU Lesser General Public License - * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. - */ - -#include - -#include -#include - -#include -#include - -#include - -/*! - * \brief Create a Pacemaker request (for IPC or cluster layer) - * - * \param[in] task What to set as the request's task - * \param[in] msg_data What to add as the request's data contents - * \param[in] host_to What to set as the request's destination host - * \param[in] sys_to What to set as the request's destination system - * \param[in] sys_from If not NULL, set as request's origin system - * \param[in] uuid_from If not NULL, use in request's origin system - * \param[in] origin Name of function that called this one - * - * \return XML of new request - * - * \note One of sys_from or uuid_from must be non-NULL - * \note This function should not be called directly, but via the - * create_request() wrapper. - * \note The caller is responsible for freeing the result using free_xml(). - */ -xmlNode * -create_request_adv(const char *task, xmlNode * msg_data, - const char *host_to, const char *sys_to, - const char *sys_from, const char *uuid_from, - const char *origin) -{ - static uint ref_counter = 0; - - char *true_from = NULL; - xmlNode *request = NULL; - char *reference = crm_strdup_printf("%s-%s-%lld-%u", - (task? task : "_empty_"), - (sys_from? sys_from : "_empty_"), - (long long) time(NULL), ref_counter++); - - if (uuid_from != NULL) { - true_from = crm_strdup_printf("%s_%s", uuid_from, - (sys_from? sys_from : "none")); - } else if (sys_from != NULL) { - true_from = strdup(sys_from); - } else { - crm_err("Cannot create IPC request: No originating system specified"); - } - - // host_from will get set for us if necessary by the controller when routed - request = create_xml_node(NULL, __FUNCTION__); - crm_xml_add(request, F_CRM_ORIGIN, origin); - crm_xml_add(request, F_TYPE, T_CRM); - crm_xml_add(request, F_CRM_VERSION, CRM_FEATURE_SET); - crm_xml_add(request, F_CRM_MSG_TYPE, XML_ATTR_REQUEST); - crm_xml_add(request, F_CRM_REFERENCE, reference); - crm_xml_add(request, F_CRM_TASK, task); - crm_xml_add(request, F_CRM_SYS_TO, sys_to); - crm_xml_add(request, F_CRM_SYS_FROM, true_from); - - /* HOSTTO will be ignored if it is to the DC anyway. */ - if (host_to != NULL && strlen(host_to) > 0) { - crm_xml_add(request, F_CRM_HOST_TO, host_to); - } - - if (msg_data != NULL) { - add_message_xml(request, F_CRM_DATA, msg_data); - } - free(reference); - free(true_from); - - return request; -} - -/*! - * \brief Create a Pacemaker reply (for IPC or cluster layer) - * - * \param[in] original_request XML of request this is a reply to - * \param[in] xml_response_data XML to copy as data section of reply - * \param[in] origin Name of function that called this one - * - * \return XML of new reply - * - * \note This function should not be called directly, but via the - * create_reply() wrapper. - * \note The caller is responsible for freeing the result using free_xml(). 
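As the note above says, this constructor is normally reached through the create_request() convenience wrapper, which supplies the calling function's name as the origin. A minimal sketch under that assumption; the payload element is a placeholder:

    /* Sketch: building a controller request via the documented wrapper */
    xmlNode *data = create_xml_node(NULL, "options");   /* placeholder payload */
    xmlNode *request = create_request(CRM_OP_PING, data, NULL,
                                      CRM_SYSTEM_CRMD, CRM_SYSTEM_CRMD, NULL);

    /* ... hand request to the IPC or cluster layer ... */
    free_xml(request);
    free_xml(data);
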
- */ -xmlNode * -create_reply_adv(xmlNode *original_request, xmlNode *xml_response_data, - const char *origin) -{ - xmlNode *reply = NULL; - - const char *host_from = crm_element_value(original_request, F_CRM_HOST_FROM); - const char *sys_from = crm_element_value(original_request, F_CRM_SYS_FROM); - const char *sys_to = crm_element_value(original_request, F_CRM_SYS_TO); - const char *type = crm_element_value(original_request, F_CRM_MSG_TYPE); - const char *operation = crm_element_value(original_request, F_CRM_TASK); - const char *crm_msg_reference = crm_element_value(original_request, F_CRM_REFERENCE); - - if (type == NULL) { - crm_err("Cannot create new_message, no message type in original message"); - CRM_ASSERT(type != NULL); - return NULL; -#if 0 - } else if (strcasecmp(XML_ATTR_REQUEST, type) != 0) { - crm_err("Cannot create new_message, original message was not a request"); - return NULL; -#endif - } - reply = create_xml_node(NULL, __FUNCTION__); - if (reply == NULL) { - crm_err("Cannot create new_message, malloc failed"); - return NULL; - } - - crm_xml_add(reply, F_CRM_ORIGIN, origin); - crm_xml_add(reply, F_TYPE, T_CRM); - crm_xml_add(reply, F_CRM_VERSION, CRM_FEATURE_SET); - crm_xml_add(reply, F_CRM_MSG_TYPE, XML_ATTR_RESPONSE); - crm_xml_add(reply, F_CRM_REFERENCE, crm_msg_reference); - crm_xml_add(reply, F_CRM_TASK, operation); - - /* since this is a reply, we reverse the from and to */ - crm_xml_add(reply, F_CRM_SYS_TO, sys_from); - crm_xml_add(reply, F_CRM_SYS_FROM, sys_to); - - /* HOSTTO will be ignored if it is to the DC anyway. */ - if (host_from != NULL && strlen(host_from) > 0) { - crm_xml_add(reply, F_CRM_HOST_TO, host_from); - } - - if (xml_response_data != NULL) { - add_message_xml(reply, F_CRM_DATA, xml_response_data); - } - - return reply; -} - -xmlNode * -get_message_xml(xmlNode *msg, const char *field) -{ - xmlNode *tmp = first_named_child(msg, field); - - return __xml_first_child(tmp); -} - -gboolean -add_message_xml(xmlNode *msg, const char *field, xmlNode *xml) -{ - xmlNode *holder = create_xml_node(msg, field); - - add_node_copy(holder, xml); - return TRUE; -} diff --git a/lib/common/options.c b/lib/common/options.c index 9e041c9..9399642 100644 --- a/lib/common/options.c +++ b/lib/common/options.c @@ -407,7 +407,6 @@ pcmk__valid_quorum(const char *value) return safe_str_eq(value, "stop") || safe_str_eq(value, "freeze") || safe_str_eq(value, "ignore") - || safe_str_eq(value, "demote") || safe_str_eq(value, "suicide"); } diff --git a/lib/common/output_html.c b/lib/common/output_html.c index 259e412..c8f0088 100644 --- a/lib/common/output_html.c +++ b/lib/common/output_html.c @@ -72,7 +72,6 @@ html_free_priv(pcmk__output_t *out) { g_queue_free(priv->parent_q); g_slist_free(priv->errors); free(priv); - out->priv = NULL; } static bool @@ -113,11 +112,18 @@ add_error_node(gpointer data, gpointer user_data) { } static void -finish_reset_common(pcmk__output_t *out, crm_exit_t exit_status, bool print) { +html_finish(pcmk__output_t *out, crm_exit_t exit_status, bool print, void **copy_dest) { private_data_t *priv = out->priv; htmlNodePtr head_node = NULL; htmlNodePtr charset_node = NULL; + /* If root is NULL, html_init failed and we are being called from pcmk__output_free + * in the pcmk__output_new path. 
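Like its request counterpart, the reply constructor above is normally invoked through the create_reply() wrapper, which fills in the origin automatically and reverses the from/to system fields as documented. A hedged sketch; note that the data section is copied into the reply, so the caller still frees its own copy:

    /* Sketch: acknowledging an incoming request (placeholder payload) */
    static void
    example_ack(xmlNode *original_request)
    {
        xmlNode *ack_data = create_xml_node(NULL, "ack");
        xmlNode *reply = create_reply(original_request, ack_data);

        if (reply != NULL) {
            /* ... route the reply back to the requesting subsystem ... */
            free_xml(reply);
        }
        free_xml(ack_data);     /* reply holds a copy, not this node */
    }
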
+ */ + if (priv == NULL || priv->root == NULL) { + return; + } + if (cgi_output && print) { fprintf(out->dest, "Content-Type: text/html\n\n"); } @@ -139,7 +145,7 @@ finish_reset_common(pcmk__output_t *out, crm_exit_t exit_status, bool print) { /* Add any extra header nodes the caller might have created. */ for (int i = 0; i < g_slist_length(extra_headers); i++) { - xmlAddChild(head_node, xmlCopyNode(g_slist_nth_data(extra_headers, i), 1)); + xmlAddChild(head_node, g_slist_nth_data(extra_headers, i)); } /* Stylesheets are included two different ways. The first is via a built-in @@ -167,40 +173,23 @@ finish_reset_common(pcmk__output_t *out, crm_exit_t exit_status, bool print) { if (print) { htmlDocDump(out->dest, priv->root->doc); } -} - -static void -html_finish(pcmk__output_t *out, crm_exit_t exit_status, bool print, void **copy_dest) { - private_data_t *priv = out->priv; - - /* If root is NULL, html_init failed and we are being called from pcmk__output_free - * in the pcmk__output_new path. - */ - if (priv == NULL || priv->root == NULL) { - return; - } - - finish_reset_common(out, exit_status, print); if (copy_dest != NULL) { *copy_dest = copy_xml(priv->root); } - - g_slist_free_full(extra_headers, (GDestroyNotify) xmlFreeNode); } static void html_reset(pcmk__output_t *out) { CRM_ASSERT(out != NULL); - out->dest = freopen(NULL, "w", out->dest); - CRM_ASSERT(out->dest != NULL); - if (out->priv != NULL) { - finish_reset_common(out, CRM_EX_OK, true); + private_data_t *priv = out->priv; + htmlDocDump(out->dest, priv->root->doc); } html_free_priv(out); + g_slist_free_full(extra_headers, (GDestroyNotify) xmlFreeNode); html_init(out); } @@ -413,7 +402,7 @@ pcmk__output_create_html_node(pcmk__output_t *out, const char *element_name, con } void -pcmk__html_add_header(const char *name, ...) { +pcmk__html_add_header(xmlNodePtr parent, const char *name, ...) 
{ htmlNodePtr header_node; va_list ap; diff --git a/lib/common/output_log.c b/lib/common/output_log.c index 8422ac2..5b45ce4 100644 --- a/lib/common/output_log.c +++ b/lib/common/output_log.c @@ -44,7 +44,6 @@ log_free_priv(pcmk__output_t *out) { g_queue_free(priv->prefixes); free(priv); - out->priv = NULL; } static bool @@ -72,9 +71,6 @@ static void log_reset(pcmk__output_t *out) { CRM_ASSERT(out != NULL); - out->dest = freopen(NULL, "w", out->dest); - CRM_ASSERT(out->dest != NULL); - log_free_priv(out); log_init(out); } diff --git a/lib/common/output_text.c b/lib/common/output_text.c index 2f7e5b0..54c409a 100644 --- a/lib/common/output_text.c +++ b/lib/common/output_text.c @@ -43,7 +43,6 @@ text_free_priv(pcmk__output_t *out) { g_queue_free(priv->parent_q); free(priv); - out->priv = NULL; } static bool @@ -75,9 +74,6 @@ static void text_reset(pcmk__output_t *out) { CRM_ASSERT(out != NULL); - out->dest = freopen(NULL, "w", out->dest); - CRM_ASSERT(out->dest != NULL); - text_free_priv(out); text_init(out); } diff --git a/lib/common/output_xml.c b/lib/common/output_xml.c index 9f8e01b..8565bfe 100644 --- a/lib/common/output_xml.c +++ b/lib/common/output_xml.c @@ -54,7 +54,6 @@ xml_free_priv(pcmk__output_t *out) { g_queue_free(priv->parent_q); g_slist_free(priv->errors); free(priv); - out->priv = NULL; } static bool @@ -106,10 +105,17 @@ add_error_node(gpointer data, gpointer user_data) { } static void -finish_reset_common(pcmk__output_t *out, crm_exit_t exit_status, bool print) { +xml_finish(pcmk__output_t *out, crm_exit_t exit_status, bool print, void **copy_dest) { xmlNodePtr node; private_data_t *priv = out->priv; + /* If root is NULL, xml_init failed and we are being called from pcmk__output_free + * in the pcmk__output_new path. + */ + if (priv == NULL || priv->root == NULL) { + return; + } + if (legacy_xml) { GSList *node = priv->errors; @@ -141,20 +147,6 @@ finish_reset_common(pcmk__output_t *out, crm_exit_t exit_status, bool print) { fprintf(out->dest, "%s", buf); free(buf); } -} - -static void -xml_finish(pcmk__output_t *out, crm_exit_t exit_status, bool print, void **copy_dest) { - private_data_t *priv = out->priv; - - /* If root is NULL, xml_init failed and we are being called from pcmk__output_free - * in the pcmk__output_new path. 
- */ - if (priv == NULL || priv->root == NULL) { - return; - } - - finish_reset_common(out, exit_status, print); if (copy_dest != NULL) { *copy_dest = copy_xml(priv->root); @@ -163,13 +155,15 @@ xml_finish(pcmk__output_t *out, crm_exit_t exit_status, bool print, void **copy_ static void xml_reset(pcmk__output_t *out) { - CRM_ASSERT(out != NULL); + char *buf = NULL; - out->dest = freopen(NULL, "w", out->dest); - CRM_ASSERT(out->dest != NULL); + CRM_ASSERT(out != NULL); if (out->priv != NULL) { - finish_reset_common(out, CRM_EX_OK, true); + private_data_t *priv = out->priv; + buf = dump_xml_formatted_with_text(priv->root); + fprintf(out->dest, "%s", buf); + free(buf); } xml_free_priv(out); diff --git a/lib/common/remote.c b/lib/common/remote.c index f645d49..76c594b 100644 --- a/lib/common/remote.c +++ b/lib/common/remote.c @@ -28,7 +28,7 @@ #include #include -#include +#include #include #include #include diff --git a/lib/common/strings.c b/lib/common/strings.c index fa6558f..a2e17ae 100644 --- a/lib/common/strings.c +++ b/lib/common/strings.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -670,67 +669,3 @@ pcmk__str_in_list(GList *lst, const gchar *s) return g_list_find_custom(lst, s, (GCompareFunc) strcmp) != NULL; } - -/* - * \brief Sort strings, with numeric portions sorted numerically - * - * Sort two strings case-insensitively like strcasecmp(), but with any numeric - * portions of the string sorted numerically. This is particularly useful for - * node names (for example, "node10" will sort higher than "node9" but lower - * than "remotenode9"). - * - * \param[in] s1 First string to compare (must not be NULL) - * \param[in] s2 Second string to compare (must not be NULL) - * - * \retval -1 \p s1 comes before \p s2 - * \retval 0 \p s1 and \p s2 are equal - * \retval 1 \p s1 comes after \p s2 - */ -int -pcmk_numeric_strcasecmp(const char *s1, const char *s2) -{ - while (*s1 && *s2) { - if (isdigit(*s1) && isdigit(*s2)) { - // If node names contain a number, sort numerically - - char *end1 = NULL; - char *end2 = NULL; - long num1 = strtol(s1, &end1, 10); - long num2 = strtol(s2, &end2, 10); - - // allow ordering e.g. 007 > 7 - size_t len1 = end1 - s1; - size_t len2 = end2 - s2; - - if (num1 < num2) { - return -1; - } else if (num1 > num2) { - return 1; - } else if (len1 < len2) { - return -1; - } else if (len1 > len2) { - return 1; - } - s1 = end1; - s2 = end2; - } else { - // Compare non-digits case-insensitively - int lower1 = tolower(*s1); - int lower2 = tolower(*s2); - - if (lower1 < lower2) { - return -1; - } else if (lower1 > lower2) { - return 1; - } - ++s1; - ++s2; - } - } - if (!*s1 && *s2) { - return -1; - } else if (*s1 && !*s2) { - return 1; - } - return 0; -} diff --git a/lib/common/utils.c b/lib/common/utils.c index 0ac96b8..13e7cb2 100644 --- a/lib/common/utils.c +++ b/lib/common/utils.c @@ -116,6 +116,17 @@ score2char(int score) return crm_itoa(score); } +char * +generate_hash_key(const char *crm_msg_reference, const char *sys) +{ + char *hash_key = crm_strdup_printf("%s_%s", (sys? 
sys : "none"), + crm_msg_reference); + + crm_trace("created hash key: (%s)", hash_key); + return hash_key; +} + + int crm_user_lookup(const char *name, uid_t * uid, gid_t * gid) { diff --git a/lib/common/watchdog.c b/lib/common/watchdog.c index 8838be6..9d8896b 100644 --- a/lib/common/watchdog.c +++ b/lib/common/watchdog.c @@ -227,21 +227,6 @@ pcmk__get_sbd_timeout(void) return sbd_timeout; } -bool -pcmk__get_sbd_sync_resource_startup(void) -{ - static bool sync_resource_startup = false; - static bool checked_sync_resource_startup = false; - - if (!checked_sync_resource_startup) { - sync_resource_startup = - crm_is_true(getenv("SBD_SYNC_RESOURCE_STARTUP")); - checked_sync_resource_startup = true; - } - - return sync_resource_startup; -} - long pcmk__auto_watchdog_timeout() { diff --git a/lib/common/xml.c b/lib/common/xml.c index 6ff22b7..3b555d4 100644 --- a/lib/common/xml.c +++ b/lib/common/xml.c @@ -2615,6 +2615,23 @@ write_xml_file(xmlNode * xml_node, const char *filename, gboolean compress) return write_xml_stream(xml_node, filename, stream, compress); } +xmlNode * +get_message_xml(xmlNode * msg, const char *field) +{ + xmlNode *tmp = first_named_child(msg, field); + + return __xml_first_child(tmp); +} + +gboolean +add_message_xml(xmlNode * msg, const char *field, xmlNode * xml) +{ + xmlNode *holder = create_xml_node(msg, field); + + add_node_copy(holder, xml); + return TRUE; +} + static char * crm_xml_escape_shuffle(char *text, int start, int *length, const char *replace) { diff --git a/lib/lrmd/lrmd_client.c b/lib/lrmd/lrmd_client.c index a6c023b..a2c7200 100644 --- a/lib/lrmd/lrmd_client.c +++ b/lib/lrmd/lrmd_client.c @@ -27,7 +27,7 @@ #include #include #include -#include +#include #include #include diff --git a/lib/pacemaker/pcmk_sched_constraints.c b/lib/pacemaker/pcmk_sched_constraints.c index d8c3e69..9c3a88d 100644 --- a/lib/pacemaker/pcmk_sched_constraints.c +++ b/lib/pacemaker/pcmk_sched_constraints.c @@ -1595,8 +1595,8 @@ custom_action_order(pe_resource_t * lh_rsc, char *lh_action_task, pe_action_t * order = calloc(1, sizeof(pe__ordering_t)); crm_trace("Creating[%d] %s %s %s - %s %s %s", data_set->order_id, - lh_rsc?lh_rsc->id:"NA", lh_action_task?lh_action_task:"NA", lh_action?lh_action->uuid:"NA", - rh_rsc?rh_rsc->id:"NA", rh_action_task?rh_action_task:"NA", rh_action?rh_action->uuid:"NA"); + lh_rsc?lh_rsc->id:"NA", lh_action_task, lh_action?lh_action->uuid:"NA", + rh_rsc?rh_rsc->id:"NA", rh_action_task, rh_action?rh_action->uuid:"NA"); /* CRM_ASSERT(data_set->order_id != 291); */ diff --git a/lib/pacemaker/pcmk_sched_messages.c b/lib/pacemaker/pcmk_sched_messages.c index 3d09a5e..9d013be 100644 --- a/lib/pacemaker/pcmk_sched_messages.c +++ b/lib/pacemaker/pcmk_sched_messages.c @@ -20,7 +20,7 @@ #include #include -#include +#include gboolean show_scores = FALSE; gboolean show_utilization = FALSE; diff --git a/lib/pacemaker/pcmk_sched_native.c b/lib/pacemaker/pcmk_sched_native.c index 4e3bd7c..bd8a0b5 100644 --- a/lib/pacemaker/pcmk_sched_native.c +++ b/lib/pacemaker/pcmk_sched_native.c @@ -41,36 +41,27 @@ gboolean PromoteRsc(pe_resource_t * rsc, pe_node_t * next, gboolean optional, gboolean RoleError(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set); gboolean NullOp(pe_resource_t * rsc, pe_node_t * next, gboolean optional, pe_working_set_t * data_set); -/* This array says what the *next* role should be when transitioning from one - * role to another. 
For example going from Stopped to Master, the next role is - * RSC_ROLE_SLAVE, because the resource must be started before being promoted. - * The current state then becomes Started, which is fed into this array again, - * giving a next role of RSC_ROLE_MASTER. - */ -static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { - /* Current state Next state*/ - /* Unknown Stopped Started Slave Master */ +/* *INDENT-OFF* */ +enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { +/* Current State */ +/* Next State: Unknown Stopped Started Slave Master */ /* Unknown */ { RSC_ROLE_UNKNOWN, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, }, /* Stopped */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, }, /* Started */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, - /* Slave */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, - /* Master */ { RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, + /* Slave */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, + /* Master */ { RSC_ROLE_STOPPED, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_SLAVE, RSC_ROLE_MASTER, }, }; -typedef gboolean (*rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *next, - gboolean optional, - pe_working_set_t *data_set); - -// This array picks the function needed to transition from one role to another -static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = { - /* Current state Next state */ - /* Unknown Stopped Started Slave Master */ - /* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, }, - /* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, }, - /* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, }, - /* Slave */ { RoleError, StopRsc, StopRsc, NullOp, PromoteRsc, }, - /* Master */ { RoleError, DemoteRsc, DemoteRsc, DemoteRsc, NullOp , }, +gboolean (*rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX])(pe_resource_t*,pe_node_t*,gboolean,pe_working_set_t*) = { +/* Current State */ +/* Next State: Unknown Stopped Started Slave Master */ + /* Unknown */ { RoleError, StopRsc, RoleError, RoleError, RoleError, }, + /* Stopped */ { RoleError, NullOp, StartRsc, StartRsc, RoleError, }, + /* Started */ { RoleError, StopRsc, NullOp, NullOp, PromoteRsc, }, + /* Slave */ { RoleError, StopRsc, StopRsc, NullOp, PromoteRsc, }, + /* Master */ { RoleError, DemoteRsc, DemoteRsc, DemoteRsc, NullOp, }, }; +/* *INDENT-ON* */ static gboolean native_choose_node(pe_resource_t * rsc, pe_node_t * prefer, pe_working_set_t * data_set) @@ -1205,7 +1196,6 @@ native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set) pe_node_t *chosen = NULL; pe_node_t *current = NULL; gboolean need_stop = FALSE; - bool need_promote = FALSE; gboolean is_moving = FALSE; gboolean allow_migrate = is_set(rsc->flags, pe_rsc_allow_migrate) ? 
TRUE : FALSE; @@ -1310,15 +1300,8 @@ native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set) need_stop = TRUE; } else if (is_set(rsc->flags, pe_rsc_failed)) { - if (is_set(rsc->flags, pe_rsc_stop)) { - need_stop = TRUE; - pe_rsc_trace(rsc, "Recovering %s", rsc->id); - } else { - pe_rsc_trace(rsc, "Recovering %s by demotion", rsc->id); - if (rsc->next_role == RSC_ROLE_MASTER) { - need_promote = TRUE; - } - } + pe_rsc_trace(rsc, "Recovering %s", rsc->id); + need_stop = TRUE; } else if (is_set(rsc->flags, pe_rsc_block)) { pe_rsc_trace(rsc, "Block %s", rsc->id); @@ -1352,16 +1335,10 @@ native_create_actions(pe_resource_t * rsc, pe_working_set_t * data_set) while (rsc->role <= rsc->next_role && role != rsc->role && is_not_set(rsc->flags, pe_rsc_block)) { - bool required = need_stop; - next_role = rsc_state_matrix[role][rsc->role]; - if ((next_role == RSC_ROLE_MASTER) && need_promote) { - required = true; - } pe_rsc_trace(rsc, "Up: Executing: %s->%s (%s)%s", role2text(role), role2text(next_role), - rsc->id, (required? " required" : "")); - if (rsc_action_matrix[role][next_role](rsc, chosen, !required, - data_set) == FALSE) { + rsc->id, need_stop ? " required" : ""); + if (rsc_action_matrix[role][next_role] (rsc, chosen, !need_stop, data_set) == FALSE) { break; } role = next_role; @@ -2371,6 +2348,8 @@ native_expand(pe_resource_t * rsc, pe_working_set_t * data_set) } \ } while(0) +static int rsc_width = 5; +static int detail_width = 5; static void LogAction(const char *change, pe_resource_t *rsc, pe_node_t *origin, pe_node_t *destination, pe_action_t *action, pe_action_t *source, gboolean terminal) { @@ -2381,9 +2360,6 @@ LogAction(const char *change, pe_resource_t *rsc, pe_node_t *origin, pe_node_t * bool same_role = FALSE; bool need_role = FALSE; - static int rsc_width = 5; - static int detail_width = 5; - CRM_ASSERT(action); CRM_ASSERT(destination != NULL || origin != NULL); @@ -2408,40 +2384,36 @@ LogAction(const char *change, pe_resource_t *rsc, pe_node_t *origin, pe_node_t * same_role = TRUE; } - if (need_role && (origin == NULL)) { - /* Starting and promoting a promotable clone instance */ + if(need_role && origin == NULL) { + /* Promoting from Stopped */ details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), destination->details->uname); - } else if (origin == NULL) { - /* Starting a resource */ - details = crm_strdup_printf("%s", destination->details->uname); - - } else if (need_role && (destination == NULL)) { - /* Stopping a promotable clone instance */ + } else if(need_role && destination == NULL) { + /* Demoting a Master or Stopping a Slave */ details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname); - } else if (destination == NULL) { - /* Stopping a resource */ - details = crm_strdup_printf("%s", origin->details->uname); + } else if(origin == NULL || destination == NULL) { + /* Starting or stopping a resource */ + details = crm_strdup_printf("%s", origin?origin->details->uname:destination->details->uname); - } else if (need_role && same_role && same_host) { - /* Recovering, restarting or re-promoting a promotable clone instance */ + } else if(need_role && same_role && same_host) { + /* Recovering or restarting a promotable clone resource */ details = crm_strdup_printf("%s %s", role2text(rsc->role), origin->details->uname); - } else if (same_role && same_host) { + } else if(same_role && same_host) { /* Recovering or Restarting a normal resource */ details = crm_strdup_printf("%s", origin->details->uname); 
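Each branch here only varies the human-readable details string, e.g. "Slave -> Master node1" for an in-place promotion versus "node1 -> node2" for a plain move; which transitions are attempted at all is driven by the earlier role matrix. A minimal sketch of that iterative walk, using rsc_state_matrix as declared above:

    /* Sketch: stepping through intermediate roles with rsc_state_matrix */
    enum rsc_role_e role = RSC_ROLE_STOPPED;
    enum rsc_role_e target = RSC_ROLE_MASTER;

    while (role != target) {
        /* Stopped -> Slave (start), then Slave -> Master (promote) */
        role = rsc_state_matrix[role][target];
    }
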
- } else if (need_role && same_role) { - /* Moving a promotable clone instance */ + } else if(same_role && need_role) { + /* Moving a promotable clone resource */ details = crm_strdup_printf("%s -> %s %s", origin->details->uname, destination->details->uname, role2text(rsc->role)); - } else if (same_role) { + } else if(same_role) { /* Moving a normal resource */ details = crm_strdup_printf("%s -> %s", origin->details->uname, destination->details->uname); - } else if (same_host) { - /* Promoting or demoting a promotable clone instance */ + } else if(same_host) { + /* Promoting or demoting a promotable clone resource */ details = crm_strdup_printf("%s -> %s %s", role2text(rsc->role), role2text(rsc->next_role), origin->details->uname); } else { @@ -2584,19 +2556,11 @@ LogActions(pe_resource_t * rsc, pe_working_set_t * data_set, gboolean terminal) } else if (is_set(rsc->flags, pe_rsc_reload)) { LogAction("Reload", rsc, current, next, start, NULL, terminal); - } else if (start == NULL || is_set(start->flags, pe_action_optional)) { - if ((demote != NULL) && (promote != NULL) - && is_not_set(demote->flags, pe_action_optional) - && is_not_set(promote->flags, pe_action_optional)) { - LogAction("Re-promote", rsc, current, next, promote, demote, - terminal); - } else { - pe_rsc_info(rsc, "Leave %s\t(%s %s)", rsc->id, - role2text(rsc->role), next->details->uname); - } + pe_rsc_info(rsc, "Leave %s\t(%s %s)", rsc->id, role2text(rsc->role), + next->details->uname); - } else if (is_not_set(start->flags, pe_action_runnable)) { + } else if (start && is_set(start->flags, pe_action_runnable) == FALSE) { LogAction("Stop", rsc, current, NULL, stop, (stop && stop->reason)? stop : start, terminal); STOP_SANITY_ASSERT(__LINE__); @@ -2645,8 +2609,7 @@ LogActions(pe_resource_t * rsc, pe_working_set_t * data_set, gboolean terminal) free(key); - } else if (stop && is_set(rsc->flags, pe_rsc_failed) - && is_set(rsc->flags, pe_rsc_stop)) { + } else if (stop && is_set(rsc->flags, pe_rsc_failed)) { /* 'stop' may be NULL if the failure was ignored */ LogAction("Recover", rsc, current, next, stop, start, terminal); STOP_SANITY_ASSERT(__LINE__); @@ -3399,19 +3362,9 @@ ReloadRsc(pe_resource_t * rsc, pe_node_t *node, pe_working_set_t * data_set) pe_rsc_trace(rsc, "%s: unmanaged", rsc->id); return; - } else if (is_set(rsc->flags, pe_rsc_failed)) { - /* We don't need to specify any particular actions here, normal failure - * recovery will apply. - */ - pe_rsc_trace(rsc, "%s: preventing reload because failed", rsc->id); - return; - - } else if (is_set(rsc->flags, pe_rsc_start_pending)) { - /* If a resource's configuration changed while a start was pending, - * force a full restart. - */ - pe_rsc_trace(rsc, "%s: preventing reload because start pending", rsc->id); - stop_action(rsc, node, FALSE); + } else if (is_set(rsc->flags, pe_rsc_failed) || is_set(rsc->flags, pe_rsc_start_pending)) { + pe_rsc_trace(rsc, "%s: general resource state: flags=0x%.16llx", rsc->id, rsc->flags); + stop_action(rsc, node, FALSE); /* Force a full restart, overkill? 
*/ return; } else if (node == NULL) { diff --git a/lib/pengine/common.c b/lib/pengine/common.c index 37f287b..ded6df8 100644 --- a/lib/pengine/common.c +++ b/lib/pengine/common.c @@ -54,7 +54,7 @@ static pcmk__cluster_option_t pe_opts[] = { * long description */ { - "no-quorum-policy", NULL, "enum", "stop, freeze, ignore, demote, suicide", + "no-quorum-policy", NULL, "enum", "stop, freeze, ignore, suicide", "stop", pcmk__valid_quorum, "What to do when the cluster does not have quorum", NULL @@ -326,9 +326,6 @@ fail2text(enum action_fail_response fail) case action_fail_ignore: result = "ignore"; break; - case action_fail_demote: - result = "demote"; - break; case action_fail_block: result = "block"; break; diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c index 1f06348..16f3a71 100644 --- a/lib/pengine/complex.c +++ b/lib/pengine/complex.c @@ -95,23 +95,10 @@ void get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set) { - pe_rsc_eval_data_t rsc_rule_data = { - .standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS), - .provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER), - .agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE) - }; - - pe_rule_eval_data_t rule_data = { - .node_hash = NULL, - .role = RSC_ROLE_UNKNOWN, - .now = data_set->now, - .match_data = NULL, - .rsc_data = &rsc_rule_data, - .op_data = NULL - }; + GHashTable *node_hash = NULL; if (node) { - rule_data.node_hash = node->details->attrs; + node_hash = node->details->attrs; } if (rsc->xml) { @@ -125,7 +112,7 @@ get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc, } } - pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_META_SETS, &rule_data, + pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_META_SETS, node_hash, meta_hash, NULL, FALSE, data_set); /* set anything else based on the parent */ @@ -135,27 +122,20 @@ get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc, /* and finally check the defaults */ pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_META_SETS, - &rule_data, meta_hash, NULL, FALSE, data_set); + node_hash, meta_hash, NULL, FALSE, data_set); } void get_rsc_attributes(GHashTable * meta_hash, pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set) { - pe_rule_eval_data_t rule_data = { - .node_hash = NULL, - .role = RSC_ROLE_UNKNOWN, - .now = data_set->now, - .match_data = NULL, - .rsc_data = NULL, - .op_data = NULL - }; + GHashTable *node_hash = NULL; if (node) { - rule_data.node_hash = node->details->attrs; + node_hash = node->details->attrs; } - pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_ATTR_SETS, &rule_data, + pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_ATTR_SETS, node_hash, meta_hash, NULL, FALSE, data_set); /* set anything else based on the parent */ @@ -165,7 +145,7 @@ get_rsc_attributes(GHashTable * meta_hash, pe_resource_t * rsc, } else { /* and finally check the defaults */ pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_ATTR_SETS, - &rule_data, meta_hash, NULL, FALSE, data_set); + node_hash, meta_hash, NULL, FALSE, data_set); } } @@ -396,15 +376,6 @@ common_unpack(xmlNode * xml_obj, pe_resource_t ** rsc, bool remote_node = FALSE; bool has_versioned_params = FALSE; - pe_rule_eval_data_t rule_data = { - .node_hash = NULL, - .role = RSC_ROLE_UNKNOWN, - .now = data_set->now, - .match_data = NULL, - .rsc_data = NULL, - .op_data = NULL - }; - crm_log_xml_trace(xml_obj, "Processing resource input..."); if (id == NULL) { @@ -735,7 +706,7 @@ common_unpack(xmlNode * xml_obj, 
pe_resource_t ** rsc, (*rsc)->utilization = crm_str_table_new(); - pe__unpack_dataset_nvpairs((*rsc)->xml, XML_TAG_UTILIZATION, &rule_data, + pe__unpack_dataset_nvpairs((*rsc)->xml, XML_TAG_UTILIZATION, NULL, (*rsc)->utilization, NULL, FALSE, data_set); /* data_set->resources = g_list_append(data_set->resources, (*rsc)); */ diff --git a/lib/pengine/native.c b/lib/pengine/native.c index 20658a0..f0d83d7 100644 --- a/lib/pengine/native.c +++ b/lib/pengine/native.c @@ -359,22 +359,22 @@ native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const c gboolean native_active(pe_resource_t * rsc, gboolean all) { - for (GList *gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) { + GListPtr gIter = rsc->running_on; + + for (; gIter != NULL; gIter = gIter->next) { pe_node_t *a_node = (pe_node_t *) gIter->data; if (a_node->details->unclean) { - pe_rsc_trace(rsc, "Resource %s: node %s is unclean", - rsc->id, a_node->details->uname); + crm_debug("Resource %s: node %s is unclean", rsc->id, a_node->details->uname); return TRUE; } else if (a_node->details->online == FALSE) { - pe_rsc_trace(rsc, "Resource %s: node %s is offline", - rsc->id, a_node->details->uname); + crm_debug("Resource %s: node %s is offline", rsc->id, a_node->details->uname); } else { - pe_rsc_trace(rsc, "Resource %s active on %s", - rsc->id, a_node->details->uname); + crm_debug("Resource %s active on %s", rsc->id, a_node->details->uname); return TRUE; } } + return FALSE; } diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c index ad469ab..75bf0d5 100644 --- a/lib/pengine/pe_output.c +++ b/lib/pengine/pe_output.c @@ -729,11 +729,6 @@ pe__cluster_options_html(pcmk__output_t *out, va_list args) { out->list_item(out, NULL, "No quorum policy: Stop ALL resources"); break; - case no_quorum_demote: - out->list_item(out, NULL, "No quorum policy: Demote promotable " - "resources and stop all other resources"); - break; - case no_quorum_ignore: out->list_item(out, NULL, "No quorum policy: Ignore"); break; @@ -790,11 +785,6 @@ pe__cluster_options_text(pcmk__output_t *out, va_list args) { out->list_item(out, NULL, "No quorum policy: Stop ALL resources"); break; - case no_quorum_demote: - out->list_item(out, NULL, "No quorum policy: Demote promotable " - "resources and stop all other resources"); - break; - case no_quorum_ignore: out->list_item(out, NULL, "No quorum policy: Ignore"); break; @@ -827,10 +817,6 @@ pe__cluster_options_xml(pcmk__output_t *out, va_list args) { xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "stop"); break; - case no_quorum_demote: - xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "demote"); - break; - case no_quorum_ignore: xmlSetProp(node, (pcmkXmlStr) "no-quorum-policy", (pcmkXmlStr) "ignore"); break; diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c index b0fca55..fa9a222 100644 --- a/lib/pengine/rules.c +++ b/lib/pengine/rules.c @@ -38,16 +38,25 @@ gboolean pe_evaluate_rules(xmlNode *ruleset, GHashTable *node_hash, crm_time_t *now, crm_time_t *next_change) { - pe_rule_eval_data_t rule_data = { - .node_hash = node_hash, - .role = RSC_ROLE_UNKNOWN, - .now = now, - .match_data = NULL, - .rsc_data = NULL, - .op_data = NULL - }; - - return pe_eval_rules(ruleset, &rule_data, next_change); + // If there are no rules, pass by default + gboolean ruleset_default = TRUE; + + for (xmlNode *rule = first_named_child(ruleset, XML_TAG_RULE); + rule != NULL; rule = crm_next_same_xml(rule)) { + + ruleset_default = FALSE; + if (pe_test_rule(rule, node_hash, 
RSC_ROLE_UNKNOWN, now, next_change, + NULL)) { + /* Only the deprecated "lifetime" element of location constraints + * may contain more than one rule at the top level -- the schema + * limits a block of nvpairs to a single top-level rule. So, this + * effectively means that a lifetime is active if any rule it + * contains is active. + */ + return TRUE; + } + } + return ruleset_default; } gboolean @@ -55,16 +64,44 @@ pe_test_rule(xmlNode *rule, GHashTable *node_hash, enum rsc_role_e role, crm_time_t *now, crm_time_t *next_change, pe_match_data_t *match_data) { - pe_rule_eval_data_t rule_data = { - .node_hash = node_hash, - .role = role, - .now = now, - .match_data = match_data, - .rsc_data = NULL, - .op_data = NULL - }; - - return pe_eval_expr(rule, &rule_data, next_change); + xmlNode *expr = NULL; + gboolean test = TRUE; + gboolean empty = TRUE; + gboolean passed = TRUE; + gboolean do_and = TRUE; + const char *value = NULL; + + rule = expand_idref(rule, NULL); + value = crm_element_value(rule, XML_RULE_ATTR_BOOLEAN_OP); + if (safe_str_eq(value, "or")) { + do_and = FALSE; + passed = FALSE; + } + + crm_trace("Testing rule %s", ID(rule)); + for (expr = __xml_first_child_element(rule); expr != NULL; + expr = __xml_next_element(expr)) { + + test = pe_test_expression(expr, node_hash, role, now, next_change, + match_data); + empty = FALSE; + + if (test && do_and == FALSE) { + crm_trace("Expression %s/%s passed", ID(rule), ID(expr)); + return TRUE; + + } else if (test == FALSE && do_and) { + crm_trace("Expression %s/%s failed", ID(rule), ID(expr)); + return FALSE; + } + } + + if (empty) { + crm_err("Invalid Rule %s: rules must contain at least one expression", ID(rule)); + } + + crm_trace("Rule %s %s", ID(rule), passed ? "passed" : "failed"); + return passed; } /*! @@ -88,16 +125,56 @@ pe_test_expression(xmlNode *expr, GHashTable *node_hash, enum rsc_role_e role, crm_time_t *now, crm_time_t *next_change, pe_match_data_t *match_data) { - pe_rule_eval_data_t rule_data = { - .node_hash = node_hash, - .role = role, - .now = now, - .match_data = match_data, - .rsc_data = NULL, - .op_data = NULL - }; - - return pe_eval_subexpr(expr, &rule_data, next_change); + gboolean accept = FALSE; + const char *uname = NULL; + + switch (find_expression_type(expr)) { + case nested_rule: + accept = pe_test_rule(expr, node_hash, role, now, next_change, + match_data); + break; + case attr_expr: + case loc_expr: + /* these expressions can never succeed if there is + * no node to compare with + */ + if (node_hash != NULL) { + accept = pe_test_attr_expression(expr, node_hash, now, match_data); + } + break; + + case time_expr: + accept = pe_test_date_expression(expr, now, next_change); + break; + + case role_expr: + accept = pe_test_role_expression(expr, role, now); + break; + +#if ENABLE_VERSIONED_ATTRS + case version_expr: + if (node_hash && g_hash_table_lookup_extended(node_hash, + CRM_ATTR_RA_VERSION, + NULL, NULL)) { + accept = pe_test_attr_expression(expr, node_hash, now, NULL); + } else { + // we are going to test it when we have ra-version + accept = TRUE; + } + break; +#endif + + default: + CRM_CHECK(FALSE /* bad type */ , return FALSE); + accept = FALSE; + } + if (node_hash) { + uname = g_hash_table_lookup(node_hash, CRM_ATTR_UNAME); + } + + crm_trace("Expression %s %s on %s", + ID(expr), accept ? "passed" : "failed", uname ? 
uname : "all nodes"); + return accept; } enum expression_type @@ -112,12 +189,6 @@ find_expression_type(xmlNode * expr) if (safe_str_eq(tag, "date_expression")) { return time_expr; - } else if (safe_str_eq(tag, "rsc_expression")) { - return rsc_expr; - - } else if (safe_str_eq(tag, "op_expression")) { - return op_expr; - } else if (safe_str_eq(tag, XML_TAG_RULE)) { return nested_rule; @@ -142,134 +213,301 @@ find_expression_type(xmlNode * expr) } gboolean -pe_test_role_expression(xmlNode *expr, enum rsc_role_e role, crm_time_t *now) +pe_test_role_expression(xmlNode * expr, enum rsc_role_e role, crm_time_t * now) { - pe_rule_eval_data_t rule_data = { - .node_hash = NULL, - .role = role, - .now = now, - .match_data = NULL, - .rsc_data = NULL, - .op_data = NULL - }; - - return pe__eval_role_expr(expr, &rule_data); + gboolean accept = FALSE; + const char *op = NULL; + const char *value = NULL; + + if (role == RSC_ROLE_UNKNOWN) { + return accept; + } + + value = crm_element_value(expr, XML_EXPR_ATTR_VALUE); + op = crm_element_value(expr, XML_EXPR_ATTR_OPERATION); + + if (safe_str_eq(op, "defined")) { + if (role > RSC_ROLE_STARTED) { + accept = TRUE; + } + + } else if (safe_str_eq(op, "not_defined")) { + if (role < RSC_ROLE_SLAVE && role > RSC_ROLE_UNKNOWN) { + accept = TRUE; + } + + } else if (safe_str_eq(op, "eq")) { + if (text2role(value) == role) { + accept = TRUE; + } + + } else if (safe_str_eq(op, "ne")) { + // Test "ne" only with promotable clone roles + if (role < RSC_ROLE_SLAVE && role > RSC_ROLE_UNKNOWN) { + accept = FALSE; + + } else if (text2role(value) != role) { + accept = TRUE; + } + } + return accept; } gboolean pe_test_attr_expression(xmlNode *expr, GHashTable *hash, crm_time_t *now, pe_match_data_t *match_data) { - pe_rule_eval_data_t rule_data = { - .node_hash = hash, - .role = RSC_ROLE_UNKNOWN, - .now = now, - .match_data = match_data, - .rsc_data = NULL, - .op_data = NULL - }; - - return pe__eval_attr_expr(expr, &rule_data); -} - -/* As per the nethack rules: - * - * moon period = 29.53058 days ~= 30, year = 365.2422 days - * days moon phase advances on first day of year compared to preceding year - * = 365.2422 - 12*29.53058 ~= 11 - * years in Metonic cycle (time until same phases fall on the same days of - * the month) = 18.6 ~= 19 - * moon phase on first day of year (epact) ~= (11*(year%19) + 29) % 30 - * (29 as initial condition) - * current phase in days = first day phase + days elapsed in year - * 6 moons ~= 177 days - * 177 ~= 8 reported phases * 22 - * + 11/22 for rounding - * - * 0-7, with 0: new, 4: full - */ + gboolean accept = FALSE; + gboolean attr_allocated = FALSE; + int cmp = 0; + const char *h_val = NULL; + GHashTable *table = NULL; -static int -phase_of_the_moon(crm_time_t * now) -{ - uint32_t epact, diy, goldn; - uint32_t y; + const char *op = NULL; + const char *type = NULL; + const char *attr = NULL; + const char *value = NULL; + const char *value_source = NULL; - crm_time_get_ordinal(now, &y, &diy); + attr = crm_element_value(expr, XML_EXPR_ATTR_ATTRIBUTE); + op = crm_element_value(expr, XML_EXPR_ATTR_OPERATION); + value = crm_element_value(expr, XML_EXPR_ATTR_VALUE); + type = crm_element_value(expr, XML_EXPR_ATTR_TYPE); + value_source = crm_element_value(expr, XML_EXPR_ATTR_VALUE_SOURCE); - goldn = (y % 19) + 1; - epact = (11 * goldn + 18) % 30; - if ((epact == 25 && goldn > 11) || epact == 24) - epact++; + if (attr == NULL || op == NULL) { + pe_err("Invalid attribute or operation in expression" + " (\'%s\' \'%s\' \'%s\')", crm_str(attr), crm_str(op), 
crm_str(value)); + return FALSE; + } - return ((((((diy + epact) * 6) + 11) % 177) / 22) & 7); -} + if (match_data) { + if (match_data->re) { + char *resolved_attr = pe_expand_re_matches(attr, match_data->re); -static int -check_one(xmlNode *cron_spec, const char *xml_field, uint32_t time_field) { - int rc = pcmk_rc_undetermined; - const char *value = crm_element_value(cron_spec, xml_field); - long long low, high; + if (resolved_attr) { + attr = (const char *) resolved_attr; + attr_allocated = TRUE; + } + } - if (value == NULL) { - /* Return pe_date_result_undetermined if the field is missing. */ - goto bail; + if (safe_str_eq(value_source, "param")) { + table = match_data->params; + } else if (safe_str_eq(value_source, "meta")) { + table = match_data->meta; + } } - if (pcmk__parse_ll_range(value, &low, &high) == pcmk_rc_unknown_format) { - goto bail; - } else if (low == high) { - /* A single number was given, not a range. */ - if (time_field < low) { - rc = pcmk_rc_before_range; - } else if (time_field > high) { - rc = pcmk_rc_after_range; - } else { - rc = pcmk_rc_within_range; - } - } else if (low != -1 && high != -1) { - /* This is a range with both bounds. */ - if (time_field < low) { - rc = pcmk_rc_before_range; - } else if (time_field > high) { - rc = pcmk_rc_after_range; - } else { - rc = pcmk_rc_within_range; + if (table) { + const char *param_name = value; + const char *param_value = NULL; + + if (param_name && param_name[0]) { + if ((param_value = (const char *)g_hash_table_lookup(table, param_name))) { + value = param_value; + } } - } else if (low == -1) { - /* This is a range with no starting value. */ - rc = time_field <= high ? pcmk_rc_within_range : pcmk_rc_after_range; - } else if (high == -1) { - /* This is a range with no ending value. */ - rc = time_field >= low ? pcmk_rc_within_range : pcmk_rc_before_range; } -bail: - if (rc == pcmk_rc_within_range) { - crm_debug("Condition '%s' in %s: passed", value, xml_field); - } else { - crm_debug("Condition '%s' in %s: failed", value, xml_field); + if (hash != NULL) { + h_val = (const char *)g_hash_table_lookup(hash, attr); } - return rc; -} - -static gboolean -check_passes(int rc) { - /* _within_range is obvious. _undetermined is a pass because - * this is the return value if a field is not given. In this - * case, we just want to ignore it and check other fields to - * see if they place some restriction on what can pass. 
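To make the range helper concrete: pcmk__parse_ll_range() yields a single value (low == high), a bounded range, or a half-open range (one bound left at -1), and check_one() classifies the current time field against it. An illustrative sketch under those assumptions:

    /* Sketch: how check_one() classifies a time field against "10-20" */
    long long low = 0, high = 0;

    if (pcmk__parse_ll_range("10-20", &low, &high) != pcmk_rc_unknown_format) {
        /* time_field 9 -> pcmk_rc_before_range, 15 -> pcmk_rc_within_range,
         * 21 -> pcmk_rc_after_range
         */
    }
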
- */ - return rc == pcmk_rc_within_range || rc == pcmk_rc_undetermined; -} + if (attr_allocated) { + free((char *)attr); + attr = NULL; + } -#define CHECK_ONE(spec, name, var) do { \ - int subpart_rc = check_one(spec, name, var); \ - if (check_passes(subpart_rc) == FALSE) { \ - return subpart_rc; \ - } \ -} while (0) + if (value != NULL && h_val != NULL) { + if (type == NULL) { + if (safe_str_eq(op, "lt") + || safe_str_eq(op, "lte") + || safe_str_eq(op, "gt") + || safe_str_eq(op, "gte")) { + type = "number"; + + } else { + type = "string"; + } + crm_trace("Defaulting to %s based comparison for '%s' op", type, op); + } + + if (safe_str_eq(type, "string")) { + cmp = strcasecmp(h_val, value); + + } else if (safe_str_eq(type, "number")) { + int h_val_f = crm_parse_int(h_val, NULL); + int value_f = crm_parse_int(value, NULL); + + if (h_val_f < value_f) { + cmp = -1; + } else if (h_val_f > value_f) { + cmp = 1; + } else { + cmp = 0; + } + + } else if (safe_str_eq(type, "version")) { + cmp = compare_version(h_val, value); + + } + + } else if (value == NULL && h_val == NULL) { + cmp = 0; + } else if (value == NULL) { + cmp = 1; + } else { + cmp = -1; + } + + if (safe_str_eq(op, "defined")) { + if (h_val != NULL) { + accept = TRUE; + } + + } else if (safe_str_eq(op, "not_defined")) { + if (h_val == NULL) { + accept = TRUE; + } + + } else if (safe_str_eq(op, "eq")) { + if ((h_val == value) || cmp == 0) { + accept = TRUE; + } + + } else if (safe_str_eq(op, "ne")) { + if ((h_val == NULL && value != NULL) + || (h_val != NULL && value == NULL) + || cmp != 0) { + accept = TRUE; + } + + } else if (value == NULL || h_val == NULL) { + // The comparison is meaningless from this point on + accept = FALSE; + + } else if (safe_str_eq(op, "lt")) { + if (cmp < 0) { + accept = TRUE; + } + + } else if (safe_str_eq(op, "lte")) { + if (cmp <= 0) { + accept = TRUE; + } + + } else if (safe_str_eq(op, "gt")) { + if (cmp > 0) { + accept = TRUE; + } + + } else if (safe_str_eq(op, "gte")) { + if (cmp >= 0) { + accept = TRUE; + } + } + + return accept; +} + +/* As per the nethack rules: + * + * moon period = 29.53058 days ~= 30, year = 365.2422 days + * days moon phase advances on first day of year compared to preceding year + * = 365.2422 - 12*29.53058 ~= 11 + * years in Metonic cycle (time until same phases fall on the same days of + * the month) = 18.6 ~= 19 + * moon phase on first day of year (epact) ~= (11*(year%19) + 29) % 30 + * (29 as initial condition) + * current phase in days = first day phase + days elapsed in year + * 6 moons ~= 177 days + * 177 ~= 8 reported phases * 22 + * + 11/22 for rounding + * + * 0-7, with 0: new, 4: full + */ + +static int +phase_of_the_moon(crm_time_t * now) +{ + uint32_t epact, diy, goldn; + uint32_t y; + + crm_time_get_ordinal(now, &y, &diy); + + goldn = (y % 19) + 1; + epact = (11 * goldn + 18) % 30; + if ((epact == 25 && goldn > 11) || epact == 24) + epact++; + + return ((((((diy + epact) * 6) + 11) % 177) / 22) & 7); +} + +static int +check_one(xmlNode *cron_spec, const char *xml_field, uint32_t time_field) { + int rc = pcmk_rc_undetermined; + const char *value = crm_element_value(cron_spec, xml_field); + long long low, high; + + if (value == NULL) { + /* Return pe_date_result_undetermined if the field is missing. */ + goto bail; + } + + if (pcmk__parse_ll_range(value, &low, &high) == pcmk_rc_unknown_format) { + goto bail; + } else if (low == high) { + /* A single number was given, not a range. 
*/ + if (time_field < low) { + rc = pcmk_rc_before_range; + } else if (time_field > high) { + rc = pcmk_rc_after_range; + } else { + rc = pcmk_rc_within_range; + } + } else if (low != -1 && high != -1) { + /* This is a range with both bounds. */ + if (time_field < low) { + rc = pcmk_rc_before_range; + } else if (time_field > high) { + rc = pcmk_rc_after_range; + } else { + rc = pcmk_rc_within_range; + } + } else if (low == -1) { + /* This is a range with no starting value. */ + rc = time_field <= high ? pcmk_rc_within_range : pcmk_rc_after_range; + } else if (high == -1) { + /* This is a range with no ending value. */ + rc = time_field >= low ? pcmk_rc_within_range : pcmk_rc_before_range; + } + +bail: + if (rc == pcmk_rc_within_range) { + crm_debug("Condition '%s' in %s: passed", value, xml_field); + } else { + crm_debug("Condition '%s' in %s: failed", value, xml_field); + } + + return rc; +} + +static gboolean +check_passes(int rc) { + /* _within_range is obvious. _undetermined is a pass because + * this is the return value if a field is not given. In this + * case, we just want to ignore it and check other fields to + * see if they place some restriction on what can pass. + */ + return rc == pcmk_rc_within_range || rc == pcmk_rc_undetermined; +} + +#define CHECK_ONE(spec, name, var) do { \ + int subpart_rc = check_one(spec, name, var); \ + if (check_passes(subpart_rc) == FALSE) { \ + return subpart_rc; \ + } \ +} while (0) int pe_cron_range_satisfied(crm_time_t * now, xmlNode * cron_spec) @@ -343,18 +581,10 @@ pe_parse_xml_duration(crm_time_t * start, xmlNode * duration_spec) * \return TRUE if date expression is in effect at given time, FALSE otherwise */ gboolean -pe_test_date_expression(xmlNode *expr, crm_time_t *now, crm_time_t *next_change) +pe_test_date_expression(xmlNode *time_expr, crm_time_t *now, + crm_time_t *next_change) { - pe_rule_eval_data_t rule_data = { - .node_hash = NULL, - .role = RSC_ROLE_UNKNOWN, - .now = now, - .match_data = NULL, - .rsc_data = NULL, - .op_data = NULL - }; - - switch (pe__eval_date_expr(expr, &rule_data, next_change)) { + switch (pe_eval_date_expression(time_expr, now, next_change)) { case pcmk_rc_within_range: case pcmk_rc_ok: return TRUE; @@ -387,18 +617,86 @@ crm_time_set_if_earlier(crm_time_t *next_change, crm_time_t *t) * \return Standard Pacemaker return code */ int -pe_eval_date_expression(xmlNode *expr, crm_time_t *now, crm_time_t *next_change) +pe_eval_date_expression(xmlNode *time_expr, crm_time_t *now, + crm_time_t *next_change) { - pe_rule_eval_data_t rule_data = { - .node_hash = NULL, - .role = RSC_ROLE_UNKNOWN, - .now = now, - .match_data = NULL, - .rsc_data = NULL, - .op_data = NULL - }; - - return pe__eval_date_expr(expr, &rule_data, next_change); + crm_time_t *start = NULL; + crm_time_t *end = NULL; + const char *value = NULL; + const char *op = crm_element_value(time_expr, "operation"); + + xmlNode *duration_spec = NULL; + xmlNode *date_spec = NULL; + + // "undetermined" will also be returned for parsing errors + int rc = pcmk_rc_undetermined; + + crm_trace("Testing expression: %s", ID(time_expr)); + + duration_spec = first_named_child(time_expr, "duration"); + date_spec = first_named_child(time_expr, "date_spec"); + + value = crm_element_value(time_expr, "start"); + if (value != NULL) { + start = crm_time_new(value); + } + value = crm_element_value(time_expr, "end"); + if (value != NULL) { + end = crm_time_new(value); + } + + if (start != NULL && end == NULL && duration_spec != NULL) { + end = pe_parse_xml_duration(start, 
duration_spec); + } + + if ((op == NULL) || safe_str_eq(op, "in_range")) { + if ((start == NULL) && (end == NULL)) { + // in_range requires at least one of start or end + } else if ((start != NULL) && (crm_time_compare(now, start) < 0)) { + rc = pcmk_rc_before_range; + crm_time_set_if_earlier(next_change, start); + } else if ((end != NULL) && (crm_time_compare(now, end) > 0)) { + rc = pcmk_rc_after_range; + } else { + rc = pcmk_rc_within_range; + if (end && next_change) { + // Evaluation doesn't change until second after end + crm_time_add_seconds(end, 1); + crm_time_set_if_earlier(next_change, end); + } + } + + } else if (safe_str_eq(op, "date_spec")) { + rc = pe_cron_range_satisfied(now, date_spec); + // @TODO set next_change appropriately + + } else if (safe_str_eq(op, "gt")) { + if (start == NULL) { + // gt requires start + } else if (crm_time_compare(now, start) > 0) { + rc = pcmk_rc_within_range; + } else { + rc = pcmk_rc_before_range; + + // Evaluation doesn't change until second after start + crm_time_add_seconds(start, 1); + crm_time_set_if_earlier(next_change, start); + } + + } else if (safe_str_eq(op, "lt")) { + if (end == NULL) { + // lt requires end + } else if (crm_time_compare(now, end) < 0) { + rc = pcmk_rc_within_range; + crm_time_set_if_earlier(next_change, end); + } else { + rc = pcmk_rc_after_range; + } + } + + crm_time_free(start); + crm_time_free(end); + return rc; } // Information about a block of nvpair elements @@ -463,6 +761,7 @@ populate_hash(xmlNode * nvpair_list, GHashTable * hash, gboolean overwrite, xmlN name = crm_element_value(ref_nvpair, XML_NVPAIR_ATTR_NAME); } + crm_trace("Setting attribute: %s", name); value = crm_element_value(an_attr, XML_NVPAIR_ATTR_VALUE); if (value == NULL) { value = crm_element_value(ref_nvpair, XML_NVPAIR_ATTR_VALUE); @@ -470,6 +769,7 @@ populate_hash(xmlNode * nvpair_list, GHashTable * hash, gboolean overwrite, xmlN if (name == NULL || value == NULL) { continue; + } old_value = g_hash_table_lookup(hash, name); @@ -482,7 +782,6 @@ populate_hash(xmlNode * nvpair_list, GHashTable * hash, gboolean overwrite, xmlN continue; } else if (old_value == NULL) { - crm_trace("Setting attribute: %s = %s", name, value); g_hash_table_insert(hash, strdup(name), strdup(value)); } else if (overwrite) { @@ -554,9 +853,10 @@ add_versioned_attributes(xmlNode * attr_set, xmlNode * versioned_attrs) typedef struct unpack_data_s { gboolean overwrite; + GHashTable *node_hash; void *hash; + crm_time_t *now; crm_time_t *next_change; - pe_rule_eval_data_t *rule_data; xmlNode *top; } unpack_data_t; @@ -566,14 +866,14 @@ unpack_attr_set(gpointer data, gpointer user_data) sorted_set_t *pair = data; unpack_data_t *unpack_data = user_data; - if (!pe_eval_rules(pair->attr_set, unpack_data->rule_data, - unpack_data->next_change)) { + if (!pe_evaluate_rules(pair->attr_set, unpack_data->node_hash, + unpack_data->now, unpack_data->next_change)) { return; } #if ENABLE_VERSIONED_ATTRS - if (get_versioned_rule(pair->attr_set) && !(unpack_data->rule_data->node_hash && - g_hash_table_lookup_extended(unpack_data->rule_data->node_hash, + if (get_versioned_rule(pair->attr_set) && !(unpack_data->node_hash && + g_hash_table_lookup_extended(unpack_data->node_hash, CRM_ATTR_RA_VERSION, NULL, NULL))) { // we haven't actually tested versioned expressions yet return; @@ -591,8 +891,8 @@ unpack_versioned_attr_set(gpointer data, gpointer user_data) sorted_set_t *pair = data; unpack_data_t *unpack_data = user_data; - if (pe_eval_rules(pair->attr_set, unpack_data->rule_data, - 
unpack_data->next_change)) { + if (pe_evaluate_rules(pair->attr_set, unpack_data->node_hash, + unpack_data->now, unpack_data->next_change)) { add_versioned_attributes(pair->attr_set, unpack_data->hash); } } @@ -656,17 +956,19 @@ make_pairs(xmlNode *top, xmlNode *xml_obj, const char *set_name, * \param[in] top XML document root (used to expand id-ref's) * \param[in] xml_obj XML element containing blocks of nvpair elements * \param[in] set_name If not NULL, only use blocks of this element type + * \param[in] node_hash Node attributes to use when evaluating rules * \param[out] hash Where to store extracted name/value pairs * \param[in] always_first If not NULL, process block with this ID first * \param[in] overwrite Whether to replace existing values with same name - * \param[in] rule_data Matching parameters to use when unpacking + * \param[in] now Time to use when evaluating rules * \param[out] next_change If not NULL, set to when rule evaluation will change * \param[in] unpack_func Function to call to unpack each block */ static void unpack_nvpair_blocks(xmlNode *top, xmlNode *xml_obj, const char *set_name, - void *hash, const char *always_first, gboolean overwrite, - pe_rule_eval_data_t *rule_data, crm_time_t *next_change, + GHashTable *node_hash, void *hash, + const char *always_first, gboolean overwrite, + crm_time_t *now, crm_time_t *next_change, GFunc unpack_func) { GList *pairs = make_pairs(top, xml_obj, set_name, always_first); @@ -674,10 +976,11 @@ unpack_nvpair_blocks(xmlNode *top, xmlNode *xml_obj, const char *set_name, if (pairs) { unpack_data_t data = { .hash = hash, + .node_hash = node_hash, + .now = now, .overwrite = overwrite, .next_change = next_change, .top = top, - .rule_data = rule_data }; g_list_foreach(pairs, unpack_func, &data); @@ -685,16 +988,6 @@ unpack_nvpair_blocks(xmlNode *top, xmlNode *xml_obj, const char *set_name, } } -void -pe_eval_nvpairs(xmlNode *top, xmlNode *xml_obj, const char *set_name, - pe_rule_eval_data_t *rule_data, GHashTable *hash, - const char *always_first, gboolean overwrite, - crm_time_t *next_change) -{ - unpack_nvpair_blocks(top, xml_obj, set_name, hash, always_first, - overwrite, rule_data, next_change, unpack_attr_set); -} - /*! 
* \brief Extract nvpair blocks contained by an XML element into a hash table * @@ -714,46 +1007,19 @@ pe_unpack_nvpairs(xmlNode *top, xmlNode *xml_obj, const char *set_name, const char *always_first, gboolean overwrite, crm_time_t *now, crm_time_t *next_change) { - pe_rule_eval_data_t rule_data = { - .node_hash = node_hash, - .role = RSC_ROLE_UNKNOWN, - .now = now, - .match_data = NULL, - .rsc_data = NULL, - .op_data = NULL - }; - - pe_eval_nvpairs(top, xml_obj, set_name, &rule_data, hash, - always_first, overwrite, next_change); + unpack_nvpair_blocks(top, xml_obj, set_name, node_hash, hash, always_first, + overwrite, now, next_change, unpack_attr_set); } #if ENABLE_VERSIONED_ATTRS void -pe_eval_versioned_attributes(xmlNode *top, xmlNode *xml_obj, const char *set_name, - pe_rule_eval_data_t *rule_data, xmlNode *hash, - crm_time_t *next_change) -{ - unpack_nvpair_blocks(top, xml_obj, set_name, hash, NULL, FALSE, rule_data, - next_change, unpack_versioned_attr_set); -} - -void pe_unpack_versioned_attributes(xmlNode *top, xmlNode *xml_obj, const char *set_name, GHashTable *node_hash, xmlNode *hash, crm_time_t *now, crm_time_t *next_change) { - pe_rule_eval_data_t rule_data = { - .node_hash = node_hash, - .role = RSC_ROLE_UNKNOWN, - .now = now, - .match_data = NULL, - .rsc_data = NULL, - .op_data = NULL - }; - - unpack_nvpair_blocks(top, xml_obj, set_name, hash, NULL, FALSE, - &rule_data, next_change, unpack_versioned_attr_set); + unpack_nvpair_blocks(top, xml_obj, set_name, node_hash, hash, NULL, FALSE, + now, next_change, unpack_versioned_attr_set); } #endif @@ -839,481 +1105,6 @@ pe_unpack_versioned_parameters(xmlNode *versioned_params, const char *ra_version } #endif -gboolean -pe_eval_rules(xmlNode *ruleset, pe_rule_eval_data_t *rule_data, crm_time_t *next_change) -{ - // If there are no rules, pass by default - gboolean ruleset_default = TRUE; - - for (xmlNode *rule = first_named_child(ruleset, XML_TAG_RULE); - rule != NULL; rule = crm_next_same_xml(rule)) { - - ruleset_default = FALSE; - if (pe_eval_expr(rule, rule_data, next_change)) { - /* Only the deprecated "lifetime" element of location constraints - * may contain more than one rule at the top level -- the schema - * limits a block of nvpairs to a single top-level rule. So, this - * effectively means that a lifetime is active if any rule it - * contains is active. - */ - return TRUE; - } - } - - return ruleset_default; -} - -gboolean -pe_eval_expr(xmlNode *rule, pe_rule_eval_data_t *rule_data, crm_time_t *next_change) -{ - xmlNode *expr = NULL; - gboolean test = TRUE; - gboolean empty = TRUE; - gboolean passed = TRUE; - gboolean do_and = TRUE; - const char *value = NULL; - - rule = expand_idref(rule, NULL); - value = crm_element_value(rule, XML_RULE_ATTR_BOOLEAN_OP); - if (safe_str_eq(value, "or")) { - do_and = FALSE; - passed = FALSE; - } - - crm_trace("Testing rule %s", ID(rule)); - for (expr = __xml_first_child_element(rule); expr != NULL; - expr = __xml_next_element(expr)) { - - test = pe_eval_subexpr(expr, rule_data, next_change); - empty = FALSE; - - if (test && do_and == FALSE) { - crm_trace("Expression %s/%s passed", ID(rule), ID(expr)); - return TRUE; - - } else if (test == FALSE && do_and) { - crm_trace("Expression %s/%s failed", ID(rule), ID(expr)); - return FALSE; - } - } - - if (empty) { - crm_err("Invalid Rule %s: rules must contain at least one expression", ID(rule)); - } - - crm_trace("Rule %s %s", ID(rule), passed ? 
"passed" : "failed"); - return passed; -} - -gboolean -pe_eval_subexpr(xmlNode *expr, pe_rule_eval_data_t *rule_data, crm_time_t *next_change) -{ - gboolean accept = FALSE; - const char *uname = NULL; - - switch (find_expression_type(expr)) { - case nested_rule: - accept = pe_eval_expr(expr, rule_data, next_change); - break; - case attr_expr: - case loc_expr: - /* these expressions can never succeed if there is - * no node to compare with - */ - if (rule_data->node_hash != NULL) { - accept = pe__eval_attr_expr(expr, rule_data); - } - break; - - case time_expr: - accept = pe_test_date_expression(expr, rule_data->now, next_change); - break; - - case role_expr: - accept = pe__eval_role_expr(expr, rule_data); - break; - - case rsc_expr: - accept = pe__eval_rsc_expr(expr, rule_data); - break; - - case op_expr: - accept = pe__eval_op_expr(expr, rule_data); - break; - -#if ENABLE_VERSIONED_ATTRS - case version_expr: - if (rule_data->node_hash && - g_hash_table_lookup_extended(rule_data->node_hash, - CRM_ATTR_RA_VERSION, NULL, NULL)) { - accept = pe__eval_attr_expr(expr, rule_data); - } else { - // we are going to test it when we have ra-version - accept = TRUE; - } - break; -#endif - - default: - CRM_CHECK(FALSE /* bad type */ , return FALSE); - accept = FALSE; - } - if (rule_data->node_hash) { - uname = g_hash_table_lookup(rule_data->node_hash, CRM_ATTR_UNAME); - } - - crm_trace("Expression %s %s on %s", - ID(expr), accept ? "passed" : "failed", uname ? uname : "all nodes"); - return accept; -} - -gboolean -pe__eval_attr_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) -{ - gboolean accept = FALSE; - gboolean attr_allocated = FALSE; - int cmp = 0; - const char *h_val = NULL; - GHashTable *table = NULL; - - const char *op = NULL; - const char *type = NULL; - const char *attr = NULL; - const char *value = NULL; - const char *value_source = NULL; - - attr = crm_element_value(expr, XML_EXPR_ATTR_ATTRIBUTE); - op = crm_element_value(expr, XML_EXPR_ATTR_OPERATION); - value = crm_element_value(expr, XML_EXPR_ATTR_VALUE); - type = crm_element_value(expr, XML_EXPR_ATTR_TYPE); - value_source = crm_element_value(expr, XML_EXPR_ATTR_VALUE_SOURCE); - - if (attr == NULL || op == NULL) { - pe_err("Invalid attribute or operation in expression" - " (\'%s\' \'%s\' \'%s\')", crm_str(attr), crm_str(op), crm_str(value)); - return FALSE; - } - - if (rule_data->match_data) { - if (rule_data->match_data->re) { - char *resolved_attr = pe_expand_re_matches(attr, rule_data->match_data->re); - - if (resolved_attr) { - attr = (const char *) resolved_attr; - attr_allocated = TRUE; - } - } - - if (safe_str_eq(value_source, "param")) { - table = rule_data->match_data->params; - } else if (safe_str_eq(value_source, "meta")) { - table = rule_data->match_data->meta; - } - } - - if (table) { - const char *param_name = value; - const char *param_value = NULL; - - if (param_name && param_name[0]) { - if ((param_value = (const char *)g_hash_table_lookup(table, param_name))) { - value = param_value; - } - } - } - - if (rule_data->node_hash != NULL) { - h_val = (const char *)g_hash_table_lookup(rule_data->node_hash, attr); - } - - if (attr_allocated) { - free((char *)attr); - attr = NULL; - } - - if (value != NULL && h_val != NULL) { - if (type == NULL) { - if (safe_str_eq(op, "lt") - || safe_str_eq(op, "lte") - || safe_str_eq(op, "gt") - || safe_str_eq(op, "gte")) { - type = "number"; - - } else { - type = "string"; - } - crm_trace("Defaulting to %s based comparison for '%s' op", type, op); - } - - if (safe_str_eq(type, 
"string")) { - cmp = strcasecmp(h_val, value); - - } else if (safe_str_eq(type, "number")) { - int h_val_f = crm_parse_int(h_val, NULL); - int value_f = crm_parse_int(value, NULL); - - if (h_val_f < value_f) { - cmp = -1; - } else if (h_val_f > value_f) { - cmp = 1; - } else { - cmp = 0; - } - - } else if (safe_str_eq(type, "version")) { - cmp = compare_version(h_val, value); - - } - - } else if (value == NULL && h_val == NULL) { - cmp = 0; - } else if (value == NULL) { - cmp = 1; - } else { - cmp = -1; - } - - if (safe_str_eq(op, "defined")) { - if (h_val != NULL) { - accept = TRUE; - } - - } else if (safe_str_eq(op, "not_defined")) { - if (h_val == NULL) { - accept = TRUE; - } - - } else if (safe_str_eq(op, "eq")) { - if ((h_val == value) || cmp == 0) { - accept = TRUE; - } - - } else if (safe_str_eq(op, "ne")) { - if ((h_val == NULL && value != NULL) - || (h_val != NULL && value == NULL) - || cmp != 0) { - accept = TRUE; - } - - } else if (value == NULL || h_val == NULL) { - // The comparison is meaningless from this point on - accept = FALSE; - - } else if (safe_str_eq(op, "lt")) { - if (cmp < 0) { - accept = TRUE; - } - - } else if (safe_str_eq(op, "lte")) { - if (cmp <= 0) { - accept = TRUE; - } - - } else if (safe_str_eq(op, "gt")) { - if (cmp > 0) { - accept = TRUE; - } - - } else if (safe_str_eq(op, "gte")) { - if (cmp >= 0) { - accept = TRUE; - } - } - - return accept; -} - -int -pe__eval_date_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data, crm_time_t *next_change) -{ - crm_time_t *start = NULL; - crm_time_t *end = NULL; - const char *value = NULL; - const char *op = crm_element_value(expr, "operation"); - - xmlNode *duration_spec = NULL; - xmlNode *date_spec = NULL; - - // "undetermined" will also be returned for parsing errors - int rc = pcmk_rc_undetermined; - - crm_trace("Testing expression: %s", ID(expr)); - - duration_spec = first_named_child(expr, "duration"); - date_spec = first_named_child(expr, "date_spec"); - - value = crm_element_value(expr, "start"); - if (value != NULL) { - start = crm_time_new(value); - } - value = crm_element_value(expr, "end"); - if (value != NULL) { - end = crm_time_new(value); - } - - if (start != NULL && end == NULL && duration_spec != NULL) { - end = pe_parse_xml_duration(start, duration_spec); - } - - if ((op == NULL) || safe_str_eq(op, "in_range")) { - if ((start == NULL) && (end == NULL)) { - // in_range requires at least one of start or end - } else if ((start != NULL) && (crm_time_compare(rule_data->now, start) < 0)) { - rc = pcmk_rc_before_range; - crm_time_set_if_earlier(next_change, start); - } else if ((end != NULL) && (crm_time_compare(rule_data->now, end) > 0)) { - rc = pcmk_rc_after_range; - } else { - rc = pcmk_rc_within_range; - if (end && next_change) { - // Evaluation doesn't change until second after end - crm_time_add_seconds(end, 1); - crm_time_set_if_earlier(next_change, end); - } - } - - } else if (safe_str_eq(op, "date_spec")) { - rc = pe_cron_range_satisfied(rule_data->now, date_spec); - // @TODO set next_change appropriately - - } else if (safe_str_eq(op, "gt")) { - if (start == NULL) { - // gt requires start - } else if (crm_time_compare(rule_data->now, start) > 0) { - rc = pcmk_rc_within_range; - } else { - rc = pcmk_rc_before_range; - - // Evaluation doesn't change until second after start - crm_time_add_seconds(start, 1); - crm_time_set_if_earlier(next_change, start); - } - - } else if (safe_str_eq(op, "lt")) { - if (end == NULL) { - // lt requires end - } else if (crm_time_compare(rule_data->now, end) < 0) { 
- rc = pcmk_rc_within_range; - crm_time_set_if_earlier(next_change, end); - } else { - rc = pcmk_rc_after_range; - } - } - - crm_time_free(start); - crm_time_free(end); - return rc; -} - -gboolean -pe__eval_op_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) { - const char *name = crm_element_value(expr, XML_NVPAIR_ATTR_NAME); - const char *interval_s = crm_element_value(expr, XML_LRM_ATTR_INTERVAL); - guint interval; - - crm_trace("Testing op_defaults expression: %s", ID(expr)); - - if (rule_data->op_data == NULL) { - crm_trace("No operations data provided"); - return FALSE; - } - - interval = crm_parse_interval_spec(interval_s); - if (interval == 0 && errno != 0) { - crm_trace("Could not parse interval: %s", interval_s); - return FALSE; - } - - if (interval_s != NULL && interval != rule_data->op_data->interval) { - crm_trace("Interval doesn't match: %d != %d", interval, rule_data->op_data->interval); - return FALSE; - } - - if (!crm_str_eq(name, rule_data->op_data->op_name, TRUE)) { - crm_trace("Name doesn't match: %s != %s", name, rule_data->op_data->op_name); - return FALSE; - } - - return TRUE; -} - -gboolean -pe__eval_role_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) -{ - gboolean accept = FALSE; - const char *op = NULL; - const char *value = NULL; - - if (rule_data->role == RSC_ROLE_UNKNOWN) { - return accept; - } - - value = crm_element_value(expr, XML_EXPR_ATTR_VALUE); - op = crm_element_value(expr, XML_EXPR_ATTR_OPERATION); - - if (safe_str_eq(op, "defined")) { - if (rule_data->role > RSC_ROLE_STARTED) { - accept = TRUE; - } - - } else if (safe_str_eq(op, "not_defined")) { - if (rule_data->role < RSC_ROLE_SLAVE && rule_data->role > RSC_ROLE_UNKNOWN) { - accept = TRUE; - } - - } else if (safe_str_eq(op, "eq")) { - if (text2role(value) == rule_data->role) { - accept = TRUE; - } - - } else if (safe_str_eq(op, "ne")) { - // Test "ne" only with promotable clone roles - if (rule_data->role < RSC_ROLE_SLAVE && rule_data->role > RSC_ROLE_UNKNOWN) { - accept = FALSE; - - } else if (text2role(value) != rule_data->role) { - accept = TRUE; - } - } - return accept; -} - -gboolean -pe__eval_rsc_expr(xmlNodePtr expr, pe_rule_eval_data_t *rule_data) -{ - const char *class = crm_element_value(expr, XML_AGENT_ATTR_CLASS); - const char *provider = crm_element_value(expr, XML_AGENT_ATTR_PROVIDER); - const char *type = crm_element_value(expr, XML_EXPR_ATTR_TYPE); - - crm_trace("Testing rsc_defaults expression: %s", ID(expr)); - - if (rule_data->rsc_data == NULL) { - crm_trace("No resource data provided"); - return FALSE; - } - - if (class != NULL && - !crm_str_eq(class, rule_data->rsc_data->standard, TRUE)) { - crm_trace("Class doesn't match: %s != %s", class, rule_data->rsc_data->standard); - return FALSE; - } - - if ((provider == NULL && rule_data->rsc_data->provider != NULL) || - (provider != NULL && rule_data->rsc_data->provider == NULL) || - !crm_str_eq(provider, rule_data->rsc_data->provider, TRUE)) { - crm_trace("Provider doesn't match: %s != %s", provider, rule_data->rsc_data->provider); - return FALSE; - } - - if (type != NULL && - !crm_str_eq(type, rule_data->rsc_data->agent, TRUE)) { - crm_trace("Agent doesn't match: %s != %s", type, rule_data->rsc_data->agent); - return FALSE; - } - - return TRUE; -} - // Deprecated functions kept only for backward API compatibility gboolean test_ruleset(xmlNode *ruleset, GHashTable *node_hash, crm_time_t *now); gboolean test_rule(xmlNode *rule, GHashTable *node_hash, enum rsc_role_e role, @@ -1398,15 +1189,6 @@ 
unpack_instance_attributes(xmlNode *top, xmlNode *xml_obj, const char *set_name, const char *always_first, gboolean overwrite, crm_time_t *now) { - pe_rule_eval_data_t rule_data = { - .node_hash = node_hash, - .role = RSC_ROLE_UNKNOWN, - .now = now, - .match_data = NULL, - .rsc_data = NULL, - .op_data = NULL - }; - - unpack_nvpair_blocks(top, xml_obj, set_name, hash, always_first, - overwrite, &rule_data, NULL, unpack_attr_set); + unpack_nvpair_blocks(top, xml_obj, set_name, node_hash, hash, always_first, + overwrite, now, NULL, unpack_attr_set); } diff --git a/lib/pengine/status.c b/lib/pengine/status.c index ca34639..8dc5095 100644 --- a/lib/pengine/status.c +++ b/lib/pengine/status.c @@ -360,7 +360,7 @@ set_working_set_defaults(pe_working_set_t * data_set) data_set->order_id = 1; data_set->action_id = 1; - data_set->no_quorum_policy = no_quorum_stop; + data_set->no_quorum_policy = no_quorum_freeze; data_set->flags = 0x0ULL; set_bit(data_set->flags, pe_flag_stop_rsc_orphans); diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c index a480680..e5d40c4 100644 --- a/lib/pengine/unpack.c +++ b/lib/pengine/unpack.c @@ -108,7 +108,6 @@ pe_fence_node(pe_working_set_t * data_set, pe_node_t * node, */ node->details->remote_requires_reset = TRUE; set_bit(rsc->flags, pe_rsc_failed); - set_bit(rsc->flags, pe_rsc_stop); } } @@ -118,7 +117,6 @@ pe_fence_node(pe_working_set_t * data_set, pe_node_t * node, "and guest resource no longer exists", node->details->uname, reason); set_bit(node->details->remote_rsc->flags, pe_rsc_failed); - set_bit(node->details->remote_rsc->flags, pe_rsc_stop); } else if (pe__is_remote_node(node)) { pe_resource_t *rsc = node->details->remote_rsc; @@ -190,18 +188,9 @@ unpack_config(xmlNode * config, pe_working_set_t * data_set) const char *value = NULL; GHashTable *config_hash = crm_str_table_new(); - pe_rule_eval_data_t rule_data = { - .node_hash = NULL, - .role = RSC_ROLE_UNKNOWN, - .now = data_set->now, - .match_data = NULL, - .rsc_data = NULL, - .op_data = NULL - }; - data_set->config_hash = config_hash; - pe__unpack_dataset_nvpairs(config, XML_CIB_TAG_PROPSET, &rule_data, config_hash, + pe__unpack_dataset_nvpairs(config, XML_CIB_TAG_PROPSET, NULL, config_hash, CIB_OPTIONS_FIRST, FALSE, data_set); verify_pe_options(data_set->config_hash); @@ -268,9 +257,6 @@ unpack_config(xmlNode * config, pe_working_set_t * data_set) } else if (safe_str_eq(value, "freeze")) { data_set->no_quorum_policy = no_quorum_freeze; - } else if (safe_str_eq(value, "demote")) { - data_set->no_quorum_policy = no_quorum_demote; - } else if (safe_str_eq(value, "suicide")) { if (is_set(data_set->flags, pe_flag_stonith_enabled)) { int do_panic = 0; @@ -300,10 +286,6 @@ unpack_config(xmlNode * config, pe_working_set_t * data_set) case no_quorum_stop: crm_debug("On loss of quorum: Stop ALL resources"); break; - case no_quorum_demote: - crm_debug("On loss of quorum: " - "Demote promotable resources and stop other resources"); - break; case no_quorum_suicide: crm_notice("On loss of quorum: Fence all remaining nodes"); break; @@ -533,15 +515,6 @@ unpack_nodes(xmlNode * xml_nodes, pe_working_set_t * data_set) const char *type = NULL; const char *score = NULL; - pe_rule_eval_data_t rule_data = { - .node_hash = NULL, - .role = RSC_ROLE_UNKNOWN, - .now = data_set->now, - .match_data = NULL, - .rsc_data = NULL, - .op_data = NULL - }; - for (xml_obj = __xml_first_child_element(xml_nodes); xml_obj != NULL; xml_obj = __xml_next_element(xml_obj)) { @@ -574,7 +547,7 @@ unpack_nodes(xmlNode * xml_nodes, 
pe_working_set_t * data_set) handle_startup_fencing(data_set, new_node); add_node_attrs(xml_obj, new_node, FALSE, data_set); - pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_UTILIZATION, &rule_data, + pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_UTILIZATION, NULL, new_node->details->utilization, NULL, FALSE, data_set); @@ -1923,7 +1896,6 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node, */ if (pe__is_guest_node(node)) { set_bit(rsc->flags, pe_rsc_failed); - set_bit(rsc->flags, pe_rsc_stop); should_fence = TRUE; } else if (is_set(data_set->flags, pe_flag_stonith_enabled)) { @@ -1966,11 +1938,6 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node, /* nothing to do */ break; - case action_fail_demote: - set_bit(rsc->flags, pe_rsc_failed); - demote_action(rsc, node, FALSE); - break; - case action_fail_fence: /* treat it as if it is still running * but also mark the node as unclean @@ -2007,14 +1974,12 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node, case action_fail_recover: if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) { set_bit(rsc->flags, pe_rsc_failed); - set_bit(rsc->flags, pe_rsc_stop); stop_action(rsc, node, FALSE); } break; case action_fail_restart_container: set_bit(rsc->flags, pe_rsc_failed); - set_bit(rsc->flags, pe_rsc_stop); if (rsc->container && pe_rsc_is_bundled(rsc)) { /* A bundle's remote connection can run on a different node than @@ -2033,7 +1998,6 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node, case action_fail_reset_remote: set_bit(rsc->flags, pe_rsc_failed); - set_bit(rsc->flags, pe_rsc_stop); if (is_set(data_set->flags, pe_flag_stonith_enabled)) { tmpnode = NULL; if (rsc->is_remote_node) { @@ -2089,17 +2053,8 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node, } native_add_running(rsc, node, data_set); - switch (on_fail) { - case action_fail_ignore: - break; - case action_fail_demote: - case action_fail_block: - set_bit(rsc->flags, pe_rsc_failed); - break; - default: - set_bit(rsc->flags, pe_rsc_failed); - set_bit(rsc->flags, pe_rsc_stop); - break; + if (on_fail != action_fail_ignore) { + set_bit(rsc->flags, pe_rsc_failed); } } else if (rsc->clone_name && strchr(rsc->clone_name, ':') != NULL) { @@ -2271,7 +2226,7 @@ unpack_lrm_rsc_state(pe_node_t * node, xmlNode * rsc_entry, pe_working_set_t * d xmlNode *rsc_op = NULL; xmlNode *last_failure = NULL; - enum action_fail_response on_fail = action_fail_ignore; + enum action_fail_response on_fail = FALSE; enum rsc_role_e saved_role = RSC_ROLE_UNKNOWN; crm_trace("[%s] Processing %s on %s", @@ -2314,6 +2269,7 @@ unpack_lrm_rsc_state(pe_node_t * node, xmlNode * rsc_entry, pe_working_set_t * d /* process operations */ saved_role = rsc->role; + on_fail = action_fail_ignore; rsc->role = RSC_ROLE_UNKNOWN; sorted_op_list = g_list_sort(op_list, sort_op_by_callid); @@ -2622,7 +2578,6 @@ unpack_migrate_to_success(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, } else { /* Consider it failed here - forces a restart, prevents migration */ set_bit(rsc->flags, pe_rsc_failed); - set_bit(rsc->flags, pe_rsc_stop); clear_bit(rsc->flags, pe_rsc_allow_migrate); } } @@ -2797,96 +2752,6 @@ last_change_str(xmlNode *xml_op) return ((when_s && *when_s)? when_s : "unknown time"); } -/*! 
- * \internal - * \brief Compare two on-fail values - * - * \param[in] first One on-fail value to compare - * \param[in] second The other on-fail value to compare - * - * \return A negative number if second is more severe than first, zero if they - * are equal, or a positive number if first is more severe than second. - * \note This is only needed until the action_fail_response values can be - * renumbered at the next API compatibility break. - */ -static int -cmp_on_fail(enum action_fail_response first, enum action_fail_response second) -{ - switch (first) { - case action_fail_demote: - switch (second) { - case action_fail_ignore: - return 1; - case action_fail_demote: - return 0; - default: - return -1; - } - break; - - case action_fail_reset_remote: - switch (second) { - case action_fail_ignore: - case action_fail_demote: - case action_fail_recover: - return 1; - case action_fail_reset_remote: - return 0; - default: - return -1; - } - break; - - case action_fail_restart_container: - switch (second) { - case action_fail_ignore: - case action_fail_demote: - case action_fail_recover: - case action_fail_reset_remote: - return 1; - case action_fail_restart_container: - return 0; - default: - return -1; - } - break; - - default: - break; - } - switch (second) { - case action_fail_demote: - return (first == action_fail_ignore)? -1 : 1; - - case action_fail_reset_remote: - switch (first) { - case action_fail_ignore: - case action_fail_demote: - case action_fail_recover: - return -1; - default: - return 1; - } - break; - - case action_fail_restart_container: - switch (first) { - case action_fail_ignore: - case action_fail_demote: - case action_fail_recover: - case action_fail_reset_remote: - return -1; - default: - return 1; - } - break; - - default: - break; - } - return first - second; -} - static void unpack_rsc_op_failure(pe_resource_t * rsc, pe_node_t * node, int rc, xmlNode * xml_op, xmlNode ** last_failure, enum action_fail_response * on_fail, pe_working_set_t * data_set) @@ -2946,7 +2811,10 @@ unpack_rsc_op_failure(pe_resource_t * rsc, pe_node_t * node, int rc, xmlNode * x } action = custom_action(rsc, strdup(key), task, NULL, TRUE, FALSE, data_set); - if (cmp_on_fail(*on_fail, action->on_fail) < 0) { + if ((action->on_fail <= action_fail_fence && *on_fail < action->on_fail) || + (action->on_fail == action_fail_reset_remote && *on_fail <= action_fail_recover) || + (action->on_fail == action_fail_restart_container && *on_fail <= action_fail_recover) || + (*on_fail == action_fail_restart_container && action->on_fail >= action_fail_migrate)) { pe_rsc_trace(rsc, "on-fail %s -> %s for %s (%s)", fail2text(*on_fail), fail2text(action->on_fail), action->uuid, key); *on_fail = action->on_fail; @@ -3421,7 +3289,7 @@ int pe__target_rc_from_xml(xmlNode *xml_op) static enum action_fail_response get_action_on_fail(pe_resource_t *rsc, const char *key, const char *task, pe_working_set_t * data_set) { - enum action_fail_response result = action_fail_recover; + int result = action_fail_recover; pe_action_t *action = custom_action(rsc, strdup(key), task, NULL, TRUE, FALSE, data_set); result = action->on_fail; @@ -3472,11 +3340,7 @@ update_resource_state(pe_resource_t * rsc, pe_node_t * node, xmlNode * xml_op, c clear_past_failure = TRUE; } else if (safe_str_eq(task, CRMD_ACTION_DEMOTE)) { - - if (*on_fail == action_fail_demote) { - // Demote clears an error only if on-fail=demote - clear_past_failure = TRUE; - } + /* Demote from Master does not clear an error */ rsc->role = RSC_ROLE_SLAVE; } else if 
(safe_str_eq(task, CRMD_ACTION_MIGRATED)) { @@ -3504,7 +3368,6 @@ update_resource_state(pe_resource_t * rsc, pe_node_t * node, xmlNode * xml_op, c case action_fail_block: case action_fail_ignore: - case action_fail_demote: case action_fail_recover: case action_fail_restart_container: *on_fail = action_fail_ignore; @@ -3765,7 +3628,6 @@ unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, * that, ensure the remote connection is considered failed. */ set_bit(node->details->remote_rsc->flags, pe_rsc_failed); - set_bit(node->details->remote_rsc->flags, pe_rsc_stop); } // fall through @@ -3795,8 +3657,7 @@ unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op, record_failed_op(xml_op, node, rsc, data_set); - if ((failure_strategy == action_fail_restart_container) - && cmp_on_fail(*on_fail, action_fail_recover) <= 0) { + if (failure_strategy == action_fail_restart_container && *on_fail <= action_fail_recover) { *on_fail = failure_strategy; } @@ -3837,15 +3698,6 @@ add_node_attrs(xmlNode *xml_obj, pe_node_t *node, bool overwrite, { const char *cluster_name = NULL; - pe_rule_eval_data_t rule_data = { - .node_hash = NULL, - .role = RSC_ROLE_UNKNOWN, - .now = data_set->now, - .match_data = NULL, - .rsc_data = NULL, - .op_data = NULL - }; - g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_UNAME), strdup(node->details->uname)); @@ -3867,7 +3719,7 @@ add_node_attrs(xmlNode *xml_obj, pe_node_t *node, bool overwrite, strdup(cluster_name)); } - pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_ATTR_SETS, &rule_data, + pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_ATTR_SETS, NULL, node->details->attrs, NULL, overwrite, data_set); if (pe_node_attribute_raw(node, CRM_ATTR_SITE_NAME) == NULL) { diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c index ccbf3a0..b18b7e4 100644 --- a/lib/pengine/utils.c +++ b/lib/pengine/utils.c @@ -13,6 +13,7 @@ #include #include +#include #include #include @@ -22,8 +23,8 @@ extern xmlNode *get_object_root(const char *object_type, xmlNode * the_root); void print_str_str(gpointer key, gpointer value, gpointer user_data); gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data); -static void unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container, - pe_working_set_t * data_set, guint interval_ms); +void unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container, + pe_working_set_t * data_set); static xmlNode *find_rsc_op_entry_helper(pe_resource_t * rsc, const char *key, gboolean include_disabled); @@ -213,8 +214,53 @@ pe__node_list2table(GList *list) gint sort_node_uname(gconstpointer a, gconstpointer b) { - return pcmk_numeric_strcasecmp(((const pe_node_t *) a)->details->uname, - ((const pe_node_t *) b)->details->uname); + const char *name_a = ((const pe_node_t *) a)->details->uname; + const char *name_b = ((const pe_node_t *) b)->details->uname; + + while (*name_a && *name_b) { + if (isdigit(*name_a) && isdigit(*name_b)) { + // If node names contain a number, sort numerically + + char *end_a = NULL; + char *end_b = NULL; + long num_a = strtol(name_a, &end_a, 10); + long num_b = strtol(name_b, &end_b, 10); + + // allow ordering e.g. 
007 > 7 + size_t len_a = end_a - name_a; + size_t len_b = end_b - name_b; + + if (num_a < num_b) { + return -1; + } else if (num_a > num_b) { + return 1; + } else if (len_a < len_b) { + return -1; + } else if (len_a > len_b) { + return 1; + } + name_a = end_a; + name_b = end_b; + } else { + // Compare non-digits case-insensitively + int lower_a = tolower(*name_a); + int lower_b = tolower(*name_b); + + if (lower_a < lower_b) { + return -1; + } else if (lower_a > lower_b) { + return 1; + } + ++name_a; + ++name_b; + } + } + if (!*name_a && *name_b) { + return -1; + } else if (*name_a && !*name_b) { + return 1; + } + return 0; } /*! @@ -435,31 +481,6 @@ sort_rsc_priority(gconstpointer a, gconstpointer b) return 0; } -static enum pe_quorum_policy -effective_quorum_policy(pe_resource_t *rsc, pe_working_set_t *data_set) -{ - enum pe_quorum_policy policy = data_set->no_quorum_policy; - - if (is_set(data_set->flags, pe_flag_have_quorum)) { - policy = no_quorum_ignore; - - } else if (data_set->no_quorum_policy == no_quorum_demote) { - switch (rsc->role) { - case RSC_ROLE_MASTER: - case RSC_ROLE_SLAVE: - if (rsc->next_role > RSC_ROLE_SLAVE) { - rsc->next_role = RSC_ROLE_SLAVE; - } - policy = no_quorum_ignore; - break; - default: - policy = no_quorum_stop; - break; - } - } - return policy; -} - pe_action_t * custom_action(pe_resource_t * rsc, char *key, const char *task, pe_node_t * on_node, gboolean optional, gboolean save_action, @@ -547,13 +568,9 @@ custom_action(pe_resource_t * rsc, char *key, const char *task, } if (rsc != NULL) { - guint interval_ms = 0; - action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE); - parse_op_key(key, NULL, NULL, &interval_ms); - unpack_operation(action, action->op_entry, rsc->container, data_set, - interval_ms); + unpack_operation(action, action->op_entry, rsc->container, data_set); if (save_action) { rsc->actions = g_list_prepend(rsc->actions, action); @@ -572,7 +589,6 @@ custom_action(pe_resource_t * rsc, char *key, const char *task, if (rsc != NULL) { enum action_tasks a_task = text2task(action->task); - enum pe_quorum_policy quorum_policy = effective_quorum_policy(rsc, data_set); int warn_level = LOG_TRACE; if (save_action) { @@ -581,19 +597,10 @@ custom_action(pe_resource_t * rsc, char *key, const char *task, if (is_set(action->flags, pe_action_have_node_attrs) == FALSE && action->node != NULL && action->op_entry != NULL) { - pe_rule_eval_data_t rule_data = { - .node_hash = action->node->details->attrs, - .role = RSC_ROLE_UNKNOWN, - .now = data_set->now, - .match_data = NULL, - .rsc_data = NULL, - .op_data = NULL - }; - pe_set_action_bit(action, pe_action_have_node_attrs); pe__unpack_dataset_nvpairs(action->op_entry, XML_TAG_ATTR_SETS, - &rule_data, action->extra, NULL, - FALSE, data_set); + action->node->details->attrs, + action->extra, NULL, FALSE, data_set); } if (is_set(action->flags, pe_action_pseudo)) { @@ -655,11 +662,13 @@ custom_action(pe_resource_t * rsc, char *key, const char *task, crm_trace("Action %s requires only stonith", action->uuid); action->runnable = TRUE; #endif - } else if (quorum_policy == no_quorum_stop) { + } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE + && data_set->no_quorum_policy == no_quorum_stop) { pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, NULL, "no quorum", pe_action_runnable, TRUE); crm_debug("%s\t%s (cancelled : quorum)", action->node->details->uname, action->uuid); - } else if (quorum_policy == no_quorum_freeze) { + } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE + && 
data_set->no_quorum_policy == no_quorum_freeze) { pe_rsc_trace(rsc, "Check resource is already active: %s %s %s %s", rsc->id, action->uuid, role2text(rsc->next_role), role2text(rsc->role)); if (rsc->fns->active(rsc, TRUE) == FALSE || rsc->next_role > rsc->role) { pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, NULL, "quorum freeze", pe_action_runnable, TRUE); @@ -694,36 +703,25 @@ custom_action(pe_resource_t * rsc, char *key, const char *task, return action; } -static bool -valid_stop_on_fail(const char *value) -{ - return safe_str_neq(value, "standby") - && safe_str_neq(value, "demote") - && safe_str_neq(value, "stop"); -} - static const char * unpack_operation_on_fail(pe_action_t * action) { - const char *name = NULL; - const char *role = NULL; - const char *on_fail = NULL; - const char *interval_spec = NULL; - const char *enabled = NULL; const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL); - if (safe_str_eq(action->task, CRMD_ACTION_STOP) - && !valid_stop_on_fail(value)) { - + if (safe_str_eq(action->task, CRMD_ACTION_STOP) && safe_str_eq(value, "standby")) { pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s stop " - "action to default value because '%s' is not " - "allowed for stop", action->rsc->id, value); + "action to default value because 'standby' is not " + "allowed for stop", action->rsc->id); return NULL; - } else if (safe_str_eq(action->task, CRMD_ACTION_DEMOTE) && !value) { /* demote on_fail defaults to master monitor value if present */ xmlNode *operation = NULL; + const char *name = NULL; + const char *role = NULL; + const char *on_fail = NULL; + const char *interval_spec = NULL; + const char *enabled = NULL; CRM_CHECK(action->rsc != NULL, return NULL); @@ -746,31 +744,12 @@ unpack_operation_on_fail(pe_action_t * action) continue; } else if (crm_parse_interval_spec(interval_spec) == 0) { continue; - } else if (safe_str_eq(on_fail, "demote")) { - continue; } value = on_fail; } } else if (safe_str_eq(action->task, CRM_OP_LRM_DELETE)) { value = "ignore"; - - } else if (safe_str_eq(value, "demote")) { - name = crm_element_value(action->op_entry, "name"); - role = crm_element_value(action->op_entry, "role"); - on_fail = crm_element_value(action->op_entry, XML_OP_ATTR_ON_FAIL); - interval_spec = crm_element_value(action->op_entry, - XML_LRM_ATTR_INTERVAL); - - if (safe_str_neq(name, CRMD_ACTION_PROMOTE) - && (safe_str_neq(name, CRMD_ACTION_STATUS) - || safe_str_neq(role, "Master") - || (crm_parse_interval_spec(interval_spec) == 0))) { - pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s %s " - "action to default value because 'demote' is not " - "allowed for it", action->rsc->id, name); - return NULL; - } } return value; @@ -894,15 +873,6 @@ pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set const char *timeout = NULL; int timeout_ms = 0; - pe_rule_eval_data_t rule_data = { - .node_hash = NULL, - .role = RSC_ROLE_UNKNOWN, - .now = data_set->now, - .match_data = NULL, - .rsc_data = NULL, - .op_data = NULL - }; - for (child = first_named_child(rsc->ops_xml, XML_ATTR_OP); child != NULL; child = crm_next_same_xml(child)) { if (safe_str_eq(action, crm_element_value(child, XML_NVPAIR_ATTR_NAME))) { @@ -914,7 +884,7 @@ pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set if (timeout == NULL && data_set->op_defaults) { GHashTable *action_meta = crm_str_table_new(); pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, - &rule_data, action_meta, NULL, FALSE, 
data_set); + NULL, action_meta, NULL, FALSE, data_set); timeout = g_hash_table_lookup(action_meta, XML_ATTR_TIMEOUT); } @@ -975,49 +945,29 @@ unpack_versioned_meta(xmlNode *versioned_meta, xmlNode *xml_obj, * and start delay values as integer milliseconds), requirements, and * failure policy. * - * \param[in,out] action Action to unpack into - * \param[in] xml_obj Operation XML (or NULL if all defaults) - * \param[in] container Resource that contains affected resource, if any - * \param[in] data_set Cluster state - * \param[in] interval_ms How frequently to perform the operation + * \param[in,out] action Action to unpack into + * \param[in] xml_obj Operation XML (or NULL if all defaults) + * \param[in] container Resource that contains affected resource, if any + * \param[in] data_set Cluster state */ -static void +void unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * container, - pe_working_set_t * data_set, guint interval_ms) + pe_working_set_t * data_set) { + guint interval_ms = 0; int timeout = 0; char *value_ms = NULL; const char *value = NULL; - const char *field = XML_LRM_ATTR_INTERVAL; + const char *field = NULL; char *default_timeout = NULL; #if ENABLE_VERSIONED_ATTRS pe_rsc_action_details_t *rsc_details = NULL; #endif - pe_rsc_eval_data_t rsc_rule_data = { - .standard = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_CLASS), - .provider = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_PROVIDER), - .agent = crm_element_value(action->rsc->xml, XML_EXPR_ATTR_TYPE) - }; - - pe_op_eval_data_t op_rule_data = { - .op_name = action->task, - .interval = interval_ms - }; - - pe_rule_eval_data_t rule_data = { - .node_hash = NULL, - .role = RSC_ROLE_UNKNOWN, - .now = data_set->now, - .match_data = NULL, - .rsc_data = &rsc_rule_data, - .op_data = &op_rule_data - }; - CRM_CHECK(action && action->rsc, return); // Cluster-wide - pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, &rule_data, + pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, NULL, action->meta, NULL, FALSE, data_set); // Probe timeouts default differently, so handle timeout default later @@ -1031,20 +981,19 @@ unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * contai xmlAttrPtr xIter = NULL; // take precedence over defaults - pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_META_SETS, &rule_data, + pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_META_SETS, NULL, action->meta, NULL, TRUE, data_set); #if ENABLE_VERSIONED_ATTRS rsc_details = pe_rsc_action_details(action); - - pe_eval_versioned_attributes(data_set->input, xml_obj, - XML_TAG_ATTR_SETS, &rule_data, - rsc_details->versioned_parameters, - NULL); - pe_eval_versioned_attributes(data_set->input, xml_obj, - XML_TAG_META_SETS, &rule_data, - rsc_details->versioned_meta, - NULL); + pe_unpack_versioned_attributes(data_set->input, xml_obj, + XML_TAG_ATTR_SETS, NULL, + rsc_details->versioned_parameters, + data_set->now, NULL); + pe_unpack_versioned_attributes(data_set->input, xml_obj, + XML_TAG_META_SETS, NULL, + rsc_details->versioned_meta, + data_set->now, NULL); #endif /* Anything set as an XML property has highest precedence. 
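For reference, the utils.c hunks above also restore an open-coded natural-order comparison in sort_node_uname(), in place of the pcmk_numeric_strcasecmp() call: digit runs are compared numerically, other characters case-insensitively, and equal numbers are ordered by digit-run length so that "7" sorts before "007". A minimal standalone sketch of that comparison, using plain strings and hypothetical node names so it compiles on its own (illustration only, not patch content):

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

/* Standalone sketch of the natural-order sort restored in sort_node_uname()
 * (plain strings instead of pe_node_t; illustration only, not patch content).
 */
static int
natural_cmp(const char *name_a, const char *name_b)
{
    while (*name_a && *name_b) {
        if (isdigit((unsigned char) *name_a)
            && isdigit((unsigned char) *name_b)) {
            /* Compare a run of digits in both names numerically */
            char *end_a = NULL;
            char *end_b = NULL;
            long num_a = strtol(name_a, &end_a, 10);
            long num_b = strtol(name_b, &end_b, 10);

            /* Equal values are ordered by run length, e.g. "7" before "007" */
            size_t len_a = (size_t) (end_a - name_a);
            size_t len_b = (size_t) (end_b - name_b);

            if (num_a != num_b) {
                return (num_a < num_b)? -1 : 1;
            } else if (len_a != len_b) {
                return (len_a < len_b)? -1 : 1;
            }
            name_a = end_a;
            name_b = end_b;

        } else {
            /* Compare anything else case-insensitively, one char at a time */
            int lower_a = tolower((unsigned char) *name_a);
            int lower_b = tolower((unsigned char) *name_b);

            if (lower_a != lower_b) {
                return (lower_a < lower_b)? -1 : 1;
            }
            ++name_a;
            ++name_b;
        }
    }
    /* A name that is a prefix of the other sorts first */
    if (!*name_a && *name_b) {
        return -1;
    } else if (*name_a && !*name_b) {
        return 1;
    }
    return 0;
}

int
main(void)
{
    /* Hypothetical node names: prints -1 -1 -1 */
    printf("%d %d %d\n",
           natural_cmp("node7", "node007"),   /* equal value, shorter run first */
           natural_cmp("node007", "node10"),  /* 7 < 10 numerically */
           natural_cmp("node-a", "NODE-B"));  /* case-insensitive: a < b */
    return 0;
}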
@@ -1061,11 +1010,23 @@ unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * contai g_hash_table_remove(action->meta, "id"); // Normalize interval to milliseconds + field = XML_LRM_ATTR_INTERVAL; + value = g_hash_table_lookup(action->meta, field); + if (value != NULL) { + interval_ms = crm_parse_interval_spec(value); + + } else if ((xml_obj == NULL) && !strcmp(action->task, RSC_STATUS)) { + /* An orphaned recurring monitor will not have any XML. However, we + * want the interval to be set, so the action can be properly detected + * as a recurring monitor. Parse it from the key in this case. + */ + parse_op_key(action->uuid, NULL, NULL, &interval_ms); + } if (interval_ms > 0) { value_ms = crm_strdup_printf("%u", interval_ms); g_hash_table_replace(action->meta, strdup(field), value_ms); - } else if (g_hash_table_lookup(action->meta, field) != NULL) { + } else if (value) { g_hash_table_remove(action->meta, field); } @@ -1169,10 +1130,6 @@ unpack_operation(pe_action_t * action, xmlNode * xml_obj, pe_resource_t * contai value = NULL; } - } else if (safe_str_eq(value, "demote")) { - action->on_fail = action_fail_demote; - value = "demote instance"; - } else { pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value); value = NULL; @@ -2736,14 +2693,14 @@ pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set) */ void pe__unpack_dataset_nvpairs(xmlNode *xml_obj, const char *set_name, - pe_rule_eval_data_t *rule_data, GHashTable *hash, + GHashTable *node_hash, GHashTable *hash, const char *always_first, gboolean overwrite, pe_working_set_t *data_set) { crm_time_t *next_change = crm_time_new_undefined(); - pe_eval_nvpairs(data_set->input, xml_obj, set_name, rule_data, hash, - always_first, overwrite, next_change); + pe_unpack_nvpairs(data_set->input, xml_obj, set_name, node_hash, hash, + always_first, overwrite, data_set->now, next_change); if (crm_time_is_defined(next_change)) { time_t recheck = (time_t) crm_time_get_seconds_since_epoch(next_change); diff --git a/maint/mocked/based.c b/maint/mocked/based.c index 2cfad9f..0d5fd2d 100644 --- a/maint/mocked/based.c +++ b/maint/mocked/based.c @@ -23,7 +23,7 @@ #include #if 0 -#include "crm/common/ipc_internal.h" /* pcmk__client_t */ +#include "crm/common/ipcs_internal.h" /* pcmk__client_t */ #include "crm/common/xml.h" /* crm_xml_add */ #endif #include "crm/msg_xml.h" /* F_SUBTYPE */ diff --git a/maint/mocked/based.h b/maint/mocked/based.h index c214c08..ef1dc95 100644 --- a/maint/mocked/based.h +++ b/maint/mocked/based.h @@ -11,7 +11,7 @@ #include /* size_t */ #include /* bool */ -#include /* pcmk__client_t */ +#include /* pcmk__client_t */ struct module_s; diff --git a/tools/Makefile.am b/tools/Makefile.am index 4609b0f..c822a8c 100644 --- a/tools/Makefile.am +++ b/tools/Makefile.am @@ -12,7 +12,7 @@ if BUILD_SYSTEMD systemdsystemunit_DATA = crm_mon.service endif -noinst_HEADERS = crm_mon.h crm_resource.h +noinst_HEADERS = crm_mon.h crm_resource.h crm_resource_controller.h pcmkdir = $(datadir)/$(PACKAGE) pcmk_DATA = report.common report.collector @@ -115,6 +115,7 @@ crm_attribute_LDADD = $(top_builddir)/lib/cluster/libcrmcluster.la \ crm_resource_SOURCES = crm_resource.c \ crm_resource_ban.c \ + crm_resource_controller.c \ crm_resource_print.c \ crm_resource_runtime.c crm_resource_LDADD = $(top_builddir)/lib/pengine/libpe_rules.la \ diff --git a/tools/cibsecret.in b/tools/cibsecret.in index 568833c..9b74ba3 100644 --- a/tools/cibsecret.in +++ b/tools/cibsecret.in @@ -162,34 +162,28 @@ check_env() { } # 
This must be called (and return success) before calling $rsh or $rcp_to_from -get_live_peers() { - # Get local node name - GLP_LOCAL_NODE="$(crm_node -n)" - [ $? -eq 0 ] || fatal "couldn't get local node name" - - # Get a list of all other cluster nodes - GLP_ALL_PEERS="$(crm_node -l)" - [ $? -eq 0 ] || fatal "couldn't determine cluster nodes" - GLP_ALL_PEERS="$(echo "$GLP_ALL_PEERS" | awk '{print $2}' | grep -v "^${GLP_LOCAL_NODE}$")" +get_live_nodes() { + # Get a list of all cluster nodes + GLN_ALL_NODES="$(crm_node -l | awk '{print $2}' | grep -v "$(uname -n)")" # Make a list of those that respond to pings if [ "$(id -u)" = "0" ] && which fping >/dev/null 2>&1; then - LIVE_NODES=$(fping -a $GLP_ALL_PEERS 2>/dev/null) + LIVE_NODES=$(fping -a $GLN_ALL_NODES 2>/dev/null) else LIVE_NODES="" - for GLP_NODE in $GLP_ALL_PEERS; do \ - ping -c 2 -q "$GLP_NODE" >/dev/null 2>&1 && - LIVE_NODES="$LIVE_NODES $GLP_NODE" + for GLN_NODE in $GLN_ALL_NODES; do \ + ping -c 2 -q "$GLN_NODE" >/dev/null 2>&1 && + LIVE_NODES="$LIVE_NODES $GLN_NODE" done fi # Warn the user about any that didn't respond to pings - GLP_DOWN="$( (for GLP_NODE in $LIVE_NODES $GLP_ALL_PEERS; do echo "$GLP_NODE"; done) | sort | uniq -u)" - if [ "$(echo "$GLP_DOWN" | wc -w)" = "1" ]; then - warn "node $GLP_DOWN is down" + GLN_DOWN="$( (for GLN_NODE in $LIVE_NODES $GLN_ALL_NODES; do echo "$GLN_NODE"; done) | sort | uniq -u)" + if [ "$(echo "$GLN_DOWN" | wc -w)" = "1" ]; then + warn "node $GLN_DOWN is down" warn "you'll need to update it using \"$PROG sync\" later" - elif [ -n "$GLP_DOWN" ]; then - warn "nodes $(echo "$GLP_DOWN" | tr '\n' ' ')are down" + elif [ -n "$GLN_DOWN" ]; then + warn "nodes $(echo "$GLN_DOWN" | tr '\n' ' ')are down" warn "you'll need to update them using \"$PROG sync\" later" fi @@ -241,7 +235,7 @@ scp_fun() { # TODO: this procedure should be replaced with csync2 # provided that csync2 has already been configured sync_files() { - get_live_peers || return + get_live_nodes || return info "syncing $LRM_CIBSECRETS to $(echo "$LIVE_NODES" | tr '\n' ' ') ..." $rsh rm -rf "$LRM_CIBSECRETS" && $rsh mkdir -p "$(dirname "$LRM_CIBSECRETS")" && @@ -250,7 +244,7 @@ sync_files() { sync_one() { SO_FILE="$1" - get_live_peers || return + get_live_nodes || return info "syncing $SO_FILE to $(echo "$LIVE_NODES" | tr '\n' ' ') ..." 
$rsh mkdir -p "$(dirname "$SO_FILE")" && if [ -f "$SO_FILE" ]; then diff --git a/tools/crm_mon.c b/tools/crm_mon.c index 2fe9fb4..6c50447 100644 --- a/tools/crm_mon.c +++ b/tools/crm_mon.c @@ -1350,12 +1350,6 @@ main(int argc, char **argv) options.mon_ops |= mon_op_print_timing | mon_op_inactive_resources; } - if ((output_format == mon_output_html || output_format == mon_output_cgi) && - out->dest != stdout) { - pcmk__html_add_header("meta", "http-equiv", "refresh", "content", - crm_itoa(options.reconnect_msec/1000), NULL); - } - crm_info("Starting %s", crm_system_name); if (cib) { @@ -2024,10 +2018,6 @@ mon_refresh_display(gpointer user_data) break; } - if (options.daemonize) { - out->reset(out); - } - stonith_history_free(stonith_history); stonith_history = NULL; pe_reset_working_set(mon_data_set); @@ -2116,6 +2106,15 @@ clean_up_connections(void) } } +static void +handle_html_output(crm_exit_t exit_code) { + xmlNodePtr html = NULL; + + pcmk__html_add_header(html, "meta", "http-equiv", "refresh", "content", + crm_itoa(options.reconnect_msec/1000), NULL); + out->finish(out, exit_code, true, (void **) &html); +} + /* * De-init ncurses, disconnect from the CIB manager, disconnect fencing, * deallocate memory and show usage-message if requested. @@ -2184,12 +2183,16 @@ clean_up(crm_exit_t exit_code) * crm_mon to be able to do so. */ if (out != NULL) { - if (options.daemonize) { - out->dest = freopen(NULL, "w", out->dest); - CRM_ASSERT(out->dest != NULL); - } + switch (output_format) { + case mon_output_cgi: + case mon_output_html: + handle_html_output(exit_code); + break; - out->finish(out, exit_code, true, NULL); + default: + out->finish(out, exit_code, true, NULL); + break; + } pcmk__output_free(out); pcmk__unregister_formats(); diff --git a/tools/crm_mon_curses.c b/tools/crm_mon_curses.c index b1129ba..896fd98 100644 --- a/tools/crm_mon_curses.c +++ b/tools/crm_mon_curses.c @@ -46,7 +46,6 @@ curses_free_priv(pcmk__output_t *out) { g_queue_free(priv->parent_q); free(priv); - out->priv = NULL; } static bool diff --git a/tools/crm_node.c b/tools/crm_node.c index 146454d..34511f3 100644 --- a/tools/crm_node.c +++ b/tools/crm_node.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #define SUMMARY "crm_node - Tool for displaying low-level node information" @@ -40,6 +39,7 @@ gboolean command_cb(const gchar *option_name, const gchar *optarg, gpointer data gboolean name_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); gboolean remove_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error); +static char *pid_s = NULL; static GMainLoop *mainloop = NULL; static crm_exit_t exit_code = CRM_EX_OK; @@ -130,181 +130,205 @@ remove_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError * return TRUE; } -static gint -sort_node(gconstpointer a, gconstpointer b) +/*! + * \internal + * \brief Exit crm_node + * Clean up memory, and either quit mainloop (if running) or exit + * + * \param[in] value Exit status + */ +static void +crm_node_exit(crm_exit_t value) { - const pcmk_controld_api_node_t *node_a = a; - const pcmk_controld_api_node_t *node_b = b; + if (pid_s) { + free(pid_s); + pid_s = NULL; + } + + exit_code = value; - return pcmk_numeric_strcasecmp((node_a->uname? node_a->uname : ""), - (node_b->uname? 
node_b->uname : "")); + if (mainloop && g_main_loop_is_running(mainloop)) { + g_main_loop_quit(mainloop); + } else { + crm_exit(exit_code); + } } static void -controller_event_cb(pcmk_ipc_api_t *controld_api, - enum pcmk_ipc_event event_type, crm_exit_t status, - void *event_data, void *user_data) +exit_disconnect(gpointer user_data) { - pcmk_controld_api_reply_t *reply = event_data; + fprintf(stderr, "error: Lost connection to cluster\n"); + crm_node_exit(CRM_EX_DISCONNECT); +} - switch (event_type) { - case pcmk_ipc_event_disconnect: - if (exit_code == CRM_EX_DISCONNECT) { // Unexpected - fprintf(stderr, "error: Lost connection to controller\n"); - } - goto done; - break; +typedef int (*ipc_dispatch_fn) (const char *buffer, ssize_t length, + gpointer userdata); - case pcmk_ipc_event_reply: - break; +static crm_ipc_t * +new_mainloop_for_ipc(const char *system, ipc_dispatch_fn dispatch) +{ + mainloop_io_t *source = NULL; + crm_ipc_t *ipc = NULL; - default: - return; + struct ipc_client_callbacks ipc_callbacks = { + .dispatch = dispatch, + .destroy = exit_disconnect + }; + + mainloop = g_main_loop_new(NULL, FALSE); + source = mainloop_add_ipc_client(system, G_PRIORITY_DEFAULT, 0, + NULL, &ipc_callbacks); + ipc = mainloop_get_ipc_client(source); + if (ipc == NULL) { + fprintf(stderr, + "error: Could not connect to cluster (is it running?)\n"); + crm_node_exit(CRM_EX_DISCONNECT); } + return ipc; +} - if (status != CRM_EX_OK) { - fprintf(stderr, "error: Bad reply from controller: %s\n", - crm_exit_str(status)); +static void +run_mainloop_and_exit(void) +{ + g_main_loop_run(mainloop); + g_main_loop_unref(mainloop); + mainloop = NULL; + crm_node_exit(exit_code); +} + +static int +send_controller_hello(crm_ipc_t *controller) +{ + xmlNode *hello = NULL; + int rc; + + pid_s = pcmk__getpid_s(); + hello = create_hello_message(pid_s, crm_system_name, "1", "0"); + rc = crm_ipc_send(controller, hello, 0, 0, NULL); + free_xml(hello); + return (rc < 0)? rc : 0; +} + +static int +send_node_info_request(crm_ipc_t *controller, uint32_t nodeid) +{ + xmlNode *ping = NULL; + int rc; + + ping = create_request(CRM_OP_NODE_INFO, NULL, NULL, CRM_SYSTEM_CRMD, + crm_system_name, pid_s); + if (nodeid > 0) { + crm_xml_add_int(ping, XML_ATTR_ID, nodeid); + } + rc = crm_ipc_send(controller, ping, 0, 0, NULL); + free_xml(ping); + return (rc < 0)? 
rc : 0; +} + +static int +dispatch_controller(const char *buffer, ssize_t length, gpointer userdata) +{ + xmlNode *message = string2xml(buffer); + xmlNode *data = NULL; + const char *value = NULL; + + if (message == NULL) { + fprintf(stderr, "error: Could not understand reply from controller\n"); + crm_node_exit(CRM_EX_PROTOCOL); + return 0; + } + crm_log_xml_trace(message, "controller reply"); + + exit_code = CRM_EX_PROTOCOL; + + // Validate reply + value = crm_element_value(message, F_CRM_MSG_TYPE); + if (safe_str_neq(value, XML_ATTR_RESPONSE)) { + fprintf(stderr, "error: Message from controller was not a reply\n"); + goto done; + } + value = crm_element_value(message, XML_ATTR_REFERENCE); + if (value == NULL) { + fprintf(stderr, "error: Controller reply did not specify original message\n"); + goto done; + } + data = get_message_xml(message, F_CRM_DATA); + if (data == NULL) { + fprintf(stderr, "error: Controller reply did not contain any data\n"); goto done; } - // Parse desired info from reply and display to user switch (options.command) { case 'i': - if (reply->reply_type != pcmk_controld_reply_info) { - fprintf(stderr, - "error: Unknown reply type %d from controller\n", - reply->reply_type); - goto done; + value = crm_element_value(data, XML_ATTR_ID); + if (value == NULL) { + fprintf(stderr, "error: Controller reply did not contain node ID\n"); + } else { + printf("%s\n", value); + exit_code = CRM_EX_OK; } - if (reply->data.node_info.id == 0) { - fprintf(stderr, - "error: Controller reply did not contain node ID\n"); - exit_code = CRM_EX_PROTOCOL; - goto done; - } - printf("%d\n", reply->data.node_info.id); break; case 'n': case 'N': - if (reply->reply_type != pcmk_controld_reply_info) { - fprintf(stderr, - "error: Unknown reply type %d from controller\n", - reply->reply_type); - goto done; - } - if (reply->data.node_info.uname == NULL) { + value = crm_element_value(data, XML_ATTR_UNAME); + if (value == NULL) { fprintf(stderr, "Node is not known to cluster\n"); exit_code = CRM_EX_NOHOST; - goto done; + } else { + printf("%s\n", value); + exit_code = CRM_EX_OK; } - printf("%s\n", reply->data.node_info.uname); break; case 'q': - if (reply->reply_type != pcmk_controld_reply_info) { - fprintf(stderr, - "error: Unknown reply type %d from controller\n", - reply->reply_type); - goto done; - } - printf("%d\n", reply->data.node_info.have_quorum); - if (!(reply->data.node_info.have_quorum)) { - exit_code = CRM_EX_QUORUM; - goto done; - } - break; - - case 'l': - case 'p': - if (reply->reply_type != pcmk_controld_reply_nodes) { - fprintf(stderr, - "error: Unknown reply type %d from controller\n", - reply->reply_type); - goto done; - } - reply->data.nodes = g_list_sort(reply->data.nodes, sort_node); - for (GList *node_iter = reply->data.nodes; - node_iter != NULL; node_iter = node_iter->next) { - - pcmk_controld_api_node_t *node = node_iter->data; - const char *uname = (node->uname? node->uname : ""); - const char *state = (node->state? node->state : ""); - - if (options.command == 'l') { - printf("%lu %s %s\n", - (unsigned long) node->id, uname, state); - - // i.e. CRM_NODE_MEMBER, but we don't want to include cluster.h - } else if (!strcmp(state, "member")) { - printf("%s ", uname); - } - } - if (options.command == 'p') { - printf("\n"); + value = crm_element_value(data, XML_ATTR_HAVE_QUORUM); + if (value == NULL) { + fprintf(stderr, "error: Controller reply did not contain quorum status\n"); + } else { + bool quorum = crm_is_true(value); + + printf("%d\n", quorum); + exit_code = quorum? 
CRM_EX_OK : CRM_EX_QUORUM; } break; default: fprintf(stderr, "internal error: Controller reply not expected\n"); exit_code = CRM_EX_SOFTWARE; - goto done; + break; } - // Success - exit_code = CRM_EX_OK; done: - pcmk_disconnect_ipc(controld_api); - pcmk_quit_main_loop(mainloop, 10); + free_xml(message); + crm_node_exit(exit_code); + return 0; } static void -run_controller_mainloop(uint32_t nodeid, bool list_nodes) +run_controller_mainloop(uint32_t nodeid) { - pcmk_ipc_api_t *controld_api = NULL; + crm_ipc_t *controller = NULL; int rc; - // Set disconnect exit code to handle unexpected disconnects - exit_code = CRM_EX_DISCONNECT; - - // Create controller IPC object - rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld); - if (rc != pcmk_rc_ok) { - fprintf(stderr, "error: Could not connect to controller: %s\n", - pcmk_rc_str(rc)); - return; - } - pcmk_register_ipc_callback(controld_api, controller_event_cb, NULL); + controller = new_mainloop_for_ipc(CRM_SYSTEM_CRMD, dispatch_controller); - // Connect to controller - rc = pcmk_connect_ipc(controld_api, pcmk_ipc_dispatch_main); - if (rc != pcmk_rc_ok) { - fprintf(stderr, "error: Could not connect to controller: %s\n", - pcmk_rc_str(rc)); - exit_code = pcmk_rc2exitc(rc); - return; + rc = send_controller_hello(controller); + if (rc < 0) { + fprintf(stderr, "error: Could not register with controller: %s\n", + pcmk_strerror(rc)); + crm_node_exit(crm_errno2exit(rc)); } - if (list_nodes) { - rc = pcmk_controld_api_list_nodes(controld_api); - } else { - rc = pcmk_controld_api_node_info(controld_api, nodeid); - } - if (rc != pcmk_rc_ok) { + rc = send_node_info_request(controller, nodeid); + if (rc < 0) { fprintf(stderr, "error: Could not ping controller: %s\n", - pcmk_rc_str(rc)); - pcmk_disconnect_ipc(controld_api); - exit_code = pcmk_rc2exitc(rc); - return; + pcmk_strerror(rc)); + crm_node_exit(crm_errno2exit(rc)); } - // Run main loop to get controller reply via controller_event_cb() - mainloop = g_main_loop_new(NULL, FALSE); - g_main_loop_run(mainloop); - g_main_loop_unref(mainloop); - mainloop = NULL; - pcmk_free_ipc_api(controld_api); + // Run main loop to get controller reply via dispatch_controller() + run_mainloop_and_exit(); } static void @@ -315,12 +339,11 @@ print_node_name(void) if (name != NULL) { printf("%s\n", name); - exit_code = CRM_EX_OK; - return; + crm_node_exit(CRM_EX_OK); } else { // Otherwise ask the controller - run_controller_mainloop(0, false); + run_controller_mainloop(0); } } @@ -368,56 +391,32 @@ cib_remove_node(long id, const char *name) } static int -controller_remove_node(const char *node_name, long nodeid) -{ - pcmk_ipc_api_t *controld_api = NULL; - int rc; - - // Create controller IPC object - rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld); - if (rc != pcmk_rc_ok) { - fprintf(stderr, "error: Could not connect to controller: %s\n", - pcmk_rc_str(rc)); - return ENOTCONN; - } - - // Connect to controller (without main loop) - rc = pcmk_connect_ipc(controld_api, pcmk_ipc_dispatch_sync); - if (rc != pcmk_rc_ok) { - fprintf(stderr, "error: Could not connect to controller: %s\n", - pcmk_rc_str(rc)); - pcmk_free_ipc_api(controld_api); - return rc; - } - - rc = pcmk_ipc_purge_node(controld_api, node_name, nodeid); - if (rc != pcmk_rc_ok) { - fprintf(stderr, - "error: Could not clear node from controller's cache: %s\n", - pcmk_rc_str(rc)); - } - - pcmk_free_ipc_api(controld_api); - return pcmk_rc_ok; -} - -static int tools_remove_node_cache(const char *node_name, long nodeid, const char *target) { int rc = -1; - 
crm_ipc_t *conn = NULL; + crm_ipc_t *conn = crm_ipc_new(target, 0); xmlNode *cmd = NULL; - conn = crm_ipc_new(target, 0); if (!conn) { return -ENOTCONN; } + if (!crm_ipc_connect(conn)) { crm_perror(LOG_ERR, "Connection to %s failed", target); crm_ipc_destroy(conn); return -ENOTCONN; } + if(safe_str_eq(target, CRM_SYSTEM_CRMD)) { + // The controller requires a hello message before sending a request + rc = send_controller_hello(conn); + if (rc < 0) { + fprintf(stderr, "error: Could not register with controller: %s\n", + pcmk_strerror(rc)); + return rc; + } + } + crm_trace("Removing %s[%ld] from the %s membership cache", node_name, nodeid, target); @@ -434,9 +433,9 @@ tools_remove_node_cache(const char *node_name, long nodeid, const char *target) crm_xml_add_int(cmd, PCMK__XA_ATTR_NODE_ID, (int) nodeid); } - } else { // Fencer or pacemakerd - cmd = create_request(CRM_OP_RM_NODE_CACHE, NULL, NULL, target, - crm_system_name, NULL); + } else { + cmd = create_request(CRM_OP_RM_NODE_CACHE, + NULL, NULL, target, crm_system_name, pid_s); if (nodeid > 0) { crm_xml_set_id(cmd, "%ld", nodeid); } @@ -448,7 +447,6 @@ tools_remove_node_cache(const char *node_name, long nodeid, const char *target) target, node_name, nodeid, rc); if (rc > 0) { - // @TODO Should this be done just once after all the rest? rc = cib_remove_node(nodeid, node_name); } @@ -463,12 +461,12 @@ tools_remove_node_cache(const char *node_name, long nodeid, const char *target) static void remove_node(const char *target_uname) { - int rc; int d = 0; long nodeid = 0; const char *node_name = NULL; char *endptr = NULL; const char *daemons[] = { + CRM_SYSTEM_CRMD, "stonith-ng", T_ATTRD, CRM_SYSTEM_MCP, @@ -484,21 +482,87 @@ remove_node(const char *target_uname) node_name = target_uname; } - rc = controller_remove_node(node_name, nodeid); - if (rc != pcmk_rc_ok) { - exit_code = pcmk_rc2exitc(rc); - return; - } - for (d = 0; d < DIMOF(daemons); d++) { if (tools_remove_node_cache(node_name, nodeid, daemons[d])) { crm_err("Failed to connect to %s to remove node '%s'", daemons[d], target_uname); - exit_code = CRM_EX_ERROR; + crm_node_exit(CRM_EX_ERROR); return; } } - exit_code = CRM_EX_OK; + crm_node_exit(CRM_EX_OK); +} + +static gint +compare_node_xml(gconstpointer a, gconstpointer b) +{ + const char *a_name = crm_element_value((xmlNode*) a, "uname"); + const char *b_name = crm_element_value((xmlNode*) b, "uname"); + + return strcmp((a_name? a_name : ""), (b_name? b_name : "")); +} + +static int +node_mcp_dispatch(const char *buffer, ssize_t length, gpointer userdata) +{ + GList *nodes = NULL; + xmlNode *node = NULL; + xmlNode *msg = string2xml(buffer); + const char *uname; + const char *state; + + if (msg == NULL) { + fprintf(stderr, "error: Could not understand pacemakerd response\n"); + crm_node_exit(CRM_EX_PROTOCOL); + return 0; + } + + crm_log_xml_trace(msg, "message"); + + for (node = __xml_first_child(msg); node != NULL; node = __xml_next(node)) { + nodes = g_list_insert_sorted(nodes, node, compare_node_xml); + } + + for (GList *iter = nodes; iter; iter = iter->next) { + node = (xmlNode*) iter->data; + uname = crm_element_value(node, "uname"); + state = crm_element_value(node, "state"); + + if (options.command == 'l') { + int id = 0; + + crm_element_value_int(node, "id", &id); + printf("%d %s %s\n", id, (uname? uname : ""), (state? state : "")); + + // This is CRM_NODE_MEMBER but we don't want to include cluster header + } else if ((options.command == 'p') && safe_str_eq(state, "member")) { + printf("%s ", (uname? 
uname : "")); + } + } + if (options.command == 'p') { + fprintf(stdout, "\n"); + } + + free_xml(msg); + crm_node_exit(CRM_EX_OK); + return 0; +} + +static void +run_pacemakerd_mainloop(void) +{ + crm_ipc_t *ipc = NULL; + xmlNode *poke = NULL; + + ipc = new_mainloop_for_ipc(CRM_SYSTEM_MCP, node_mcp_dispatch); + + // Sending anything will get us a list of nodes + poke = create_xml_node(NULL, "poke"); + crm_ipc_send(ipc, poke, 0, 0, NULL); + free_xml(poke); + + // Handle reply via node_mcp_dispatch() + run_mainloop_and_exit(); } static GOptionContext * @@ -585,11 +649,11 @@ main(int argc, char **argv) case 'i': case 'q': case 'N': - run_controller_mainloop(options.nodeid, false); + run_controller_mainloop(options.nodeid); break; case 'l': case 'p': - run_controller_mainloop(0, true); + run_pacemakerd_mainloop(); break; default: break; @@ -599,5 +663,6 @@ done: g_strfreev(processed_args); g_clear_error(&error); pcmk__free_arg_context(context); - return crm_exit(exit_code); + crm_node_exit(exit_code); + return exit_code; } diff --git a/tools/crm_resource.c b/tools/crm_resource.c index c8c1cfa..6853ad5 100644 --- a/tools/crm_resource.c +++ b/tools/crm_resource.c @@ -11,70 +11,62 @@ #include #include + +#include +#include + #include #include #include + #include #include #include #include #include -#include -#include -#include - bool BE_QUIET = FALSE; bool scope_master = FALSE; int cib_options = cib_sync_call; -static crm_exit_t exit_code = CRM_EX_OK; -// Things that should be cleaned up on exit static GMainLoop *mainloop = NULL; + +// Things that should be cleaned up on exit static cib_t *cib_conn = NULL; -static pcmk_ipc_api_t *controld_api = NULL; +static pcmk_controld_api_t *controld_api = NULL; static pe_working_set_t *data_set = NULL; #define MESSAGE_TIMEOUT_S 60 // Clean up and exit static crm_exit_t -bye(crm_exit_t ec) +bye(crm_exit_t exit_code) { + static bool crm_resource_shutdown_flag = false; + + if (crm_resource_shutdown_flag) { + // Allow usage like "return bye(...);" + return exit_code; + } + crm_resource_shutdown_flag = true; + if (cib_conn != NULL) { cib_t *save_cib_conn = cib_conn; - cib_conn = NULL; // Ensure we can't free this twice + cib_conn = NULL; save_cib_conn->cmds->signoff(save_cib_conn); cib_delete(save_cib_conn); } if (controld_api != NULL) { - pcmk_ipc_api_t *save_controld_api = controld_api; + pcmk_controld_api_t *save_controld_api = controld_api; - controld_api = NULL; // Ensure we can't free this twice - pcmk_free_ipc_api(save_controld_api); - } - if (mainloop != NULL) { - g_main_loop_unref(mainloop); - mainloop = NULL; + controld_api = NULL; + pcmk_free_controld_api(save_controld_api); } pe_free_working_set(data_set); data_set = NULL; - crm_exit(ec); - return ec; -} - -static void -quit_main_loop(crm_exit_t ec) -{ - exit_code = ec; - if (mainloop != NULL) { - GMainLoop *mloop = mainloop; - - mainloop = NULL; // Don't re-enter this block - pcmk_quit_main_loop(mloop, 10); - g_main_loop_unref(mloop); - } + crm_exit(exit_code); + return exit_code; } static gboolean @@ -84,54 +76,39 @@ resource_ipc_timeout(gpointer data) fprintf(stderr, "\nAborting because no messages received in %d seconds\n", MESSAGE_TIMEOUT_S); crm_err("No messages received in %d seconds", MESSAGE_TIMEOUT_S); - quit_main_loop(CRM_EX_TIMEOUT); + bye(CRM_EX_TIMEOUT); return FALSE; } static void -controller_event_callback(pcmk_ipc_api_t *api, enum pcmk_ipc_event event_type, - crm_exit_t status, void *event_data, void *user_data) +handle_controller_reply(pcmk_controld_api_t *capi, void *api_data, + void 
*user_data) { - switch (event_type) { - case pcmk_ipc_event_disconnect: - if (exit_code == CRM_EX_DISCONNECT) { // Unexpected - crm_info("Connection to controller was terminated"); - } - quit_main_loop(exit_code); - break; - - case pcmk_ipc_event_reply: - if (status != CRM_EX_OK) { - fprintf(stderr, "\nError: bad reply from controller: %s\n", - crm_exit_str(status)); - pcmk_disconnect_ipc(api); - quit_main_loop(status); - } else { - fprintf(stderr, "."); - if ((pcmk_controld_api_replies_expected(api) == 0) - && mainloop && g_main_loop_is_running(mainloop)) { - fprintf(stderr, " OK\n"); - crm_debug("Got all the replies we expected"); - pcmk_disconnect_ipc(api); - quit_main_loop(CRM_EX_OK); - } - } - break; - - default: - break; + fprintf(stderr, "."); + if ((capi->replies_expected(capi) == 0) + && mainloop && g_main_loop_is_running(mainloop)) { + fprintf(stderr, " OK\n"); + crm_debug("Got all the replies we expected"); + bye(CRM_EX_OK); } } static void -start_mainloop(pcmk_ipc_api_t *capi) +handle_controller_drop(pcmk_controld_api_t *capi, void *api_data, + void *user_data) { - unsigned int count = pcmk_controld_api_replies_expected(capi); + crm_info("Connection to controller was terminated"); + bye(CRM_EX_DISCONNECT); +} + +static void +start_mainloop(pcmk_controld_api_t *capi) +{ + if (capi->replies_expected(capi) > 0) { + unsigned int count = capi->replies_expected(capi); - if (count > 0) { fprintf(stderr, "Waiting for %d %s from the controller", count, pcmk__plural_alt(count, "reply", "replies")); - exit_code = CRM_EX_DISCONNECT; // For unexpected disconnects mainloop = g_main_loop_new(NULL, FALSE); g_timeout_add(MESSAGE_TIMEOUT_S * 1000, resource_ipc_timeout, NULL); g_main_loop_run(mainloop); @@ -687,6 +664,7 @@ main(int argc, char **argv) int argerr = 0; int flag; int find_flags = 0; // Flags to use when searching for resource + crm_exit_t exit_code = CRM_EX_OK; crm_log_cli_init("crm_resource"); pcmk__set_cli_options(NULL, "| [options]", long_options, @@ -1173,15 +1151,21 @@ main(int argc, char **argv) // Establish a connection to the controller if needed if (require_crmd) { - rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld); - if (rc != pcmk_rc_ok) { - CMD_ERR("Error connecting to the controller: %s", pcmk_rc_str(rc)); - rc = pcmk_rc2legacy(rc); - goto bail; - } - pcmk_register_ipc_callback(controld_api, controller_event_callback, - NULL); - rc = pcmk_connect_ipc(controld_api, pcmk_ipc_dispatch_main); + char *client_uuid; + pcmk_controld_api_cb_t dispatch_cb = { + handle_controller_reply, NULL + }; + pcmk_controld_api_cb_t destroy_cb = { + handle_controller_drop, NULL + }; + + + client_uuid = pcmk__getpid_s(); + controld_api = pcmk_new_controld_api(crm_system_name, client_uuid); + free(client_uuid); + + rc = controld_api->connect(controld_api, true, &dispatch_cb, + &destroy_cb); if (rc != pcmk_rc_ok) { CMD_ERR("Error connecting to the controller: %s", pcmk_rc_str(rc)); rc = pcmk_rc2legacy(rc); @@ -1541,8 +1525,8 @@ main(int argc, char **argv) NULL, NULL, NULL, NULL, attr_options)); - if (pcmk_controld_api_reprobe(controld_api, host_uname, - router_node) == pcmk_rc_ok) { + if (controld_api->reprobe(controld_api, host_uname, + router_node) == pcmk_rc_ok) { start_mainloop(controld_api); } diff --git a/tools/crm_resource.h b/tools/crm_resource.h index cb7f506..0bf7bee 100644 --- a/tools/crm_resource.h +++ b/tools/crm_resource.h @@ -21,6 +21,7 @@ #include #include #include +#include "crm_resource_controller.h" extern bool print_pending; @@ -35,6 +36,8 @@ extern char *move_lifetime; 
extern const char *attr_set_type; +extern pcmk_controld_api_cb_t controld_api_cb; + /* ban */ int cli_resource_prefer(const char *rsc_id, const char *host, cib_t * cib_conn); int cli_resource_ban(const char *rsc_id, const char *host, GListPtr allnodes, cib_t * cib_conn); @@ -60,16 +63,16 @@ int cli_resource_print_operations(const char *rsc_id, const char *host_uname, bo /* runtime */ void cli_resource_check(cib_t * cib, pe_resource_t *rsc); -int cli_resource_fail(pcmk_ipc_api_t *controld_api, +int cli_resource_fail(pcmk_controld_api_t *controld_api, const char *host_uname, const char *rsc_id, pe_working_set_t *data_set); int cli_resource_search(pe_resource_t *rsc, const char *requested_name, pe_working_set_t *data_set); -int cli_resource_delete(pcmk_ipc_api_t *controld_api, +int cli_resource_delete(pcmk_controld_api_t *controld_api, const char *host_uname, pe_resource_t *rsc, const char *operation, const char *interval_spec, bool just_failures, pe_working_set_t *data_set); -int cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name, +int cli_cleanup_all(pcmk_controld_api_t *controld_api, const char *node_name, const char *operation, const char *interval_spec, pe_working_set_t *data_set); int cli_resource_restart(pe_resource_t *rsc, const char *host, int timeout_ms, diff --git a/tools/crm_resource_controller.c b/tools/crm_resource_controller.c new file mode 100644 index 0000000..994a7be --- /dev/null +++ b/tools/crm_resource_controller.c @@ -0,0 +1,425 @@ +/* + * Copyright 2020 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU General Public License version 2 + * or later (GPLv2+) WITHOUT ANY WARRANTY. + */ + +#include +#include +#include +#include "crm_resource.h" + +// API object's private members +struct controller_private { + char *client_name; // Client name to use with IPC + char *client_uuid; // Client UUID to use with IPC + mainloop_io_t *source; // If main loop used, I/O source for IPC + crm_ipc_t *ipc; // IPC connection to controller + int replies_expected; // How many controller replies are expected + pcmk_controld_api_cb_t dispatch_cb; // Caller's registered dispatch callback + pcmk_controld_api_cb_t destroy_cb; // Caller's registered destroy callback +}; + +static void +call_client_callback(pcmk_controld_api_t *api, pcmk_controld_api_cb_t *cb, + void *api_data) +{ + if ((cb != NULL) && (cb->callback != NULL)) { + cb->callback(api, api_data, cb->user_data); + } +} + +/* + * IPC callbacks when used with main loop + */ + +static void +controller_ipc_destroy(gpointer user_data) +{ + pcmk_controld_api_t *api = user_data; + struct controller_private *private = api->private; + + private->ipc = NULL; + private->source = NULL; + call_client_callback(api, &(private->destroy_cb), NULL); +} + +// \return < 0 if connection is no longer required, >= 0 if it is +static int +controller_ipc_dispatch(const char *buffer, ssize_t length, gpointer user_data) +{ + xmlNode *msg = NULL; + pcmk_controld_api_t *api = user_data; + + CRM_CHECK(buffer && api && api->private, return 0); + + msg = string2xml(buffer); + if (msg == NULL) { + crm_warn("Received malformed controller IPC message"); + } else { + struct controller_private *private = api->private; + + crm_log_xml_trace(msg, "controller-reply"); + private->replies_expected--; + call_client_callback(api, &(private->dispatch_cb), + get_message_xml(msg, F_CRM_DATA)); + free_xml(msg); + } + return 0; +} + +/* + * IPC utilities + */ 
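+
+/* As a rough sketch of how the utilities below fit together (illustrative
+ * only -- error handling elided, and "ipc", "api", "msg_data", and "node"
+ * stand in for caller-supplied values):
+ *
+ *     send_hello(ipc, client_name, client_uuid);   // once per connection
+ *     send_controller_request(api, CRM_OP_REPROBE, msg_data, node);
+ *     // ... each request bumps replies_expected; each reply comes back
+ *     // through controller_ipc_dispatch() above, which decrements it and
+ *     // invokes the caller's dispatch callback
+ */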
+
+// \return Standard Pacemaker return code
+static int
+send_hello(crm_ipc_t *ipc, const char *client_name, const char *client_uuid)
+{
+    xmlNode *hello = create_hello_message(client_uuid, client_name, "0", "1");
+    int rc = crm_ipc_send(ipc, hello, 0, 0, NULL);
+
+    free_xml(hello);
+    if (rc < 0) {
+        rc = pcmk_legacy2rc(rc);
+        crm_info("Could not send IPC hello to %s: %s " CRM_XS " rc=%d",
+                 CRM_SYSTEM_CRMD /* ipc->name */,
+                 pcmk_rc_str(rc), rc);
+        return rc;
+    }
+    crm_debug("Sent IPC hello to %s", CRM_SYSTEM_CRMD /* ipc->name */);
+    return pcmk_rc_ok;
+}
+
+// \return Standard Pacemaker return code
+static int
+send_controller_request(pcmk_controld_api_t *api, const char *op,
+                        xmlNode *msg_data, const char *node)
+{
+    int rc;
+    struct controller_private *private = api->private;
+    xmlNode *cmd = create_request(op, msg_data, node, CRM_SYSTEM_CRMD,
+                                  private->client_name, private->client_uuid);
+    const char *reference = crm_element_value(cmd, XML_ATTR_REFERENCE);
+
+    if ((cmd == NULL) || (reference == NULL)) {
+        return EINVAL;
+    }
+
+    //@TODO pass as args? 0=crm_ipc_flags, 0=timeout_ms (default 5s), NULL=reply
+    crm_log_xml_trace(cmd, "controller-request");
+    rc = crm_ipc_send(private->ipc, cmd, 0, 0, NULL);
+    free_xml(cmd);
+    if (rc < 0) {
+        return pcmk_legacy2rc(rc);
+    }
+    private->replies_expected++;
+    return pcmk_rc_ok;
+}
+
+/*
+ * pcmk_controld_api_t methods
+ */
+
+static int
+controller_connect_mainloop(pcmk_controld_api_t *api)
+{
+    struct controller_private *private = api->private;
+    struct ipc_client_callbacks callbacks = {
+        .dispatch = controller_ipc_dispatch,
+        .destroy = controller_ipc_destroy,
+    };
+
+    private->source = mainloop_add_ipc_client(CRM_SYSTEM_CRMD,
+                                              G_PRIORITY_DEFAULT, 0, api,
+                                              &callbacks);
+    if (private->source == NULL) {
+        return ENOTCONN;
+    }
+
+    private->ipc = mainloop_get_ipc_client(private->source);
+    if (private->ipc == NULL) {
+        (void) api->disconnect(api);
+        return ENOTCONN;
+    }
+
+    crm_debug("Connected to %s IPC (attaching to main loop)", CRM_SYSTEM_CRMD);
+    return pcmk_rc_ok;
+}
+
+static int
+controller_connect_no_mainloop(pcmk_controld_api_t *api)
+{
+    struct controller_private *private = api->private;
+
+    private->ipc = crm_ipc_new(CRM_SYSTEM_CRMD, 0);
+    if (private->ipc == NULL) {
+        return ENOTCONN;
+    }
+    if (!crm_ipc_connect(private->ipc)) {
+        crm_ipc_close(private->ipc);
+        crm_ipc_destroy(private->ipc);
+        private->ipc = NULL;
+        return errno;
+    }
+    /* @TODO caller needs crm_ipc_get_fd(private->ipc); either add method for
+     * that, or replace use_mainloop with int *fd
+     */
+    crm_debug("Connected to %s IPC", CRM_SYSTEM_CRMD);
+    return pcmk_rc_ok;
+}
+
+static void
+set_callback(pcmk_controld_api_cb_t *dest, pcmk_controld_api_cb_t *source)
+{
+    if (source) {
+        dest->callback = source->callback;
+        dest->user_data = source->user_data;
+    }
+}
+
+static int
+controller_api_connect(pcmk_controld_api_t *api, bool use_mainloop,
+                       pcmk_controld_api_cb_t *dispatch_cb,
+                       pcmk_controld_api_cb_t *destroy_cb)
+{
+    int rc = pcmk_rc_ok;
+    struct controller_private *private;
+
+    if (api == NULL) {
+        return EINVAL;
+    }
+    private = api->private;
+
+    set_callback(&(private->dispatch_cb), dispatch_cb);
+    set_callback(&(private->destroy_cb), destroy_cb);
+
+    if (private->ipc != NULL) {
+        return pcmk_rc_ok; // already connected
+    }
+
+    if (use_mainloop) {
+        rc = controller_connect_mainloop(api);
+    } else {
+        rc = controller_connect_no_mainloop(api);
+    }
+    if (rc != pcmk_rc_ok) {
+        return rc;
+    }
+
+    rc = send_hello(private->ipc, private->client_name,
private->client_uuid); + if (rc != pcmk_rc_ok) { + (void) api->disconnect(api); + } + return rc; +} + +static int +controller_api_disconnect(pcmk_controld_api_t *api) +{ + struct controller_private *private = api->private; + + if (private->source != NULL) { + // Attached to main loop + mainloop_del_ipc_client(private->source); + private->source = NULL; + private->ipc = NULL; + + } else if (private->ipc != NULL) { + // Not attached to main loop + crm_ipc_t *ipc = private->ipc; + + private->ipc = NULL; + crm_ipc_close(ipc); + crm_ipc_destroy(ipc); + } + crm_debug("Disconnected from %s IPC", CRM_SYSTEM_CRMD /* ipc->name */); + return pcmk_rc_ok; +} + +//@TODO dispatch function for non-mainloop a la stonith_dispatch() +//@TODO convenience retry-connect function a la stonith_api_connect_retry() + +static unsigned int +controller_api_replies_expected(pcmk_controld_api_t *api) +{ + if (api != NULL) { + struct controller_private *private = api->private; + + return private->replies_expected; + } + return 0; +} + +static xmlNode * +create_reprobe_message_data(const char *target_node, const char *router_node) +{ + xmlNode *msg_data; + + msg_data = create_xml_node(NULL, "data_for_" CRM_OP_REPROBE); + crm_xml_add(msg_data, XML_LRM_ATTR_TARGET, target_node); + if ((router_node != NULL) && safe_str_neq(router_node, target_node)) { + crm_xml_add(msg_data, XML_LRM_ATTR_ROUTER_NODE, router_node); + } + return msg_data; +} + +static int +controller_api_reprobe(pcmk_controld_api_t *api, const char *target_node, + const char *router_node) +{ + int rc = EINVAL; + + if (api != NULL) { + xmlNode *msg_data; + + crm_debug("Sending %s IPC request to reprobe %s via %s", + CRM_SYSTEM_CRMD, crm_str(target_node), crm_str(router_node)); + msg_data = create_reprobe_message_data(target_node, router_node); + rc = send_controller_request(api, CRM_OP_REPROBE, msg_data, + (router_node? router_node : target_node)); + free_xml(msg_data); + } + return rc; +} + +// \return Standard Pacemaker return code +static int +controller_resource_op(pcmk_controld_api_t *api, const char *op, + const char *target_node, const char *router_node, + bool cib_only, const char *rsc_id, + const char *rsc_long_id, const char *standard, + const char *provider, const char *type) +{ + int rc; + char *key; + xmlNode *msg_data, *xml_rsc, *params; + + if (api == NULL) { + return EINVAL; + } + if (router_node == NULL) { + router_node = target_node; + } + + msg_data = create_xml_node(NULL, XML_GRAPH_TAG_RSC_OP); + + /* The controller logs the transition key from resource op requests, so we + * need to have *something* for it. 
+ */ + key = pcmk__transition_key(0, getpid(), 0, + "xxxxxxxx-xrsc-opxx-xcrm-resourcexxxx"); + crm_xml_add(msg_data, XML_ATTR_TRANSITION_KEY, key); + free(key); + + crm_xml_add(msg_data, XML_LRM_ATTR_TARGET, target_node); + if (safe_str_neq(router_node, target_node)) { + crm_xml_add(msg_data, XML_LRM_ATTR_ROUTER_NODE, router_node); + } + + if (cib_only) { + // Indicate that only the CIB needs to be cleaned + crm_xml_add(msg_data, PCMK__XA_MODE, XML_TAG_CIB); + } + + xml_rsc = create_xml_node(msg_data, XML_CIB_TAG_RESOURCE); + crm_xml_add(xml_rsc, XML_ATTR_ID, rsc_id); + crm_xml_add(xml_rsc, XML_ATTR_ID_LONG, rsc_long_id); + crm_xml_add(xml_rsc, XML_AGENT_ATTR_CLASS, standard); + crm_xml_add(xml_rsc, XML_AGENT_ATTR_PROVIDER, provider); + crm_xml_add(xml_rsc, XML_ATTR_TYPE, type); + + params = create_xml_node(msg_data, XML_TAG_ATTRS); + crm_xml_add(params, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET); + + // The controller parses the timeout from the request + key = crm_meta_name(XML_ATTR_TIMEOUT); + crm_xml_add(params, key, "60000"); /* 1 minute */ //@TODO pass as arg + free(key); + + rc = send_controller_request(api, op, msg_data, router_node); + free_xml(msg_data); + return rc; +} + +static int +controller_api_fail_resource(pcmk_controld_api_t *api, + const char *target_node, const char *router_node, + const char *rsc_id, const char *rsc_long_id, + const char *standard, const char *provider, + const char *type) +{ + crm_debug("Sending %s IPC request to fail %s (a.k.a. %s) on %s via %s", + CRM_SYSTEM_CRMD, crm_str(rsc_id), crm_str(rsc_long_id), + crm_str(target_node), crm_str(router_node)); + return controller_resource_op(api, CRM_OP_LRM_FAIL, target_node, + router_node, false, rsc_id, rsc_long_id, + standard, provider, type); +} + +static int +controller_api_refresh_resource(pcmk_controld_api_t *api, + const char *target_node, + const char *router_node, + const char *rsc_id, const char *rsc_long_id, + const char *standard, const char *provider, + const char *type, bool cib_only) +{ + crm_debug("Sending %s IPC request to refresh %s (a.k.a. %s) on %s via %s", + CRM_SYSTEM_CRMD, crm_str(rsc_id), crm_str(rsc_long_id), + crm_str(target_node), crm_str(router_node)); + return controller_resource_op(api, CRM_OP_LRM_DELETE, target_node, + router_node, cib_only, rsc_id, rsc_long_id, + standard, provider, type); +} + +pcmk_controld_api_t * +pcmk_new_controld_api(const char *client_name, const char *client_uuid) +{ + struct controller_private *private; + pcmk_controld_api_t *api = calloc(1, sizeof(pcmk_controld_api_t)); + + CRM_ASSERT(api != NULL); + + api->private = calloc(1, sizeof(struct controller_private)); + CRM_ASSERT(api->private != NULL); + private = api->private; + + if (client_name == NULL) { + client_name = crm_system_name? 
crm_system_name : "client"; + } + private->client_name = strdup(client_name); + CRM_ASSERT(private->client_name != NULL); + + if (client_uuid == NULL) { + private->client_uuid = crm_generate_uuid(); + } else { + private->client_uuid = strdup(client_uuid); + } + CRM_ASSERT(private->client_uuid != NULL); + + api->connect = controller_api_connect; + api->disconnect = controller_api_disconnect; + api->replies_expected = controller_api_replies_expected; + api->reprobe = controller_api_reprobe; + api->fail_resource = controller_api_fail_resource; + api->refresh_resource = controller_api_refresh_resource; + return api; +} + +void +pcmk_free_controld_api(pcmk_controld_api_t *api) +{ + if (api != NULL) { + struct controller_private *private = api->private; + + api->disconnect(api); + free(private->client_name); + free(private->client_uuid); + free(api->private); + free(api); + } +} diff --git a/tools/crm_resource_controller.h b/tools/crm_resource_controller.h new file mode 100644 index 0000000..50e20b4 --- /dev/null +++ b/tools/crm_resource_controller.h @@ -0,0 +1,198 @@ +/* + * Copyright 2020 the Pacemaker project contributors + * + * The version control history for this file may have further details. + * + * This source code is licensed under the GNU Lesser General Public License + * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY. + */ +#ifndef PCMK__CONTROLD_API_H +#define PCMK__CONTROLD_API_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include // bool + +/* This is a demonstration of an abstracted controller IPC API. It is expected + * that this will be improved and moved to libcrmcommon. + * + * @TODO We could consider whether it's reasonable to have a single type for + * all daemons' IPC APIs (e.g. pcmk_ipc_api_t instead of pcmk_*_api_t). They + * could potentially have common connect/disconnect methods and then a void* to + * a group of API-specific methods. + * + * In that case, the callback type would also need to be generic, taking + * (pcmk_ipc_api_t *api, void *api_data, void *user_data), with individual APIs + * having functions for getting useful info from api_data. If all APIs followed + * the call_id model, we could use int call_id instead of api_data. + * + * A major annoyance is that the controller IPC protocol currently does not have + * any way to tie a particular reply to a particular request. The current + * clients (crmadmin, crm_node, and crm_resource) simply know what kind of reply + * to expect for the kind of request they sent. In crm_resource's case, all it + * does is count replies, ignoring their content altogether. + * + * That really forces us to have a single callback for all events rather than a + * per-request callback. That in turn implies that callers can only provide a + * single user data pointer. + * + * @TODO Define protocol version constants to use in hello message. + * @TODO Allow callers to specify timeouts. + * @TODO Define call IDs for controller ops, while somehow maintaining backward + * compatibility, since a client running on a Pacemaker Remote node could + * be older or newer than the controller on the connection's cluster + * node. + * @TODO The controller currently does not respond to hello messages. We should + * establish a common connection handshake protocol for all daemons that + * involves a hello message and acknowledgement. We should support sync + * or async connection (i.e. block until the ack is received, or return + * after the hello is sent and call a connection callback when the hello + * ack is received). + */ + +//! 
+//! \internal
+typedef struct pcmk_controld_api_s pcmk_controld_api_t;
+
+//! \internal
+typedef struct pcmk_controld_api_callback_s {
+    void (*callback)(pcmk_controld_api_t *api, void *api_data, void *user_data);
+    void *user_data;
+} pcmk_controld_api_cb_t;
+
+//! \internal
+struct pcmk_controld_api_s {
+    //! \internal
+    void *private;
+
+    /*!
+     * \internal
+     * \brief Connect to the local controller
+     *
+     * \param[in] api           Controller API instance
+     * \param[in] use_mainloop  If true, attach IPC to main loop
+     * \param[in] dispatch_cb   If not NULL, call this when replies are received
+     * \param[in] destroy_cb    If not NULL, call this if connection drops
+     *
+     * \return Standard Pacemaker return code
+     * \note Only the pointers inside the callback objects need to be
+     *       persistent, not the callback objects themselves. The destroy_cb
+     *       will be called only for unrequested disconnects.
+     */
+    int (*connect)(pcmk_controld_api_t *api, bool use_mainloop,
+                   pcmk_controld_api_cb_t *dispatch_cb,
+                   pcmk_controld_api_cb_t *destroy_cb);
+
+    /*!
+     * \internal
+     * \brief Disconnect from the local controller
+     *
+     * \param[in] api  Controller API instance
+     *
+     * \return Standard Pacemaker return code
+     */
+    int (*disconnect)(pcmk_controld_api_t *api);
+
+    /*!
+     * \internal
+     * \brief Check number of replies still expected from controller
+     *
+     * \param[in] api  Controller API instance
+     *
+     * \return Number of expected replies
+     */
+    unsigned int (*replies_expected)(pcmk_controld_api_t *api);
+
+    /*!
+     * \internal
+     * \brief Send a reprobe controller operation
+     *
+     * \param[in] api          Controller API instance
+     * \param[in] target_node  Name of node to reprobe
+     * \param[in] router_node  Router node for host
+     *
+     * \return Standard Pacemaker return code
+     */
+    int (*reprobe)(pcmk_controld_api_t *api, const char *target_node,
+                   const char *router_node);
+
+    /* @TODO These methods have a lot of arguments. One possibility would be to
+     * make a struct for agent info (standard/provider/type), which
+     * theoretically could be used throughout pacemaker code. However that
+     * would end up being really awkward to use generically, since sometimes
+     * you need to allocate those strings (char *) and other times you only
+     * have references into XML (const char *). We could make some structs just
+     * for this API.
+     */
+
+    /*!
+     * \internal
+     * \brief Ask the controller to fail a resource
+     *
+     * \param[in] api          Controller API instance
+     * \param[in] target_node  Name of node resource is on
+     * \param[in] router_node  Router node for target
+     * \param[in] rsc_id       ID of resource to fail
+     * \param[in] rsc_long_id  Long ID of resource (if any)
+     * \param[in] standard     Standard of resource
+     * \param[in] provider     Provider of resource (if any)
+     * \param[in] type         Type of resource to fail
+     *
+     * \return Standard Pacemaker return code
+     */
+    int (*fail_resource)(pcmk_controld_api_t *api, const char *target_node,
+                         const char *router_node, const char *rsc_id,
+                         const char *rsc_long_id, const char *standard,
+                         const char *provider, const char *type);
+
+    /*!
+ * \internal + * \brief Ask the controller to refresh a resource + * + * \param[in] api Controller API instance + * \param[in] target_node Name of node resource is on + * \param[in] router_node Router node for target + * \param[in] rsc_id ID of resource to refresh + * \param[in] rsc_long_id Long ID of resource (if any) + * \param[in] standard Standard of resource + * \param[in] provider Provider of resource (if any) + * \param[in] type Type of resource + * \param[in] cib_only If true, clean resource from CIB only + * + * \return Standard Pacemaker return code + */ + int (*refresh_resource)(pcmk_controld_api_t *api, const char *target_node, + const char *router_node, const char *rsc_id, + const char *rsc_long_id, const char *standard, + const char *provider, const char *type, + bool cib_only); +}; + +/*! + * \internal + * \brief Create new controller IPC API object for clients + * + * \param[in] client_name Client name to use with IPC + * \param[in] client_uuid Client UUID to use with IPC + * + * \return Newly allocated object + * \note This function asserts on errors, so it will never return NULL. + * The caller is responsible for freeing the result with + * pcmk_free_controld_api(). + */ +pcmk_controld_api_t *pcmk_new_controld_api(const char *client_name, + const char *client_uuid); + +/*! + * \internal + * \brief Free a controller IPC API object + * + * \param[in] api Controller IPC API object to free + */ +void pcmk_free_controld_api(pcmk_controld_api_t *api); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c index cc2abeb..37789d1 100644 --- a/tools/crm_resource_runtime.c +++ b/tools/crm_resource_runtime.c @@ -8,7 +8,6 @@ */ #include -#include int resource_verbose = 0; bool do_force = FALSE; @@ -461,7 +460,7 @@ cli_resource_delete_attribute(pe_resource_t *rsc, const char *requested_name, // \return Standard Pacemaker return code static int -send_lrm_rsc_op(pcmk_ipc_api_t *controld_api, bool do_fail_resource, +send_lrm_rsc_op(pcmk_controld_api_t *controld_api, bool do_fail_resource, const char *host_uname, const char *rsc_id, pe_working_set_t *data_set) { @@ -529,13 +528,14 @@ send_lrm_rsc_op(pcmk_ipc_api_t *controld_api, bool do_fail_resource, rsc_api_id = rsc->id; } if (do_fail_resource) { - return pcmk_controld_api_fail(controld_api, host_uname, router_node, - rsc_api_id, rsc_long_id, - rsc_class, rsc_provider, rsc_type); + return controld_api->fail_resource(controld_api, host_uname, + router_node, rsc_api_id, rsc_long_id, + rsc_class, rsc_provider, rsc_type); } else { - return pcmk_controld_api_refresh(controld_api, host_uname, router_node, - rsc_api_id, rsc_long_id, rsc_class, - rsc_provider, rsc_type, cib_only); + return controld_api->refresh_resource(controld_api, host_uname, + router_node, rsc_api_id, + rsc_long_id, rsc_class, + rsc_provider, rsc_type, cib_only); } } @@ -558,7 +558,7 @@ rsc_fail_name(pe_resource_t *rsc) // \return Standard Pacemaker return code static int -clear_rsc_history(pcmk_ipc_api_t *controld_api, const char *host_uname, +clear_rsc_history(pcmk_controld_api_t *controld_api, const char *host_uname, const char *rsc_id, pe_working_set_t *data_set) { int rc = pcmk_ok; @@ -574,16 +574,16 @@ clear_rsc_history(pcmk_ipc_api_t *controld_api, const char *host_uname, } crm_trace("Processing %d mainloop inputs", - pcmk_controld_api_replies_expected(controld_api)); + controld_api->replies_expected(controld_api)); while (g_main_context_iteration(NULL, FALSE)) { crm_trace("Processed mainloop input, %d 
still remaining", - pcmk_controld_api_replies_expected(controld_api)); + controld_api->replies_expected(controld_api)); } return rc; } static int -clear_rsc_failures(pcmk_ipc_api_t *controld_api, const char *node_name, +clear_rsc_failures(pcmk_controld_api_t *controld_api, const char *node_name, const char *rsc_id, const char *operation, const char *interval_spec, pe_working_set_t *data_set) { @@ -683,7 +683,7 @@ clear_rsc_fail_attrs(pe_resource_t *rsc, const char *operation, } int -cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname, +cli_resource_delete(pcmk_controld_api_t *controld_api, const char *host_uname, pe_resource_t *rsc, const char *operation, const char *interval_spec, bool just_failures, pe_working_set_t *data_set) @@ -792,7 +792,7 @@ cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname, } int -cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name, +cli_cleanup_all(pcmk_controld_api_t *controld_api, const char *node_name, const char *operation, const char *interval_spec, pe_working_set_t *data_set) { @@ -905,7 +905,7 @@ cli_resource_check(cib_t * cib_conn, pe_resource_t *rsc) // \return Standard Pacemaker return code int -cli_resource_fail(pcmk_ipc_api_t *controld_api, const char *host_uname, +cli_resource_fail(pcmk_controld_api_t *controld_api, const char *host_uname, const char *rsc_id, pe_working_set_t *data_set) { crm_notice("Failing %s on %s", rsc_id, host_uname); diff --git a/tools/crmadmin.c b/tools/crmadmin.c index 2ebdd14..c58de59 100644 --- a/tools/crmadmin.c +++ b/tools/crmadmin.c @@ -9,49 +9,54 @@ #include +#include + #include -#include -#include // atoi() +#include +#include -#include // gboolean, GMainLoop, etc. -#include // xmlNode +#include +#include +#include +#include #include -#include #include #include -#include -#include -#include -#include -#define DEFAULT_MESSAGE_TIMEOUT_MS 30000 +#include -static guint message_timer_id = 0; -static guint message_timeout_ms = DEFAULT_MESSAGE_TIMEOUT_MS; -static GMainLoop *mainloop = NULL; +#include -bool do_work(pcmk_ipc_api_t *api); -void do_find_node_list(xmlNode *xml_node); -static char *ipc_name = NULL; +static int message_timer_id = -1; +static int message_timeout_ms = 30 * 1000; +static GMainLoop *mainloop = NULL; +static crm_ipc_t *crmd_channel = NULL; +static char *admin_uuid = NULL; + +gboolean do_init(void); +int do_work(void); +void crmadmin_ipc_connection_destroy(gpointer user_data); +int admin_msg_callback(const char *buffer, ssize_t length, gpointer userdata); +int do_find_node_list(xmlNode * xml_node); gboolean admin_message_timeout(gpointer data); -static enum { - cmd_none, - cmd_shutdown, - cmd_health, - cmd_elect_dc, - cmd_whois_dc, - cmd_list_nodes, - cmd_pacemakerd_health, -} command = cmd_none; - static gboolean BE_VERBOSE = FALSE; +static int expected_responses = 1; static gboolean BASH_EXPORT = FALSE; +static gboolean DO_HEALTH = FALSE; +static gboolean DO_RESET = FALSE; +static gboolean DO_RESOURCE = FALSE; +static gboolean DO_ELECT_DC = FALSE; +static gboolean DO_WHOIS_DC = FALSE; +static gboolean DO_NODE_LIST = FALSE; static gboolean BE_SILENT = FALSE; +static gboolean DO_RESOURCE_LIST = FALSE; +static const char *crmd_operation = NULL; static char *dest_node = NULL; static crm_exit_t exit_code = CRM_EX_OK; +static const char *sys_to = NULL; static pcmk__cli_option_t long_options[] = { // long option, argument type, storage, short option, description, flags @@ -87,15 +92,6 @@ static pcmk__cli_option_t long_options[] = { pcmk__option_default 
}, { - "pacemakerd", no_argument, NULL, 'P', - "Display the status of local pacemakerd.", pcmk__option_default - }, - { - "-spacer-", no_argument, NULL, '-', - "\n\tResult is the state of the sub-daemons watched by pacemakerd.\n", - pcmk__option_default - }, - { "dc_lookup", no_argument, NULL, 'D', "Display the uname of the node co-ordinating the cluster.", pcmk__option_default @@ -135,232 +131,28 @@ static pcmk__cli_option_t long_options[] = { }, { "bash-export", no_argument, NULL, 'B', - "Display nodes as shell commands of the form 'export uname=uuid' " - "(valid with -N/--nodes)", - pcmk__option_default - }, - { - "ipc-name", required_argument, NULL, 'i', - "Name to use for ipc instead of 'crmadmin' (with -P/--pacemakerd).", + "Create Bash export entries of the form 'export uname=uuid'\n", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', - "\nNotes:", pcmk__option_default + "Notes:", pcmk__option_default }, { "-spacer-", no_argument, NULL, '-', - "\nThe -K and -E commands do not work and may be removed in a future " - "version.", + " The -K and -E commands are rarely used and may be removed in " + "future versions.", pcmk__option_default }, { 0, 0, 0, 0 } }; -static void -quit_main_loop(crm_exit_t ec) -{ - exit_code = ec; - if (mainloop != NULL) { - GMainLoop *mloop = mainloop; - - mainloop = NULL; // Don't re-enter this block - pcmk_quit_main_loop(mloop, 10); - g_main_loop_unref(mloop); - } -} - -static void -controller_event_cb(pcmk_ipc_api_t *controld_api, - enum pcmk_ipc_event event_type, crm_exit_t status, - void *event_data, void *user_data) -{ - pcmk_controld_api_reply_t *reply = event_data; - - switch (event_type) { - case pcmk_ipc_event_disconnect: - if (exit_code == CRM_EX_DISCONNECT) { // Unexpected - fprintf(stderr, "error: Lost connection to controller\n"); - } - goto done; - break; - - case pcmk_ipc_event_reply: - break; - - default: - return; - } - - if (message_timer_id != 0) { - g_source_remove(message_timer_id); - message_timer_id = 0; - } - - if (status != CRM_EX_OK) { - fprintf(stderr, "error: Bad reply from controller: %s", - crm_exit_str(status)); - exit_code = status; - goto done; - } - - if (reply->reply_type != pcmk_controld_reply_ping) { - fprintf(stderr, "error: Unknown reply type %d from controller\n", - reply->reply_type); - goto done; - } - - // Parse desired information from reply - switch (command) { - case cmd_health: - printf("Status of %s@%s: %s (%s)\n", - reply->data.ping.sys_from, - reply->host_from, - reply->data.ping.fsa_state, - reply->data.ping.result); - if (BE_SILENT && (reply->data.ping.fsa_state != NULL)) { - fprintf(stderr, "%s\n", reply->data.ping.fsa_state); - } - exit_code = CRM_EX_OK; - break; - - case cmd_whois_dc: - printf("Designated Controller is: %s\n", reply->host_from); - if (BE_SILENT && (reply->host_from != NULL)) { - fprintf(stderr, "%s\n", reply->host_from); - } - exit_code = CRM_EX_OK; - break; - - default: // Not really possible here - exit_code = CRM_EX_SOFTWARE; - break; - } - -done: - pcmk_disconnect_ipc(controld_api); - quit_main_loop(exit_code); -} - -static void -pacemakerd_event_cb(pcmk_ipc_api_t *pacemakerd_api, - enum pcmk_ipc_event event_type, crm_exit_t status, - void *event_data, void *user_data) -{ - pcmk_pacemakerd_api_reply_t *reply = event_data; - - switch (event_type) { - case pcmk_ipc_event_disconnect: - if (exit_code == CRM_EX_DISCONNECT) { // Unexpected - fprintf(stderr, "error: Lost connection to pacemakerd\n"); - } - goto done; - break; - - case pcmk_ipc_event_reply: - break; - - default: - 
return; - } - - if (message_timer_id != 0) { - g_source_remove(message_timer_id); - message_timer_id = 0; - } - - if (status != CRM_EX_OK) { - fprintf(stderr, "error: Bad reply from pacemakerd: %s", - crm_exit_str(status)); - exit_code = status; - goto done; - } - - if (reply->reply_type != pcmk_pacemakerd_reply_ping) { - fprintf(stderr, "error: Unknown reply type %d from pacemakerd\n", - reply->reply_type); - goto done; - } - - // Parse desired information from reply - switch (command) { - case cmd_pacemakerd_health: - { - crm_time_t *crm_when = crm_time_new(NULL); - char *pinged_buf = NULL; - - crm_time_set_timet(crm_when, &reply->data.ping.last_good); - pinged_buf = crm_time_as_string(crm_when, - crm_time_log_date | crm_time_log_timeofday | - crm_time_log_with_timezone); - - printf("Status of %s: '%s' %s %s\n", - reply->data.ping.sys_from, - (reply->data.ping.status == pcmk_rc_ok)? - pcmk_pacemakerd_api_daemon_state_enum2text( - reply->data.ping.state):"query failed", - (reply->data.ping.status == pcmk_rc_ok)?"last updated":"", - (reply->data.ping.status == pcmk_rc_ok)?pinged_buf:""); - if (BE_SILENT && - (reply->data.ping.state != pcmk_pacemakerd_state_invalid)) { - fprintf(stderr, "%s\n", - (reply->data.ping.status == pcmk_rc_ok)? - pcmk_pacemakerd_api_daemon_state_enum2text( - reply->data.ping.state): - "query failed"); - } - exit_code = CRM_EX_OK; - free(pinged_buf); - } - break; - - default: // Not really possible here - exit_code = CRM_EX_SOFTWARE; - break; - } - -done: - pcmk_disconnect_ipc(pacemakerd_api); - quit_main_loop(exit_code); -} - -// \return Standard Pacemaker return code -static int -list_nodes() -{ - cib_t *the_cib = cib_new(); - xmlNode *output = NULL; - int rc; - - if (the_cib == NULL) { - return ENOMEM; - } - rc = the_cib->cmds->signon(the_cib, crm_system_name, cib_command); - if (rc != pcmk_ok) { - return pcmk_legacy2rc(rc); - } - - rc = the_cib->cmds->query(the_cib, NULL, &output, - cib_scope_local | cib_sync_call); - if (rc == pcmk_ok) { - do_find_node_list(output); - free_xml(output); - } - the_cib->cmds->signoff(the_cib); - return pcmk_legacy2rc(rc); -} - int main(int argc, char **argv) { int option_index = 0; int argerr = 0; int flag; - int rc; - pcmk_ipc_api_t *controld_api = NULL; - pcmk_ipc_api_t *pacemakerd_api = NULL; - bool need_controld_api = true; - bool need_pacemakerd_api = false; crm_log_cli_init("crmadmin"); pcmk__set_cli_options(NULL, " [options]", long_options, @@ -380,58 +172,44 @@ main(int argc, char **argv) crm_bump_log_level(argc, argv); break; case 't': - message_timeout_ms = (guint) atoi(optarg); + message_timeout_ms = atoi(optarg); if (message_timeout_ms < 1) { - message_timeout_ms = DEFAULT_MESSAGE_TIMEOUT_MS; + message_timeout_ms = 30 * 1000; } break; - case 'i': - ipc_name = strdup(optarg); - break; + case '$': case '?': pcmk__cli_help(flag, CRM_EX_OK); break; case 'D': - command = cmd_whois_dc; + DO_WHOIS_DC = TRUE; break; case 'B': BASH_EXPORT = TRUE; break; case 'K': - command = cmd_shutdown; + DO_RESET = TRUE; crm_trace("Option %c => %s", flag, optarg); - if (dest_node != NULL) { - free(dest_node); - } dest_node = strdup(optarg); + crmd_operation = CRM_OP_LOCAL_SHUTDOWN; break; case 'q': BE_SILENT = TRUE; break; - case 'P': - command = cmd_pacemakerd_health; - need_pacemakerd_api = true; - need_controld_api = false; - break; case 'S': - command = cmd_health; + DO_HEALTH = TRUE; crm_trace("Option %c => %s", flag, optarg); - if (dest_node != NULL) { - free(dest_node); - } dest_node = strdup(optarg); break; case 'E': - command = 
cmd_elect_dc; + DO_ELECT_DC = TRUE; break; case 'N': - command = cmd_list_nodes; - need_controld_api = false; + DO_NODE_LIST = TRUE; break; case 'H': - fprintf(stderr, "Cluster-wide health option not supported\n"); - ++argerr; + DO_HEALTH = TRUE; break; default: printf("Argument code 0%o (%c) is not (?yet?) supported\n", flag, flag); @@ -451,164 +229,304 @@ main(int argc, char **argv) ++argerr; } - if (command == cmd_none) { - fprintf(stderr, "error: Must specify a command option\n\n"); - ++argerr; - } - if (argerr) { pcmk__cli_help('?', CRM_EX_USAGE); } - // Connect to the controller if needed - if (need_controld_api) { - rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld); - if (controld_api == NULL) { - fprintf(stderr, "error: Could not connect to controller: %s\n", - pcmk_rc_str(rc)); - exit_code = pcmk_rc2exitc(rc); - goto done; - } - pcmk_register_ipc_callback(controld_api, controller_event_cb, NULL); - rc = pcmk_connect_ipc(controld_api, pcmk_ipc_dispatch_main); - if (rc != pcmk_rc_ok) { - fprintf(stderr, "error: Could not connect to controller: %s\n", - pcmk_rc_str(rc)); - exit_code = pcmk_rc2exitc(rc); - goto done; + if (do_init()) { + int res = 0; + + res = do_work(); + if (res > 0) { + /* wait for the reply by creating a mainloop and running it until + * the callbacks are invoked... + */ + mainloop = g_main_loop_new(NULL, FALSE); + crm_trace("Waiting for %d replies from the local CRM", expected_responses); + + message_timer_id = g_timeout_add(message_timeout_ms, admin_message_timeout, NULL); + + g_main_loop_run(mainloop); + + } else if (res < 0) { + crm_err("No message to send"); + exit_code = CRM_EX_ERROR; } + } else { + crm_warn("Init failed, could not perform requested operations"); + exit_code = CRM_EX_UNAVAILABLE; } - // Connect to pacemakerd if needed - if (need_pacemakerd_api) { - rc = pcmk_new_ipc_api(&pacemakerd_api, pcmk_ipc_pacemakerd); - if (pacemakerd_api == NULL) { - fprintf(stderr, "error: Could not connect to pacemakerd: %s\n", - pcmk_rc_str(rc)); - exit_code = pcmk_rc2exitc(rc); - goto done; + crm_trace("%s exiting normally", crm_system_name); + return exit_code; +} + +int +do_work(void) +{ + int ret = 1; + + /* construct the request */ + xmlNode *msg_data = NULL; + gboolean all_is_good = TRUE; + + if (DO_HEALTH == TRUE) { + crm_trace("Querying the system"); + + sys_to = CRM_SYSTEM_DC; + + if (dest_node != NULL) { + sys_to = CRM_SYSTEM_CRMD; + crmd_operation = CRM_OP_PING; + + if (BE_VERBOSE) { + expected_responses = 1; + } + + } else { + crm_info("Cluster-wide health not available yet"); + all_is_good = FALSE; } - pcmk_register_ipc_callback(pacemakerd_api, pacemakerd_event_cb, NULL); - rc = pcmk_connect_ipc(pacemakerd_api, pcmk_ipc_dispatch_main); - if (rc != pcmk_rc_ok) { - fprintf(stderr, "error: Could not connect to pacemakerd: %s\n", - pcmk_rc_str(rc)); - exit_code = pcmk_rc2exitc(rc); - goto done; + + } else if (DO_ELECT_DC) { + /* tell the local node to initiate an election */ + + dest_node = NULL; + sys_to = CRM_SYSTEM_CRMD; + crmd_operation = CRM_OP_VOTE; + ret = 0; /* no return message */ + + } else if (DO_WHOIS_DC) { + dest_node = NULL; + sys_to = CRM_SYSTEM_DC; + crmd_operation = CRM_OP_PING; + + } else if (DO_NODE_LIST) { + + cib_t *the_cib = cib_new(); + xmlNode *output = NULL; + + int rc = the_cib->cmds->signon(the_cib, crm_system_name, cib_command); + + if (rc != pcmk_ok) { + fprintf(stderr, "Could not connect to CIB: %s\n", + pcmk_strerror(rc)); + return -1; + } + + rc = the_cib->cmds->query(the_cib, NULL, &output, cib_scope_local | 
cib_sync_call); + if(rc == pcmk_ok) { + do_find_node_list(output); + + free_xml(output); } + the_cib->cmds->signoff(the_cib); + crm_exit(crm_errno2exit(rc)); + + } else if (DO_RESET) { + /* tell dest_node to initiate the shutdown procedure + * + * if dest_node is NULL, the request will be sent to the + * local node + */ + sys_to = CRM_SYSTEM_CRMD; + ret = 0; /* no return message */ + + } else { + crm_err("Unknown options"); + all_is_good = FALSE; + } + + if (all_is_good == FALSE) { + crm_err("Creation of request failed. No message to send"); + return -1; + } + +/* send it */ + if (crmd_channel == NULL) { + crm_err("The IPC connection is not valid, cannot send anything"); + return -1; + } + + if (sys_to == NULL) { + if (dest_node != NULL) { + sys_to = CRM_SYSTEM_CRMD; + } else { + sys_to = CRM_SYSTEM_DC; + } + } + + { + xmlNode *cmd = create_request(crmd_operation, msg_data, dest_node, sys_to, + crm_system_name, admin_uuid); + + crm_ipc_send(crmd_channel, cmd, 0, 0, NULL); + free_xml(cmd); } - if (do_work(controld_api?controld_api:pacemakerd_api)) { - // A reply is needed from controller, so run main loop to get it - exit_code = CRM_EX_DISCONNECT; // For unexpected disconnects - mainloop = g_main_loop_new(NULL, FALSE); - message_timer_id = g_timeout_add(message_timeout_ms, - admin_message_timeout, NULL); - g_main_loop_run(mainloop); + return ret; +} + +void +crmadmin_ipc_connection_destroy(gpointer user_data) +{ + crm_err("Connection to controller was terminated"); + if (mainloop) { + g_main_loop_quit(mainloop); + } else { + crm_exit(CRM_EX_DISCONNECT); } +} + +struct ipc_client_callbacks crm_callbacks = { + .dispatch = admin_msg_callback, + .destroy = crmadmin_ipc_connection_destroy +}; -done: +gboolean +do_init(void) +{ + mainloop_io_t *source = + mainloop_add_ipc_client(CRM_SYSTEM_CRMD, G_PRIORITY_DEFAULT, 0, NULL, &crm_callbacks); - if (controld_api != NULL) { - pcmk_ipc_api_t *capi = controld_api; - controld_api = NULL; // Ensure we can't free this twice - pcmk_free_ipc_api(capi); + admin_uuid = pcmk__getpid_s(); + + crmd_channel = mainloop_get_ipc_client(source); + + if (DO_RESOURCE || DO_RESOURCE_LIST || DO_NODE_LIST) { + return TRUE; + + } else if (crmd_channel != NULL) { + xmlNode *xml = create_hello_message(admin_uuid, crm_system_name, "0", "1"); + + crm_ipc_send(crmd_channel, xml, 0, 0, NULL); + return TRUE; + } + return FALSE; +} + +static bool +validate_crm_message(xmlNode * msg, const char *sys, const char *uuid, const char *msg_type) +{ + const char *type = NULL; + const char *crm_msg_reference = NULL; + + if (msg == NULL) { + return FALSE; } - if (pacemakerd_api != NULL) { - pcmk_ipc_api_t *capi = pacemakerd_api; - pacemakerd_api = NULL; // Ensure we can't free this twice - pcmk_free_ipc_api(capi); + type = crm_element_value(msg, F_CRM_MSG_TYPE); + crm_msg_reference = crm_element_value(msg, XML_ATTR_REFERENCE); + + if (type == NULL) { + crm_info("No message type defined."); + return FALSE; + + } else if (msg_type != NULL && strcasecmp(msg_type, type) != 0) { + crm_info("Expecting a (%s) message but received a (%s).", msg_type, type); + return FALSE; } - if (mainloop != NULL) { - g_main_loop_unref(mainloop); - mainloop = NULL; + if (crm_msg_reference == NULL) { + crm_info("No message crm_msg_reference defined."); + return FALSE; } - return crm_exit(exit_code); + + return TRUE; } -// \return True if reply from controller is needed -bool -do_work(pcmk_ipc_api_t *api) +int +admin_msg_callback(const char *buffer, ssize_t length, gpointer userdata) { - bool need_reply = false; - 
+    static int received_responses = 0;
+    xmlNode *xml = string2xml(buffer);
 
-    switch (command) {
-        case cmd_shutdown:
-            rc = pcmk_controld_api_shutdown(api, dest_node);
-            break;
+    received_responses++;
+    g_source_remove(message_timer_id);
 
-        case cmd_health:   // dest_node != NULL
-        case cmd_whois_dc: // dest_node == NULL
-            rc = pcmk_controld_api_ping(api, dest_node);
-            need_reply = true;
-            break;
+    crm_log_xml_trace(xml, "ipc");
 
-        case cmd_elect_dc:
-            rc = pcmk_controld_api_start_election(api);
-            break;
+    if (xml == NULL) {
+        crm_info("XML in IPC message was not valid... " "discarding.");
 
-        case cmd_list_nodes:
-            rc = list_nodes();
-            break;
+    } else if (validate_crm_message(xml, crm_system_name, admin_uuid, XML_ATTR_RESPONSE) == FALSE) {
+        crm_trace("Message was not a CRM response. Discarding.");
 
-        case cmd_pacemakerd_health:
-            rc = pcmk_pacemakerd_api_ping(api, ipc_name);
-            need_reply = true;
-            break;
+    } else if (DO_HEALTH) {
+        xmlNode *data = get_message_xml(xml, F_CRM_DATA);
+        const char *state = crm_element_value(data, XML_PING_ATTR_CRMDSTATE);
 
-        case cmd_none: // not actually possible here
-            break;
+        printf("Status of %s@%s: %s (%s)\n",
+               crm_element_value(data, XML_PING_ATTR_SYSFROM),
+               crm_element_value(xml, F_CRM_HOST_FROM),
+               state, crm_element_value(data, XML_PING_ATTR_STATUS));
+
+        if (BE_SILENT && state != NULL) {
+            fprintf(stderr, "%s\n", state);
+        }
+
+    } else if (DO_WHOIS_DC) {
+        const char *dc = crm_element_value(xml, F_CRM_HOST_FROM);
+
+        printf("Designated Controller is: %s\n", dc);
+        if (BE_SILENT && dc != NULL) {
+            fprintf(stderr, "%s\n", dc);
+        }
+        crm_exit(CRM_EX_OK);
     }
-    if (rc != pcmk_rc_ok) {
-        fprintf(stderr, "error: Command failed: %s", pcmk_rc_str(rc));
-        exit_code = pcmk_rc2exitc(rc);
+
+    free_xml(xml);
+
+    if (received_responses >= expected_responses) {
+        crm_trace("Received expected number (%d) of replies, exiting normally",
+                  expected_responses);
+        crm_exit(CRM_EX_OK);
     }
-    return need_reply;
+
+    message_timer_id = g_timeout_add(message_timeout_ms, admin_message_timeout, NULL);
+    return 0;
 }
 
 gboolean
 admin_message_timeout(gpointer data)
 {
-    fprintf(stderr,
-            "error: No reply received from controller before timeout (%dms)\n",
-            message_timeout_ms);
-    message_timer_id = 0;
-    quit_main_loop(CRM_EX_TIMEOUT);
-    return FALSE; // Tells glib to remove source
+    fprintf(stderr, "No messages received in %d seconds.. aborting\n",
aborting\n", + (int)message_timeout_ms / 1000); + crm_err("No messages received in %d seconds", (int)message_timeout_ms / 1000); + exit_code = CRM_EX_TIMEOUT; + g_main_loop_quit(mainloop); + return FALSE; } -void +int do_find_node_list(xmlNode * xml_node) { int found = 0; xmlNode *node = NULL; xmlNode *nodes = get_object_root(XML_CIB_TAG_NODES, xml_node); - for (node = first_named_child(nodes, XML_CIB_TAG_NODE); node != NULL; - node = crm_next_same_xml(node)) { + for (node = __xml_first_child_element(nodes); node != NULL; + node = __xml_next_element(node)) { - if (BASH_EXPORT) { - printf("export %s=%s\n", - crm_element_value(node, XML_ATTR_UNAME), - crm_element_value(node, XML_ATTR_ID)); - } else { - const char *node_type = crm_element_value(node, XML_ATTR_TYPE); + if (crm_str_eq((const char *)node->name, XML_CIB_TAG_NODE, TRUE)) { - if (node_type == NULL) { - node_type = "member"; + if (BASH_EXPORT) { + printf("export %s=%s\n", + crm_element_value(node, XML_ATTR_UNAME), + crm_element_value(node, XML_ATTR_ID)); + } else { + printf("%s node: %s (%s)\n", + crm_element_value(node, XML_ATTR_TYPE), + crm_element_value(node, XML_ATTR_UNAME), + crm_element_value(node, XML_ATTR_ID)); } - printf("%s node: %s (%s)\n", node_type, - crm_element_value(node, XML_ATTR_UNAME), - crm_element_value(node, XML_ATTR_ID)); + found++; } - found++; } - // @TODO List Pacemaker Remote nodes that don't have a entry if (found == 0) { - printf("No nodes configured\n"); + printf("NO nodes configured\n"); } + + return found; } diff --git a/xml/constraints-next.rng b/xml/constraints-next.rng index 1fa3e75..7e0d98e 100644 --- a/xml/constraints-next.rng +++ b/xml/constraints-next.rng @@ -43,7 +43,7 @@ - + @@ -255,7 +255,7 @@ - + diff --git a/xml/nodes-3.4.rng b/xml/nodes-3.4.rng deleted file mode 100644 index 0132c72..0000000 --- a/xml/nodes-3.4.rng +++ /dev/null @@ -1,44 +0,0 @@ - - - - - - - - - - - - - - - - member - ping - remote - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/xml/nvset-3.4.rng b/xml/nvset-3.4.rng deleted file mode 100644 index 91a7d23..0000000 --- a/xml/nvset-3.4.rng +++ /dev/null @@ -1,63 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/xml/options-3.4.rng b/xml/options-3.4.rng deleted file mode 100644 index 22330d8..0000000 --- a/xml/options-3.4.rng +++ /dev/null @@ -1,111 +0,0 @@ - - - - - - - - - - - - cluster-infrastructure - - - - - - heartbeat - openais - classic openais - classic openais (with plugin) - cman - - - - - - - - - - - cluster-infrastructure - cluster_recheck_interval - dc_deadtime - default-action-timeout - default_action_timeout - default-migration-threshold - default_migration_threshold - default-resource-failure-stickiness - default_resource_failure_stickiness - default-resource-stickiness - default_resource_stickiness - election_timeout - expected-quorum-votes - is-managed-default - is_managed_default - no_quorum_policy - notification-agent - notification-recipient - remove_after_stop - shutdown_escalation - startup_fencing - stonith_action - stonith_enabled - stop_orphan_actions - stop_orphan_resources - symmetric_cluster - transition_idle_timeout - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/xml/resources-3.4.rng b/xml/resources-3.4.rng deleted file mode 100644 index 887dc1c..0000000 --- a/xml/resources-3.4.rng +++ /dev/null @@ -1,426 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - isolation - 
diff --git a/xml/constraints-next.rng b/xml/constraints-next.rng
index 1fa3e75..7e0d98e 100644
--- a/xml/constraints-next.rng
+++ b/xml/constraints-next.rng
@@ -43,7 +43,7 @@
 [one-line RNG change; the XML markup did not survive extraction]
@@ -255,7 +255,7 @@
 [one-line RNG change; the XML markup did not survive extraction]
diff --git a/xml/nodes-3.4.rng b/xml/nodes-3.4.rng
deleted file mode 100644
index 0132c72..0000000
--- a/xml/nodes-3.4.rng
+++ /dev/null
@@ -1,44 +0,0 @@
 [44 deleted RNG lines; markup lost in extraction -- surviving values: member, ping, remote]
diff --git a/xml/nvset-3.4.rng b/xml/nvset-3.4.rng
deleted file mode 100644
index 91a7d23..0000000
--- a/xml/nvset-3.4.rng
+++ /dev/null
@@ -1,63 +0,0 @@
 [63 deleted RNG lines; markup lost in extraction]
diff --git a/xml/options-3.4.rng b/xml/options-3.4.rng
deleted file mode 100644
index 22330d8..0000000
--- a/xml/options-3.4.rng
+++ /dev/null
@@ -1,111 +0,0 @@
 [111 deleted RNG lines; markup lost in extraction -- surviving values: cluster-infrastructure; heartbeat, openais, classic openais, classic openais (with plugin), cman; and the option names cluster-infrastructure, cluster_recheck_interval, dc_deadtime, default-action-timeout, default_action_timeout, default-migration-threshold, default_migration_threshold, default-resource-failure-stickiness, default_resource_failure_stickiness, default-resource-stickiness, default_resource_stickiness, election_timeout, expected-quorum-votes, is-managed-default, is_managed_default, no_quorum_policy, notification-agent, notification-recipient, remove_after_stop, shutdown_escalation, startup_fencing, stonith_action, stonith_enabled, stop_orphan_actions, stop_orphan_resources, symmetric_cluster, transition_idle_timeout]
diff --git a/xml/resources-3.4.rng b/xml/resources-3.4.rng
deleted file mode 100644
index 887dc1c..0000000
--- a/xml/resources-3.4.rng
+++ /dev/null
@@ -1,426 +0,0 @@
 [426 deleted RNG lines; markup lost in extraction -- surviving values: isolation, isolation-host, isolation-instance, isolation-wrapper; the pattern ([0-9\-]+); requires; Stopped, Started, Slave, Master; ignore, block, demote, stop, restart, standby, fence, restart-container; ocf; lsb, heartbeat, stonith, upstart, service, systemd, nagios]
diff --git a/xml/rule-3.4.rng b/xml/rule-3.4.rng
deleted file mode 100644
index 5d1daf0..0000000
--- a/xml/rule-3.4.rng
+++ /dev/null
@@ -1,165 +0,0 @@
 [165 deleted RNG lines; markup lost in extraction -- surviving values: or, and; lt, gt, lte, gte, eq, ne, defined, not_defined; string, number, version; literal, param, meta; in_range; gt; lt; date_spec]
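The DO_NODE_LIST branch restored in the crmadmin.c hunks above bypasses the controller entirely and reads the node list straight from the CIB. Here is that query pattern in isolation, under the same assumptions as the previous sketch; get_object_root, __xml_first_child_element, and crm_str_eq are era-specific helpers taken from the diff, and the program name string is made up for the example.

/* Sketch only: synchronous CIB query and <nodes> walk, as in the
 * restored DO_NODE_LIST / do_find_node_list() code above. */
#include <stdio.h>
#include <crm_internal.h>
#include <crm/cib.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>

int
main(void)
{
    cib_t *cib = cib_new();
    xmlNode *output = NULL;
    int rc = cib->cmds->signon(cib, "cib-sketch", cib_command);

    if (rc != pcmk_ok) {
        fprintf(stderr, "Could not connect to CIB: %s\n", pcmk_strerror(rc));
        return 1;
    }

    /* Fetch the whole local CIB synchronously */
    rc = cib->cmds->query(cib, NULL, &output, cib_scope_local | cib_sync_call);
    if (rc == pcmk_ok) {
        xmlNode *nodes = get_object_root(XML_CIB_TAG_NODES, output);
        xmlNode *node = NULL;

        /* Print uname and id for each <node> entry */
        for (node = __xml_first_child_element(nodes); node != NULL;
             node = __xml_next_element(node)) {
            if (crm_str_eq((const char *)node->name, XML_CIB_TAG_NODE, TRUE)) {
                printf("%s (%s)\n",
                       crm_element_value(node, XML_ATTR_UNAME),
                       crm_element_value(node, XML_ATTR_ID));
            }
        }
        free_xml(output);
    }
    cib->cmds->signoff(cib);
    return (rc == pcmk_ok) ? 0 : 1;
}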