/*
 * Copyright 2004-2019 the Pacemaker project contributors
 *
 * The version control history for this file may have further details.
 *
 * This source code is licensed under the GNU General Public License version 2
 * or later (GPLv2+) WITHOUT ANY WARRANTY.
 */
|
|
rpm-build |
3ee90c |
#include <crm_internal.h>
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
#include <sys/param.h>
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
#include <crm/crm.h>
|
|
rpm-build |
3ee90c |
#include <crm/cib.h>
|
|
rpm-build |
3ee90c |
#include <crm/msg_xml.h>
|
|
rpm-build |
3ee90c |
#include <crm/common/xml.h>
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
#include <glib.h>
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
#include <crm/pengine/status.h>
|
|
rpm-build |
3ee90c |
#include <pacemaker-internal.h>
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
CRM_TRACE_INIT_DATA(pe_allocate);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
void set_alloc_actions(pe_working_set_t * data_set);
|
|
rpm-build |
3ee90c |
extern void ReloadRsc(resource_t * rsc, node_t *node, pe_working_set_t * data_set);
|
|
rpm-build |
3ee90c |
extern gboolean DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set);
|
|
rpm-build |
3ee90c |
static void apply_remote_node_ordering(pe_working_set_t *data_set);
|
|
rpm-build |
3ee90c |
static enum remote_connection_state get_remote_node_state(pe_node_t *node);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
enum remote_connection_state {
|
|
rpm-build |
3ee90c |
remote_state_unknown = 0,
|
|
rpm-build |
3ee90c |
remote_state_alive = 1,
|
|
rpm-build |
3ee90c |
remote_state_resting = 2,
|
|
rpm-build |
3ee90c |
remote_state_failed = 3,
|
|
rpm-build |
3ee90c |
remote_state_stopped = 4
|
|
rpm-build |
3ee90c |
};
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
static const char *
|
|
rpm-build |
3ee90c |
state2text(enum remote_connection_state state)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
switch (state) {
|
|
rpm-build |
3ee90c |
case remote_state_unknown:
|
|
rpm-build |
3ee90c |
return "unknown";
|
|
rpm-build |
3ee90c |
case remote_state_alive:
|
|
rpm-build |
3ee90c |
return "alive";
|
|
rpm-build |
3ee90c |
case remote_state_resting:
|
|
rpm-build |
3ee90c |
return "resting";
|
|
rpm-build |
3ee90c |
case remote_state_failed:
|
|
rpm-build |
3ee90c |
return "failed";
|
|
rpm-build |
3ee90c |
case remote_state_stopped:
|
|
rpm-build |
3ee90c |
return "stopped";
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
return "impossible";
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
resource_alloc_functions_t resource_class_alloc_functions[] = {
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
native_merge_weights,
|
|
rpm-build |
3ee90c |
native_color,
|
|
rpm-build |
3ee90c |
native_create_actions,
|
|
rpm-build |
3ee90c |
native_create_probe,
|
|
rpm-build |
3ee90c |
native_internal_constraints,
|
|
rpm-build |
3ee90c |
native_rsc_colocation_lh,
|
|
rpm-build |
3ee90c |
native_rsc_colocation_rh,
|
|
rpm-build |
3ee90c |
native_rsc_location,
|
|
rpm-build |
3ee90c |
native_action_flags,
|
|
rpm-build |
3ee90c |
native_update_actions,
|
|
rpm-build |
3ee90c |
native_expand,
|
|
rpm-build |
3ee90c |
native_append_meta,
|
|
rpm-build |
3ee90c |
},
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
group_merge_weights,
|
|
rpm-build |
3ee90c |
group_color,
|
|
rpm-build |
3ee90c |
group_create_actions,
|
|
rpm-build |
3ee90c |
native_create_probe,
|
|
rpm-build |
3ee90c |
group_internal_constraints,
|
|
rpm-build |
3ee90c |
group_rsc_colocation_lh,
|
|
rpm-build |
3ee90c |
group_rsc_colocation_rh,
|
|
rpm-build |
3ee90c |
group_rsc_location,
|
|
rpm-build |
3ee90c |
group_action_flags,
|
|
rpm-build |
3ee90c |
group_update_actions,
|
|
rpm-build |
3ee90c |
group_expand,
|
|
rpm-build |
3ee90c |
group_append_meta,
|
|
rpm-build |
3ee90c |
},
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
clone_merge_weights,
|
|
rpm-build |
3ee90c |
clone_color,
|
|
rpm-build |
3ee90c |
clone_create_actions,
|
|
rpm-build |
3ee90c |
clone_create_probe,
|
|
rpm-build |
3ee90c |
clone_internal_constraints,
|
|
rpm-build |
3ee90c |
clone_rsc_colocation_lh,
|
|
rpm-build |
3ee90c |
clone_rsc_colocation_rh,
|
|
rpm-build |
3ee90c |
clone_rsc_location,
|
|
rpm-build |
3ee90c |
clone_action_flags,
|
|
rpm-build |
3ee90c |
pcmk__multi_update_actions,
|
|
rpm-build |
3ee90c |
clone_expand,
|
|
rpm-build |
3ee90c |
clone_append_meta,
|
|
rpm-build |
3ee90c |
},
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
pcmk__bundle_merge_weights,
|
|
rpm-build |
3ee90c |
pcmk__bundle_color,
|
|
rpm-build |
3ee90c |
pcmk__bundle_create_actions,
|
|
rpm-build |
3ee90c |
pcmk__bundle_create_probe,
|
|
rpm-build |
3ee90c |
pcmk__bundle_internal_constraints,
|
|
rpm-build |
3ee90c |
pcmk__bundle_rsc_colocation_lh,
|
|
rpm-build |
3ee90c |
pcmk__bundle_rsc_colocation_rh,
|
|
rpm-build |
3ee90c |
pcmk__bundle_rsc_location,
|
|
rpm-build |
3ee90c |
pcmk__bundle_action_flags,
|
|
rpm-build |
3ee90c |
pcmk__multi_update_actions,
|
|
rpm-build |
3ee90c |
pcmk__bundle_expand,
|
|
rpm-build |
3ee90c |
pcmk__bundle_append_meta,
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
};
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
gboolean
|
|
rpm-build |
3ee90c |
update_action_flags(action_t * action, enum pe_action_flags flags, const char *source, int line)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
static unsigned long calls = 0;
|
|
rpm-build |
3ee90c |
gboolean changed = FALSE;
|
|
rpm-build |
3ee90c |
gboolean clear = is_set(flags, pe_action_clear);
|
|
rpm-build |
3ee90c |
enum pe_action_flags last = action->flags;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (clear) {
|
|
rpm-build |
3ee90c |
action->flags = crm_clear_bit(source, line, action->uuid, action->flags, flags);
|
|
rpm-build |
3ee90c |
} else {
|
|
rpm-build |
3ee90c |
action->flags = crm_set_bit(source, line, action->uuid, action->flags, flags);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (last != action->flags) {
|
|
rpm-build |
3ee90c |
calls++;
|
|
rpm-build |
3ee90c |
changed = TRUE;
|
|
rpm-build |
3ee90c |
/* Useful for tracking down _who_ changed a specific flag */
|
|
rpm-build |
3ee90c |
/* CRM_ASSERT(calls != 534); */
|
|
rpm-build |
3ee90c |
clear_bit(flags, pe_action_clear);
|
|
rpm-build |
3ee90c |
crm_trace("%s on %s: %sset flags 0x%.6x (was 0x%.6x, now 0x%.6x, %lu, %s)",
|
|
rpm-build |
3ee90c |
action->uuid, action->node ? action->node->details->uname : "[none]",
|
|
rpm-build |
3ee90c |
clear ? "un-" : "", flags, last, action->flags, calls, source);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
return changed;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
static gboolean
|
|
rpm-build |
3ee90c |
check_rsc_parameters(resource_t * rsc, node_t * node, xmlNode * rsc_entry,
|
|
rpm-build |
3ee90c |
gboolean active_here, pe_working_set_t * data_set)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
int attr_lpc = 0;
|
|
rpm-build |
3ee90c |
gboolean force_restart = FALSE;
|
|
rpm-build |
3ee90c |
gboolean delete_resource = FALSE;
|
|
rpm-build |
3ee90c |
gboolean changed = FALSE;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
const char *value = NULL;
|
|
rpm-build |
3ee90c |
const char *old_value = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
const char *attr_list[] = {
|
|
rpm-build |
3ee90c |
XML_ATTR_TYPE,
|
|
rpm-build |
3ee90c |
XML_AGENT_ATTR_CLASS,
|
|
rpm-build |
3ee90c |
XML_AGENT_ATTR_PROVIDER
|
|
rpm-build |
3ee90c |
};
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
for (; attr_lpc < DIMOF(attr_list); attr_lpc++) {
|
|
rpm-build |
3ee90c |
value = crm_element_value(rsc->xml, attr_list[attr_lpc]);
|
|
rpm-build |
3ee90c |
old_value = crm_element_value(rsc_entry, attr_list[attr_lpc]);
|
|
rpm-build |
3ee90c |
if (value == old_value /* i.e. NULL */
|
|
rpm-build |
3ee90c |
|| crm_str_eq(value, old_value, TRUE)) {
|
|
rpm-build |
3ee90c |
continue;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
changed = TRUE;
|
|
rpm-build |
3ee90c |
trigger_unfencing(rsc, node, "Device definition changed", NULL, data_set);
|
|
rpm-build |
3ee90c |
if (active_here) {
|
|
rpm-build |
3ee90c |
force_restart = TRUE;
|
|
rpm-build |
3ee90c |
crm_notice("Forcing restart of %s on %s, %s changed: %s -> %s",
|
|
rpm-build |
3ee90c |
rsc->id, node->details->uname, attr_list[attr_lpc],
|
|
rpm-build |
3ee90c |
crm_str(old_value), crm_str(value));
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
if (force_restart) {
|
|
rpm-build |
3ee90c |
/* make sure the restart happens */
|
|
rpm-build |
3ee90c |
stop_action(rsc, node, FALSE);
|
|
rpm-build |
3ee90c |
set_bit(rsc->flags, pe_rsc_start_pending);
|
|
rpm-build |
3ee90c |
delete_resource = TRUE;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
} else if (changed) {
|
|
rpm-build |
3ee90c |
delete_resource = TRUE;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
return delete_resource;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
static void
|
|
rpm-build |
3ee90c |
CancelXmlOp(resource_t * rsc, xmlNode * xml_op, node_t * active_node,
|
|
rpm-build |
3ee90c |
const char *reason, pe_working_set_t * data_set)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
guint interval_ms = 0;
|
|
rpm-build |
3ee90c |
action_t *cancel = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
const char *task = NULL;
|
|
rpm-build |
3ee90c |
const char *call_id = NULL;
|
|
rpm-build |
3ee90c |
const char *interval_ms_s = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
CRM_CHECK(xml_op != NULL, return);
|
|
rpm-build |
3ee90c |
CRM_CHECK(active_node != NULL, return);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
|
|
rpm-build |
3ee90c |
call_id = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
|
|
rpm-build |
3ee90c |
interval_ms_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
interval_ms = crm_parse_ms(interval_ms_s);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
crm_info("Action " CRM_OP_FMT " on %s will be stopped: %s",
|
|
rpm-build |
3ee90c |
rsc->id, task, interval_ms,
|
|
rpm-build |
3ee90c |
active_node->details->uname, (reason? reason : "unknown"));
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
cancel = pe_cancel_op(rsc, task, interval_ms, active_node, data_set);
|
|
rpm-build |
3ee90c |
add_hash_param(cancel->meta, XML_LRM_ATTR_CALLID, call_id);
|
|
rpm-build |
3ee90c |
custom_action_order(rsc, stop_key(rsc), NULL, rsc, NULL, cancel, pe_order_optional, data_set);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
static gboolean
|
|
rpm-build |
3ee90c |
check_action_definition(resource_t * rsc, node_t * active_node, xmlNode * xml_op,
|
|
rpm-build |
3ee90c |
pe_working_set_t * data_set)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
char *key = NULL;
|
|
rpm-build |
3ee90c |
guint interval_ms = 0;
|
|
rpm-build |
3ee90c |
const char *interval_ms_s = NULL;
|
|
rpm-build |
3ee90c |
const op_digest_cache_t *digest_data = NULL;
|
|
rpm-build |
3ee90c |
gboolean did_change = FALSE;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
|
|
rpm-build |
3ee90c |
const char *digest_secure = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
CRM_CHECK(active_node != NULL, return FALSE);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
interval_ms_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS);
|
|
rpm-build |
3ee90c |
interval_ms = crm_parse_ms(interval_ms_s);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (interval_ms > 0) {
|
|
rpm-build |
3ee90c |
xmlNode *op_match = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* we need to reconstruct the key because of the way we used to construct resource IDs */
|
|
rpm-build |
3ee90c |
key = generate_op_key(rsc->id, task, interval_ms);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
pe_rsc_trace(rsc, "Checking parameters for %s", key);
|
|
rpm-build |
3ee90c |
op_match = find_rsc_op_entry(rsc, key);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (op_match == NULL && is_set(data_set->flags, pe_flag_stop_action_orphans)) {
|
|
rpm-build |
3ee90c |
CancelXmlOp(rsc, xml_op, active_node, "orphan", data_set);
|
|
rpm-build |
3ee90c |
free(key);
|
|
rpm-build |
3ee90c |
return TRUE;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
} else if (op_match == NULL) {
|
|
rpm-build |
3ee90c |
pe_rsc_debug(rsc, "Orphan action detected: %s on %s", key, active_node->details->uname);
|
|
rpm-build |
3ee90c |
free(key);
|
|
rpm-build |
3ee90c |
return TRUE;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
free(key);
|
|
rpm-build |
3ee90c |
key = NULL;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
crm_trace("Testing " CRM_OP_FMT " on %s",
|
|
rpm-build |
3ee90c |
rsc->id, task, interval_ms, active_node->details->uname);
|
|
rpm-build |
3ee90c |
if ((interval_ms == 0) && safe_str_eq(task, RSC_STATUS)) {
|
|
rpm-build |
3ee90c |
/* Reload based on the start action not a probe */
|
|
rpm-build |
3ee90c |
task = RSC_START;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
} else if ((interval_ms == 0) && safe_str_eq(task, RSC_MIGRATED)) {
|
|
rpm-build |
3ee90c |
/* Reload based on the start action not a migrate */
|
|
rpm-build |
3ee90c |
task = RSC_START;
|
|
rpm-build |
3ee90c |
} else if ((interval_ms == 0) && safe_str_eq(task, RSC_PROMOTE)) {
|
|
rpm-build |
3ee90c |
/* Reload based on the start action not a promote */
|
|
rpm-build |
3ee90c |
task = RSC_START;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
digest_data = rsc_action_digest_cmp(rsc, xml_op, active_node, data_set);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if(is_set(data_set->flags, pe_flag_sanitized)) {
|
|
rpm-build |
3ee90c |
digest_secure = crm_element_value(xml_op, XML_LRM_ATTR_SECURE_DIGEST);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if(digest_data->rc != RSC_DIGEST_MATCH
|
|
rpm-build |
3ee90c |
&& digest_secure
|
|
rpm-build |
3ee90c |
&& digest_data->digest_secure_calc
|
|
rpm-build |
3ee90c |
&& strcmp(digest_data->digest_secure_calc, digest_secure) == 0) {
|
|
rpm-build |
3ee90c |
if (is_set(data_set->flags, pe_flag_stdout)) {
|
|
rpm-build |
3ee90c |
printf("Only 'private' parameters to " CRM_OP_FMT " on %s changed: %s\n",
|
|
rpm-build |
3ee90c |
rsc->id, task, interval_ms, active_node->details->uname,
|
|
rpm-build |
3ee90c |
crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
} else if (digest_data->rc == RSC_DIGEST_RESTART) {
|
|
rpm-build |
3ee90c |
/* Changes that force a restart */
|
|
rpm-build |
3ee90c |
pe_action_t *required = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
did_change = TRUE;
|
|
rpm-build |
3ee90c |
key = generate_op_key(rsc->id, task, interval_ms);
|
|
rpm-build |
3ee90c |
crm_log_xml_info(digest_data->params_restart, "params:restart");
|
|
rpm-build |
3ee90c |
required = custom_action(rsc, key, task, NULL, TRUE, TRUE, data_set);
|
|
rpm-build |
3ee90c |
pe_action_set_flag_reason(__FUNCTION__, __LINE__, required, NULL,
|
|
rpm-build |
3ee90c |
"resource definition change", pe_action_optional, TRUE);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
trigger_unfencing(rsc, active_node, "Device parameters changed", NULL, data_set);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
} else if ((digest_data->rc == RSC_DIGEST_ALL) || (digest_data->rc == RSC_DIGEST_UNKNOWN)) {
|
|
rpm-build |
3ee90c |
/* Changes that can potentially be handled by a reload */
|
|
rpm-build |
3ee90c |
const char *digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
did_change = TRUE;
|
|
rpm-build |
3ee90c |
trigger_unfencing(rsc, active_node, "Device parameters changed (reload)", NULL, data_set);
|
|
rpm-build |
3ee90c |
crm_log_xml_info(digest_data->params_all, "params:reload");
|
|
rpm-build |
3ee90c |
key = generate_op_key(rsc->id, task, interval_ms);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (interval_ms > 0) {
|
|
rpm-build |
3ee90c |
action_t *op = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
#if 0
|
|
rpm-build |
3ee90c |
/* Always reload/restart the entire resource */
|
|
rpm-build |
3ee90c |
ReloadRsc(rsc, active_node, data_set);
|
|
rpm-build |
3ee90c |
#else
|
|
rpm-build |
3ee90c |
/* Re-sending the recurring op is sufficient - the old one will be cancelled automatically */
|
|
rpm-build |
3ee90c |
op = custom_action(rsc, key, task, active_node, TRUE, TRUE, data_set);
|
|
rpm-build |
3ee90c |
set_bit(op->flags, pe_action_reschedule);
|
|
rpm-build |
3ee90c |
#endif
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
} else if (digest_restart) {
|
|
rpm-build |
3ee90c |
pe_rsc_trace(rsc, "Reloading '%s' action for resource %s", task, rsc->id);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* Reload this resource */
|
|
rpm-build |
3ee90c |
ReloadRsc(rsc, active_node, data_set);
|
|
rpm-build |
3ee90c |
free(key);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
} else {
|
|
rpm-build |
3ee90c |
pe_action_t *required = NULL;
|
|
rpm-build |
3ee90c |
pe_rsc_trace(rsc, "Resource %s doesn't know how to reload", rsc->id);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* Re-send the start/demote/promote op
|
|
rpm-build |
3ee90c |
* Recurring ops will be detected independently
|
|
rpm-build |
3ee90c |
*/
|
|
rpm-build |
3ee90c |
required = custom_action(rsc, key, task, NULL, TRUE, TRUE, data_set);
|
|
rpm-build |
3ee90c |
pe_action_set_flag_reason(__FUNCTION__, __LINE__, required, NULL,
|
|
rpm-build |
3ee90c |
"resource definition change", pe_action_optional, TRUE);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
return did_change;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/*!
|
|
rpm-build |
3ee90c |
* \internal
|
|
rpm-build |
3ee90c |
* \brief Do deferred action checks after allocation
|
|
rpm-build |
3ee90c |
*
|
|
rpm-build |
3ee90c |
* \param[in] data_set Working set for cluster
|
|
rpm-build |
3ee90c |
*/
|
|
rpm-build |
3ee90c |
static void
|
|
rpm-build |
3ee90c |
check_params(pe_resource_t *rsc, pe_node_t *node, xmlNode *rsc_op,
|
|
rpm-build |
3ee90c |
enum pe_check_parameters check, pe_working_set_t *data_set)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
const char *reason = NULL;
|
|
rpm-build |
3ee90c |
op_digest_cache_t *digest_data = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
switch (check) {
|
|
rpm-build |
3ee90c |
case pe_check_active:
|
|
rpm-build |
3ee90c |
if (check_action_definition(rsc, node, rsc_op, data_set)
|
|
rpm-build |
3ee90c |
&& pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
|
|
rpm-build |
3ee90c |
data_set)) {
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
reason = "action definition changed";
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
break;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
case pe_check_last_failure:
|
|
rpm-build |
3ee90c |
digest_data = rsc_action_digest_cmp(rsc, rsc_op, node, data_set);
|
|
rpm-build |
3ee90c |
switch (digest_data->rc) {
|
|
rpm-build |
3ee90c |
case RSC_DIGEST_UNKNOWN:
|
|
rpm-build |
3ee90c |
crm_trace("Resource %s history entry %s on %s has no digest to compare",
|
|
rpm-build |
3ee90c |
rsc->id, ID(rsc_op), node->details->id);
|
|
rpm-build |
3ee90c |
break;
|
|
rpm-build |
3ee90c |
case RSC_DIGEST_MATCH:
|
|
rpm-build |
3ee90c |
break;
|
|
rpm-build |
3ee90c |
default:
|
|
rpm-build |
3ee90c |
reason = "resource parameters have changed";
|
|
rpm-build |
3ee90c |
break;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
break;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (reason) {
|
|
rpm-build |
3ee90c |
pe__clear_failcount(rsc, node, reason, data_set);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
static void
|
|
rpm-build |
3ee90c |
check_actions_for(xmlNode * rsc_entry, resource_t * rsc, node_t * node, pe_working_set_t * data_set)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
GListPtr gIter = NULL;
|
|
rpm-build |
3ee90c |
int offset = -1;
|
|
rpm-build |
3ee90c |
guint interval_ms = 0;
|
|
rpm-build |
3ee90c |
int stop_index = 0;
|
|
rpm-build |
3ee90c |
int start_index = 0;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
const char *task = NULL;
|
|
rpm-build |
3ee90c |
const char *interval_ms_s = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
xmlNode *rsc_op = NULL;
|
|
rpm-build |
3ee90c |
GListPtr op_list = NULL;
|
|
rpm-build |
3ee90c |
GListPtr sorted_op_list = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
CRM_CHECK(node != NULL, return);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (is_set(rsc->flags, pe_rsc_orphan)) {
|
|
rpm-build |
3ee90c |
resource_t *parent = uber_parent(rsc);
|
|
rpm-build |
3ee90c |
if(parent == NULL
|
|
rpm-build |
3ee90c |
|| pe_rsc_is_clone(parent) == FALSE
|
|
rpm-build |
3ee90c |
|| is_set(parent->flags, pe_rsc_unique)) {
|
|
rpm-build |
3ee90c |
pe_rsc_trace(rsc, "Skipping param check for %s and deleting: orphan", rsc->id);
|
|
rpm-build |
3ee90c |
DeleteRsc(rsc, node, FALSE, data_set);
|
|
rpm-build |
3ee90c |
} else {
|
|
rpm-build |
3ee90c |
pe_rsc_trace(rsc, "Skipping param check for %s (orphan clone)", rsc->id);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
return;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
} else if (pe_find_node_id(rsc->running_on, node->details->id) == NULL) {
|
|
rpm-build |
3ee90c |
if (check_rsc_parameters(rsc, node, rsc_entry, FALSE, data_set)) {
|
|
rpm-build |
3ee90c |
DeleteRsc(rsc, node, FALSE, data_set);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
pe_rsc_trace(rsc, "Skipping param check for %s: no longer active on %s",
|
|
rpm-build |
3ee90c |
rsc->id, node->details->uname);
|
|
rpm-build |
3ee90c |
return;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
pe_rsc_trace(rsc, "Processing %s on %s", rsc->id, node->details->uname);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (check_rsc_parameters(rsc, node, rsc_entry, TRUE, data_set)) {
|
|
rpm-build |
3ee90c |
DeleteRsc(rsc, node, FALSE, data_set);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
for (rsc_op = __xml_first_child_element(rsc_entry); rsc_op != NULL;
|
|
rpm-build |
3ee90c |
rsc_op = __xml_next_element(rsc_op)) {
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) {
|
|
rpm-build |
3ee90c |
op_list = g_list_prepend(op_list, rsc_op);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
|
|
rpm-build |
3ee90c |
calculate_active_ops(sorted_op_list, &start_index, &stop_index);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
xmlNode *rsc_op = (xmlNode *) gIter->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
offset++;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (start_index < stop_index) {
|
|
rpm-build |
3ee90c |
/* stopped */
|
|
rpm-build |
3ee90c |
continue;
|
|
rpm-build |
3ee90c |
} else if (offset < start_index) {
|
|
rpm-build |
3ee90c |
/* action occurred prior to a start */
|
|
rpm-build |
3ee90c |
continue;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
interval_ms_s = crm_element_value(rsc_op, XML_LRM_ATTR_INTERVAL_MS);
|
|
rpm-build |
3ee90c |
interval_ms = crm_parse_ms(interval_ms_s);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if ((interval_ms > 0) &&
|
|
rpm-build |
3ee90c |
(is_set(rsc->flags, pe_rsc_maintenance) || node->details->maintenance)) {
|
|
rpm-build |
3ee90c |
// Maintenance mode cancels recurring operations
|
|
rpm-build |
3ee90c |
CancelXmlOp(rsc, rsc_op, node, "maintenance mode", data_set);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
} else if ((interval_ms > 0)
|
|
rpm-build |
3ee90c |
|| safe_str_eq(task, RSC_STATUS)
|
|
rpm-build |
3ee90c |
|| safe_str_eq(task, RSC_START)
|
|
rpm-build |
3ee90c |
|| safe_str_eq(task, RSC_PROMOTE)
|
|
rpm-build |
3ee90c |
|| safe_str_eq(task, RSC_MIGRATED)) {
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* If a resource operation failed, and the operation's definition
|
|
rpm-build |
3ee90c |
* has changed, clear any fail count so they can be retried fresh.
|
|
rpm-build |
3ee90c |
*/
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (pe__bundle_needs_remote_name(rsc)) {
|
|
rpm-build |
3ee90c |
/* We haven't allocated resources to nodes yet, so if the
|
|
rpm-build |
3ee90c |
* REMOTE_CONTAINER_HACK is used, we may calculate the digest
|
|
rpm-build |
3ee90c |
* based on the literal "#uname" value rather than the properly
|
|
rpm-build |
3ee90c |
* substituted value. That would mistakenly make the action
|
|
rpm-build |
3ee90c |
* definition appear to have been changed. Defer the check until
|
|
rpm-build |
3ee90c |
* later in this case.
|
|
rpm-build |
3ee90c |
*/
|
|
rpm-build |
3ee90c |
pe__add_param_check(rsc_op, rsc, node, pe_check_active,
|
|
rpm-build |
3ee90c |
data_set);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
} else if (check_action_definition(rsc, node, rsc_op, data_set)
|
|
rpm-build |
3ee90c |
&& pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
|
|
rpm-build |
3ee90c |
data_set)) {
|
|
rpm-build |
3ee90c |
pe__clear_failcount(rsc, node, "action definition changed",
|
|
rpm-build |
3ee90c |
data_set);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
g_list_free(sorted_op_list);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
static GListPtr
|
|
rpm-build |
3ee90c |
find_rsc_list(GListPtr result, resource_t * rsc, const char *id, gboolean renamed_clones,
|
|
rpm-build |
3ee90c |
gboolean partial, pe_working_set_t * data_set)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
GListPtr gIter = NULL;
|
|
rpm-build |
3ee90c |
gboolean match = FALSE;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (id == NULL) {
|
|
rpm-build |
3ee90c |
return NULL;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (rsc == NULL) {
|
|
rpm-build |
3ee90c |
if (data_set == NULL) {
|
|
rpm-build |
3ee90c |
return NULL;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
pe_resource_t *child = (pe_resource_t *) gIter->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
result = find_rsc_list(result, child, id, renamed_clones, partial,
|
|
rpm-build |
3ee90c |
NULL);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
return result;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (partial) {
|
|
rpm-build |
3ee90c |
if (strstr(rsc->id, id)) {
|
|
rpm-build |
3ee90c |
match = TRUE;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
} else if (renamed_clones && rsc->clone_name && strstr(rsc->clone_name, id)) {
|
|
rpm-build |
3ee90c |
match = TRUE;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
} else {
|
|
rpm-build |
3ee90c |
if (strcmp(rsc->id, id) == 0) {
|
|
rpm-build |
3ee90c |
match = TRUE;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
} else if (renamed_clones && rsc->clone_name && strcmp(rsc->clone_name, id) == 0) {
|
|
rpm-build |
3ee90c |
match = TRUE;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (match) {
|
|
rpm-build |
3ee90c |
result = g_list_prepend(result, rsc);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (rsc->children) {
|
|
rpm-build |
3ee90c |
gIter = rsc->children;
|
|
rpm-build |
3ee90c |
for (; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
resource_t *child = (resource_t *) gIter->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
result = find_rsc_list(result, child, id, renamed_clones, partial, NULL);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
return result;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
static void
|
|
rpm-build |
3ee90c |
check_actions(pe_working_set_t * data_set)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
const char *id = NULL;
|
|
rpm-build |
3ee90c |
node_t *node = NULL;
|
|
rpm-build |
3ee90c |
xmlNode *lrm_rscs = NULL;
|
|
rpm-build |
3ee90c |
xmlNode *status = get_object_root(XML_CIB_TAG_STATUS, data_set->input);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
xmlNode *node_state = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
for (node_state = __xml_first_child_element(status); node_state != NULL;
|
|
rpm-build |
3ee90c |
node_state = __xml_next_element(node_state)) {
|
|
rpm-build |
3ee90c |
if (crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE)) {
|
|
rpm-build |
3ee90c |
id = crm_element_value(node_state, XML_ATTR_ID);
|
|
rpm-build |
3ee90c |
lrm_rscs = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
|
|
rpm-build |
3ee90c |
lrm_rscs = find_xml_node(lrm_rscs, XML_LRM_TAG_RESOURCES, FALSE);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
node = pe_find_node_id(data_set->nodes, id);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (node == NULL) {
|
|
rpm-build |
3ee90c |
continue;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* Still need to check actions for a maintenance node to cancel existing monitor operations */
|
|
rpm-build |
3ee90c |
} else if (can_run_resources(node) == FALSE && node->details->maintenance == FALSE) {
|
|
rpm-build |
3ee90c |
crm_trace("Skipping param check for %s: can't run resources",
|
|
rpm-build |
3ee90c |
node->details->uname);
|
|
rpm-build |
3ee90c |
continue;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
crm_trace("Processing node %s", node->details->uname);
|
|
rpm-build |
3ee90c |
if (node->details->online || is_set(data_set->flags, pe_flag_stonith_enabled)) {
|
|
rpm-build |
3ee90c |
xmlNode *rsc_entry = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
for (rsc_entry = __xml_first_child_element(lrm_rscs);
|
|
rpm-build |
3ee90c |
rsc_entry != NULL;
|
|
rpm-build |
3ee90c |
rsc_entry = __xml_next_element(rsc_entry)) {
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (crm_str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, TRUE)) {
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (xml_has_children(rsc_entry)) {
|
|
rpm-build |
3ee90c |
GListPtr gIter = NULL;
|
|
rpm-build |
3ee90c |
GListPtr result = NULL;
|
|
rpm-build |
3ee90c |
const char *rsc_id = ID(rsc_entry);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
CRM_CHECK(rsc_id != NULL, return);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
result = find_rsc_list(NULL, NULL, rsc_id, TRUE, FALSE, data_set);
|
|
rpm-build |
3ee90c |
for (gIter = result; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
resource_t *rsc = (resource_t *) gIter->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (rsc->variant != pe_native) {
|
|
rpm-build |
3ee90c |
continue;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
check_actions_for(rsc_entry, rsc, node, data_set);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
g_list_free(result);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
static gboolean
|
|
rpm-build |
3ee90c |
apply_placement_constraints(pe_working_set_t * data_set)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
GListPtr gIter = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
crm_trace("Applying constraints...");
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
for (gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
pe__location_t *cons = gIter->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
cons->rsc_lh->cmds->rsc_location(cons->rsc_lh, cons);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
return TRUE;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
static gboolean
|
|
rpm-build |
3ee90c |
failcount_clear_action_exists(node_t * node, resource_t * rsc)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
gboolean rc = FALSE;
|
|
rpm-build |
3ee90c |
GList *list = pe__resource_actions(rsc, node, CRM_OP_CLEAR_FAILCOUNT, TRUE);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (list) {
|
|
rpm-build |
3ee90c |
rc = TRUE;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
g_list_free(list);
|
|
rpm-build |
3ee90c |
return rc;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/*!
|
|
rpm-build |
3ee90c |
* \internal
|
|
rpm-build |
3ee90c |
* \brief Force resource away if failures hit migration threshold
|
|
rpm-build |
3ee90c |
*
|
|
rpm-build |
3ee90c |
* \param[in,out] rsc Resource to check for failures
|
|
rpm-build |
3ee90c |
* \param[in,out] node Node to check for failures
|
|
rpm-build |
3ee90c |
* \param[in,out] data_set Cluster working set to update
|
|
rpm-build |
3ee90c |
*/
|
|
rpm-build |
3ee90c |
static void
|
|
rpm-build |
3ee90c |
check_migration_threshold(resource_t *rsc, node_t *node,
|
|
rpm-build |
3ee90c |
pe_working_set_t *data_set)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
int fail_count, countdown;
|
|
rpm-build |
3ee90c |
resource_t *failed;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* Migration threshold of 0 means never force away */
|
|
rpm-build |
3ee90c |
if (rsc->migration_threshold == 0) {
|
|
rpm-build |
3ee90c |
return;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
// If we're ignoring failures, also ignore the migration threshold
|
|
rpm-build |
3ee90c |
if (is_set(rsc->flags, pe_rsc_failure_ignored)) {
|
|
rpm-build |
3ee90c |
return;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* If there are no failures, there's no need to force away */
|
|
rpm-build |
3ee90c |
fail_count = pe_get_failcount(node, rsc, NULL,
|
|
rpm-build |
3ee90c |
pe_fc_effective|pe_fc_fillers, NULL,
|
|
rpm-build |
3ee90c |
data_set);
|
|
rpm-build |
3ee90c |
if (fail_count <= 0) {
|
|
rpm-build |
3ee90c |
return;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* How many more times recovery will be tried on this node */
|
|
rpm-build |
3ee90c |
countdown = QB_MAX(rsc->migration_threshold - fail_count, 0);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* If failed resource has a parent, we'll force the parent away */
|
|
rpm-build |
3ee90c |
failed = rsc;
|
|
rpm-build |
3ee90c |
if (is_not_set(rsc->flags, pe_rsc_unique)) {
|
|
rpm-build |
3ee90c |
failed = uber_parent(rsc);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (countdown == 0) {
|
|
rpm-build |
3ee90c |
resource_location(failed, node, -INFINITY, "__fail_limit__", data_set);
|
|
rpm-build |
3ee90c |
crm_warn("Forcing %s away from %s after %d failures (max=%d)",
|
|
rpm-build |
3ee90c |
failed->id, node->details->uname, fail_count,
|
|
rpm-build |
3ee90c |
rsc->migration_threshold);
|
|
rpm-build |
3ee90c |
} else {
|
|
rpm-build |
3ee90c |
crm_info("%s can fail %d more times on %s before being forced off",
|
|
rpm-build |
3ee90c |
failed->id, countdown, node->details->uname);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* Apply resource stickiness (preference for the node a resource is already
 * running on), then check the migration threshold for the resource on this
 * node. Collective resources recurse to their primitive children.
 */
static void
common_apply_stickiness(resource_t * rsc, node_t * node, pe_working_set_t * data_set)
{
    if (rsc->children) {
        /* Collective resource: stickiness applies per primitive instance */
        GListPtr gIter = rsc->children;

        for (; gIter != NULL; gIter = gIter->next) {
            resource_t *child_rsc = (resource_t *) gIter->data;

            common_apply_stickiness(child_rsc, node, data_set);
        }
        return;
    }

    /* Stickiness is only meaningful for a managed resource with a nonzero
     * stickiness value that is active on exactly one node.
     */
    if (is_set(rsc->flags, pe_rsc_managed)
        && rsc->stickiness != 0 && g_list_length(rsc->running_on) == 1) {
        node_t *current = pe_find_node_id(rsc->running_on, node->details->id);
        node_t *match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);

        if (current == NULL) {
            /* Resource is not running on this node; nothing to prefer */

        } else if (match != NULL || is_set(data_set->flags, pe_flag_symmetric_cluster)) {
            /* Node is explicitly allowed (or the cluster is symmetric),
             * so add the stickiness score for the current location.
             */
            resource_t *sticky_rsc = rsc;

            resource_location(sticky_rsc, node, rsc->stickiness, "stickiness", data_set);
            pe_rsc_debug(sticky_rsc, "Resource %s: preferring current location"
                         " (node=%s, weight=%d)", sticky_rsc->id,
                         node->details->uname, rsc->stickiness);
        } else {
            /* Asymmetric cluster and the node isn't in allowed_nodes:
             * log the allowed-node scores for debugging and skip stickiness.
             */
            GHashTableIter iter;
            node_t *nIter = NULL;

            pe_rsc_debug(rsc, "Ignoring stickiness for %s: the cluster is asymmetric"
                         " and node %s is not explicitly allowed", rsc->id, node->details->uname);
            g_hash_table_iter_init(&iter, rsc->allowed_nodes);
            while (g_hash_table_iter_next(&iter, NULL, (void **)&nIter)) {
                crm_err("%s[%s] = %d", rsc->id, nIter->details->uname, nIter->weight);
            }
        }
    }

    /* Check the migration threshold only if a failcount clear action
     * has not already been placed for this resource on the node.
     * There is no sense in potentially forcing the resource from this
     * node if the failcount is being reset anyway.
     *
     * @TODO A clear_failcount operation can be scheduled in stage4() via
     * check_actions_for(), or in stage5() via check_params(). This runs in
     * stage2(), so it cannot detect those, meaning we might check the migration
     * threshold when we shouldn't -- worst case, we stop or move the resource,
     * then move it back next transition.
     */
    if (failcount_clear_action_exists(node, rsc) == FALSE) {
        check_migration_threshold(rsc, node, data_set);
    }
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
void
|
|
rpm-build |
3ee90c |
complex_set_cmds(resource_t * rsc)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
GListPtr gIter = rsc->children;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
rsc->cmds = &resource_class_alloc_functions[rsc->variant];
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
for (; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
resource_t *child_rsc = (resource_t *) gIter->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
complex_set_cmds(child_rsc);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
void
|
|
rpm-build |
3ee90c |
set_alloc_actions(pe_working_set_t * data_set)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
GListPtr gIter = data_set->resources;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
for (; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
resource_t *rsc = (resource_t *) gIter->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
complex_set_cmds(rsc);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
static void
|
|
rpm-build |
3ee90c |
calculate_system_health(gpointer gKey, gpointer gValue, gpointer user_data)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
const char *key = (const char *)gKey;
|
|
rpm-build |
3ee90c |
const char *value = (const char *)gValue;
|
|
rpm-build |
3ee90c |
int *system_health = (int *)user_data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (!gKey || !gValue || !user_data) {
|
|
rpm-build |
3ee90c |
return;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (crm_starts_with(key, "#health")) {
|
|
rpm-build |
3ee90c |
int score;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* Convert the value into an integer */
|
|
rpm-build |
3ee90c |
score = char2score(value);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* Add it to the running total */
|
|
rpm-build |
3ee90c |
*system_health = merge_weights(score, *system_health);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* Configure node-health scoring per the node-health-strategy cluster option,
 * then add a location preference for every resource on every node with a
 * nonzero combined health score. Returns FALSE only for an unrecognized
 * strategy name.
 */
static gboolean
apply_system_health(pe_working_set_t * data_set)
{
    GListPtr gIter = NULL;
    const char *health_strategy = pe_pref(data_set->config_hash, "node-health-strategy");
    int base_health = 0;

    if (health_strategy == NULL || safe_str_eq(health_strategy, "none")) {
        /* Prevent any accidental health -> score translation */
        node_score_red = 0;
        node_score_yellow = 0;
        node_score_green = 0;
        return TRUE;

    } else if (safe_str_eq(health_strategy, "migrate-on-red")) {

        /* Resources on nodes which have health values of red are
         * weighted away from that node.
         */
        node_score_red = -INFINITY;
        node_score_yellow = 0;
        node_score_green = 0;

    } else if (safe_str_eq(health_strategy, "only-green")) {

        /* Resources on nodes which have health values of red or yellow
         * are forced away from that node.
         */
        node_score_red = -INFINITY;
        node_score_yellow = -INFINITY;
        node_score_green = 0;

    } else if (safe_str_eq(health_strategy, "progressive")) {
        /* Same as the above, but use the r/y/g scores provided by the user
         * Defaults are provided by the pe_prefs table
         * Also, custom health "base score" can be used
         */
        base_health = crm_parse_int(pe_pref(data_set->config_hash, "node-health-base"), "0");

    } else if (safe_str_eq(health_strategy, "custom")) {

        /* Requires the admin to configure the rsc_location constraints for
         * processing the stored health scores
         */
        /* TODO: Check for the existence of appropriate node health constraints */
        return TRUE;

    } else {
        crm_err("Unknown node health strategy: %s", health_strategy);
        return FALSE;
    }

    crm_info("Applying automated node health strategy: %s", health_strategy);

    for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
        int system_health = base_health;
        node_t *node = (node_t *) gIter->data;

        /* Search through the node hash table for system health entries. */
        g_hash_table_foreach(node->details->attrs, calculate_system_health, &system_health);

        crm_info(" Node %s has an combined system health of %d",
                 node->details->uname, system_health);

        /* If the health is non-zero, then create a new rsc2node so that the
         * weight will be added later on.
         */
        if (system_health != 0) {

            GListPtr gIter2 = data_set->resources;

            for (; gIter2 != NULL; gIter2 = gIter2->next) {
                resource_t *rsc = (resource_t *) gIter2->data;

                /* Attach this node's health score as a location preference
                 * for every resource (negative scores push resources away).
                 */
                rsc2node_new(health_strategy, rsc, system_health, NULL, node, data_set);
            }
        }
    }

    return TRUE;
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
gboolean
|
|
rpm-build |
3ee90c |
stage0(pe_working_set_t * data_set)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (data_set->input == NULL) {
|
|
rpm-build |
3ee90c |
return FALSE;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (is_set(data_set->flags, pe_flag_have_status) == FALSE) {
|
|
rpm-build |
3ee90c |
crm_trace("Calculating status");
|
|
rpm-build |
3ee90c |
cluster_status(data_set);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
set_alloc_actions(data_set);
|
|
rpm-build |
3ee90c |
apply_system_health(data_set);
|
|
rpm-build |
3ee90c |
unpack_constraints(cib_constraints, data_set);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
return TRUE;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/*
 * Check nodes for resources started outside of the LRM
 */
gboolean
probe_resources(pe_working_set_t * data_set)
{
    action_t *probe_node_complete = NULL;  /* always NULL here; passed through as the probe ordering anchor */

    for (GListPtr gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
        node_t *node = (node_t *) gIter->data;
        const char *probed = pe_node_attribute_raw(node, CRM_OP_PROBED);

        if (node->details->online == FALSE) {

            /* An offline remote node whose connection is unrecoverable
             * must be fenced; no probes are scheduled for it.
             */
            if (pe__is_remote_node(node) && node->details->remote_rsc
                && (get_remote_node_state(node) == remote_state_failed)) {

                pe_fence_node(data_set, node, "the connection is unrecoverable");
            }
            continue;

        } else if (node->details->unclean) {
            /* Unclean nodes are skipped entirely */
            continue;

        } else if (node->details->rsc_discovery_enabled == FALSE) {
            /* resource discovery is disabled for this node */
            continue;
        }

        /* A false probe attribute means a full reprobe was requested for
         * this node, instead of individual resource probes.
         */
        if (probed != NULL && crm_is_true(probed) == FALSE) {
            action_t *probe_op = custom_action(NULL, crm_strdup_printf("%s-%s", CRM_OP_REPROBE, node->details->uname),
                                               CRM_OP_REPROBE, node, FALSE, TRUE, data_set);

            add_hash_param(probe_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
            continue;
        }

        /* Otherwise, let each resource decide whether to probe this node */
        for (GListPtr gIter2 = data_set->resources; gIter2 != NULL; gIter2 = gIter2->next) {
            resource_t *rsc = (resource_t *) gIter2->data;

            rsc->cmds->create_probe(rsc, node, probe_node_complete, FALSE, data_set);
        }
    }
    return TRUE;
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
static void
|
|
rpm-build |
3ee90c |
rsc_discover_filter(resource_t *rsc, node_t *node)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
GListPtr gIter = rsc->children;
|
|
rpm-build |
3ee90c |
resource_t *top = uber_parent(rsc);
|
|
rpm-build |
3ee90c |
node_t *match;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (rsc->exclusive_discover == FALSE && top->exclusive_discover == FALSE) {
|
|
rpm-build |
3ee90c |
return;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
for (; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
resource_t *child_rsc = (resource_t *) gIter->data;
|
|
rpm-build |
3ee90c |
rsc_discover_filter(child_rsc, node);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
|
|
rpm-build |
3ee90c |
if (match && match->rsc_discover_mode != pe_discover_exclusive) {
|
|
rpm-build |
3ee90c |
match->weight = -INFINITY;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/*
|
|
rpm-build |
3ee90c |
* Count how many valid nodes we have (so we know the maximum number of
|
|
rpm-build |
3ee90c |
* colors we can resolve).
|
|
rpm-build |
3ee90c |
*
|
|
rpm-build |
3ee90c |
* Apply node constraints (i.e. filter the "allowed_nodes" part of resources)
|
|
rpm-build |
3ee90c |
*/
|
|
rpm-build |
3ee90c |
gboolean
|
|
rpm-build |
3ee90c |
stage2(pe_working_set_t * data_set)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
GListPtr gIter = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
crm_trace("Applying placement constraints");
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
gIter = data_set->nodes;
|
|
rpm-build |
3ee90c |
for (; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
node_t *node = (node_t *) gIter->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (node == NULL) {
|
|
rpm-build |
3ee90c |
/* error */
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
} else if (node->weight >= 0.0 /* global weight */
|
|
rpm-build |
3ee90c |
&& node->details->online && node->details->type != node_ping) {
|
|
rpm-build |
3ee90c |
data_set->max_valid_nodes++;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
apply_placement_constraints(data_set);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
gIter = data_set->nodes;
|
|
rpm-build |
3ee90c |
for (; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
GListPtr gIter2 = NULL;
|
|
rpm-build |
3ee90c |
node_t *node = (node_t *) gIter->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
gIter2 = data_set->resources;
|
|
rpm-build |
3ee90c |
for (; gIter2 != NULL; gIter2 = gIter2->next) {
|
|
rpm-build |
3ee90c |
resource_t *rsc = (resource_t *) gIter2->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
common_apply_stickiness(rsc, node, data_set);
|
|
rpm-build |
3ee90c |
rsc_discover_filter(rsc, node);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
return TRUE;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/*
|
|
rpm-build |
3ee90c |
* Create internal resource constraints before allocation
|
|
rpm-build |
3ee90c |
*/
|
|
rpm-build |
3ee90c |
gboolean
|
|
rpm-build |
3ee90c |
stage3(pe_working_set_t * data_set)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
GListPtr gIter = data_set->resources;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
for (; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
resource_t *rsc = (resource_t *) gIter->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
rsc->cmds->internal_constraints(rsc, data_set);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
return TRUE;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/*
 * Check for orphaned or redefined actions
 */
gboolean
stage4(pe_working_set_t * data_set)
{
    /* Delegates entirely to check_actions(); always succeeds */
    check_actions(data_set);
    return TRUE;
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* Strip a const qualifier from a pointer, for APIs that take a non-const
 * argument they never actually modify.
 */
static void *
convert_const_pointer(const void *ptr)
{
    return (void *) ptr;
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* GCompareDataFunc for sorting resources into allocation order.
 * Comparison sequence: resource priority (higher first), then merged
 * node weight at the current location, then merged weight on each node
 * in the supplied list, in list order. Returns <0 if a should be
 * allocated before b, >0 if after, 0 if tied.
 *
 * NOTE(review): if both a and b are NULL, the trace at "done:" would
 * dereference resource1/resource2 -- presumably callers never pass two
 * NULLs (g_list_sort_with_data won't); verify before reuse.
 */
static gint
sort_rsc_process_order(gconstpointer a, gconstpointer b, gpointer data)
{
    int rc = 0;
    int r1_weight = -INFINITY;
    int r2_weight = -INFINITY;

    const char *reason = "existence";

    const GListPtr nodes = (GListPtr) data;
    const resource_t *resource1 = a;
    const resource_t *resource2 = b;

    node_t *r1_node = NULL;
    node_t *r2_node = NULL;
    GListPtr gIter = NULL;
    GHashTable *r1_nodes = NULL;
    GHashTable *r2_nodes = NULL;

    /* NULL entries sort after non-NULL entries */
    if (a == NULL && b == NULL) {
        goto done;
    }
    if (a == NULL) {
        return 1;
    }
    if (b == NULL) {
        return -1;
    }

    /* First criterion: configured resource priority (higher wins) */
    reason = "priority";
    r1_weight = resource1->priority;
    r2_weight = resource2->priority;

    if (r1_weight > r2_weight) {
        rc = -1;
        goto done;
    }

    if (r1_weight < r2_weight) {
        rc = 1;
        goto done;
    }

    /* Without a node list, weights cannot be compared further */
    reason = "no node list";
    if (nodes == NULL) {
        goto done;
    }

    /* Build each resource's effective per-node scores, including
     * forwarded colocation weights
     */
    r1_nodes = rsc_merge_weights(convert_const_pointer(resource1),
                                 resource1->id, NULL, NULL, 1,
                                 pe_weights_forward | pe_weights_init);
    dump_node_scores(LOG_TRACE, NULL, resource1->id, r1_nodes);

    r2_nodes = rsc_merge_weights(convert_const_pointer(resource2),
                                 resource2->id, NULL, NULL, 1,
                                 pe_weights_forward | pe_weights_init);
    dump_node_scores(LOG_TRACE, NULL, resource2->id, r2_nodes);

    /* Current location score */
    reason = "current location";
    r1_weight = -INFINITY;
    r2_weight = -INFINITY;

    if (resource1->running_on) {
        r1_node = pe__current_node(resource1);
        r1_node = g_hash_table_lookup(r1_nodes, r1_node->details->id);
        if (r1_node != NULL) {
            r1_weight = r1_node->weight;
        }
    }
    if (resource2->running_on) {
        r2_node = pe__current_node(resource2);
        r2_node = g_hash_table_lookup(r2_nodes, r2_node->details->id);
        if (r2_node != NULL) {
            r2_weight = r2_node->weight;
        }
    }

    if (r1_weight > r2_weight) {
        rc = -1;
        goto done;
    }

    if (r1_weight < r2_weight) {
        rc = 1;
        goto done;
    }

    /* Final tie-breaker: compare weights node by node, in list order */
    reason = "score";
    for (gIter = nodes; gIter != NULL; gIter = gIter->next) {
        node_t *node = (node_t *) gIter->data;

        r1_node = NULL;
        r2_node = NULL;

        r1_weight = -INFINITY;
        if (r1_nodes) {
            r1_node = g_hash_table_lookup(r1_nodes, node->details->id);
        }
        if (r1_node) {
            r1_weight = r1_node->weight;
        }

        r2_weight = -INFINITY;
        if (r2_nodes) {
            r2_node = g_hash_table_lookup(r2_nodes, node->details->id);
        }
        if (r2_node) {
            r2_weight = r2_node->weight;
        }

        if (r1_weight > r2_weight) {
            rc = -1;
            goto done;
        }

        if (r1_weight < r2_weight) {
            rc = 1;
            goto done;
        }
    }

done:
    crm_trace("%s (%d) on %s %c %s (%d) on %s: %s",
              resource1->id, r1_weight, r1_node ? r1_node->details->id : "n/a",
              rc < 0 ? '>' : rc > 0 ? '<' : '=',
              resource2->id, r2_weight, r2_node ? r2_node->details->id : "n/a", reason);

    if (r1_nodes) {
        g_hash_table_destroy(r1_nodes);
    }
    if (r2_nodes) {
        g_hash_table_destroy(r2_nodes);
    }

    return rc;
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
static void
|
|
rpm-build |
3ee90c |
allocate_resources(pe_working_set_t * data_set)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
GListPtr gIter = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (is_set(data_set->flags, pe_flag_have_remote_nodes)) {
|
|
rpm-build |
3ee90c |
/* Force remote connection resources to be allocated first. This
|
|
rpm-build |
3ee90c |
* also forces any colocation dependencies to be allocated as well */
|
|
rpm-build |
3ee90c |
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
resource_t *rsc = (resource_t *) gIter->data;
|
|
rpm-build |
3ee90c |
if (rsc->is_remote_node == FALSE) {
|
|
rpm-build |
3ee90c |
continue;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
pe_rsc_trace(rsc, "Allocating: %s", rsc->id);
|
|
rpm-build |
3ee90c |
/* For remote node connection resources, always prefer the partial
|
|
rpm-build |
3ee90c |
* migration target during resource allocation, if the rsc is in the
|
|
rpm-build |
3ee90c |
* middle of a migration.
|
|
rpm-build |
3ee90c |
*/
|
|
rpm-build |
3ee90c |
rsc->cmds->allocate(rsc, rsc->partial_migration_target, data_set);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* now do the rest of the resources */
|
|
rpm-build |
3ee90c |
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
resource_t *rsc = (resource_t *) gIter->data;
|
|
rpm-build |
3ee90c |
if (rsc->is_remote_node == TRUE) {
|
|
rpm-build |
3ee90c |
continue;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
pe_rsc_trace(rsc, "Allocating: %s", rsc->id);
|
|
rpm-build |
3ee90c |
rsc->cmds->allocate(rsc, NULL, data_set);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* We always use pe_order_preserve with these convenience functions to exempt
|
|
rpm-build |
3ee90c |
* internally generated constraints from the prohibition of user constraints
|
|
rpm-build |
3ee90c |
* involving remote connection resources.
|
|
rpm-build |
3ee90c |
*
|
|
rpm-build |
3ee90c |
* The start ordering additionally uses pe_order_runnable_left so that the
|
|
rpm-build |
3ee90c |
* specified action is not runnable if the start is not runnable.
|
|
rpm-build |
3ee90c |
*/
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
static inline void
|
|
rpm-build |
3ee90c |
order_start_then_action(resource_t *lh_rsc, action_t *rh_action,
|
|
rpm-build |
3ee90c |
enum pe_ordering extra, pe_working_set_t *data_set)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
if (lh_rsc && rh_action && data_set) {
|
|
rpm-build |
3ee90c |
custom_action_order(lh_rsc, start_key(lh_rsc), NULL,
|
|
rpm-build |
3ee90c |
rh_action->rsc, NULL, rh_action,
|
|
rpm-build |
3ee90c |
pe_order_preserve | pe_order_runnable_left | extra,
|
|
rpm-build |
3ee90c |
data_set);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
static inline void
|
|
rpm-build |
3ee90c |
order_action_then_stop(action_t *lh_action, resource_t *rh_rsc,
|
|
rpm-build |
3ee90c |
enum pe_ordering extra, pe_working_set_t *data_set)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
if (lh_action && rh_rsc && data_set) {
|
|
rpm-build |
3ee90c |
custom_action_order(lh_action->rsc, NULL, lh_action,
|
|
rpm-build |
3ee90c |
rh_rsc, stop_key(rh_rsc), NULL,
|
|
rpm-build |
3ee90c |
pe_order_preserve | extra, data_set);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
// Clear fail counts for orphaned rsc on all online nodes
static void
cleanup_orphans(resource_t * rsc, pe_working_set_t * data_set)
{
    GListPtr gIter = NULL;

    // Walk every cluster node, scheduling a fail-count clear where needed
    for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
        node_t *node = (node_t *) gIter->data;

        // Only bother for online nodes with a nonzero effective fail count
        if (node->details->online
            && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
                                data_set)) {

            pe_action_t *clear_op = NULL;

            clear_op = pe__clear_failcount(rsc, node, "it is orphaned",
                                           data_set);

            /* We can't use order_action_then_stop() here because its
             * pe_order_preserve breaks things
             */
            // Clear the fail count before stopping the orphan
            custom_action_order(clear_op->rsc, NULL, clear_op,
                                rsc, stop_key(rsc), NULL,
                                pe_order_optional, data_set);
        }
    }
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* Allocate resources to nodes and create the resulting actions.
 *
 * Steps: optionally sort resources by a utilization-aware order, allocate
 * every resource, run deferred parameter checks, schedule startup probes,
 * schedule fail-count cleanup for orphans, and finally create each
 * resource's actions. Always returns TRUE.
 */
gboolean
stage5(pe_working_set_t * data_set)
{
    GListPtr gIter = NULL;

    /* With a non-default placement strategy, process resources in an order
     * based on node weights (sort_rsc_process_order uses the node list).
     */
    if (safe_str_neq(data_set->placement_strategy, "default")) {
        GListPtr nodes = g_list_copy(data_set->nodes);

        nodes = sort_nodes_by_weight(nodes, NULL, data_set);
        data_set->resources =
            g_list_sort_with_data(data_set->resources, sort_rsc_process_order, nodes);

        g_list_free(nodes);
    }

    // Log each node's capacity before any allocation
    gIter = data_set->nodes;
    for (; gIter != NULL; gIter = gIter->next) {
        node_t *node = (node_t *) gIter->data;

        dump_node_capacity(show_utilization ? 0 : utilization_log_level, "Original", node);
    }

    crm_trace("Allocating services");
    /* Take (next) highest resource, assign it and create its actions */

    allocate_resources(data_set);

    // Log each node's remaining capacity after allocation
    gIter = data_set->nodes;
    for (; gIter != NULL; gIter = gIter->next) {
        node_t *node = (node_t *) gIter->data;

        dump_node_capacity(show_utilization ? 0 : utilization_log_level, "Remaining", node);
    }

    // Process deferred action checks
    pe__foreach_param_check(data_set, check_params);
    pe__free_param_checks(data_set);

    if (is_set(data_set->flags, pe_flag_startup_probes)) {
        crm_trace("Calculating needed probes");
        /* This code probably needs optimization
         * ptest -x with 100 nodes, 100 clones and clone-max=100:

           With probes:

           ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status
           ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints
           ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints
           ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:292 Check actions
           ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: do_calculations: pengine.c:299 Allocate resources
           ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: stage5: allocate.c:881 Allocating services
           ptest[14781]: 2010/09/27_17:56:49 notice: TRACE: stage5: allocate.c:894 Calculating needed probes
           ptest[14781]: 2010/09/27_17:56:51 notice: TRACE: stage5: allocate.c:899 Creating actions
           ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: stage5: allocate.c:905 Creating done
           ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases
           ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints
           36s
           ptest[14781]: 2010/09/27_17:57:28 notice: TRACE: do_calculations: pengine.c:320 Create transition graph

           Without probes:

           ptest[14637]: 2010/09/27_17:56:21 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status
           ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints
           ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints
           ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:292 Check actions
           ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: do_calculations: pengine.c:299 Allocate resources
           ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: stage5: allocate.c:881 Allocating services
           ptest[14637]: 2010/09/27_17:56:24 notice: TRACE: stage5: allocate.c:899 Creating actions
           ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: stage5: allocate.c:905 Creating done
           ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases
           ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints
           ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:320 Create transition graph
         */

        probe_resources(data_set);
    }

    crm_trace("Handle orphans");
    if (is_set(data_set->flags, pe_flag_stop_rsc_orphans)) {
        for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
            pe_resource_t *rsc = (pe_resource_t *) gIter->data;

            /* There's no need to recurse into rsc->children because those
             * should just be unallocated clone instances.
             */
            if (is_set(rsc->flags, pe_rsc_orphan)) {
                cleanup_orphans(rsc, data_set);
            }
        }
    }

    crm_trace("Creating actions");

    // Let each resource's allocation method create its actions
    for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
        resource_t *rsc = (resource_t *) gIter->data;

        rsc->cmds->create_actions(rsc, data_set);
    }

    crm_trace("Creating done");
    return TRUE;
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
static gboolean
|
|
rpm-build |
3ee90c |
is_managed(const resource_t * rsc)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
GListPtr gIter = rsc->children;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (is_set(rsc->flags, pe_rsc_managed)) {
|
|
rpm-build |
3ee90c |
return TRUE;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
for (; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
resource_t *child_rsc = (resource_t *) gIter->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (is_managed(child_rsc)) {
|
|
rpm-build |
3ee90c |
return TRUE;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
return FALSE;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
static gboolean
|
|
rpm-build |
3ee90c |
any_managed_resources(pe_working_set_t * data_set)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
GListPtr gIter = data_set->resources;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
for (; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
resource_t *rsc = (resource_t *) gIter->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (is_managed(rsc)) {
|
|
rpm-build |
3ee90c |
return TRUE;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
return FALSE;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/*!
 * \internal
 * \brief Create pseudo-op for guest node fence, and order relative to it
 *
 * \param[in] node      Guest node to fence
 * \param[in] data_set  Working set of CIB state
 */
static void
fence_guest(pe_node_t *node, pe_working_set_t *data_set)
{
    resource_t *container = node->details->remote_rsc->container;
    pe_action_t *stop = NULL;
    pe_action_t *stonith_op = NULL;

    /* The fence action is just a label; we don't do anything differently for
     * off vs. reboot. We specify it explicitly, rather than let it default to
     * cluster's default action, because we are not _initiating_ fencing -- we
     * are creating a pseudo-event to describe fencing that is already occurring
     * by other means (container recovery).
     */
    const char *fence_action = "off";

    /* Check whether guest's container resource has any explicit stop or
     * start (the stop may be implied by fencing of the guest's host).
     */
    if (container) {
        stop = find_first_action(container->actions, NULL, CRMD_ACTION_STOP, NULL);

        // A scheduled container start means the guest will come back up
        if (find_first_action(container->actions, NULL, CRMD_ACTION_START, NULL)) {
            fence_action = "reboot";
        }
    }

    /* Create a fence pseudo-event, so we have an event to order actions
     * against, and the controller can always detect it.
     */
    stonith_op = pe_fence_op(node, fence_action, FALSE, "guest is unclean", data_set);
    update_action_flags(stonith_op, pe_action_pseudo | pe_action_runnable,
                        __FUNCTION__, __LINE__);

    /* We want to imply stops/demotes after the guest is stopped, not wait until
     * it is restarted, so we always order pseudo-fencing after stop, not start
     * (even though start might be closer to what is done for a real reboot).
     */
    if(stop && is_set(stop->flags, pe_action_pseudo)) {
        /* The container stop is itself a pseudo-action, i.e. implied by the
         * fencing of the node hosting the container -- order our pseudo-fence
         * after that real fencing instead.
         */
        pe_action_t *parent_stonith_op = pe_fence_op(stop->node, NULL, FALSE, NULL, data_set);
        crm_info("Implying guest node %s is down (action %d) after %s fencing",
                 node->details->uname, stonith_op->id, stop->node->details->uname);
        order_actions(parent_stonith_op, stonith_op,
                      pe_order_runnable_left|pe_order_implies_then);

    } else if (stop) {
        // A real container stop: order the pseudo-fence after it
        order_actions(stop, stonith_op,
                      pe_order_runnable_left|pe_order_implies_then);
        crm_info("Implying guest node %s is down (action %d) "
                 "after container %s is stopped (action %d)",
                 node->details->uname, stonith_op->id,
                 container->id, stop->id);
    } else {
        /* If we're fencing the guest node but there's no stop for the guest
         * resource, we must think the guest is already stopped. However, we may
         * think so because its resource history was just cleaned. To avoid
         * unnecessarily considering the guest node down if it's really up,
         * order the pseudo-fencing after any stop of the connection resource,
         * which will be ordered after any container (re-)probe.
         */
        stop = find_first_action(node->details->remote_rsc->actions, NULL,
                                 RSC_STOP, NULL);

        if (stop) {
            order_actions(stop, stonith_op, pe_order_optional);
            crm_info("Implying guest node %s is down (action %d) "
                     "after connection is stopped (action %d)",
                     node->details->uname, stonith_op->id, stop->id);
        } else {
            /* Not sure why we're fencing, but everything must already be
             * cleanly stopped.
             */
            crm_info("Implying guest node %s is down (action %d) ",
                     node->details->uname, stonith_op->id);
        }
    }

    /* Order/imply other actions relative to pseudo-fence as with real fence */
    stonith_constraints(node, stonith_op, data_set);
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/*
 * Create dependencies for stonith and shutdown operations
 *
 * For each node, schedule fencing if it is unclean (or a guest needing
 * reset) or a shutdown if it is cleanly leaving, then order non-DC
 * fencing/shutdown actions relative to any DC fencing or shutdown.
 * Always returns TRUE.
 */
gboolean
stage6(pe_working_set_t * data_set)
{
    action_t *dc_down = NULL;           // DC's fencing or shutdown action, if any
    action_t *stonith_op = NULL;
    gboolean integrity_lost = FALSE;    // TRUE if an unclean node can't be fenced
    gboolean need_stonith = TRUE;
    GListPtr gIter;
    GListPtr stonith_ops = NULL;        // non-DC fencing actions
    GList *shutdown_ops = NULL;         // non-DC shutdown actions

    /* Remote ordering constraints need to happen prior to calculating fencing
     * because it is one more place we will mark the node as dirty.
     *
     * A nice side effect of doing them early is that apply_*_ordering() can be
     * simpler because pe_fence_node() has already done some of the work.
     */
    crm_trace("Creating remote ordering constraints");
    apply_remote_node_ordering(data_set);

    crm_trace("Processing fencing and shutdown cases");
    if (any_managed_resources(data_set) == FALSE) {
        crm_notice("Delaying fencing operations until there are resources to manage");
        need_stonith = FALSE;
    }

    /* Check each node for stonith/shutdown */
    for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
        node_t *node = (node_t *) gIter->data;

        /* Guest nodes are "fenced" by recovering their container resource,
         * so handle them separately.
         */
        if (pe__is_guest_node(node)) {
            if (node->details->remote_requires_reset && need_stonith
                && pe_can_fence(data_set, node)) {
                fence_guest(node, data_set);
            }
            continue;
        }

        stonith_op = NULL;

        if (node->details->unclean
            && need_stonith && pe_can_fence(data_set, node)) {

            stonith_op = pe_fence_op(node, NULL, FALSE, "node is unclean", data_set);
            pe_warn("Scheduling Node %s for STONITH", node->details->uname);

            stonith_constraints(node, stonith_op, data_set);

            if (node->details->is_dc) {
                // Remember if the DC is being fenced
                dc_down = stonith_op;

            } else {

                if (is_not_set(data_set->flags, pe_flag_concurrent_fencing)
                    && (stonith_ops != NULL)) {
                    /* Concurrent fencing is disabled, so order each non-DC
                     * fencing in a chain. If there is any DC fencing or
                     * shutdown, it will be ordered after the last action in the
                     * chain later.
                     */
                    order_actions((pe_action_t *) stonith_ops->data,
                                  stonith_op, pe_order_optional);
                }

                // Remember all non-DC fencing actions in a separate list
                stonith_ops = g_list_prepend(stonith_ops, stonith_op);
            }

        } else if (node->details->online && node->details->shutdown &&
                /* TODO define what a shutdown op means for a remote node.
                 * For now we do not send shutdown operations for remote nodes, but
                 * if we can come up with a good use for this in the future, we will. */
                    pe__is_guest_or_remote_node(node) == FALSE) {

            action_t *down_op = sched_shutdown_op(node, data_set);

            if (node->details->is_dc) {
                // Remember if the DC is being shut down
                dc_down = down_op;
            } else {
                // Remember non-DC shutdowns for later ordering
                shutdown_ops = g_list_prepend(shutdown_ops, down_op);
            }
        }

        // An unclean node that we could not schedule fencing for
        if (node->details->unclean && stonith_op == NULL) {
            integrity_lost = TRUE;
            pe_warn("Node %s is unclean!", node->details->uname);
        }
    }

    if (integrity_lost) {
        if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) {
            pe_warn("YOUR RESOURCES ARE NOW LIKELY COMPROMISED");
            pe_err("ENABLE STONITH TO KEEP YOUR RESOURCES SAFE");

        } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE) {
            crm_notice("Cannot fence unclean nodes until quorum is"
                       " attained (or no-quorum-policy is set to ignore)");
        }
    }

    if (dc_down != NULL) {
        /* Order any non-DC shutdowns before any DC shutdown, to avoid repeated
         * DC elections. However, we don't want to order non-DC shutdowns before
         * a DC *fencing*, because even though we don't want a node that's
         * shutting down to become DC, the DC fencing could be ordered before a
         * clone stop that's also ordered before the shutdowns, thus leading to
         * a graph loop.
         */
        if (safe_str_eq(dc_down->task, CRM_OP_SHUTDOWN)) {
            for (gIter = shutdown_ops; gIter != NULL; gIter = gIter->next) {
                action_t *node_stop = (action_t *) gIter->data;

                crm_debug("Ordering shutdown on %s before %s on DC %s",
                          node_stop->node->details->uname,
                          dc_down->task, dc_down->node->details->uname);

                order_actions(node_stop, dc_down, pe_order_optional);
            }
        }

        // Order any non-DC fencing before any DC fencing or shutdown

        if (is_set(data_set->flags, pe_flag_concurrent_fencing)) {
            /* With concurrent fencing, order each non-DC fencing action
             * separately before any DC fencing or shutdown.
             */
            for (gIter = stonith_ops; gIter != NULL; gIter = gIter->next) {
                order_actions((pe_action_t *) gIter->data, dc_down,
                              pe_order_optional);
            }
        } else if (stonith_ops) {
            /* Without concurrent fencing, the non-DC fencing actions are
             * already ordered relative to each other, so we just need to order
             * the DC fencing after the last action in the chain (which is the
             * first item in the list).
             */
            order_actions((pe_action_t *) stonith_ops->data, dc_down,
                          pe_order_optional);
        }
    }
    g_list_free(stonith_ops);
    g_list_free(shutdown_ops);
    return TRUE;
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/*
|
|
rpm-build |
3ee90c |
* Determine the sets of independent actions and the correct order for the
|
|
rpm-build |
3ee90c |
* actions in each set.
|
|
rpm-build |
3ee90c |
*
|
|
rpm-build |
3ee90c |
* Mark dependencies of un-runnable actions un-runnable
|
|
rpm-build |
3ee90c |
*
|
|
rpm-build |
3ee90c |
*/
|
|
rpm-build |
3ee90c |
static GListPtr
|
|
rpm-build |
3ee90c |
find_actions_by_task(GListPtr actions, resource_t * rsc, const char *original_key)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
GListPtr list = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
list = find_actions(actions, original_key, NULL);
|
|
rpm-build |
3ee90c |
if (list == NULL) {
|
|
rpm-build |
3ee90c |
/* we're potentially searching a child of the original resource */
|
|
rpm-build |
3ee90c |
char *key = NULL;
|
|
rpm-build |
3ee90c |
char *task = NULL;
|
|
rpm-build |
3ee90c |
guint interval_ms = 0;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (parse_op_key(original_key, NULL, &task, &interval_ms)) {
|
|
rpm-build |
3ee90c |
key = generate_op_key(rsc->id, task, interval_ms);
|
|
rpm-build |
3ee90c |
list = find_actions(actions, key, NULL);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
} else {
|
|
rpm-build |
3ee90c |
crm_err("search key: %s", original_key);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
free(key);
|
|
rpm-build |
3ee90c |
free(task);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
return list;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* Process the "then" (right-hand) side of an ordering constraint: find the
 * matching actions on rsc and order each after lh_action (or, with no
 * lh_action, mark them unrunnable when the constraint implies it).
 */
static void
rsc_order_then(pe_action_t *lh_action, pe_resource_t *rsc,
               pe__ordering_t *order)
{
    GListPtr gIter = NULL;
    GListPtr rh_actions = NULL;
    action_t *rh_action = NULL;
    enum pe_ordering type;

    CRM_CHECK(rsc != NULL, return);
    CRM_CHECK(order != NULL, return);

    type = order->type;
    rh_action = order->rh_action;
    crm_trace("Processing RH of ordering constraint %d", order->id);

    // Use the constraint's explicit action if given, else look up by task key
    if (rh_action != NULL) {
        rh_actions = g_list_prepend(NULL, rh_action);

    } else if (rsc != NULL) {
        rh_actions = find_actions_by_task(rsc->actions, rsc, order->rh_action_task);
    }

    // Nothing to order against: the constraint is moot for this transition
    if (rh_actions == NULL) {
        pe_rsc_trace(rsc, "No RH-Side (%s/%s) found for constraint..."
                     " ignoring", rsc->id, order->rh_action_task);
        if (lh_action) {
            pe_rsc_trace(rsc, "LH-Side was: %s", lh_action->uuid);
        }
        return;
    }

    /* A dangling operation (left over from a migration) on the same resource
     * should not force the RH action to run.
     */
    if (lh_action && lh_action->rsc == rsc && is_set(lh_action->flags, pe_action_dangle)) {
        pe_rsc_trace(rsc, "Detected dangling operation %s -> %s", lh_action->uuid,
                     order->rh_action_task);
        clear_bit(type, pe_order_implies_then);
    }

    gIter = rh_actions;
    for (; gIter != NULL; gIter = gIter->next) {
        action_t *rh_action_iter = (action_t *) gIter->data;

        if (lh_action) {
            order_actions(lh_action, rh_action_iter, type);

        } else if (type & pe_order_implies_then) {
            // No LH action exists, so the implied RH action cannot run
            update_action_flags(rh_action_iter, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
            crm_warn("Unrunnable %s 0x%.6x", rh_action_iter->uuid, type);
        } else {
            crm_warn("neither %s 0x%.6x", rh_action_iter->uuid, type);
        }
    }

    g_list_free(rh_actions);
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* Process the "first" (left-hand) side of an ordering constraint: find (or,
 * when appropriate, create) the matching actions on lh_rsc, then hand each
 * one to rsc_order_then() or order it directly against order->rh_action.
 */
static void
rsc_order_first(pe_resource_t *lh_rsc, pe__ordering_t *order,
                pe_working_set_t *data_set)
{
    GListPtr gIter = NULL;
    GListPtr lh_actions = NULL;
    action_t *lh_action = order->lh_action;
    resource_t *rh_rsc = order->rh_rsc;

    crm_trace("Processing LH of ordering constraint %d", order->id);
    CRM_ASSERT(lh_rsc != NULL);

    // Use the constraint's explicit action if given, else look up by task key
    if (lh_action != NULL) {
        lh_actions = g_list_prepend(NULL, lh_action);

    } else {
        lh_actions = find_actions_by_task(lh_rsc->actions, lh_rsc, order->lh_action_task);
    }

    /* No matching LH action: decide whether to ignore the constraint or
     * create the action ourselves so the ordering can still take effect.
     */
    if (lh_actions == NULL && lh_rsc != rh_rsc) {
        char *key = NULL;
        char *op_type = NULL;
        guint interval_ms = 0;

        parse_op_key(order->lh_action_task, NULL, &op_type, &interval_ms);
        key = generate_op_key(lh_rsc->id, op_type, interval_ms);

        // Stopping something already stopped needs no action
        if (lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_STOPPED && safe_str_eq(op_type, RSC_STOP)) {
            free(key);
            pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - ignoring",
                         lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task);

        // Demoting something already a slave needs no action
        } else if (lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_SLAVE && safe_str_eq(op_type, RSC_DEMOTE)) {
            free(key);
            pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - ignoring",
                         lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task);

        } else {
            // Otherwise create the LH action (key ownership passes to it)
            pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - creating",
                         lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task);
            lh_action = custom_action(lh_rsc, key, op_type, NULL, TRUE, TRUE, data_set);
            lh_actions = g_list_prepend(NULL, lh_action);
        }

        free(op_type);
    }

    gIter = lh_actions;
    for (; gIter != NULL; gIter = gIter->next) {
        action_t *lh_action_iter = (action_t *) gIter->data;

        // Derive the RH resource from the RH action when not given explicitly
        if (rh_rsc == NULL && order->rh_action) {
            rh_rsc = order->rh_action->rsc;
        }
        if (rh_rsc) {
            rsc_order_then(lh_action_iter, rh_rsc, order);

        } else if (order->rh_action) {
            order_actions(lh_action_iter, order->rh_action, order->type);
        }
    }

    g_list_free(lh_actions);
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
extern void update_colo_start_chain(pe_action_t *action,
|
|
rpm-build |
3ee90c |
pe_working_set_t *data_set);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
static int
|
|
rpm-build |
3ee90c |
is_recurring_action(action_t *action)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
const char *interval_ms_s = g_hash_table_lookup(action->meta,
|
|
rpm-build |
3ee90c |
XML_LRM_ATTR_INTERVAL_MS);
|
|
rpm-build |
3ee90c |
guint interval_ms = crm_parse_ms(interval_ms_s);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
return (interval_ms > 0);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/*!
 * \internal
 * \brief Create ordering constraints for an action on a guest (container) node
 *
 * Orders \p action relative to both the container resource (the "thing"
 * hosting the guest, e.g. a VM or container) and the remote connection
 * resource for the guest node, so the action only runs while the guest
 * is reachable.
 *
 * \param[in] action    Action on a guest node; must have rsc and node set
 * \param[in] data_set  Cluster working set
 */
static void
apply_container_ordering(action_t *action, pe_working_set_t *data_set)
{
    /* VMs are also classified as containers for these purposes... in
     * that they both involve a 'thing' running on a real or remote
     * cluster node.
     *
     * This allows us to be smarter about the type and extent of
     * recovery actions required in various scenarios
     */
    resource_t *remote_rsc = NULL;
    resource_t *container = NULL;
    enum action_tasks task = text2task(action->task);

    CRM_ASSERT(action->rsc);
    CRM_ASSERT(action->node);
    CRM_ASSERT(pe__is_guest_or_remote_node(action->node));

    remote_rsc = action->node->details->remote_rsc;
    CRM_ASSERT(remote_rsc);

    container = remote_rsc->container;
    CRM_ASSERT(container);

    /* A failed container means the guest node is effectively lost; schedule
     * (guest-node) fencing before ordering anything against it.
     */
    if(is_set(container->flags, pe_rsc_failed)) {
        pe_fence_node(data_set, action->node, "container failed");
    }

    crm_trace("Order %s action %s relative to %s%s for %s%s",
              action->task, action->uuid,
              is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
              remote_rsc->id,
              is_set(container->flags, pe_rsc_failed)? "failed " : "",
              container->id);

    if (safe_str_eq(action->task, CRMD_ACTION_MIGRATE)
        || safe_str_eq(action->task, CRMD_ACTION_MIGRATED)) {
        /* Migration ops map to "no_action", but we need to apply the same
         * ordering as for stop or demote (see get_router_node()).
         */
        task = stop_rsc;
    }

    switch (task) {
        case start_rsc:
        case action_promote:
            /* Force resource recovery if the container is recovered */
            order_start_then_action(container, action, pe_order_implies_then,
                                    data_set);

            /* Wait for the connection resource to be up too */
            order_start_then_action(remote_rsc, action, pe_order_none,
                                    data_set);
            break;

        case stop_rsc:
        case action_demote:
            if (is_set(container->flags, pe_rsc_failed)) {
                /* When the container representing a guest node fails, any stop
                 * or demote actions for resources running on the guest node
                 * are implied by the container stopping. This is similar to
                 * how fencing operations work for cluster nodes and remote
                 * nodes.
                 */
            } else {
                /* Ensure the operation happens before the connection is brought
                 * down.
                 *
                 * If we really wanted to, we could order these after the
                 * connection start, IFF the container's current role was
                 * stopped (otherwise we re-introduce an ordering loop when the
                 * connection is restarting).
                 */
                order_action_then_stop(action, remote_rsc, pe_order_none,
                                       data_set);
            }
            break;

        default:
            /* Wait for the connection resource to be up */
            if (is_recurring_action(action)) {
                /* In case we ever get the recovery logic wrong, force
                 * recurring monitors to be restarted, even if just
                 * the connection was re-established
                 */
                if(task != no_action) {
                    order_start_then_action(remote_rsc, action,
                                            pe_order_implies_then, data_set);
                }
            } else {
                order_start_then_action(remote_rsc, action, pe_order_none,
                                        data_set);
            }
            break;
    }
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/*!
 * \internal
 * \brief Classify the current state of a remote node's connection resource
 *
 * The ordering of the checks below is significant: fencing-worthy states are
 * detected before "cleanly stopped", and recoverable/transient states before
 * declaring the connection alive.
 *
 * \param[in] node  Remote node to check (must have a remote_rsc)
 *
 * \return remote_state_* value describing the connection
 */
static enum remote_connection_state
get_remote_node_state(pe_node_t *node)
{
    resource_t *remote_rsc = NULL;
    node_t *cluster_node = NULL;

    CRM_ASSERT(node);

    remote_rsc = node->details->remote_rsc;
    CRM_ASSERT(remote_rsc);

    /* Cluster node currently hosting the connection resource (may be NULL) */
    cluster_node = pe__current_node(remote_rsc);

    /* If the cluster node the remote connection resource resides on
     * is unclean or went offline, we can't process any operations
     * on that remote node until after it starts elsewhere.
     */
    if(remote_rsc->next_role == RSC_ROLE_STOPPED || remote_rsc->allocated_to == NULL) {
        /* The connection resource is not going to run anywhere */

        if (cluster_node && cluster_node->details->unclean) {
            /* The remote connection is failed because its resource is on a
             * failed node and can't be recovered elsewhere, so we must fence.
             */
            return remote_state_failed;
        }

        if (is_not_set(remote_rsc->flags, pe_rsc_failed)) {
            /* Connection resource is cleanly stopped */
            return remote_state_stopped;
        }

        /* Connection resource is failed */

        if ((remote_rsc->next_role == RSC_ROLE_STOPPED)
            && remote_rsc->remote_reconnect_ms
            && node->details->remote_was_fenced
            && !pe__shutdown_requested(node)) {

            /* We won't know whether the connection is recoverable until the
             * reconnect interval expires and we reattempt connection.
             */
            return remote_state_unknown;
        }

        /* The remote connection is in a failed state. If there are any
         * resources known to be active on it (stop) or in an unknown state
         * (probe), we must assume the worst and fence it.
         */
        return remote_state_failed;

    } else if (cluster_node == NULL) {
        /* Connection is recoverable but not currently running anywhere, see if we can recover it first */
        return remote_state_unknown;

    } else if(cluster_node->details->unclean == TRUE
              || cluster_node->details->online == FALSE) {
        /* Connection is running on a dead node, see if we can recover it first */
        return remote_state_resting;

    } else if (g_list_length(remote_rsc->running_on) > 1
               && remote_rsc->partial_migration_source
               && remote_rsc->partial_migration_target) {
        /* We're in the middle of migrating a connection resource,
         * wait until after the resource migrates before performing
         * any actions.
         */
        return remote_state_resting;

    }
    /* Connection is allocated, running on a healthy node, and not migrating */
    return remote_state_alive;
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/*!
 * \internal
 * \brief Order actions on remote node relative to actions for the connection
 *
 * Depending on the action's task and the connection's state (per
 * get_remote_node_state()), create start-before / action-before-stop
 * orderings against the remote connection resource, or schedule fencing
 * when the connection is unrecoverable.
 *
 * \param[in] action    Resource action on a remote node
 * \param[in] data_set  Cluster working set
 */
static void
apply_remote_ordering(action_t *action, pe_working_set_t *data_set)
{
    resource_t *remote_rsc = NULL;
    enum action_tasks task = text2task(action->task);
    enum remote_connection_state state = get_remote_node_state(action->node);

    enum pe_ordering order_opts = pe_order_none;

    /* Only resource actions are ordered here */
    if (action->rsc == NULL) {
        return;
    }

    CRM_ASSERT(action->node);
    CRM_ASSERT(pe__is_guest_or_remote_node(action->node));

    remote_rsc = action->node->details->remote_rsc;
    CRM_ASSERT(remote_rsc);

    crm_trace("Order %s action %s relative to %s%s (state: %s)",
              action->task, action->uuid,
              is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
              remote_rsc->id, state2text(state));

    if (safe_str_eq(action->task, CRMD_ACTION_MIGRATE)
        || safe_str_eq(action->task, CRMD_ACTION_MIGRATED)) {
        /* Migration ops map to "no_action", but we need to apply the same
         * ordering as for stop or demote (see get_router_node()).
         */
        task = stop_rsc;
    }

    switch (task) {
        case start_rsc:
        case action_promote:
            order_opts = pe_order_none;

            if (state == remote_state_failed) {
                /* Force recovery, by making this action required */
                order_opts |= pe_order_implies_then;
            }

            /* Ensure connection is up before running this action */
            order_start_then_action(remote_rsc, action, order_opts, data_set);
            break;

        case stop_rsc:
            if(state == remote_state_alive) {
                /* Connection is healthy: stop resources before tearing it down */
                order_action_then_stop(action, remote_rsc,
                                       pe_order_implies_first, data_set);

            } else if(state == remote_state_failed) {
                /* The resource is active on the node, but since we don't have a
                 * valid connection, the only way to stop the resource is by
                 * fencing the node. There is no need to order the stop relative
                 * to the remote connection, since the stop will become implied
                 * by the fencing.
                 */
                pe_fence_node(data_set, action->node, "resources are active and the connection is unrecoverable");

            } else if(remote_rsc->next_role == RSC_ROLE_STOPPED) {
                /* State must be remote_state_unknown or remote_state_stopped.
                 * Since the connection is not coming back up in this
                 * transition, stop this resource first.
                 */
                order_action_then_stop(action, remote_rsc,
                                       pe_order_implies_first, data_set);

            } else {
                /* The connection is going to be started somewhere else, so
                 * stop this resource after that completes.
                 */
                order_start_then_action(remote_rsc, action, pe_order_none, data_set);
            }
            break;

        case action_demote:
            /* Only order this demote relative to the connection start if the
             * connection isn't being torn down. Otherwise, the demote would be
             * blocked because the connection start would not be allowed.
             */
            if(state == remote_state_resting || state == remote_state_unknown) {
                order_start_then_action(remote_rsc, action, pe_order_none,
                                        data_set);
            } /* Otherwise we can rely on the stop ordering */
            break;

        default:
            /* Wait for the connection resource to be up */
            if (is_recurring_action(action)) {
                /* In case we ever get the recovery logic wrong, force
                 * recurring monitors to be restarted, even if just
                 * the connection was re-established
                 */
                order_start_then_action(remote_rsc, action,
                                        pe_order_implies_then, data_set);

            } else {
                node_t *cluster_node = pe__current_node(remote_rsc);

                if(task == monitor_rsc && state == remote_state_failed) {
                    /* We would only be here if we do not know the
                     * state of the resource on the remote node.
                     * Since we have no way to find out, it is
                     * necessary to fence the node.
                     */
                    pe_fence_node(data_set, action->node, "resources are in an unknown state and the connection is unrecoverable");
                }

                if(cluster_node && state == remote_state_stopped) {
                    /* The connection is currently up, but is going
                     * down permanently.
                     *
                     * Make sure we check services are actually
                     * stopped _before_ we let the connection get
                     * closed
                     */
                    order_action_then_stop(action, remote_rsc,
                                           pe_order_runnable_left, data_set);

                } else {
                    order_start_then_action(remote_rsc, action, pe_order_none,
                                            data_set);
                }
            }
            break;
    }
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
static void
|
|
rpm-build |
3ee90c |
apply_remote_node_ordering(pe_working_set_t *data_set)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
if (is_set(data_set->flags, pe_flag_have_remote_nodes) == FALSE) {
|
|
rpm-build |
3ee90c |
return;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
for (GListPtr gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
action_t *action = (action_t *) gIter->data;
|
|
rpm-build |
3ee90c |
resource_t *remote = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
// We are only interested in resource actions
|
|
rpm-build |
3ee90c |
if (action->rsc == NULL) {
|
|
rpm-build |
3ee90c |
continue;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* Special case: If we are clearing the failcount of an actual
|
|
rpm-build |
3ee90c |
* remote connection resource, then make sure this happens before
|
|
rpm-build |
3ee90c |
* any start of the resource in this transition.
|
|
rpm-build |
3ee90c |
*/
|
|
rpm-build |
3ee90c |
if (action->rsc->is_remote_node &&
|
|
rpm-build |
3ee90c |
safe_str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT)) {
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
custom_action_order(action->rsc,
|
|
rpm-build |
3ee90c |
NULL,
|
|
rpm-build |
3ee90c |
action,
|
|
rpm-build |
3ee90c |
action->rsc,
|
|
rpm-build |
3ee90c |
generate_op_key(action->rsc->id, RSC_START, 0),
|
|
rpm-build |
3ee90c |
NULL,
|
|
rpm-build |
3ee90c |
pe_order_optional,
|
|
rpm-build |
3ee90c |
data_set);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
continue;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
// We are only interested in actions allocated to a node
|
|
rpm-build |
3ee90c |
if (action->node == NULL) {
|
|
rpm-build |
3ee90c |
continue;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (!pe__is_guest_or_remote_node(action->node)) {
|
|
rpm-build |
3ee90c |
continue;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* We are only interested in real actions.
|
|
rpm-build |
3ee90c |
*
|
|
rpm-build |
3ee90c |
* @TODO This is probably wrong; pseudo-actions might be converted to
|
|
rpm-build |
3ee90c |
* real actions and vice versa later in update_actions() at the end of
|
|
rpm-build |
3ee90c |
* stage7().
|
|
rpm-build |
3ee90c |
*/
|
|
rpm-build |
3ee90c |
if (is_set(action->flags, pe_action_pseudo)) {
|
|
rpm-build |
3ee90c |
continue;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
remote = action->node->details->remote_rsc;
|
|
rpm-build |
3ee90c |
if (remote == NULL) {
|
|
rpm-build |
3ee90c |
// Orphaned
|
|
rpm-build |
3ee90c |
continue;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* Another special case: if a resource is moving to a Pacemaker Remote
|
|
rpm-build |
3ee90c |
* node, order the stop on the original node after any start of the
|
|
rpm-build |
3ee90c |
* remote connection. This ensures that if the connection fails to
|
|
rpm-build |
3ee90c |
* start, we leave the resource running on the original node.
|
|
rpm-build |
3ee90c |
*/
|
|
rpm-build |
3ee90c |
if (safe_str_eq(action->task, RSC_START)) {
|
|
rpm-build |
3ee90c |
for (GList *item = action->rsc->actions; item != NULL;
|
|
rpm-build |
3ee90c |
item = item->next) {
|
|
rpm-build |
3ee90c |
pe_action_t *rsc_action = item->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if ((rsc_action->node->details != action->node->details)
|
|
rpm-build |
3ee90c |
&& safe_str_eq(rsc_action->task, RSC_STOP)) {
|
|
rpm-build |
3ee90c |
custom_action_order(remote, start_key(remote), NULL,
|
|
rpm-build |
3ee90c |
action->rsc, NULL, rsc_action,
|
|
rpm-build |
3ee90c |
pe_order_optional, data_set);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* The action occurs across a remote connection, so create
|
|
rpm-build |
3ee90c |
* ordering constraints that guarantee the action occurs while the node
|
|
rpm-build |
3ee90c |
* is active (after start, before stop ... things like that).
|
|
rpm-build |
3ee90c |
*
|
|
rpm-build |
3ee90c |
* This is somewhat brittle in that we need to make sure the results of
|
|
rpm-build |
3ee90c |
* this ordering are compatible with the result of get_router_node().
|
|
rpm-build |
3ee90c |
* It would probably be better to add XML_LRM_ATTR_ROUTER_NODE as part
|
|
rpm-build |
3ee90c |
* of this logic rather than action2xml().
|
|
rpm-build |
3ee90c |
*/
|
|
rpm-build |
3ee90c |
if (remote->container) {
|
|
rpm-build |
3ee90c |
crm_trace("Container ordering for %s", action->uuid);
|
|
rpm-build |
3ee90c |
apply_container_ordering(action, data_set);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
} else {
|
|
rpm-build |
3ee90c |
crm_trace("Remote ordering for %s", action->uuid);
|
|
rpm-build |
3ee90c |
apply_remote_ordering(action, data_set);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
static gboolean
|
|
rpm-build |
3ee90c |
order_first_probe_unneeded(pe_action_t * probe, pe_action_t * rh_action)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
/* No need to probe the resource on the node that is being
|
|
rpm-build |
3ee90c |
* unfenced. Otherwise it might introduce transition loop
|
|
rpm-build |
3ee90c |
* since probe will be performed after the node is
|
|
rpm-build |
3ee90c |
* unfenced.
|
|
rpm-build |
3ee90c |
*/
|
|
rpm-build |
3ee90c |
if (safe_str_eq(rh_action->task, CRM_OP_FENCE)
|
|
rpm-build |
3ee90c |
&& probe->node && rh_action->node
|
|
rpm-build |
3ee90c |
&& probe->node->details == rh_action->node->details) {
|
|
rpm-build |
3ee90c |
const char *op = g_hash_table_lookup(rh_action->meta, "stonith_action");
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (safe_str_eq(op, "on")) {
|
|
rpm-build |
3ee90c |
return TRUE;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
// Shutdown waits for probe to complete only if it's on the same node
|
|
rpm-build |
3ee90c |
if ((safe_str_eq(rh_action->task, CRM_OP_SHUTDOWN))
|
|
rpm-build |
3ee90c |
&& probe->node && rh_action->node
|
|
rpm-build |
3ee90c |
&& probe->node->details != rh_action->node->details) {
|
|
rpm-build |
3ee90c |
return TRUE;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
return FALSE;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/*!
 * \internal
 * \brief Order probes implied by stop-side ordering constraints
 *
 * For each configured ordering constraint whose first ("LH") action is a
 * stop, order the LH resource's probes before the constraint's "RH" actions
 * as well, since a probe returning "not running" can stand in for the stop
 * when the resource's status is not yet known.
 *
 * \param[in] data_set  Cluster working set
 */
static void
order_first_probes_imply_stops(pe_working_set_t * data_set)
{
    GListPtr gIter = NULL;

    for (gIter = data_set->ordering_constraints; gIter != NULL; gIter = gIter->next) {
        pe__ordering_t *order = gIter->data;
        enum pe_ordering order_type = pe_order_optional;

        pe_resource_t *lh_rsc = order->lh_rsc;
        pe_resource_t *rh_rsc = order->rh_rsc;
        pe_action_t *lh_action = order->lh_action;
        pe_action_t *rh_action = order->rh_action;
        const char *lh_action_task = order->lh_action_task;
        const char *rh_action_task = order->rh_action_task;

        GListPtr probes = NULL;
        GListPtr rh_actions = NULL;

        GListPtr pIter = NULL;

        /* Need an LH resource (to find its probes), and the constraint
         * must not be a self-ordering
         */
        if (lh_rsc == NULL) {
            continue;

        } else if (rh_rsc && lh_rsc == rh_rsc) {
            continue;
        }

        /* Each side of the constraint must be specified by either an
         * explicit action or a task name
         */
        if (lh_action == NULL && lh_action_task == NULL) {
            continue;
        }

        if (rh_action == NULL && rh_action_task == NULL) {
            continue;
        }

        /* Technically probe is expected to return "not running", which could be
         * the alternative of stop action if the status of the resource is
         * unknown yet.
         */
        if (lh_action && safe_str_neq(lh_action->task, RSC_STOP)) {
            continue;

        } else if (lh_action == NULL
                   && lh_action_task
                   && crm_ends_with(lh_action_task, "_" RSC_STOP "_0") == FALSE) {
            continue;
        }

        /* Do not probe the resource inside of a stopping container. Otherwise
         * it might introduce transition loop since probe will be performed
         * after the container starts again.
         */
        if (rh_rsc && lh_rsc->container == rh_rsc) {
            if (rh_action && safe_str_eq(rh_action->task, RSC_STOP)) {
                continue;

            } else if (rh_action == NULL && rh_action_task
                       && crm_ends_with(rh_action_task,"_" RSC_STOP "_0")) {
                continue;
            }
        }

        if (order->type == pe_order_none) {
            continue;
        }

        // Preserve the order options for future filtering
        if (is_set(order->type, pe_order_apply_first_non_migratable)) {
            set_bit(order_type, pe_order_apply_first_non_migratable);
        }

        if (is_set(order->type, pe_order_same_node)) {
            set_bit(order_type, pe_order_same_node);
        }

        // Keep the order types for future filtering
        if (order->type == pe_order_anti_colocation
            || order->type == pe_order_load) {
            order_type = order->type;
        }

        /* Collect the LH resource's probe actions; nothing to do if none */
        probes = pe__resource_actions(lh_rsc, NULL, RSC_STATUS, FALSE);
        if (probes == NULL) {
            continue;
        }

        /* Resolve the RH side into a concrete list of actions */
        if (rh_action) {
            rh_actions = g_list_prepend(rh_actions, rh_action);

        } else if (rh_rsc && rh_action_task) {
            rh_actions = find_actions(rh_rsc->actions, rh_action_task, NULL);
        }

        if (rh_actions == NULL) {
            g_list_free(probes);
            continue;
        }

        crm_trace("Processing for LH probe based on ordering constraint %s -> %s"
                  " (id=%d, type=%.6x)",
                  lh_action ? lh_action->uuid : lh_action_task,
                  rh_action ? rh_action->uuid : rh_action_task,
                  order->id, order->type);

        /* Order every applicable probe before every RH action */
        for (pIter = probes; pIter != NULL; pIter = pIter->next) {
            pe_action_t *probe = (pe_action_t *) pIter->data;
            GListPtr rIter = NULL;

            for (rIter = rh_actions; rIter != NULL; rIter = rIter->next) {
                pe_action_t *rh_action_iter = (pe_action_t *) rIter->data;

                if (order_first_probe_unneeded(probe, rh_action_iter)) {
                    continue;
                }
                order_actions(probe, rh_action_iter, order_type);
            }
        }

        g_list_free(rh_actions);
        g_list_free(probes);
    }
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
static void
|
|
rpm-build |
3ee90c |
order_first_probe_then_restart_repromote(pe_action_t * probe,
|
|
rpm-build |
3ee90c |
pe_action_t * after,
|
|
rpm-build |
3ee90c |
pe_working_set_t * data_set)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
GListPtr gIter = NULL;
|
|
rpm-build |
3ee90c |
bool interleave = FALSE;
|
|
rpm-build |
3ee90c |
pe_resource_t *compatible_rsc = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (probe == NULL
|
|
rpm-build |
3ee90c |
|| probe->rsc == NULL
|
|
rpm-build |
3ee90c |
|| probe->rsc->variant != pe_native) {
|
|
rpm-build |
3ee90c |
return;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (after == NULL
|
|
rpm-build |
3ee90c |
// Avoid running into any possible loop
|
|
rpm-build |
3ee90c |
|| is_set(after->flags, pe_action_tracking)) {
|
|
rpm-build |
3ee90c |
return;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (safe_str_neq(probe->task, RSC_STATUS)) {
|
|
rpm-build |
3ee90c |
return;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
pe_set_action_bit(after, pe_action_tracking);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
crm_trace("Processing based on %s %s -> %s %s",
|
|
rpm-build |
3ee90c |
probe->uuid,
|
|
rpm-build |
3ee90c |
probe->node ? probe->node->details->uname: "",
|
|
rpm-build |
3ee90c |
after->uuid,
|
|
rpm-build |
3ee90c |
after->node ? after->node->details->uname : "");
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (after->rsc
|
|
rpm-build |
3ee90c |
/* Better not build a dependency directly with a clone/group.
|
|
rpm-build |
3ee90c |
* We are going to proceed through the ordering chain and build
|
|
rpm-build |
3ee90c |
* dependencies with its children.
|
|
rpm-build |
3ee90c |
*/
|
|
rpm-build |
3ee90c |
&& after->rsc->variant == pe_native
|
|
rpm-build |
3ee90c |
&& probe->rsc != after->rsc) {
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
GListPtr then_actions = NULL;
|
|
rpm-build |
3ee90c |
enum pe_ordering probe_order_type = pe_order_optional;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (safe_str_eq(after->task, RSC_START)) {
|
|
rpm-build |
3ee90c |
then_actions = pe__resource_actions(after->rsc, NULL, RSC_STOP, FALSE);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
} else if (safe_str_eq(after->task, RSC_PROMOTE)) {
|
|
rpm-build |
3ee90c |
then_actions = pe__resource_actions(after->rsc, NULL, RSC_DEMOTE, FALSE);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
for (gIter = then_actions; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
pe_action_t *then = (pe_action_t *) gIter->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
// Skip any pseudo action which for example is implied by fencing
|
|
rpm-build |
3ee90c |
if (is_set(then->flags, pe_action_pseudo)) {
|
|
rpm-build |
3ee90c |
continue;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
order_actions(probe, then, probe_order_type);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
g_list_free(then_actions);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (after->rsc
|
|
rpm-build |
3ee90c |
&& after->rsc->variant > pe_group) {
|
|
rpm-build |
3ee90c |
const char *interleave_s = g_hash_table_lookup(after->rsc->meta,
|
|
rpm-build |
3ee90c |
XML_RSC_ATTR_INTERLEAVE);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
interleave = crm_is_true(interleave_s);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (interleave) {
|
|
rpm-build |
3ee90c |
/* For an interleaved clone, we should build a dependency only
|
|
rpm-build |
3ee90c |
* with the relevant clone child.
|
|
rpm-build |
3ee90c |
*/
|
|
rpm-build |
3ee90c |
compatible_rsc = find_compatible_child(probe->rsc,
|
|
rpm-build |
3ee90c |
after->rsc,
|
|
rpm-build |
3ee90c |
RSC_ROLE_UNKNOWN,
|
|
rpm-build |
3ee90c |
FALSE, data_set);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
for (gIter = after->actions_after; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
pe_action_wrapper_t *after_wrapper = (pe_action_wrapper_t *) gIter->data;
|
|
rpm-build |
3ee90c |
/* pe_order_implies_then is the reason why a required A.start
|
|
rpm-build |
3ee90c |
* implies/enforces B.start to be required too, which is the cause of
|
|
rpm-build |
3ee90c |
* B.restart/re-promote.
|
|
rpm-build |
3ee90c |
*
|
|
rpm-build |
3ee90c |
* Not sure about pe_order_implies_then_on_node though. It's now only
|
|
rpm-build |
3ee90c |
* used for unfencing case, which tends to introduce transition
|
|
rpm-build |
3ee90c |
* loops...
|
|
rpm-build |
3ee90c |
*/
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (is_not_set(after_wrapper->type, pe_order_implies_then)) {
|
|
rpm-build |
3ee90c |
/* The order type between a group/clone and its child such as
|
|
rpm-build |
3ee90c |
* B.start-> B_child.start is:
|
|
rpm-build |
3ee90c |
* pe_order_implies_first_printed | pe_order_runnable_left
|
|
rpm-build |
3ee90c |
*
|
|
rpm-build |
3ee90c |
* Proceed through the ordering chain and build dependencies with
|
|
rpm-build |
3ee90c |
* its children.
|
|
rpm-build |
3ee90c |
*/
|
|
rpm-build |
3ee90c |
if (after->rsc == NULL
|
|
rpm-build |
3ee90c |
|| after->rsc->variant < pe_group
|
|
rpm-build |
3ee90c |
|| probe->rsc->parent == after->rsc
|
|
rpm-build |
3ee90c |
|| after_wrapper->action->rsc == NULL
|
|
rpm-build |
3ee90c |
|| after_wrapper->action->rsc->variant > pe_group
|
|
rpm-build |
3ee90c |
|| after->rsc != after_wrapper->action->rsc->parent) {
|
|
rpm-build |
3ee90c |
continue;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* Proceed to the children of a group or a non-interleaved clone.
|
|
rpm-build |
3ee90c |
* For an interleaved clone, proceed only to the relevant child.
|
|
rpm-build |
3ee90c |
*/
|
|
rpm-build |
3ee90c |
if (after->rsc->variant > pe_group
|
|
rpm-build |
3ee90c |
&& interleave == TRUE
|
|
rpm-build |
3ee90c |
&& (compatible_rsc == NULL
|
|
rpm-build |
3ee90c |
|| compatible_rsc != after_wrapper->action->rsc)) {
|
|
rpm-build |
3ee90c |
continue;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
crm_trace("Proceeding through %s %s -> %s %s (type=0x%.6x)",
|
|
rpm-build |
3ee90c |
after->uuid,
|
|
rpm-build |
3ee90c |
after->node ? after->node->details->uname: "",
|
|
rpm-build |
3ee90c |
after_wrapper->action->uuid,
|
|
rpm-build |
3ee90c |
after_wrapper->action->node ? after_wrapper->action->node->details->uname : "",
|
|
rpm-build |
3ee90c |
after_wrapper->type);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
order_first_probe_then_restart_repromote(probe, after_wrapper->action, data_set);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
static void clear_actions_tracking_flag(pe_working_set_t * data_set)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
GListPtr gIter = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
pe_action_t *action = (pe_action_t *) gIter->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (is_set(action->flags, pe_action_tracking)) {
|
|
rpm-build |
3ee90c |
pe_clear_action_bit(action, pe_action_tracking);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
static void
|
|
rpm-build |
3ee90c |
order_first_rsc_probes(pe_resource_t * rsc, pe_working_set_t * data_set)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
GListPtr gIter = NULL;
|
|
rpm-build |
3ee90c |
GListPtr probes = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
pe_resource_t * child = (pe_resource_t *) gIter->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
order_first_rsc_probes(child, data_set);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (rsc->variant != pe_native) {
|
|
rpm-build |
3ee90c |
return;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
probes = pe__resource_actions(rsc, NULL, RSC_STATUS, FALSE);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
for (gIter = probes; gIter != NULL; gIter= gIter->next) {
|
|
rpm-build |
3ee90c |
pe_action_t *probe = (pe_action_t *) gIter->data;
|
|
rpm-build |
3ee90c |
GListPtr aIter = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
for (aIter = probe->actions_after; aIter != NULL; aIter = aIter->next) {
|
|
rpm-build |
3ee90c |
pe_action_wrapper_t *after_wrapper = (pe_action_wrapper_t *) aIter->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
order_first_probe_then_restart_repromote(probe, after_wrapper->action, data_set);
|
|
rpm-build |
3ee90c |
clear_actions_tracking_flag(data_set);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
g_list_free(probes);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
static void
|
|
rpm-build |
3ee90c |
order_first_probes(pe_working_set_t * data_set)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
GListPtr gIter = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
pe_resource_t *rsc = (pe_resource_t *) gIter->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
order_first_rsc_probes(rsc, data_set);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
order_first_probes_imply_stops(data_set);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
static void
|
|
rpm-build |
3ee90c |
order_then_probes(pe_working_set_t * data_set)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
#if 0
|
|
rpm-build |
3ee90c |
GListPtr gIter = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
resource_t *rsc = (resource_t *) gIter->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* Given "A then B", we would prefer to wait for A to be
|
|
rpm-build |
3ee90c |
* started before probing B.
|
|
rpm-build |
3ee90c |
*
|
|
rpm-build |
3ee90c |
* If A was a filesystem on which the binaries and data for B
|
|
rpm-build |
3ee90c |
* lived, it would have been useful if the author of B's agent
|
|
rpm-build |
3ee90c |
* could assume that A is running before B.monitor will be
|
|
rpm-build |
3ee90c |
* called.
|
|
rpm-build |
3ee90c |
*
|
|
rpm-build |
3ee90c |
* However we can't _only_ probe once A is running, otherwise
|
|
rpm-build |
3ee90c |
* we'd not detect the state of B if A could not be started
|
|
rpm-build |
3ee90c |
* for some reason.
|
|
rpm-build |
3ee90c |
*
|
|
rpm-build |
3ee90c |
* In practice however, we cannot even do an opportunistic
|
|
rpm-build |
3ee90c |
* version of this because B may be moving:
|
|
rpm-build |
3ee90c |
*
|
|
rpm-build |
3ee90c |
* B.probe -> B.start
|
|
rpm-build |
3ee90c |
* B.probe -> B.stop
|
|
rpm-build |
3ee90c |
* B.stop -> B.start
|
|
rpm-build |
3ee90c |
* A.stop -> A.start
|
|
rpm-build |
3ee90c |
* A.start -> B.probe
|
|
rpm-build |
3ee90c |
*
|
|
rpm-build |
3ee90c |
* So far so good, but if we add the result of this code:
|
|
rpm-build |
3ee90c |
*
|
|
rpm-build |
3ee90c |
* B.stop -> A.stop
|
|
rpm-build |
3ee90c |
*
|
|
rpm-build |
3ee90c |
* Then we get a loop:
|
|
rpm-build |
3ee90c |
*
|
|
rpm-build |
3ee90c |
* B.probe -> B.stop -> A.stop -> A.start -> B.probe
|
|
rpm-build |
3ee90c |
*
|
|
rpm-build |
3ee90c |
* We could kill the 'B.probe -> B.stop' dependency, but that
|
|
rpm-build |
3ee90c |
* could mean stopping B "too" soon, because B.start must wait
|
|
rpm-build |
3ee90c |
* for the probes to complete.
|
|
rpm-build |
3ee90c |
*
|
|
rpm-build |
3ee90c |
* Another option is to allow it only if A is a non-unique
|
|
rpm-build |
3ee90c |
* clone with clone-max == node-max (since we'll never be
|
|
rpm-build |
3ee90c |
* moving it). However, we could still be stopping one
|
|
rpm-build |
3ee90c |
* instance at the same time as starting another.
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
* The complexity of checking for allowed conditions combined
|
|
rpm-build |
3ee90c |
* with the ever narrowing usecase suggests that this code
|
|
rpm-build |
3ee90c |
* should remain disabled until someone gets smarter.
|
|
rpm-build |
3ee90c |
*/
|
|
rpm-build |
3ee90c |
action_t *start = NULL;
|
|
rpm-build |
3ee90c |
GListPtr actions = NULL;
|
|
rpm-build |
3ee90c |
GListPtr probes = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
actions = pe__resource_actions(rsc, NULL, RSC_START, FALSE);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (actions) {
|
|
rpm-build |
3ee90c |
start = actions->data;
|
|
rpm-build |
3ee90c |
g_list_free(actions);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if(start == NULL) {
|
|
rpm-build |
3ee90c |
crm_err("No start action for %s", rsc->id);
|
|
rpm-build |
3ee90c |
continue;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
probes = pe__resource_actions(rsc, NULL, RSC_STATUS, FALSE);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
for (actions = start->actions_before; actions != NULL; actions = actions->next) {
|
|
rpm-build |
3ee90c |
action_wrapper_t *before = (action_wrapper_t *) actions->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
GListPtr pIter = NULL;
|
|
rpm-build |
3ee90c |
action_t *first = before->action;
|
|
rpm-build |
3ee90c |
resource_t *first_rsc = first->rsc;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if(first->required_runnable_before) {
|
|
rpm-build |
3ee90c |
GListPtr clone_actions = NULL;
|
|
rpm-build |
3ee90c |
for (clone_actions = first->actions_before; clone_actions != NULL; clone_actions = clone_actions->next) {
|
|
rpm-build |
3ee90c |
before = (action_wrapper_t *) clone_actions->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
crm_trace("Testing %s -> %s (%p) for %s", first->uuid, before->action->uuid, before->action->rsc, start->uuid);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
CRM_ASSERT(before->action->rsc);
|
|
rpm-build |
3ee90c |
first_rsc = before->action->rsc;
|
|
rpm-build |
3ee90c |
break;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
} else if(safe_str_neq(first->task, RSC_START)) {
|
|
rpm-build |
3ee90c |
crm_trace("Not a start op %s for %s", first->uuid, start->uuid);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if(first_rsc == NULL) {
|
|
rpm-build |
3ee90c |
continue;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
} else if(uber_parent(first_rsc) == uber_parent(start->rsc)) {
|
|
rpm-build |
3ee90c |
crm_trace("Same parent %s for %s", first_rsc->id, start->uuid);
|
|
rpm-build |
3ee90c |
continue;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
} else if(FALSE && pe_rsc_is_clone(uber_parent(first_rsc)) == FALSE) {
|
|
rpm-build |
3ee90c |
crm_trace("Not a clone %s for %s", first_rsc->id, start->uuid);
|
|
rpm-build |
3ee90c |
continue;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
crm_err("Applying %s before %s %d", first->uuid, start->uuid, uber_parent(first_rsc)->variant);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
for (pIter = probes; pIter != NULL; pIter = pIter->next) {
|
|
rpm-build |
3ee90c |
action_t *probe = (action_t *) pIter->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
crm_err("Ordering %s before %s", first->uuid, probe->uuid);
|
|
rpm-build |
3ee90c |
order_actions(first, probe, pe_order_optional);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
#endif
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
static void
|
|
rpm-build |
3ee90c |
order_probes(pe_working_set_t * data_set)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
order_first_probes(data_set);
|
|
rpm-build |
3ee90c |
order_then_probes(data_set);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
gboolean
|
|
rpm-build |
3ee90c |
stage7(pe_working_set_t * data_set)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
GList *gIter = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
crm_trace("Applying ordering constraints");
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* Don't ask me why, but apparently they need to be processed in
|
|
rpm-build |
3ee90c |
* the order they were created in... go figure
|
|
rpm-build |
3ee90c |
*
|
|
rpm-build |
3ee90c |
* Also g_list_append() has horrendous performance characteristics
|
|
rpm-build |
3ee90c |
* So we need to use g_list_prepend() and then reverse the list here
|
|
rpm-build |
3ee90c |
*/
|
|
rpm-build |
3ee90c |
data_set->ordering_constraints = g_list_reverse(data_set->ordering_constraints);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
for (gIter = data_set->ordering_constraints; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
pe__ordering_t *order = gIter->data;
|
|
rpm-build |
3ee90c |
resource_t *rsc = order->lh_rsc;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
crm_trace("Applying ordering constraint: %d", order->id);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (rsc != NULL) {
|
|
rpm-build |
3ee90c |
crm_trace("rsc_action-to-*");
|
|
rpm-build |
3ee90c |
rsc_order_first(rsc, order, data_set);
|
|
rpm-build |
3ee90c |
continue;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
rsc = order->rh_rsc;
|
|
rpm-build |
3ee90c |
if (rsc != NULL) {
|
|
rpm-build |
3ee90c |
crm_trace("action-to-rsc_action");
|
|
rpm-build |
3ee90c |
rsc_order_then(order->lh_action, rsc, order);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
} else {
|
|
rpm-build |
3ee90c |
crm_trace("action-to-action");
|
|
rpm-build |
3ee90c |
order_actions(order->lh_action, order->rh_action, order->type);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
action_t *action = (action_t *) gIter->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
update_colo_start_chain(action, data_set);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
crm_trace("Ordering probes");
|
|
rpm-build |
3ee90c |
order_probes(data_set);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
crm_trace("Updating %d actions", g_list_length(data_set->actions));
|
|
rpm-build |
3ee90c |
for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
action_t *action = (action_t *) gIter->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
update_action(action, data_set);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
// Check for invalid orderings
|
|
rpm-build |
3ee90c |
for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
pe_action_t *action = (pe_action_t *) gIter->data;
|
|
rpm-build |
3ee90c |
pe_action_wrapper_t *input = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
for (GList *input_iter = action->actions_before;
|
|
rpm-build |
3ee90c |
input_iter != NULL; input_iter = input_iter->next) {
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
input = (pe_action_wrapper_t *) input_iter->data;
|
|
rpm-build |
3ee90c |
if (pcmk__ordering_is_invalid(action, input)) {
|
|
rpm-build |
3ee90c |
input->type = pe_order_none;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
LogNodeActions(data_set, FALSE);
|
|
rpm-build |
3ee90c |
for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
resource_t *rsc = (resource_t *) gIter->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
LogActions(rsc, data_set, FALSE);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
return TRUE;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
static int transition_id = -1;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/*!
|
|
rpm-build |
3ee90c |
* \internal
|
|
rpm-build |
3ee90c |
* \brief Log a message after calculating a transition
|
|
rpm-build |
3ee90c |
*
|
|
rpm-build |
3ee90c |
* \param[in] filename Where transition input is stored
|
|
rpm-build |
3ee90c |
*/
|
|
rpm-build |
3ee90c |
void
|
|
rpm-build |
3ee90c |
pcmk__log_transition_summary(const char *filename)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
if (was_processing_error) {
|
|
rpm-build |
3ee90c |
crm_err("Calculated transition %d (with errors), saving inputs in %s",
|
|
rpm-build |
3ee90c |
transition_id, filename);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
} else if (was_processing_warning) {
|
|
rpm-build |
3ee90c |
crm_warn("Calculated transition %d (with warnings), saving inputs in %s",
|
|
rpm-build |
3ee90c |
transition_id, filename);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
} else {
|
|
rpm-build |
3ee90c |
crm_notice("Calculated transition %d, saving inputs in %s",
|
|
rpm-build |
3ee90c |
transition_id, filename);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
if (crm_config_error) {
|
|
rpm-build |
3ee90c |
crm_notice("Configuration errors found during scheduler processing,"
|
|
rpm-build |
3ee90c |
" please run \"crm_verify -L\" to identify issues");
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/*
|
|
rpm-build |
3ee90c |
* Create a dependency graph to send to the transitioner (via the controller)
|
|
rpm-build |
3ee90c |
*/
|
|
rpm-build |
3ee90c |
gboolean
|
|
rpm-build |
3ee90c |
stage8(pe_working_set_t * data_set)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
GListPtr gIter = NULL;
|
|
rpm-build |
3ee90c |
const char *value = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
transition_id++;
|
|
rpm-build |
3ee90c |
crm_trace("Creating transition graph %d.", transition_id);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
data_set->graph = create_xml_node(NULL, XML_TAG_GRAPH);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
value = pe_pref(data_set->config_hash, "cluster-delay");
|
|
rpm-build |
3ee90c |
crm_xml_add(data_set->graph, "cluster-delay", value);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
value = pe_pref(data_set->config_hash, "stonith-timeout");
|
|
rpm-build |
3ee90c |
crm_xml_add(data_set->graph, "stonith-timeout", value);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
crm_xml_add(data_set->graph, "failed-stop-offset", "INFINITY");
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (is_set(data_set->flags, pe_flag_start_failure_fatal)) {
|
|
rpm-build |
3ee90c |
crm_xml_add(data_set->graph, "failed-start-offset", "INFINITY");
|
|
rpm-build |
3ee90c |
} else {
|
|
rpm-build |
3ee90c |
crm_xml_add(data_set->graph, "failed-start-offset", "1");
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
value = pe_pref(data_set->config_hash, "batch-limit");
|
|
rpm-build |
3ee90c |
crm_xml_add(data_set->graph, "batch-limit", value);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
crm_xml_add_int(data_set->graph, "transition_id", transition_id);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
value = pe_pref(data_set->config_hash, "migration-limit");
|
|
rpm-build |
3ee90c |
if (crm_int_helper(value, NULL) > 0) {
|
|
rpm-build |
3ee90c |
crm_xml_add(data_set->graph, "migration-limit", value);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (data_set->recheck_by > 0) {
|
|
rpm-build |
3ee90c |
char *recheck_epoch = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
recheck_epoch = crm_strdup_printf("%llu",
|
|
rpm-build |
3ee90c |
(long long) data_set->recheck_by);
|
|
rpm-build |
3ee90c |
crm_xml_add(data_set->graph, "recheck-by", recheck_epoch);
|
|
rpm-build |
3ee90c |
free(recheck_epoch);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* errors...
|
|
rpm-build |
3ee90c |
slist_iter(action, action_t, action_list, lpc,
|
|
rpm-build |
3ee90c |
if(action->optional == FALSE && action->runnable == FALSE) {
|
|
rpm-build |
3ee90c |
print_action("Ignoring", action, TRUE);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
);
|
|
rpm-build |
3ee90c |
*/
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* The following code will de-duplicate action inputs, so nothing past this
|
|
rpm-build |
3ee90c |
* should rely on the action input type flags retaining their original
|
|
rpm-build |
3ee90c |
* values.
|
|
rpm-build |
3ee90c |
*/
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
gIter = data_set->resources;
|
|
rpm-build |
3ee90c |
for (; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
resource_t *rsc = (resource_t *) gIter->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
pe_rsc_trace(rsc, "processing actions for rsc=%s", rsc->id);
|
|
rpm-build |
3ee90c |
rsc->cmds->expand(rsc, data_set);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
crm_log_xml_trace(data_set->graph, "created resource-driven action list");
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* pseudo action to distribute list of nodes with maintenance state update */
|
|
rpm-build |
3ee90c |
add_maintenance_update(data_set);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
/* catch any non-resource specific actions */
|
|
rpm-build |
3ee90c |
crm_trace("processing non-resource actions");
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
gIter = data_set->actions;
|
|
rpm-build |
3ee90c |
for (; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
action_t *action = (action_t *) gIter->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (action->rsc
|
|
rpm-build |
3ee90c |
&& action->node
|
|
rpm-build |
3ee90c |
&& action->node->details->shutdown
|
|
rpm-build |
3ee90c |
&& is_not_set(action->rsc->flags, pe_rsc_maintenance)
|
|
rpm-build |
3ee90c |
&& is_not_set(action->flags, pe_action_optional)
|
|
rpm-build |
3ee90c |
&& is_not_set(action->flags, pe_action_runnable)
|
|
rpm-build |
3ee90c |
&& crm_str_eq(action->task, RSC_STOP, TRUE)
|
|
rpm-build |
3ee90c |
) {
|
|
rpm-build |
3ee90c |
/* Eventually we should just ignore the 'fence' case
|
|
rpm-build |
3ee90c |
* But for now it's the best way to detect (in CTS) when
|
|
rpm-build |
3ee90c |
* CIB resource updates are being lost
|
|
rpm-build |
3ee90c |
*/
|
|
rpm-build |
3ee90c |
if (is_set(data_set->flags, pe_flag_have_quorum)
|
|
rpm-build |
3ee90c |
|| data_set->no_quorum_policy == no_quorum_ignore) {
|
|
rpm-build |
3ee90c |
crm_crit("Cannot %s node '%s' because of %s:%s%s (%s)",
|
|
rpm-build |
3ee90c |
action->node->details->unclean ? "fence" : "shut down",
|
|
rpm-build |
3ee90c |
action->node->details->uname, action->rsc->id,
|
|
rpm-build |
3ee90c |
is_not_set(action->rsc->flags, pe_rsc_managed) ? " unmanaged" : " blocked",
|
|
rpm-build |
3ee90c |
is_set(action->rsc->flags, pe_rsc_failed) ? " failed" : "",
|
|
rpm-build |
3ee90c |
action->uuid);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
graph_element_from_action(action, data_set);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
crm_log_xml_trace(data_set->graph, "created generic action list");
|
|
rpm-build |
3ee90c |
crm_trace("Created transition graph %d.", transition_id);
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
return TRUE;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
void
|
|
rpm-build |
3ee90c |
LogNodeActions(pe_working_set_t * data_set, gboolean terminal)
|
|
rpm-build |
3ee90c |
{
|
|
rpm-build |
3ee90c |
GListPtr gIter = NULL;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
|
|
rpm-build |
3ee90c |
char *node_name = NULL;
|
|
rpm-build |
3ee90c |
char *task = NULL;
|
|
rpm-build |
3ee90c |
action_t *action = (action_t *) gIter->data;
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (action->rsc != NULL) {
|
|
rpm-build |
3ee90c |
continue;
|
|
rpm-build |
3ee90c |
} else if (is_set(action->flags, pe_action_optional)) {
|
|
rpm-build |
3ee90c |
continue;
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (pe__is_guest_node(action->node)) {
|
|
rpm-build |
3ee90c |
node_name = crm_strdup_printf("%s (resource: %s)", action->node->details->uname, action->node->details->remote_rsc->container->id);
|
|
rpm-build |
3ee90c |
} else if(action->node) {
|
|
rpm-build |
3ee90c |
node_name = crm_strdup_printf("%s", action->node->details->uname);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if (safe_str_eq(action->task, CRM_OP_SHUTDOWN)) {
|
|
rpm-build |
3ee90c |
task = strdup("Shutdown");
|
|
rpm-build |
3ee90c |
} else if (safe_str_eq(action->task, CRM_OP_FENCE)) {
|
|
rpm-build |
3ee90c |
const char *op = g_hash_table_lookup(action->meta, "stonith_action");
|
|
rpm-build |
3ee90c |
task = crm_strdup_printf("Fence (%s)", op);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
if(task == NULL) {
|
|
rpm-build |
3ee90c |
/* Nothing to report */
|
|
rpm-build |
3ee90c |
} else if(terminal && action->reason) {
|
|
rpm-build |
3ee90c |
printf(" * %s %s '%s'\n", task, node_name, action->reason);
|
|
rpm-build |
3ee90c |
} else if(terminal) {
|
|
rpm-build |
3ee90c |
printf(" * %s %s\n", task, node_name);
|
|
rpm-build |
3ee90c |
} else if(action->reason) {
|
|
rpm-build |
3ee90c |
crm_notice(" * %s %s '%s'\n", task, node_name, action->reason);
|
|
rpm-build |
3ee90c |
} else {
|
|
rpm-build |
3ee90c |
crm_notice(" * %s %s\n", task, node_name);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
|
|
rpm-build |
3ee90c |
free(node_name);
|
|
rpm-build |
3ee90c |
free(task);
|
|
rpm-build |
3ee90c |
}
|
|
rpm-build |
3ee90c |
}
|