/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
*
* (C) 2001 by Argonne National Laboratory.
* See COPYRIGHT in top-level directory.
*/
#include "mpiimpl.h"
#if !defined(MPIR_REQUEST_PTR_ARRAY_SIZE)
/* use a larger default size of 64 in order to enhance SQMR performance */
#define MPIR_REQUEST_PTR_ARRAY_SIZE 64
#endif
/* -- Begin Profiling Symbol Block for routine MPI_Waitall */
#if defined(HAVE_PRAGMA_WEAK)
#pragma weak MPI_Waitall = PMPI_Waitall
#elif defined(HAVE_PRAGMA_HP_SEC_DEF)
#pragma _HP_SECONDARY_DEF PMPI_Waitall MPI_Waitall
#elif defined(HAVE_PRAGMA_CRI_DUP)
#pragma _CRI duplicate MPI_Waitall as PMPI_Waitall
#elif defined(HAVE_WEAK_ATTRIBUTE)
int MPI_Waitall(int count, MPI_Request array_of_requests[], MPI_Status array_of_statuses[])
__attribute__ ((weak, alias("PMPI_Waitall")));
#endif
/* -- End Profiling Symbol Block */
/* Define MPICH_MPI_FROM_PMPI if weak symbols are not supported to build
the MPI routines */
#ifndef MPICH_MPI_FROM_PMPI
#undef MPI_Waitall
#define MPI_Waitall PMPI_Waitall
#undef FUNCNAME
#define FUNCNAME MPIR_Waitall_impl
#undef FCNAME
#define FCNAME MPL_QUOTE(FUNCNAME)
/* MPIR_Waitall_impl - block inside the progress engine until every request
 * in request_ptrs[0..count-1] has completed.
 *
 * requests_property: bitmask of MPIR_REQUESTS_PROPERTY__* flags describing
 * the request array.  When MPIR_REQUESTS_PROPERTY__NO_NULL is set the
 * caller guarantees no NULL entries, which lets us skip the per-entry
 * NULL test.  array_of_statuses is accepted for interface symmetry but is
 * not consulted here; status extraction happens in the caller.
 *
 * Returns MPI_SUCCESS, or the error produced by MPID_Progress_wait. */
int MPIR_Waitall_impl(int count, MPIR_Request * request_ptrs[], MPI_Status array_of_statuses[],
                      int requests_property)
{
    int mpi_errno = MPI_SUCCESS;
    MPID_Progress_state progress_state;
    int idx;

    /* Both paths bracket the wait loops with a single progress-state
     * start/end pair; on error the state is closed before popping. */
    MPID_Progress_start(&progress_state);
    if (requests_property & MPIR_REQUESTS_PROPERTY__NO_NULL) {
        /* Fast path: every slot holds a live request pointer. */
        for (idx = 0; idx < count; ++idx) {
            while (!MPIR_Request_is_complete(request_ptrs[idx])) {
                mpi_errno = MPID_Progress_wait(&progress_state);
                /* must check and handle the error, can't guard with HAVE_ERROR_CHECKING, but it's
                 * OK for the error case to be slower */
                if (unlikely(mpi_errno)) {
                    /* --BEGIN ERROR HANDLING-- */
                    MPID_Progress_end(&progress_state);
                    MPIR_ERR_POP(mpi_errno);
                    /* --END ERROR HANDLING-- */
                }
            }
        }
    } else {
        /* General path: NULL entries are permitted and simply skipped. */
        for (idx = 0; idx < count; ++idx) {
            if (request_ptrs[idx] == NULL)
                continue;
            /* spin in the progress engine until this request completes */
            while (!MPIR_Request_is_complete(request_ptrs[idx])) {
                /* generalized requests should already be finished */
                MPIR_Assert(request_ptrs[idx]->kind != MPIR_REQUEST_KIND__GREQUEST);
                mpi_errno = MPID_Progress_wait(&progress_state);
                if (mpi_errno != MPI_SUCCESS) {
                    /* --BEGIN ERROR HANDLING-- */
                    MPID_Progress_end(&progress_state);
                    MPIR_ERR_POP(mpi_errno);
                    /* --END ERROR HANDLING-- */
                }
            }
        }
    }
    MPID_Progress_end(&progress_state);

  fn_exit:
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
#undef FUNCNAME
#define FUNCNAME MPIR_Waitall
#undef FCNAME
#define FCNAME MPL_QUOTE(FUNCNAME)
/* MPIR_Waitall - internal entry point backing MPI_Waitall.
 *
 * Converts the MPI_Request handles in array_of_requests into request
 * object pointers, then processes the requests in batches of
 * MPIR_CVAR_REQUEST_BATCH_SIZE entries: each batch is driven to
 * completion via MPID_Waitall and then run through completion
 * processing (free non-persistent requests, reset their handles to
 * MPI_REQUEST_NULL, and fill in statuses unless MPI_STATUSES_IGNORE).
 * If any request completes with an error, MPI_ERR_IN_STATUS is
 * returned and the per-request error codes are recorded in
 * array_of_statuses; requests not yet processed are marked
 * MPI_ERR_PENDING (or MPIX_ERR_PROC_FAILED_PENDING).
 *
 * Returns MPI_SUCCESS, MPI_ERR_IN_STATUS, or an error popped from a
 * lower layer. */
int MPIR_Waitall(int count, MPI_Request array_of_requests[], MPI_Status array_of_statuses[])
{
    int mpi_errno = MPI_SUCCESS;
    /* stack-allocated pointer array for the common small-count case;
     * heap-allocated below only when count exceeds it */
    MPIR_Request *request_ptr_array[MPIR_REQUEST_PTR_ARRAY_SIZE];
    MPIR_Request **request_ptrs = request_ptr_array;
    int i, j, ii, icount;
    int n_completed;
    int active_flag;
    int rc = MPI_SUCCESS;
    int disabled_anysource = FALSE;
    const int ignoring_statuses = (array_of_statuses == MPI_STATUSES_IGNORE);
    /* per-batch property bitmask; starts fully optimistic and is
     * narrowed as requests are examined */
    int requests_property = MPIR_REQUESTS_PROPERTY__OPT_ALL;
    MPIR_CHKLMEM_DECL(1);
    /* Convert MPI request handles to a request object pointers */
    if (count > MPIR_REQUEST_PTR_ARRAY_SIZE) {
        MPIR_CHKLMEM_MALLOC(request_ptrs, MPIR_Request **, count * sizeof(MPIR_Request *),
                            mpi_errno, "request pointers", MPL_MEM_OBJECT);
    }
    /* Walk the request array one batch at a time; batching bounds the
     * work handed to MPID_Waitall in a single call. */
    for (ii = 0; ii < count; ii += MPIR_CVAR_REQUEST_BATCH_SIZE) {
        /* icount = size of this batch (last batch may be short) */
        icount = count - ii > MPIR_CVAR_REQUEST_BATCH_SIZE ?
            MPIR_CVAR_REQUEST_BATCH_SIZE : count - ii;
        n_completed = 0;
        requests_property = MPIR_REQUESTS_PROPERTY__OPT_ALL;
        /* First pass over the batch: resolve handles, validate, and
         * classify the requests to pick a completion strategy. */
        for (i = ii; i < ii + icount; i++) {
            if (array_of_requests[i] != MPI_REQUEST_NULL) {
                MPIR_Request_get_ptr(array_of_requests[i], request_ptrs[i]);
                /* Validate object pointers if error checking is enabled */
#ifdef HAVE_ERROR_CHECKING
                {
                    MPID_BEGIN_ERROR_CHECKS;
                    {
                        MPIR_Request_valid_ptr(request_ptrs[i], mpi_errno);
                        if (mpi_errno)
                            MPIR_ERR_POP(mpi_errno);
                        /* an MPROBE request must be completed by MPI_(I)Mrecv, not waited on */
                        MPIR_ERR_CHKANDJUMP1((request_ptrs[i]->kind == MPIR_REQUEST_KIND__MPROBE),
                                             mpi_errno, MPI_ERR_ARG, "**msgnotreq",
                                             "**msgnotreq %d", i);
                    }
                    MPID_END_ERROR_CHECKS;
                }
#endif
                /* If one of the requests is an anysource on a communicator that's
                 * disabled such communication, convert this operation to a testall
                 * instead to prevent getting stuck in the progress engine. */
                if (unlikely(MPIR_Request_is_anysrc_mismatched(request_ptrs[i]))) {
                    disabled_anysource = TRUE;
                }
                if (request_ptrs[i]->kind != MPIR_REQUEST_KIND__RECV &&
                    request_ptrs[i]->kind != MPIR_REQUEST_KIND__SEND) {
                    /* non-send/recv request disqualifies the fastpath below */
                    requests_property &= ~MPIR_REQUESTS_PROPERTY__SEND_RECV_ONLY;
                    /* If this is extended generalized request, we can complete it here. */
                    if (MPIR_Request_has_wait_fn(request_ptrs[i])) {
                        while (!MPIR_Request_is_complete(request_ptrs[i]))
                            MPIR_Grequest_wait(request_ptrs[i], &array_of_statuses[i]);
                    }
                }
            } else {
                /* MPI_REQUEST_NULL entry: counts as already complete, with an
                 * empty status per the MPI standard */
                if (!ignoring_statuses)
                    MPIR_Status_set_empty(&array_of_statuses[i]);
                request_ptrs[i] = NULL;
                n_completed += 1;
                requests_property &= ~MPIR_REQUESTS_PROPERTY__NO_NULL;
            }
        }
        /* whole batch was MPI_REQUEST_NULL -- nothing to wait for */
        if (n_completed == icount) {
            continue;
        }
        /* Degrade to a (nonblocking) testall rather than risk deadlocking in
         * the progress engine when anysource receives have been disabled. */
        if (unlikely(disabled_anysource)) {
            mpi_errno =
                MPIR_Testall(count, array_of_requests, &disabled_anysource, array_of_statuses);
            goto fn_exit;
        }
        /* Block until every request in this batch has completed. */
        mpi_errno = MPID_Waitall(icount, &request_ptrs[ii], array_of_statuses, requests_property);
        if (mpi_errno)
            MPIR_ERR_POP(mpi_errno);
        if (requests_property == MPIR_REQUESTS_PROPERTY__OPT_ALL && ignoring_statuses) {
            /* NOTE-O1: high-message-rate optimization.  For simple send and recv
             * operations and MPI_STATUSES_IGNORE we use a fastpath approach that strips
             * out as many unnecessary jumps and error handling as possible.
             *
             * Possible variation: permit request_ptrs[i]==NULL at the cost of an
             * additional branch inside the for-loop below. */
            for (i = ii; i < ii + icount; ++i) {
                rc = MPIR_Request_completion_processing_fastpath(&array_of_requests[i],
                                                                 request_ptrs[i]);
                if (rc != MPI_SUCCESS) {
                    MPIR_ERR_SET(mpi_errno, MPI_ERR_IN_STATUS, "**instatus");
                    goto fn_exit;
                }
            }
            continue;
        }
        /* Statuses ignored but batch contains NULLs or non-send/recv
         * requests: full completion processing, no status extraction. */
        if (ignoring_statuses) {
            for (i = ii; i < ii + icount; i++) {
                if (request_ptrs[i] == NULL)
                    continue;
                rc = MPIR_Request_completion_processing(request_ptrs[i], MPI_STATUS_IGNORE,
                                                        &active_flag);
                /* non-persistent requests are released and their user handle
                 * reset; persistent requests keep their handle for reuse */
                if (!MPIR_Request_is_persistent(request_ptrs[i])) {
                    MPIR_Request_free(request_ptrs[i]);
                    array_of_requests[i] = MPI_REQUEST_NULL;
                }
                if (rc != MPI_SUCCESS) {
                    MPIR_ERR_SET(mpi_errno, MPI_ERR_IN_STATUS, "**instatus");
                    goto fn_exit;
                }
            }
            continue;
        }
        /* General path: extract a status for every completed request. */
        for (i = ii; i < ii + icount; i++) {
            if (request_ptrs[i] == NULL)
                continue;
            rc = MPIR_Request_completion_processing(request_ptrs[i], &array_of_statuses[i],
                                                    &active_flag);
            if (!MPIR_Request_is_persistent(request_ptrs[i])) {
                MPIR_Request_free(request_ptrs[i]);
                array_of_requests[i] = MPI_REQUEST_NULL;
            }
            if (rc == MPI_SUCCESS) {
                array_of_statuses[i].MPI_ERROR = MPI_SUCCESS;
            } else {
                /* req completed with an error */
                MPIR_ERR_SET(mpi_errno, MPI_ERR_IN_STATUS, "**instatus");
                /* set the error code for this request */
                array_of_statuses[i].MPI_ERROR = rc;
                if (unlikely(MPIX_ERR_PROC_FAILED == MPIR_ERR_GET_CLASS(rc)))
                    rc = MPIX_ERR_PROC_FAILED_PENDING;
                else
                    rc = MPI_ERR_PENDING;
                /* set the error codes for the rest of the uncompleted requests to PENDING */
                for (j = i + 1; j < count; ++j) {
                    if (request_ptrs[j] == NULL) {
                        /* either the user specified MPI_REQUEST_NULL, or this is a completed greq */
                        array_of_statuses[j].MPI_ERROR = MPI_SUCCESS;
                    } else {
                        array_of_statuses[j].MPI_ERROR = rc;
                    }
                }
                goto fn_exit;
            }
        }
    }
  fn_exit:
    if (count > MPIR_REQUEST_PTR_ARRAY_SIZE) {
        MPIR_CHKLMEM_FREEALL();
    }
    return mpi_errno;
  fn_fail:
    goto fn_exit;
}
#endif
#undef FUNCNAME
#define FUNCNAME MPI_Waitall
#undef FCNAME
#define FCNAME MPL_QUOTE(FUNCNAME)
/*@
MPI_Waitall - Waits for all given MPI Requests to complete
Input Parameters:
+ count - list length (integer)
- array_of_requests - array of request handles (array of handles)
Output Parameters:
. array_of_statuses - array of status objects (array of Statuses). May be
'MPI_STATUSES_IGNORE'.
Notes:
If one or more of the requests completes with an error, 'MPI_ERR_IN_STATUS' is
returned. An error value will be present in the elements of 'array_of_statuses'
associated with the requests. Likewise, the 'MPI_ERROR' field in the status
elements associated with requests that have successfully completed will be
'MPI_SUCCESS'. Finally, those requests that have not completed will have a
value of 'MPI_ERR_PENDING'.
While it is possible to list a request handle more than once in the
array_of_requests, such an action is considered erroneous and may cause the
program to unexpectedly terminate or produce incorrect results.
.N waitstatus
.N ThreadSafe
.N Fortran
.N Errors
.N MPI_SUCCESS
.N MPI_ERR_REQUEST
.N MPI_ERR_ARG
.N MPI_ERR_IN_STATUS
@*/
/* MPI_Waitall - public binding.  Performs the standard MPICH wrapper
 * duties (profiling state, global critical section, argument checking)
 * and delegates the actual wait to MPIR_Waitall. */
int MPI_Waitall(int count, MPI_Request array_of_requests[], MPI_Status array_of_statuses[])
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_FUNC_TERSE_STATE_DECL(MPID_STATE_MPI_WAITALL);
    MPIR_ERRTEST_INITIALIZED_ORDIE();
    MPID_THREAD_CS_ENTER(VNI_GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
    MPIR_FUNC_TERSE_REQUEST_ENTER(MPID_STATE_MPI_WAITALL);
    /* Check the arguments */
#ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            int i;
            MPIR_ERRTEST_COUNT(count, mpi_errno);
            /* array pointers only need to be valid when count > 0 */
            if (count != 0) {
                MPIR_ERRTEST_ARGNULL(array_of_requests, "array_of_requests", mpi_errno);
                /* NOTE: MPI_STATUSES_IGNORE != NULL */
                MPIR_ERRTEST_ARGNULL(array_of_statuses, "array_of_statuses", mpi_errno);
            }
            /* each handle must be a request handle or MPI_REQUEST_NULL */
            for (i = 0; i < count; i++) {
                MPIR_ERRTEST_ARRAYREQUEST_OR_NULL(array_of_requests[i], i, mpi_errno);
            }
        }
        MPID_END_ERROR_CHECKS;
    }
#endif /* HAVE_ERROR_CHECKING */
    /* ... body of routine ... */
    mpi_errno = MPIR_Waitall(count, array_of_requests, array_of_statuses);
    if (mpi_errno)
        goto fn_fail;
    /* ... end of body of routine ... */
  fn_exit:
    MPIR_FUNC_TERSE_REQUEST_EXIT(MPID_STATE_MPI_WAITALL);
    MPID_THREAD_CS_EXIT(VNI_GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
    return mpi_errno;
  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#ifdef HAVE_ERROR_CHECKING
    mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE,
                                     FCNAME, __LINE__, MPI_ERR_OTHER,
                                     "**mpi_waitall",
                                     "**mpi_waitall %d %p %p",
                                     count, array_of_requests, array_of_statuses);
#endif
    mpi_errno = MPIR_Err_return_comm(NULL, FCNAME, mpi_errno);
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}