Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
35 changes: 35 additions & 0 deletions src/sst/core/sst_mpi.cc
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,41 @@ SST_MPI_Allgather(const void* sendbuf, int sendcount, MPI_Datatype sendtype, voi
#endif
}

// Broadcast wrapper that hides the SST_CONFIG_HAVE_MPI conditional from callers.
// With MPI enabled, forwards directly to MPI_Bcast; without MPI there is only
// one rank, which already holds the data, so the call is a no-op that returns
// 0 (the value of MPI_SUCCESS).
// UNUSED_WO_MPI suppresses unused-parameter warnings in the no-MPI build.
int
SST_MPI_Bcast(void* UNUSED_WO_MPI(buffer), int UNUSED_WO_MPI(count), MPI_Datatype UNUSED_WO_MPI(datatype),
int UNUSED_WO_MPI(root), MPI_Comm UNUSED_WO_MPI(comm))
{
#ifdef SST_CONFIG_HAVE_MPI
return MPI_Bcast(buffer, count, datatype, root, comm);
#else
// Bcast is a no-op if there is no MPI
return 0;
#endif
}

// Point-to-point send wrapper that hides the SST_CONFIG_HAVE_MPI conditional.
// With MPI enabled, forwards directly to MPI_Send; without MPI it is a no-op
// returning 0 (the value of MPI_SUCCESS). NOTE(review): in the no-MPI build
// the data is silently dropped — callers are expected to reach this only on
// multi-rank paths (see the declaration's comment in sst_mpi.h).
// UNUSED_WO_MPI suppresses unused-parameter warnings in the no-MPI build.
int
SST_MPI_Send(const void* UNUSED_WO_MPI(buf), int UNUSED_WO_MPI(count), MPI_Datatype UNUSED_WO_MPI(datatype),
int UNUSED_WO_MPI(dest), int UNUSED_WO_MPI(tag), MPI_Comm UNUSED_WO_MPI(comm))
{
#ifdef SST_CONFIG_HAVE_MPI
return MPI_Send(buf, count, datatype, dest, tag, comm);
#else
return 0;
#endif
}

// Point-to-point receive wrapper that hides the SST_CONFIG_HAVE_MPI
// conditional. With MPI enabled, forwards directly to MPI_Recv; without MPI it
// is a no-op returning 0 (the value of MPI_SUCCESS). NOTE(review): in the
// no-MPI build the output buffer and status are left untouched — callers are
// expected to reach this only on multi-rank paths (see the declaration's
// comment in sst_mpi.h).
// UNUSED_WO_MPI suppresses unused-parameter warnings in the no-MPI build.
int
SST_MPI_Recv(void* UNUSED_WO_MPI(buf), int UNUSED_WO_MPI(count), MPI_Datatype UNUSED_WO_MPI(datatype),
int UNUSED_WO_MPI(source), int UNUSED_WO_MPI(tag), MPI_Comm UNUSED_WO_MPI(comm), MPI_Status* UNUSED_WO_MPI(status))
{
#ifdef SST_CONFIG_HAVE_MPI
return MPI_Recv(buf, count, datatype, source, tag, comm, status);
#else
return 0;
#endif
}


int
SST_MPI_GetRank()
{
Expand Down
19 changes: 16 additions & 3 deletions src/sst/core/sst_mpi.h
Original file line number Diff line number Diff line change
Expand Up @@ -123,15 +123,28 @@ struct mpi_double_int_t
#define MPI_MAXLOC 0
#define MPI_MINLOC 0

#define MPI_Status int
#define MPI_STATUS_IGNORE 0

#endif

// Versions of typical MPI functions that will hide the
// SST_CONFIG_HAVE_MPI macros
///// Versions of typical MPI functions that will hide the SST_CONFIG_HAVE_MPI macros ////

// These functions will generally copy the data from input to output and will behave as expected for a 1 rank MPI job
// when MPI is not enabled
int SST_MPI_Allreduce(const void* sendbuf, void* recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);
int SST_MPI_Barrier(MPI_Comm comm);
int SST_MPI_Allgather(const void* sendbuf, int sendcount, MPI_Datatype sendtype, void* recvbuf, int recvcount,
MPI_Datatype recvtype, MPI_Comm comm);
int SST_MPI_Bcast(void* buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm);

// MPI_Barrier is a no-op when MPI is not enabled and can be safely called in that case
int SST_MPI_Barrier(MPI_Comm comm);

// MPI_Send and MPI_Recv are both no-ops if MPI is not present. However, they should be in segments of code that won't
// get executed if MPI is not available (i.e. only run if there is more than one rank, controlled by either an
// if-statement or for-loop, but this is generally also necessary even when MPI is enabled)
int SST_MPI_Send(const void* buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm);
int SST_MPI_Recv(void* buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Status* status);

int SST_MPI_GetRank();
#endif
28 changes: 14 additions & 14 deletions src/sst/core/util/perfReporter.cc
Original file line number Diff line number Diff line change
Expand Up @@ -202,10 +202,10 @@ PerfReporter::output(int rank, int num_ranks)
// A helper for rank exchanges
auto recvStringFromRank = [](int src) -> std::string {
int len = 0;
MPI_Recv(&len, 1, MPI_INT, src, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
SST_MPI_Recv(&len, 1, MPI_INT, src, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
if ( len <= 0 ) return {};
std::string s(static_cast<size_t>(len), '\0');
MPI_Recv(s.data(), len, MPI_CHAR, src, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
SST_MPI_Recv(s.data(), len, MPI_CHAR, src, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
return s;
};

Expand All @@ -214,7 +214,7 @@ PerfReporter::output(int rank, int num_ranks)
if ( rank == 0 ) {
record_count = static_cast<uint32_t>(records_.size());
}
MPI_Bcast(&record_count, 1, MPI_UINT32_T, 0, MPI_COMM_WORLD);
SST_MPI_Bcast(&record_count, 1, MPI_UINT32_T, 0, MPI_COMM_WORLD);

if ( record_count == 0 ) return; // Nothing to print

Expand Down Expand Up @@ -242,8 +242,8 @@ PerfReporter::output(int rank, int num_ranks)

// Broadcast the record currently being output
int length = static_cast<int>(record.first.size());
MPI_Bcast(&length, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast((void*)&record.first[0], length, MPI_CHAR, 0, MPI_COMM_WORLD);
SST_MPI_Bcast(&length, 1, MPI_INT, 0, MPI_COMM_WORLD);
SST_MPI_Bcast((void*)&record.first[0], length, MPI_CHAR, 0, MPI_COMM_WORLD);

auto json_o = nlohmann::ordered_json::object();

Expand Down Expand Up @@ -317,17 +317,17 @@ PerfReporter::output(int rank, int num_ranks)
for ( uint32_t count = 0; count < record_count; count++ ) {
// Get record name
int length = 0;
MPI_Bcast(&length, 1, MPI_INT, 0, MPI_COMM_WORLD);
SST_MPI_Bcast(&length, 1, MPI_INT, 0, MPI_COMM_WORLD);
char* name = new char[length];
MPI_Bcast(name, length, MPI_CHAR, 0, MPI_COMM_WORLD);
SST_MPI_Bcast(name, length, MPI_CHAR, 0, MPI_COMM_WORLD);

// Lookup record
auto record = records_.find(name);

if ( output_console_ || output_txt ) {
if ( record == records_.end() ) { // None found
int length = 0;
MPI_Send(&length, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
SST_MPI_Send(&length, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
}
else {
std::stringstream str;
Expand All @@ -343,15 +343,15 @@ PerfReporter::output(int rank, int num_ranks)

std::string send_str = str.str();
int length = send_str.size();
MPI_Send(&length, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
MPI_Send(&send_str[0], length, MPI_CHAR, 0, 0, MPI_COMM_WORLD);
SST_MPI_Send(&length, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
SST_MPI_Send(send_str.data(), length, MPI_CHAR, 0, 0, MPI_COMM_WORLD);
}
}

if ( output_json ) {
if ( record == records_.end() ) {
int length = 0;
MPI_Send(&length, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
SST_MPI_Send(&length, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
}
else {
std::stringstream str;
Expand All @@ -369,11 +369,11 @@ PerfReporter::output(int rank, int num_ranks)
}

int length = send_str.size();
MPI_Send(&length, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
MPI_Send(&send_str[0], length, MPI_CHAR, 0, 0, MPI_COMM_WORLD);
SST_MPI_Send(&length, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
SST_MPI_Send(send_str.data(), length, MPI_CHAR, 0, 0, MPI_COMM_WORLD);
}
}
delete name;
delete[] name;
}
}
}
Expand Down
Loading