Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

MPI shared memory #14

Open
wants to merge 4 commits into
base: unstable
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 19 additions & 0 deletions c++/mpi/communicator.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,8 @@

namespace mpi {

class shared_communicator;

/**
* @ingroup mpi_essentials
* @brief C++ wrapper around `MPI_Comm` providing various convenience functions.
Expand All @@ -38,6 +40,7 @@ namespace mpi {
* Note that copying the communicator simply copies the `MPI_Comm` object, without calling `MPI_Comm_dup`.
*/
class communicator {
friend class shared_communicator;
// Wrapped `MPI_Comm` object.
MPI_Comm _com = MPI_COMM_WORLD;

Expand All @@ -54,6 +57,8 @@ namespace mpi {
/// Get the wrapped `MPI_Comm` object.
[[nodiscard]] MPI_Comm get() const noexcept { return _com; }

[[nodiscard]] bool is_null() const noexcept { return _com == MPI_COMM_NULL; }

/**
* @brief Get the rank of the calling process in the communicator.
* @return The result of `MPI_Comm_rank` if mpi::has_env is true, otherwise 0.
Expand Down Expand Up @@ -98,6 +103,8 @@ namespace mpi {
return {};
}

[[nodiscard]] shared_communicator split_shared(int split_type = MPI_COMM_TYPE_SHARED, int key = 0) const;

/**
* @brief If mpi::has_env is true, `MPI_Abort` is called with the given error code, otherwise std::abort is called.
* @param error_code The error code to pass to `MPI_Abort`.
Expand Down Expand Up @@ -147,4 +154,16 @@ namespace mpi {
}
};

/// The shared communicator class.
// Distinct tag type for communicators produced by communicator::split_shared, so that
// APIs which require a shared-memory communicator (e.g. the shared_window constructor)
// can enforce it through the type system. Adds no members of its own.
// NOTE(review): a default-constructed shared_communicator wraps MPI_COMM_WORLD
// (inherited default member initializer) — confirm this is intended for the no-MPI case.
class shared_communicator : public communicator {};

/// Split the communicator into subgroups of processes that can share memory.
/// @param split_type Type of split (default: MPI_COMM_TYPE_SHARED).
/// @param key Controls rank ordering within the new communicator.
/// @return The shared-memory sub-communicator, or a default-constructed one if mpi::has_env is false.
[[nodiscard]] inline shared_communicator communicator::split_shared(int split_type, int key) const {
  if (!has_env) return {};
  shared_communicator sc;
  MPI_Comm_split_type(_com, split_type, key, MPI_INFO_NULL, &sc._com);
  return sc;
}

} // namespace mpi
1 change: 1 addition & 0 deletions c++/mpi/mpi.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@
#include "./generic_communication.hpp"
#include "./lazy.hpp"
#include "./operators.hpp"
#include "./window.hpp"

namespace mpi {

Expand Down
183 changes: 183 additions & 0 deletions c++/mpi/window.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,183 @@
// Copyright (c) 2024 Simons Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0.txt
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Authors: Thomas Hahn, Alexander Hampel, Olivier Parcollet, Nils Wentzell

/**
 * @file
 * @brief Provides C++ wrapper classes for `MPI_Win` objects (general RMA windows and shared-memory windows).
 */

#pragma once

#include "./communicator.hpp"

#include <mpi.h>

#include <cassert>
#include <memory>
#include <tuple>
#include <type_traits>
#include <utility>

namespace mpi {

template <class BaseType> class shared_window;

/// The window class
template <class BaseType>
class window {
friend class shared_window<BaseType>;
MPI_Win win{MPI_WIN_NULL};
public:
window() = default;
window(window const&) = delete;
window(window &&other) noexcept : win{std::exchange(other.win, MPI_WIN_NULL)} {}
window& operator=(window const&) = delete;
window& operator=(window &&rhs) noexcept {
if (this != std::addressof(rhs)) {
this->free();
this->win = std::exchange(rhs.win, MPI_WIN_NULL);
}
return *this;
}

/// Create a window over an existing local memory buffer
explicit window(communicator &c, BaseType *base, MPI_Aint size = 0) noexcept {
MPI_Win_create(base, size * sizeof(BaseType), alignof(BaseType), MPI_INFO_NULL, c.get(), &win);
}

/// Create a window and allocate memory for a local memory buffer
explicit window(communicator &c, MPI_Aint size = 0) noexcept {
void *baseptr = nullptr;
MPI_Win_allocate(size * sizeof(BaseType), alignof(BaseType), MPI_INFO_NULL, c.get(), &baseptr, &win);
}

~window() { free(); }

explicit operator MPI_Win() const noexcept { return win; };
explicit operator MPI_Win*() noexcept { return &win; };

void free() noexcept {
if (win != MPI_WIN_NULL) {
MPI_Win_free(&win);
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Check that MPI_Win_free is indeed blocking on all ranks.

}
}

/// Synchronization routine in active target RMA. It opens and closes an access epoch.
void fence(int assert = 0) const noexcept {
MPI_Win_fence(assert, win);
}

/// Complete all outstanding RMA operations at both the origin and the target
void flush(int rank = -1) const noexcept {
if (rank < 0) {
MPI_Win_flush_all(win);
} else {
MPI_Win_flush(rank, win);
}
}

/// Synchronize the private and public copies of the window
void sync() const noexcept {
MPI_Win_sync(win);
}

/// Starts an RMA access epoch locking access to a particular or all ranks in the window
void lock(int rank = -1, int lock_type = MPI_LOCK_SHARED, int assert = 0) const noexcept {
if (rank < 0) {
MPI_Win_lock_all(assert, win);
} else {
MPI_Win_lock(lock_type, rank, assert, win);
}
}

/// Completes an RMA access epoch started by a call to lock()
void unlock(int rank = -1) const noexcept {
if (rank < 0) {
MPI_Win_unlock_all(win);
} else {
MPI_Win_unlock(rank, win);
}
}

/// Load data from a remote memory window.
template <typename TargetType = BaseType, typename OriginType>
std::enable_if_t<has_mpi_type<OriginType> && has_mpi_type<TargetType>, void>
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Replace std::enable_if with requires.

get(OriginType *origin_addr, int origin_count, int target_rank, MPI_Aint target_disp = 0, int target_count = -1) const noexcept {
MPI_Datatype origin_datatype = mpi_type<OriginType>::get();
MPI_Datatype target_datatype = mpi_type<TargetType>::get();
int target_count_ = target_count < 0 ? origin_count : target_count;
MPI_Get(origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count_, target_datatype, win);
};

/// Store data to a remote memory window.
template <typename TargetType = BaseType, typename OriginType>
std::enable_if_t<has_mpi_type<OriginType> && has_mpi_type<TargetType>, void>
put(OriginType *origin_addr, int origin_count, int target_rank, MPI_Aint target_disp = 0, int target_count = -1) const noexcept {
MPI_Datatype origin_datatype = mpi_type<OriginType>::get();
MPI_Datatype target_datatype = mpi_type<TargetType>::get();
int target_count_ = target_count < 0 ? origin_count : target_count;
MPI_Put(origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count_, target_datatype, win);
};

/// Accumulate data into target process through remote memory access.
template <typename TargetType = BaseType, typename OriginType>
std::enable_if_t<has_mpi_type<OriginType> && has_mpi_type<TargetType>, void>
accumulate(OriginType const *origin_addr, int origin_count, int target_rank, MPI_Aint target_disp = 0, int target_count = -1, MPI_Op op = MPI_SUM) const noexcept {
MPI_Datatype origin_datatype = mpi_type<OriginType>::get();
MPI_Datatype target_datatype = mpi_type<TargetType>::get();
int target_count_ = target_count < 0 ? origin_count : target_count;
MPI_Accumulate(origin_addr, origin_count, origin_datatype, target_rank, target_disp, target_count_, target_datatype, op, win);
}

/// Obtains the value of a window attribute.
void* get_attr(int win_keyval) const noexcept {
int flag;
void *attribute_val;
MPI_Win_get_attr(win, win_keyval, &attribute_val, &flag);
assert(flag);
return attribute_val;
}

// Expose some commonly used attributes
BaseType* base() const noexcept { return static_cast<BaseType*>(get_attr(MPI_WIN_BASE)); }
MPI_Aint size() const noexcept { return *static_cast<MPI_Aint*>(get_attr(MPI_WIN_SIZE)); }
int disp_unit() const noexcept { return *static_cast<int*>(get_attr(MPI_WIN_DISP_UNIT)); }
};
Comment on lines +150 to +154
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can these function calls be turned into members that are initialized at construction?


/// The shared_window class: an MPI window whose memory is allocated in shared memory
/// across the ranks of a shared_communicator.
template <class BaseType>
class shared_window : public window<BaseType> {
  public:
  /// Construct a shared window that does not wrap a valid `MPI_Win`.
  shared_window() = default;

  /// Create a window and allocate shared memory for `size` elements of BaseType
  /// (collective over the shared communicator).
  explicit shared_window(shared_communicator &c, MPI_Aint size) noexcept {
    void *baseptr = nullptr;
    MPI_Win_allocate_shared(size * sizeof(BaseType), alignof(BaseType), MPI_INFO_NULL, c.get(), &baseptr, &(this->win));
  }

  /// Query a shared memory window.
  /// @param rank Rank whose memory segment is queried (default MPI_PROC_NULL).
  /// @return Tuple of (size in bytes, displacement unit, base pointer) as reported by MPI_Win_shared_query.
  std::tuple<MPI_Aint, int, void *> query(int rank = MPI_PROC_NULL) const noexcept {
    MPI_Aint n_bytes = 0;
    int unit = 0;
    void *ptr = nullptr;
    MPI_Win_shared_query(this->win, rank, &n_bytes, &unit, &ptr);
    return {n_bytes, unit, ptr};
  }

  // Shared-memory variants of the attribute accessors of the window base class.
  /// Base pointer of the memory segment attached at the given rank.
  BaseType *base(int rank = MPI_PROC_NULL) const noexcept {
    auto const info = query(rank);
    return static_cast<BaseType *>(std::get<2>(info));
  }
  /// Size of the segment at the given rank, in number of BaseType elements (not bytes).
  MPI_Aint size(int rank = MPI_PROC_NULL) const noexcept {
    auto const info = query(rank);
    return std::get<0>(info) / sizeof(BaseType);
  }
  /// Displacement unit of the segment at the given rank.
  int disp_unit(int rank = MPI_PROC_NULL) const noexcept {
    auto const info = query(rank);
    return std::get<1>(info);
  }
};

}
4 changes: 2 additions & 2 deletions test/c++/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,8 @@ file(GLOB_RECURSE all_tests RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.cpp)

# List of all no mpi tests
file(GLOB_RECURSE nompi_tests RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.cpp)
# remove custom and monitor mpi tests as they explicitly use MPI
list(REMOVE_ITEM nompi_tests mpi_custom.cpp mpi_monitor.cpp)
# remove custom, monitor, and window mpi tests as they explicitly use MPI
list(REMOVE_ITEM nompi_tests mpi_custom.cpp mpi_monitor.cpp mpi_window.cpp)

# ========= OpenMP Dependency ==========

Expand Down
Loading
Loading