diff --git a/opm/grid/CpGrid.hpp b/opm/grid/CpGrid.hpp
index 6544b6591..57480be18 100644
--- a/opm/grid/CpGrid.hpp
+++ b/opm/grid/CpGrid.hpp
@@ -1512,6 +1512,11 @@ namespace Dune
                 OPM_THROW(std::logic_error, "No distributed view available in grid");
             current_view_data_=distributed_data_[0].get();
         }
+
+        void setAllowEmptyPartitions(bool allow)
+        {
+            allowEmptyPartitions_ = allow;
+        }
         //@}

 #if HAVE_MPI
@@ -1629,6 +1634,10 @@ namespace Dune
          */
         std::map<std::string,std::string> zoltanParams;

+        /**
+         * @brief Allow empty partitions
+         */
+        bool allowEmptyPartitions_ = false;

     }; // end Class CpGrid

diff --git a/opm/grid/cpgrid/CpGrid.cpp b/opm/grid/cpgrid/CpGrid.cpp
index fc72c4bae..9a091c081 100644
--- a/opm/grid/cpgrid/CpGrid.cpp
+++ b/opm/grid/cpgrid/CpGrid.cpp
@@ -410,21 +410,24 @@ CpGrid::scatterGrid(EdgeWeightMethod method,
     procsWithZeroCells = cc.sum(procsWithZeroCells);

     if (procsWithZeroCells) {
-        std::string msg = "At least one process has zero cells. Aborting. \n"
-            " Try decreasing the imbalance tolerance for zoltan with \n"
-            " --zoltan-imbalance-tolerance. The current value is "
-            + std::to_string(zoltanImbalanceTol);
-        if (cc.rank()==0)
-        {
-            OPM_THROW(std::runtime_error, msg );
-        }
-        else
-        {
-            OPM_THROW_NOLOG(std::runtime_error, msg);
+        if (allowEmptyPartitions_) {
+            Opm::OpmLog::warning("At least one process has zero cells. Continuing as requested.");
+        } else {
+            std::string msg = "At least one process has zero cells. Aborting. \n"
+                " Try decreasing the imbalance tolerance for zoltan with \n"
+                " --zoltan-imbalance-tolerance. The current value is "
+                + std::to_string(zoltanImbalanceTol);
+            if (cc.rank()==0)
+            {
+                OPM_THROW(std::runtime_error, msg );
+            }
+            else
+            {
+                OPM_THROW_NOLOG(std::runtime_error, msg);
+            }
         }
     }
-
     // distributed_data should be empty at this point.
     distributed_data_.push_back(std::make_shared<cpgrid::CpGridData>(cc));
     distributed_data_[0]->setUniqueBoundaryIds(data_[0]->uniqueBoundaryIds());
diff --git a/opm/grid/utility/RegionMapping.hpp b/opm/grid/utility/RegionMapping.hpp
index 198c2afa7..adab6fde5 100644
--- a/opm/grid/utility/RegionMapping.hpp
+++ b/opm/grid/utility/RegionMapping.hpp
@@ -25,6 +25,10 @@
 #include <unordered_map>
 #include <vector>

+#if HAVE_MPI
+#include <mpi.h>
+#endif
+
 namespace Opm
 {

@@ -47,12 +51,29 @@
          *
          * \param[in] reg Forward region mapping, restricted to
          *                active cells only.
+         * \param[in] comm Global communicator to base region communicator on.
          */
         explicit
-        RegionMapping(const Region& reg)
+        RegionMapping(const Region& reg
+#if HAVE_MPI
+                      , MPI_Comm comm = MPI_COMM_WORLD
+#endif
+                      )
             : reg_(reg)
         {
+#if HAVE_MPI
+            rev_.init(reg_, comm);
+#else
             rev_.init(reg_);
+#endif
+        }
+
+        ~RegionMapping()
+        {
+#if HAVE_MPI
+            if (rev_.comm_ != MPI_COMM_NULL)
+                MPI_Comm_free(&rev_.comm_);
+#endif
         }

         /**
@@ -113,6 +134,10 @@
                              rev_.c.begin() + rev_.p[i + 1]);
         }

+#if HAVE_MPI
+        MPI_Comm comm() const { return rev_.comm_; }
+#endif
+
     private:
         /**
          * Copy of forward region mapping (cell-to-region).
          */
@@ -131,12 +156,21 @@
             std::vector<Pos>    p;   /**< Region start pointers */
             std::vector<CellId> c;   /**< Region cells */

+#if HAVE_MPI
+            MPI_Comm comm_;
+#endif
+
             /**
              * Compute reverse mapping. Standard linear insertion
              * sort algorithm.
              */
+#if HAVE_MPI
+            void
+            init(const Region& reg, MPI_Comm comm)
+#else
             void
             init(const Region& reg)
+#endif
             {
                 binid.clear();
                 for (const auto& r : reg) {
@@ -154,6 +188,10 @@
                     id.second = n++;
                 }
             }
+#if HAVE_MPI
+            MPI_Comm_split(comm, reg.empty() ? MPI_UNDEFINED : 1,
+                           0, &comm_);
+#endif

             for (decltype(p.size()) i = 1, sz = p.size(); i < sz; ++i) {
                 p[0] += p[i];
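
Usage note (not part of the patch): with these changes a driver would call setAllowEmptyPartitions(true) on its Dune::CpGrid before load balancing, so that scatterGrid() only logs a warning instead of throwing when some rank ends up with zero cells, and region-wise collectives can then be restricted to the non-empty ranks via the communicator that RegionMapping now carries. The sketch below is illustrative only: it assumes an MPI-enabled build of opm-grid with this patch applied (HAVE_MPI defined, e.g. via the project config), and the region ids and the deliberately empty rank 1 are made up for the example.

    // Hypothetical sketch exercising the new RegionMapping(reg, comm)
    // constructor and comm() accessor; not part of the patch.
    #include <opm/grid/utility/RegionMapping.hpp>

    #include <mpi.h>

    #include <iostream>
    #include <vector>

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);
        {
            int rank = 0;
            MPI_Comm_rank(MPI_COMM_WORLD, &rank);

            // Region id per locally owned cell; rank 1 is deliberately left
            // without cells to mimic an empty partition after load balancing.
            std::vector<int> regions;
            if (rank != 1) {
                regions = {0, 0, 1, 1, 2};
            }

            // Ranks without cells pass MPI_UNDEFINED to the split and get
            // MPI_COMM_NULL back, so they simply skip the collective below.
            Opm::RegionMapping<std::vector<int>> rm(regions, MPI_COMM_WORLD);

            if (rm.comm() != MPI_COMM_NULL) {
                int local = static_cast<int>(regions.size());
                int total = 0;
                MPI_Allreduce(&local, &total, 1, MPI_INT, MPI_SUM, rm.comm());
                if (rank == 0) {
                    std::cout << "cells on non-empty ranks: " << total << "\n";
                }
            }
        } // rm is destroyed (and its communicator freed) before MPI_Finalize.
        MPI_Finalize();
        return 0;
    }

Scoping the RegionMapping inside the block matters: its destructor frees the split communicator, which must happen before MPI_Finalize().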