diff --git a/src/colvar.cpp b/src/colvar.cpp
index d81d24587..eceecd5d7 100644
--- a/src/colvar.cpp
+++ b/src/colvar.cpp
@@ -1280,7 +1280,7 @@ int colvar::init_dependencies() {
   // Initialize feature_states for each instance
   feature_states.reserve(f_cv_ntot);
   for (i = feature_states.size(); i < f_cv_ntot; i++) {
-    feature_states.push_back(feature_state(true, false));
+    feature_states.emplace_back(true, false);
     // Most features are available, so we set them so
     // and list exceptions below
   }
@@ -2043,8 +2043,8 @@ void colvar::communicate_forces()
     func_grads.reserve(cvcs.size());
     for (i = 0; i < cvcs.size(); i++) {
       if (!cvcs[i]->is_enabled()) continue;
-      func_grads.push_back(cvm::matrix2d<cvm::real> (x.size(),
-                                                     cvcs[i]->value().size()));
+      func_grads.emplace_back(x.size(),
+                              cvcs[i]->value().size());
     }

     int res = cvm::proxy->run_colvar_gradient_callback(scripted_function, sorted_cvc_values, func_grads);
@@ -2803,7 +2803,7 @@ int colvar::calc_acf()
   case acf_vel:
     // allocate space for the velocities history
     for (i = 0; i < acf_stride; i++) {
-      acf_v_history.push_back(std::list<colvarvalue>());
+      acf_v_history.emplace_back();
     }
     acf_v_history_p = acf_v_history.begin();
     break;
@@ -2812,7 +2812,7 @@
   case acf_p2coor:
     // allocate space for the coordinates history
     for (i = 0; i < acf_stride; i++) {
-      acf_x_history.push_back(std::list<colvarvalue>());
+      acf_x_history.emplace_back();
     }
     acf_x_history_p = acf_x_history.begin();
     break;
@@ -3003,7 +3003,7 @@ int colvar::calc_runave()

     acf_nframes = 0;

-    x_history.push_back(std::list<colvarvalue>());
+    x_history.emplace_back();
     x_history_p = x_history.begin();

   } else {
diff --git a/src/colvar_neuralnetworkcompute.cpp b/src/colvar_neuralnetworkcompute.cpp
index b77db0cfa..c34732002 100644
--- a/src/colvar_neuralnetworkcompute.cpp
+++ b/src/colvar_neuralnetworkcompute.cpp
@@ -246,15 +246,15 @@ bool neuralNetworkCompute::addDenseLayer(const denseLayer& layer) {
     if (m_dense_layers.empty()) {
         // add layer to this ann directly if m_dense_layers is empty
         m_dense_layers.push_back(layer);
-        m_layers_output.push_back(std::vector<double>(layer.getOutputSize()));
-        m_grads_tmp.push_back(std::vector<std::vector<double>>(layer.getOutputSize(), std::vector<double>(layer.getInputSize(), 0)));
+        m_layers_output.emplace_back(layer.getOutputSize());
+        m_grads_tmp.emplace_back(layer.getOutputSize(), std::vector<double>(layer.getInputSize(), 0));
         return true;
     } else {
         // otherwise, we need to check if the output of last layer in m_dense_layers matches the input of layer to be added
        if (m_dense_layers.back().getOutputSize() == layer.getInputSize()) {
            m_dense_layers.push_back(layer);
-           m_layers_output.push_back(std::vector<double>(layer.getOutputSize()));
-           m_grads_tmp.push_back(std::vector<std::vector<double>>(layer.getOutputSize(), std::vector<double>(layer.getInputSize(), 0)));
+           m_layers_output.emplace_back(layer.getOutputSize());
+           m_grads_tmp.emplace_back(layer.getOutputSize(), std::vector<double>(layer.getInputSize(), 0));
            return true;
        } else {
            return false;
diff --git a/src/colvaratoms.cpp b/src/colvaratoms.cpp
index 827ba4429..2dba1133d 100644
--- a/src/colvaratoms.cpp
+++ b/src/colvaratoms.cpp
@@ -213,7 +213,7 @@ int cvm::atom_group::init_dependencies() {
   // default as unavailable, not enabled
   feature_states.reserve(f_ag_ntot);
   for (i = feature_states.size(); i < colvardeps::f_ag_ntot; i++) {
-    feature_states.push_back(feature_state(false, false));
+    feature_states.emplace_back(false, false);
   }

   // Features that are implemented (or not) by all atom groups
diff --git a/src/colvarbias.cpp b/src/colvarbias.cpp
index bd3fd4e4a..ba701c833 100644
--- a/src/colvarbias.cpp
+++ b/src/colvarbias.cpp
@@ -248,7 +248,7 @@ int colvarbias::init_dependencies() {
   // Initialize feature_states for each instance
   feature_states.reserve(f_cvb_ntot);
   for (i = feature_states.size(); i < f_cvb_ntot; i++) {
-    feature_states.push_back(feature_state(true, false));
+    feature_states.emplace_back(true, false);
     // Most features are available, so we set them so
     // and list exceptions below
   }
@@ -352,7 +352,7 @@ int colvarbias::add_colvar(std::string const &cv_name)
     // although possibly not at all timesteps
     add_child(cv);

-    colvar_forces.push_back(colvarvalue());
+    colvar_forces.emplace_back();
     colvar_forces.back().type(cv->value()); // make sure each force is initialized to zero
     colvar_forces.back().is_derivative(); // colvar constraints are not applied to the force
     colvar_forces.back().reset();
diff --git a/src/colvarbias_abf.cpp b/src/colvarbias_abf.cpp
index 3eb2f4f96..723612b74 100644
--- a/src/colvarbias_abf.cpp
+++ b/src/colvarbias_abf.cpp
@@ -925,7 +925,7 @@ template <typename OST> OST & colvarbias_abf::write_state_data_template_(OST &os
 {
   auto flags = os.flags();

-  os.setf(std::ios::fmtflags(std::ios::dec), std::ios::floatfield); // default floating-point format
+  os.unsetf(std::ios::floatfield); // default floating-point format

   write_state_data_key(os, "samples");
   samples->write_raw(os, 8);
@@ -941,7 +941,7 @@
   }

   if (b_CZAR_estimator) {
-    os.setf(std::ios::fmtflags(std::ios::dec), std::ios::floatfield); // default floating-point format
+    os.unsetf(std::ios::floatfield); // default floating-point format
     write_state_data_key(os, "z_samples");
     z_samples->write_raw(os, 8);
     write_state_data_key(os, "z_gradient");
diff --git a/src/colvarbias_histogram.cpp b/src/colvarbias_histogram.cpp
index 54eabbe72..4971fd8e4 100644
--- a/src/colvarbias_histogram.cpp
+++ b/src/colvarbias_histogram.cpp
@@ -216,7 +216,7 @@ cvm::memory_stream & colvarbias_histogram::read_state_data(cvm::memory_stream& i
 std::ostream & colvarbias_histogram::write_state_data(std::ostream& os)
 {
   std::ios::fmtflags flags(os.flags());
-  os.setf(std::ios::fmtflags(std::ios::dec), std::ios::floatfield);
+  os.unsetf(std::ios::floatfield);
   write_state_data_key(os, "grid");
   grid->write_raw(os, 8);
   os.flags(flags);
diff --git a/src/colvarbias_histogram_reweight_amd.cpp b/src/colvarbias_histogram_reweight_amd.cpp
index 562fcdf31..4525a6da2 100644
--- a/src/colvarbias_histogram_reweight_amd.cpp
+++ b/src/colvarbias_histogram_reweight_amd.cpp
@@ -318,7 +318,7 @@ void colvarbias_reweightaMD::compute_cumulant_expansion_factor(

 template <typename OST> OST & colvarbias_reweightaMD::write_state_data_template_(OST& os) {
   std::ios::fmtflags flags(os.flags());
-  os.setf(std::ios::fmtflags(std::ios::dec), std::ios::floatfield);
+  os.unsetf(std::ios::floatfield);
   write_state_data_key(os, "grid");
   grid->write_raw(os, 8);
   write_state_data_key(os, "grid_count");
diff --git a/src/colvarbias_meta.cpp b/src/colvarbias_meta.cpp
index bad8f2caa..0205d40e3 100644
--- a/src/colvarbias_meta.cpp
+++ b/src/colvarbias_meta.cpp
@@ -1705,7 +1705,7 @@ template <typename IST> IST &colvarbias_meta::read_hill_template_(IST &is)
   }

   hill_iter const hills_end = hills.end();
-  hills.push_back(hill(h_it, h_weight, h_centers, h_sigmas, h_replica));
+  hills.emplace_back(h_it, h_weight, h_centers, h_sigmas, h_replica);
   if (new_hills_begin == hills_end) {
     // if new_hills_begin is unset, set it for the first time
     new_hills_begin = hills.end();
diff --git a/src/colvarcomp.cpp b/src/colvarcomp.cpp
index 760e2697f..695c2dc22 100644
--- a/src/colvarcomp.cpp
+++ b/src/colvarcomp.cpp
@@ -308,7 +308,7 @@ int colvar::cvc::init_dependencies() {
   feature_states.reserve(f_cvc_ntot);
   for (i = feature_states.size(); i < colvardeps::f_cvc_ntot; i++) {
     bool avail = is_dynamic(i) ? false : true;
-    feature_states.push_back(feature_state(avail, false));
+    feature_states.emplace_back(avail, false);
   }

   // Features that are implemented by all cvcs by default
diff --git a/src/colvarcomp_neuralnetwork.cpp b/src/colvarcomp_neuralnetwork.cpp
index 2e5d7429c..231084b49 100644
--- a/src/colvarcomp_neuralnetwork.cpp
+++ b/src/colvarcomp_neuralnetwork.cpp
@@ -77,14 +77,14 @@ int colvar::neuralNetwork::init(std::string const &conf)
                return cvm::error("Unknown activation function name: \"" +
                                  function_name + "\".\n", COLVARS_INPUT_ERROR);
            }
-           activation_functions.push_back(std::make_pair(false, function_name));
+           activation_functions.emplace_back(false, function_name);
            cvm::log(std::string{"The activation function for layer["} + cvm::to_str(num_activation_functions + 1) + std::string{"] is "} + function_name + '\n');
            ++num_activation_functions;
 #ifdef LEPTON
        } else if (key_lookup(conf, lookup_key_custom.c_str())) {
            std::string function_expression;
            get_keyval(conf, lookup_key_custom.c_str(), function_expression, std::string(""));
-           activation_functions.push_back(std::make_pair(true, function_expression));
+           activation_functions.emplace_back(true, function_expression);
            cvm::log(std::string{"The custom activation function for layer["} + cvm::to_str(num_activation_functions + 1) + std::string{"] is "} + function_expression + '\n');
            ++num_activation_functions;
 #endif
diff --git a/src/colvardeps.cpp b/src/colvardeps.cpp
index 772921c98..4411437d7 100644
--- a/src/colvardeps.cpp
+++ b/src/colvardeps.cpp
@@ -421,14 +421,14 @@ void colvardeps::require_feature_children(int f, int g) {


 void colvardeps::require_feature_alt(int f, int g, int h) {
-  features()[f]->requires_alt.push_back(std::vector<int>(2));
+  features()[f]->requires_alt.emplace_back(2);
   features()[f]->requires_alt.back()[0] = g;
   features()[f]->requires_alt.back()[1] = h;
 }


 void colvardeps::require_feature_alt(int f, int g, int h, int i) {
-  features()[f]->requires_alt.push_back(std::vector<int>(3));
+  features()[f]->requires_alt.emplace_back(3);
   features()[f]->requires_alt.back()[0] = g;
   features()[f]->requires_alt.back()[1] = h;
   features()[f]->requires_alt.back()[2] = i;
@@ -436,7 +436,7 @@ void colvardeps::require_feature_alt(int f, int g, int h, int i) {


 void colvardeps::require_feature_alt(int f, int g, int h, int i, int j) {
-  features()[f]->requires_alt.push_back(std::vector<int>(4));
+  features()[f]->requires_alt.emplace_back(4);
   features()[f]->requires_alt.back()[0] = g;
   features()[f]->requires_alt.back()[1] = h;
   features()[f]->requires_alt.back()[2] = i;
diff --git a/src/colvargrid_integrate.cpp b/src/colvargrid_integrate.cpp
index 9d4828eac..b916210f5 100644
--- a/src/colvargrid_integrate.cpp
+++ b/src/colvargrid_integrate.cpp
@@ -49,7 +49,7 @@ colvargrid_integrate::colvargrid_integrate(std::shared_ptr<colvar_grid_gradient>
   for (size_t i = 0; i < nd; i++ ) {
     if (!periodic[i]) nx[i]++;
     // Shift the grid by half the bin width (values at edges instead of center of bins)
-    lower_boundaries.push_back(gradients->lower_boundaries[i].real_value - 0.5 * widths[i]);
+    lower_boundaries.emplace_back(gradients->lower_boundaries[i].real_value - 0.5 * widths[i]);
   }

   setup(nx);
diff --git a/src/colvarmodule.cpp b/src/colvarmodule.cpp
index addfac880..046f5bd91 100644
--- a/src/colvarmodule.cpp
+++ b/src/colvarmodule.cpp
@@ -2125,7 +2125,7 @@ int cvm::read_index_file(char const *filename)
   if (!is) {
     return COLVARS_FILE_ERROR;
   } else {
-    index_file_names.push_back(std::string(filename));
+    index_file_names.emplace_back(filename);
   }

   while (is.good()) {
diff --git a/src/colvarproxy.cpp b/src/colvarproxy.cpp
index 8e5c76759..0559c94e3 100644
--- a/src/colvarproxy.cpp
+++ b/src/colvarproxy.cpp
@@ -54,9 +54,9 @@ int colvarproxy_atoms::add_atom_slot(int atom_id)
   atoms_refcount.push_back(1);
   atoms_masses.push_back(1.0);
   atoms_charges.push_back(0.0);
-  atoms_positions.push_back(cvm::rvector(0.0, 0.0, 0.0));
-  atoms_total_forces.push_back(cvm::rvector(0.0, 0.0, 0.0));
-  atoms_new_colvar_forces.push_back(cvm::rvector(0.0, 0.0, 0.0));
+  atoms_positions.emplace_back(0.0, 0.0, 0.0);
+  atoms_total_forces.emplace_back(0.0, 0.0, 0.0);
+  atoms_new_colvar_forces.emplace_back(0.0, 0.0, 0.0);
   modified_atom_list_ = true;
   return (atoms_ids.size() - 1);
 }
@@ -181,9 +181,9 @@ int colvarproxy_atom_groups::add_atom_group_slot(int atom_group_id)
   atom_groups_refcount.push_back(1);
   atom_groups_masses.push_back(1.0);
   atom_groups_charges.push_back(0.0);
-  atom_groups_coms.push_back(cvm::rvector(0.0, 0.0, 0.0));
-  atom_groups_total_forces.push_back(cvm::rvector(0.0, 0.0, 0.0));
-  atom_groups_new_colvar_forces.push_back(cvm::rvector(0.0, 0.0, 0.0));
+  atom_groups_coms.emplace_back(0.0, 0.0, 0.0);
+  atom_groups_total_forces.emplace_back(0.0, 0.0, 0.0);
+  atom_groups_new_colvar_forces.emplace_back(0.0, 0.0, 0.0);
   return (atom_groups_ids.size() - 1);
 }

@@ -503,7 +503,7 @@ int colvarproxy::request_deletion()

 void colvarproxy::add_config(std::string const &cmd, std::string const &conf)
 {
-  reinterpret_cast<std::list<std::pair<std::string, std::string> > *>(config_queue_)->push_back(std::make_pair(cmd, conf));
+  reinterpret_cast<std::list<std::pair<std::string, std::string> > *>(config_queue_)->emplace_back(cmd, conf);
 }


diff --git a/src/colvars_memstream.h b/src/colvars_memstream.h
index e1ff9b02b..90931e92a 100644
--- a/src/colvars_memstream.h
+++ b/src/colvars_memstream.h
@@ -106,16 +106,19 @@ class cvm::memory_stream {
   inline memory_stream & seekg(size_t pos) { read_pos_ = pos; return *this; }

   /// Ignore formatting operators
-  inline void setf(decltype(std::ios::fmtflags(std::ios::unitbuf)), decltype(std::ios::floatfield)) {}
+  inline void setf(std::ios::fmtflags /* flags */, std::ios::fmtflags /* mask */) {}

   /// Ignore formatting operators
-  inline void setf(decltype(std::ios::fmtflags(std::ios::unitbuf))) {}
+  inline void setf(std::ios::fmtflags) {}

   /// Ignore formatting operators
-  inline void flags(decltype(std::ios::fmtflags(std::ios::unitbuf))) {}
+  inline void unsetf(std::ios::fmtflags) {}

-  /// Get the current formatting flags (throw a useless result because this stream is unformatted)
-  inline decltype(std::ios::fmtflags(std::ios::unitbuf)) flags() const { return std::ios::fmtflags(std::ios::unitbuf); }
+  /// Ignore formatting operators
+  inline void flags(std::ios::fmtflags) {}
+
+  /// Get the current formatting flags (i.e. none because this stream is unformatted)
+  inline std::ios::fmtflags flags() const { return std::ios::fmtflags{}; }

   /// Get the error code
   inline std::ios::iostate rdstate() const { return state_; }
diff --git a/src/colvarscript.cpp b/src/colvarscript.cpp
index fb31baa1d..250dc8177 100644
--- a/src/colvarscript.cpp
+++ b/src/colvarscript.cpp
@@ -481,7 +481,7 @@ std::vector<std::string> colvarscript::obj_to_str_vector(unsigned char *obj)
                  str+"\n", COLVARS_INPUT_ERROR);
        break;
      }
-      new_result.push_back(std::string(""));
+      new_result.emplace_back("");
      while (str[i] != '\"') {
        new_result.back().append(1, str[i]);
        if (i >= str.length()) {
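For context, here is a minimal standalone sketch (not part of the patch, and not Colvars code) of the two idioms this changeset relies on: emplace_back() forwards its arguments to the element constructor and builds the object in place, avoiding the temporary that push_back(T(...)) creates, and unsetf(std::ios::floatfield) clears both the fixed and scientific bits, returning a stream to the default floating-point format. The feature_state_demo type below is a hypothetical stand-in for the two-argument constructors used throughout the patch.

// Standalone sketch illustrating the idioms used in this patch.
#include <iostream>
#include <vector>

struct feature_state_demo {
  // Hypothetical two-argument constructor, mirroring feature_state(avail, enabled).
  feature_state_demo(bool avail, bool enabled) : available(avail), enabled(enabled) {}
  bool available;
  bool enabled;
};

int main()
{
  std::vector<feature_state_demo> states;
  states.reserve(2);

  // push_back builds a temporary feature_state_demo, then copies/moves it into the vector.
  states.push_back(feature_state_demo(true, false));
  // emplace_back forwards the arguments and constructs the element in place.
  states.emplace_back(true, false);

  // setf(..., std::ios::floatfield) selects fixed or scientific notation;
  // unsetf(std::ios::floatfield) clears both bits, restoring the default format.
  std::cout.setf(std::ios::scientific, std::ios::floatfield);
  std::cout << 1234.5678 << "\n";          // e.g. 1.234568e+03
  std::cout.unsetf(std::ios::floatfield);  // back to the default format
  std::cout << 1234.5678 << "\n";          // e.g. 1234.57
  return 0;
}

The replaced calls of the form os.setf(std::ios::fmtflags(std::ios::dec), std::ios::floatfield) had the same net effect, since std::ios::dec shares no bits with std::ios::floatfield and the masked set therefore only cleared the field; unsetf() simply states that intent directly.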