diff --git a/src/BlockVector.cc b/src/BlockVector.cc index 40271750..9a411ebe 100644 --- a/src/BlockVector.cc +++ b/src/BlockVector.cc @@ -209,7 +209,7 @@ void BlockVector::allocate_storage() std::cerr << "ERROR BlockVector: trying to use allocation " << size_storage_ << " bigger than initialy preallocated " << allocated_size_storage_ << "!!!" << std::endl; - ct.global_exit(0); + ct.global_exit(); } storage_ = class_storage_[my_allocation_]; assert(class_storage_.size() > 0); diff --git a/src/Control.cc b/src/Control.cc index 043e5640..d0b1a224 100644 --- a/src/Control.cc +++ b/src/Control.cc @@ -495,7 +495,7 @@ void Control::sync(void) if (mpirc != MPI_SUCCESS) { (*MPIdata::sout) << "MPI Bcast of Control failed!!!" << std::endl; - MPI_Abort(comm_global_, 2); + MPI_Abort(comm_global_, EXIT_FAILURE); } }; @@ -1152,7 +1152,7 @@ void Control::setTolEigenvalueGram(const float tol) << threshold_eigenvalue_gram_ << std::endl; } -void Control::global_exit(int i) { MPI_Abort(comm_global_, i); } +void Control::global_exit() { MPI_Abort(comm_global_, EXIT_FAILURE); } void Control::setSpecies(Potentials& pot) { @@ -1342,7 +1342,7 @@ void Control::setOptions(const boost::program_options::variables_map& vm) (*MPIdata::serr) << "ERROR in Control::setOptions: Invalid restart dump type" << std::endl; - MPI_Abort(comm_global_, 2); + MPI_Abort(comm_global_, EXIT_FAILURE); } (*MPIdata::sout) << "Output restart file: " << out_restart_file @@ -1549,14 +1549,14 @@ void Control::setOptions(const boost::program_options::variables_map& vm) else { std::cerr << "ERROR: Spread Penalty needs a type" << std::endl; - MPI_Abort(comm_global_, 0); + MPI_Abort(comm_global_, EXIT_FAILURE); } if (spread_penalty_target_ <= 0.) { (*MPIdata::sout) << "Invalid value for Spread Penalty target: " << spread_penalty_target_ << std::endl; - MPI_Abort(comm_global_, 0); + MPI_Abort(comm_global_, EXIT_FAILURE); } } @@ -1599,7 +1599,7 @@ void Control::setOptions(const boost::program_options::variables_map& vm) { (*MPIdata::sout) << "Invalid value for Thermostat.type: " << thermostat_type << std::endl; - MPI_Abort(comm_global_, 0); + MPI_Abort(comm_global_, EXIT_FAILURE); } tkel = vm["Thermostat.temperature"].as(); @@ -1608,7 +1608,7 @@ void Control::setOptions(const boost::program_options::variables_map& vm) (*MPIdata::sout) << "Invalid value for Thermostat.temperature: " << tkel << std::endl; - MPI_Abort(comm_global_, 0); + MPI_Abort(comm_global_, EXIT_FAILURE); } thtime = vm["Thermostat.relax_time"].as(); if (thtime < 0.) 
@@ -1616,7 +1616,7 @@ void Control::setOptions(const boost::program_options::variables_map& vm) (*MPIdata::sout) << "Invalid value for Thermostat.relax_time: " << thtime << std::endl; - MPI_Abort(comm_global_, 0); + MPI_Abort(comm_global_, EXIT_FAILURE); } if (str.compare("SCALING") == 0) @@ -1627,7 +1627,7 @@ void Control::setOptions(const boost::program_options::variables_map& vm) (*MPIdata::sout) << "Invalid value for Thermostat.width: " << thwidth << std::endl; - MPI_Abort(comm_global_, 0); + MPI_Abort(comm_global_, EXIT_FAILURE); } } } diff --git a/src/Control.h b/src/Control.h index a15bd24c..77932fb3 100644 --- a/src/Control.h +++ b/src/Control.h @@ -353,7 +353,7 @@ class Control return pair_mlwf_distance_threshold_; } - void global_exit(int i); + void global_exit(); bool Mehrstellen() const { return (lap_type == 0 || lap_type == 10); } diff --git a/src/DistMatrix/BlacsContext.cc b/src/DistMatrix/BlacsContext.cc index e87cb7dd..7610a3cb 100644 --- a/src/DistMatrix/BlacsContext.cc +++ b/src/DistMatrix/BlacsContext.cc @@ -191,7 +191,7 @@ BlacsContext::BlacsContext( { std::cerr << " BlacsContext::BlacsContext: type = " << type << " is an incorrect parameter" << std::endl; - MPI_Abort(comm_global, 0); + MPI_Abort(comm_global, EXIT_FAILURE); } size_ = nprow_ * npcol_; @@ -222,7 +222,7 @@ BlacsContext::BlacsContext( { std::cerr << " nprocs_=" << nprocs_ << std::endl; std::cerr << " BlacsContext nprow*npcol > nprocs_" << std::endl; - MPI_Abort(comm_global, 0); + MPI_Abort(comm_global, EXIT_FAILURE); } ictxt_ = Csys2blacs_handle(comm_global_); @@ -252,7 +252,7 @@ BlacsContext::BlacsContext( { std::cerr << " BlacsContext::BlacsContext: invalid parameters" << " in " << __FILE__ << ":" << __LINE__ << std::endl; - MPI_Abort(comm_global, 0); + MPI_Abort(comm_global, EXIT_FAILURE); } int* pmap = new int[nprow * npcol]; // build pmap @@ -296,7 +296,7 @@ BlacsContext::BlacsContext(BlacsContext& bc, const int irow, const int icol, { std::cerr << " BlacsContext::BlacsContext: invalid parameters" << std::endl; - MPI_Abort(comm_global_, 0); + MPI_Abort(comm_global_, EXIT_FAILURE); } int* pmap = new int[nprow * npcol]; // build pmap @@ -350,7 +350,7 @@ BlacsContext::BlacsContext(const BlacsContext& bc, const char type) std::cerr << " BlacsContext::BlacsContext: row/col incorrect parameter: " << type << std::endl; - MPI_Abort(comm_global_, 0); + MPI_Abort(comm_global_, EXIT_FAILURE); } ictxt_ = Csys2blacs_handle(comm_global_); diff --git a/src/DistMatrix/DistMatrix.h b/src/DistMatrix/DistMatrix.h index f56df2b4..175fbd9b 100644 --- a/src/DistMatrix/DistMatrix.h +++ b/src/DistMatrix/DistMatrix.h @@ -28,7 +28,7 @@ std::cerr << "ERROR in file " << __FILE__ << " at line " << __LINE__ \ << std::endl; \ std::cerr << "Error Message: " << X << std::endl; \ - MPI_Abort(comm_global_, 2); + MPI_Abort(comm_global_, EXIT_FAILURE); #endif diff --git a/src/DistanceConstraint.cc b/src/DistanceConstraint.cc index d5175d7e..6d046f3b 100644 --- a/src/DistanceConstraint.cc +++ b/src/DistanceConstraint.cc @@ -99,7 +99,7 @@ bool DistanceConstraint::enforce(void) { cerr << "mype=" << mype << ", tau1p_[0]=" << tau1p_[0] << endl; cerr << "mype=" << mype << ", tau2p_[0]=" << tau2p_[0] << endl; - MPI_Abort(MPI_COMM_WORLD, 0); + MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); } if (locally_owned_) (*MPIdata::sout) << setprecision(8) << "DistanceConstraint, d=" << d diff --git a/src/Electrostatic.cc b/src/Electrostatic.cc index 6ac4fef3..aba09d07 100644 --- a/src/Electrostatic.cc +++ b/src/Electrostatic.cc @@ -65,7 +65,7 @@ 
Electrostatic::~Electrostatic() if (grhoc_ != nullptr) delete grhoc_; } -void Electrostatic::setupInitialVh(const POTDTYPE* const vh_init) +void Electrostatic::setupInitialVh(const std::vector& vh_init) { poisson_solver_->set_vh(vh_init); @@ -153,7 +153,7 @@ void Electrostatic::setupPB( // initialize vh with last trial solution pb::GridFunc gf_vh(*pbGrid_, bc_[0], bc_[1], bc_[2]); - gf_vh.assign(pot.vh_rho()); + gf_vh.assign((pot.vh_rho()).data()); poisson_solver_->set_vh(gf_vh); } diff --git a/src/Electrostatic.h b/src/Electrostatic.h index a8eb4709..f49ad12f 100644 --- a/src/Electrostatic.h +++ b/src/Electrostatic.h @@ -63,7 +63,7 @@ class Electrostatic template void computeVh(const pb::GridFunc& vhinit, const Ions& ions, Rho& rho, Potentials& pot); - void setupInitialVh(const POTDTYPE* const); + void setupInitialVh(const std::vector&); void setupInitialVh(const pb::GridFunc&); template void computeVhRho(Rho& rho); diff --git a/src/ExtendedGridOrbitals.cc b/src/ExtendedGridOrbitals.cc index 19c0d26b..07face7c 100644 --- a/src/ExtendedGridOrbitals.cc +++ b/src/ExtendedGridOrbitals.cc @@ -1239,7 +1239,7 @@ double ExtendedGridOrbitals::dotProduct( "dot product type" << std::endl; Control& ct = *(Control::instance()); - ct.global_exit(2); + ct.global_exit(); } dot_product_tm_.stop(); diff --git a/src/Forces.cc b/src/Forces.cc index 9ab19f6a..04e6ffd7 100644 --- a/src/Forces.cc +++ b/src/Forces.cc @@ -272,9 +272,10 @@ void Forces::get_loc_proj(RHODTYPE* rho, const int numpt = mymesh->numpt(); Potentials& pot = hamiltonian_->potential(); + const std::vector& vh_rho(pot.vh_rho()); for (int idx = 0; idx < numpt; idx++) { - const double vhrho = pot.vh_rho(idx); + const double vhrho = vh_rho[idx]; for (short dir = 0; dir < 3; dir++) { double* lproj = &(loc_proj[dir * NPTS]); diff --git a/src/HDFrestart.cc b/src/HDFrestart.cc index 0fac518e..1b2c7fb7 100644 --- a/src/HDFrestart.cc +++ b/src/HDFrestart.cc @@ -1440,8 +1440,8 @@ int HDFrestart::read_1func_hdf5(T* vv, const std::string& datasetname) } template -int HDFrestart::write_1func_hdf5( - T* vv, const std::string& datasetname, double* ll, double* cell_origin) +int HDFrestart::write_1func_hdf5(const T* const vv, + const std::string& datasetname, double* ll, double* cell_origin) { assert(ll != nullptr); assert(cell_origin != nullptr); @@ -1652,7 +1652,7 @@ int HDFrestart::readData( } template -int HDFrestart::writeData(T* data, hid_t space_id, hid_t memspace, +int HDFrestart::writeData(const T* const data, hid_t space_id, hid_t memspace, hid_t dset_id, const short precision) { if (precision == 1) @@ -1922,10 +1922,12 @@ int HDFrestart::readAtomicData(std::string datasetname, std::vector& data) // send data to inactive PEs if (gather_data_x_) gatherDataXdir(data); +#ifdef MGMOL_USE_HDF5P if (useHdf5p()) { data.erase(std::remove(data.begin(), data.end(), -1), data.end()); } +#endif return 0; } @@ -1974,10 +1976,12 @@ int HDFrestart::readAtomicData( } } +#ifdef MGMOL_USE_HDF5P if (useHdf5p()) { data.erase(std::remove(data.begin(), data.end(), 1e+32), data.end()); } +#endif if (gather_data_x_) gatherDataXdir(data); return 0; @@ -2017,7 +2021,8 @@ int HDFrestart::readAtomicData( { Control& ct = *(Control::instance()); if (onpe0 && ct.verbose > 0) - (*MPIdata::sout) << "HDFrestart::readAtomicNames()..." 
<< std::endl; + (*MPIdata::sout) << "HDFrestart::readAtomicData(), dataset = " + << datasetname << std::endl; std::vector buffer; short name_length = 7; // default, value used before February 2016 @@ -2095,19 +2100,19 @@ int HDFrestart::readAtomicData( { std::string t(&buffer[i], name_length); assert(t.size() > 0); - // cout<<"name="< - int write_1func_hdf5( - T*, const std::string&, double* ll = nullptr, double* origin = nullptr); + int write_1func_hdf5(const T* const, const std::string&, + double* ll = nullptr, double* origin = nullptr); int read_att(const hid_t dset_id, const std::string& attname, std::vector& attr_data); // write data in file with precision "precision" template - int writeData(T* vv, hid_t filespace, hid_t memspace, hid_t dset_id, - const short precision); + int writeData(const T* const vv, hid_t filespace, hid_t memspace, + hid_t dset_id, const short precision); template int readData(T* vv, hid_t memspace, hid_t dset_id, const short precision); diff --git a/src/Ion.cc b/src/Ion.cc index 33465635..90350477 100644 --- a/src/Ion.cc +++ b/src/Ion.cc @@ -294,12 +294,18 @@ void Ion::getIonData(IonData& idata) const } } +void Ion::resetPositionsToPrevious() +{ + for (int pos = 0; pos < 3; pos++) + position_[pos] = old_position_[pos]; +} + void Ion::setFromIonData(const IonData& data) { // random state setRandomState(data.rand_state[0], data.rand_state[1], data.rand_state[2]); // previous position - setOldPosition( + setPreviousPosition( data.old_position[0], data.old_position[1], data.old_position[2]); // velocity setVelocity(data.velocity[0], data.velocity[1], data.velocity[2]); diff --git a/src/Ion.h b/src/Ion.h index ae870820..871205a3 100644 --- a/src/Ion.h +++ b/src/Ion.h @@ -74,12 +74,6 @@ class Ion position_[i] += shift; } - void setOldPosition(const double x, const double y, const double z) - { - old_position_[0] = x; - old_position_[1] = y; - old_position_[2] = z; - } public: Ion(const Species& species, const std::string& name, const double crds[3], @@ -96,6 +90,13 @@ class Ion void init(const double crds[3], const double velocity[3], const bool lock); void setup(); + void setPreviousPosition(const double x, const double y, const double z) + { + old_position_[0] = x; + old_position_[1] = y; + old_position_[2] = z; + } + std::shared_ptr kbproj() { return kbproj_; } const std::shared_ptr kbproj() const { return kbproj_; } @@ -189,6 +190,9 @@ class Ion kbproj_->clear(); } + + void resetPositionsToPrevious(); + void shiftPositionXLBOMDTest(Vector3D shift) { for (short dir = 0; dir < 3; dir++) diff --git a/src/IonicAlgorithm.cc b/src/IonicAlgorithm.cc index 37a077e4..e804077a 100644 --- a/src/IonicAlgorithm.cc +++ b/src/IonicAlgorithm.cc @@ -57,7 +57,7 @@ void IonicAlgorithm::init(HDFrestart* h5f_file) if (ct.restart_info > 0) { int status = stepper_->init(*h5f_file); - if (status < 0) ct.global_exit(2); + if (status < 0) ct.global_exit(); // if restart data for lbfgs found if (status == 0) diff --git a/src/Ions.cc b/src/Ions.cc index e93d82ea..10baad10 100644 --- a/src/Ions.cc +++ b/src/Ions.cc @@ -853,6 +853,42 @@ void Ions::writePositions(HDFrestart& h5f_file) } } +void Ions::writePreviousPositions(HDFrestart& h5f_file) +{ + Control& ct(*(Control::instance())); + + if (onpe0 && ct.verbose > 1) + { + (*MPIdata::sout) << "Ions::writePositions" << std::endl; + } + + std::vector data; + if (h5f_file.gatherDataX()) + { + Mesh* mymesh = Mesh::instance(); + const pb::PEenv& myPEenv = mymesh->peenv(); + MPI_Comm comm = myPEenv.comm_x(); + + gatherPreviousPositions(data, 0, 
comm); + } + else + { + for (auto& ion : local_ions_) + { + data.push_back(ion->getPreviousPosition(0)); + data.push_back(ion->getPreviousPosition(1)); + data.push_back(ion->getPreviousPosition(2)); + } + } + + hid_t file_id = h5f_file.file_id(); + if (file_id >= 0) + { + std::string datasetname("/Ionic_previous_positions"); + writeData2d(h5f_file, datasetname, data, 3, 1.e32); + } +} + void Ions::initFromRestartFile(HDFrestart& h5_file) { assert(list_ions_.empty()); @@ -911,10 +947,12 @@ void Ions::initFromRestartFile(HDFrestart& h5_file) assert(at_numbers.size() == at_nlprojIds.size()); num_ions_ = at_names.size(); +#ifdef MGMOL_USE_HDF5P if (!h5_file.useHdf5p()) { mmpi.allreduce(&num_ions_, 1, MPI_SUM); } +#endif if (onpe0 && ct.verbose > 0) { (*MPIdata::sout) << "Ions::setFromRestartFile(), read " << num_ions_ @@ -995,14 +1033,39 @@ void Ions::readRestartPositions(HDFrestart& h5_file) } } +void Ions::readRestartPreviousPositions(HDFrestart& h5_file) +{ + Control& ct = *(Control::instance()); + if (onpe0 && ct.verbose > 0) + (*MPIdata::sout) << "Read ionic positions from hdf5 file" << std::endl; + + std::vector data; + std::string datasetname("/Ionic_previous_positions"); + h5_file.readAtomicData(datasetname, data); + assert(data.size() == 3 * local_ions_.size()); + + int i = 0; + for (auto& ion : local_ions_) + { + ion->setPreviousPosition(data[3 * i], data[3 * i + 1], data[3 * i + 2]); + i++; + } +} + +void Ions::resetPositionsToPrevious() +{ + for (auto& ion : local_ions_) + { + ion->resetPositionsToPrevious(); + } +} + void Ions::writeVelocities(HDFrestart& h5f_file) { Control& ct(*(Control::instance())); if (onpe0 && ct.verbose > 1) - { (*MPIdata::sout) << "Ions::writeVelocities" << std::endl; - } std::vector data; if (h5f_file.gatherDataX()) @@ -1225,6 +1288,29 @@ void Ions::writeForces(HDFrestart& h5f_file) } } +void Ions::setLocalForces( + const std::vector& forces, const std::vector& names) +{ + assert(forces.size() == 3 * names.size()); + + // loop over global list of forces and atom names + std::vector::const_iterator s = names.begin(); + for (auto it = forces.begin(); it != forces.end(); it += 3) + { + // find possible matching ion + for (auto& ion : local_ions_) + { + if (ion->compareName(*s)) + { + ion->set_force(0, *it); + ion->set_force(1, *(it + 1)); + ion->set_force(2, *(it + 2)); + } + } + s++; + } +} + // Writes out the postions of the ions and the current forces on them by root void Ions::printForcesGlobal(std::ostream& os, const int root) const { @@ -1900,7 +1986,6 @@ int Ions::read1atom(std::ifstream* tfile, const bool cell_relative) double velocity[3] = { 0., 0., 0. 
}; MGmol_MPI& mmpi(*(MGmol_MPI::instance())); - Control& ct(*(Control::instance())); short movable = 0; std::string query; @@ -1944,7 +2029,7 @@ int Ions::read1atom(std::ifstream* tfile, const bool cell_relative) { std::cerr << "ERROR: Invalid name read in input file: " << name_read << std::endl; - ct.global_exit(2); + mmpi.abort(); } short dummy; ss >> dummy; // not used anymore (was species index) @@ -2125,6 +2210,22 @@ void Ions::getLocalPositions(std::vector& tau) const } } +void Ions::getLocalNames(std::vector& names) const +{ + for (auto& ion : local_ions_) + { + names.push_back(ion->name()); + } +} + +void Ions::getNames(std::vector& names) const +{ + for (auto& ion : list_ions_) + { + names.push_back(ion->name()); + } +} + void Ions::getPositions(std::vector& tau) { std::vector tau_local(3 * local_ions_.size()); @@ -2535,6 +2636,28 @@ void Ions::gatherPositions( if (mype == root) positions = data; } +void Ions::gatherPreviousPositions( + std::vector& positions, const int root, const MPI_Comm comm) const +{ + std::vector local_positions; + + for (auto& ion : local_ions_) + { + local_positions.push_back(ion->getPreviousPosition(0)); + local_positions.push_back(ion->getPreviousPosition(1)); + local_positions.push_back(ion->getPreviousPosition(2)); + } + + // gather data to PE root + std::vector data; + mgmol_tools::gatherV(local_positions, data, root, comm); + + int mype = 0; + MPI_Comm_rank(comm, &mype); + positions.clear(); + if (mype == root) positions = data; +} + void Ions::gatherForces( std::vector& forces, const int root, const MPI_Comm comm) const { diff --git a/src/Ions.h b/src/Ions.h index ae4f6adb..33b8982b 100644 --- a/src/Ions.h +++ b/src/Ions.h @@ -147,6 +147,8 @@ class Ions const MPI_Comm comm) const; void gatherPositions(std::vector& positions, const int root, const MPI_Comm comm) const; + void gatherPreviousPositions(std::vector& positions, const int root, + const MPI_Comm comm) const; void gatherLockedNames(std::vector& names, const int root, const MPI_Comm comm) const; void gatherIndexes( @@ -198,6 +200,7 @@ class Ions ion++; } } + void resetPositionsToPrevious(); void removeMassCenterMotion(); bool hasNLprojectors() @@ -237,6 +240,7 @@ class Ions double kinetic_E(void) const; void writePositions(HDFrestart& h5f_file); + void writePreviousPositions(HDFrestart& h5f_file); void writeVelocities(HDFrestart& h5f_file); void writeRandomStates(HDFrestart& h5f_file); void writeForces(HDFrestart& h5f_file); @@ -281,11 +285,20 @@ class Ions const std::vector& tau, const std::vector& anumbers); void getLocalPositions(std::vector& tau) const; + void getLocalNames(std::vector& names) const; + void getNames(std::vector& names) const; void getPositions(std::vector& tau); void getAtomicNumbers(std::vector& atnumbers); void getForces(std::vector& forces); void getLocalForces(std::vector& tau) const; + + /*! 
+ * set forces for ions in local_ions_ based on names matching + */ + void setLocalForces(const std::vector& forces, + const std::vector& names); + void syncData(const std::vector& sp); // void syncNames(const int nions, std::vector& local_names, // std::vector& names); @@ -350,6 +363,7 @@ class Ions void addIonToList(const Species& sp, const std::string& name, const double crds[3], const double velocity[3], const bool lock); + void readRestartPreviousPositions(HDFrestart& h5_file); // void checkUnicityLocalIons(); }; diff --git a/src/KBPsiMatrixSparse.cc b/src/KBPsiMatrixSparse.cc index 604e2f8e..8f5b3d49 100644 --- a/src/KBPsiMatrixSparse.cc +++ b/src/KBPsiMatrixSparse.cc @@ -11,16 +11,15 @@ #include "Control.h" #include "ExtendedGridOrbitals.h" -#include "Ions.h" #include "LocGridOrbitals.h" #include "MGmol_MPI.h" #include "Mesh.h" #include "ProjectedMatrices.h" -#include "ProjectedMatricesSparse.h" #include "ReplicatedMatrix.h" #include "SquareSubMatrix2DistMatrix.h" #include + #define Ry2Ha 0.5; Timer KBPsiMatrixSparse::global_sum_tm_("KBPsiMatrixSparse::global_sum"); @@ -133,7 +132,7 @@ void KBPsiMatrixSparse::globalSumKBpsi() // Loop over the ions with projectors overlapping with local subdomain // and evaluate for some state. template -void KBPsiMatrixSparse::computeKBpsi(Ions& ions, T& orbitals, +void KBPsiMatrixSparse::computeKBpsi(const Ions& ions, T& orbitals, const int first_color, const int nb_colors, const bool flag) { assert(first_color >= 0); @@ -189,7 +188,7 @@ void KBPsiMatrixSparse::computeKBpsi(Ions& ions, T& orbitals, // Loop over the ions if (gid != -1) { - for (auto ion : ions.overlappingNL_ions()) + for (const auto& ion : ions.overlappingNL_ions()) { computeLocalElement( *ion, gid, iloc, ppsi + color * ldsize, flag); @@ -211,8 +210,8 @@ void KBPsiMatrixSparse::computeKBpsi(Ions& ions, T& orbitals, compute_kbpsi_tm_.stop(); } -void KBPsiMatrixSparse::computeKBpsi( - Ions& ions, pb::GridFunc* phi, const int istate, const bool flag) +void KBPsiMatrixSparse::computeKBpsi(const Ions& ions, + pb::GridFunc* phi, const int istate, const bool flag) { assert(lapop_ != nullptr); compute_kbpsi_tm_.start(); @@ -238,7 +237,7 @@ void KBPsiMatrixSparse::computeKBpsi( for (int iloc = 0; iloc < subdivx; iloc++) { // Loop over the ions - for (auto ion : ions.overlappingNL_ions()) + for (const auto& ion : ions.overlappingNL_ions()) { computeLocalElement(*ion, istate, iloc, ppsi, flag); } @@ -251,7 +250,7 @@ void KBPsiMatrixSparse::computeKBpsi( void KBPsiMatrixSparse::scaleWithKBcoeff(const Ions& ions) { - for (auto ion : ions.overlappingNL_ions()) + for (const auto& ion : ions.overlappingNL_ions()) { std::vector gids; ion->getGidsNLprojs(gids); @@ -266,8 +265,8 @@ void KBPsiMatrixSparse::scaleWithKBcoeff(const Ions& ions) // loop over states to multiply kbpsi_[st][gid] and kbBpsi_[st][gid] // by coeff - (*kbpsimat_).scaleRow(gid, coeff); - if (lapop_) (*kbBpsimat_).scaleRow(gid, coeff); + kbpsimat_->scaleRow(gid, coeff); + if (lapop_) kbBpsimat_->scaleRow(gid, coeff); } } } @@ -454,7 +453,7 @@ SquareSubMatrix KBPsiMatrixSparse::computeHvnlMatrix( // Loop over ions centered on current PE only // (distribution of work AND Hvnlij contributions) - for (auto ion : ions.local_ions()) + for (const auto& ion : ions.local_ions()) { computeHvnlMatrix((KBPsiMatrixSparse*)kbpsi2, *ion, Aij); } @@ -473,7 +472,7 @@ void KBPsiMatrixSparse::computeHvnlMatrix( // Loop over ions centered on current PE only // (distribution of work AND Hvnlij contributions) - for (auto ion : ions.local_ions()) + for 
(const auto& ion : ions.local_ions()) { computeHvnlMatrix((KBPsiMatrixSparse*)kbpsi2, *ion, mat); } @@ -538,14 +537,14 @@ void KBPsiMatrixSparse::getPsiKBPsiSym( { // loop over all the ions // parallelization over ions by including only those centered in subdomain - for (auto& ion : ions.local_ions()) + for (const auto& ion : ions.local_ions()) { getPsiKBPsiSym(*ion, sm); } } template -void KBPsiMatrixSparse::computeAll(Ions& ions, T& orbitals) +void KBPsiMatrixSparse::computeAll(const Ions& ions, T& orbitals) { assert(count_proj_subdomain_ == ions.countProjectorsSubdomain()); @@ -603,7 +602,7 @@ double KBPsiMatrixSparse::getEvnl( double trace = 0.0; // loop over all the ions // parallelization over ions by including only those centered in subdomain - for (auto& ion : ions.local_ions()) + for (const auto& ion : ions.local_ions()) { std::vector gids; ion->getGidsNLprojs(gids); @@ -618,10 +617,9 @@ double KBPsiMatrixSparse::getEvnl( /* gather trace result */ MGmol_MPI& mmpi = *(MGmol_MPI::instance()); - MPI_Comm comm = mmpi.commSpin(); double evnl = 0.0; - MPI_Allreduce(&trace, &evnl, 1, MPI_DOUBLE, MPI_SUM, comm); + mmpi.allreduce(&trace, &evnl, 1, MPI_SUM); return evnl * Ry2Ha; } @@ -637,7 +635,7 @@ double KBPsiMatrixSparse::getEvnl(const Ions& ions, double trace = 0.0; // loop over all the ions // parallelization over ions by including only those centered in subdomain - for (auto& ion : ions.local_ions()) + for (const auto& ion : ions.local_ions()) { std::vector gids; ion->getGidsNLprojs(gids); @@ -652,10 +650,9 @@ double KBPsiMatrixSparse::getEvnl(const Ions& ions, /* gather trace result */ MGmol_MPI& mmpi = *(MGmol_MPI::instance()); - MPI_Comm comm = mmpi.commSpin(); double evnl = 0.0; - MPI_Allreduce(&trace, &evnl, 1, MPI_DOUBLE, MPI_SUM, comm); + mmpi.allreduce(&trace, &evnl, 1, MPI_SUM); return evnl * Ry2Ha; } @@ -736,12 +733,12 @@ double KBPsiMatrixSparse::getTraceDM( return trace; } -template void KBPsiMatrixSparse::computeKBpsi(Ions& ions, +template void KBPsiMatrixSparse::computeKBpsi(const Ions& ions, LocGridOrbitals& orbitals, const int first_color, const int nb_colors, const bool flag); -template void KBPsiMatrixSparse::computeAll(Ions&, LocGridOrbitals&); +template void KBPsiMatrixSparse::computeAll(const Ions&, LocGridOrbitals&); -template void KBPsiMatrixSparse::computeKBpsi(Ions& ions, +template void KBPsiMatrixSparse::computeKBpsi(const Ions& ions, ExtendedGridOrbitals& orbitals, const int first_color, const int nb_colors, const bool flag); -template void KBPsiMatrixSparse::computeAll(Ions&, ExtendedGridOrbitals&); +template void KBPsiMatrixSparse::computeAll(const Ions&, ExtendedGridOrbitals&); diff --git a/src/KBPsiMatrixSparse.h b/src/KBPsiMatrixSparse.h index 91f6c583..073fbddc 100644 --- a/src/KBPsiMatrixSparse.h +++ b/src/KBPsiMatrixSparse.h @@ -12,7 +12,9 @@ #include "DataDistribution.h" #include "DensityMatrixSparse.h" +#include "Ions.h" #include "KBPsiMatrixInterface.h" +#include "ProjectedMatricesSparse.h" #include "SquareSubMatrix.h" #include "VariableSizeMatrix.h" @@ -23,11 +25,6 @@ #include #include -class Ions; -class Ion; -class ProjectedMatricesInterface; -class ProjectedMatricesSparse; - class KBPsiMatrixSparse : public KBPsiMatrixInterface { static Timer global_sum_tm_; @@ -79,8 +76,8 @@ class KBPsiMatrixSparse : public KBPsiMatrixInterface void getPsiKBPsiSym(const Ions& ions, VariableSizeMatrix& sm); void getPsiKBPsiSym(const Ion& ion, VariableSizeMatrix& sm); template - void computeKBpsi(Ions& ions, OrbitalsType& orbitals, const int first_color, - 
const int nb_colors, const bool flag); + void computeKBpsi(const Ions& ions, OrbitalsType& orbitals, + const int first_color, const int nb_colors, const bool flag); void clearData(); public: @@ -105,7 +102,7 @@ class KBPsiMatrixSparse : public KBPsiMatrixInterface double getEvnl( const Ions& ions, ProjectedMatrices* proj_matrices); void computeKBpsi( - Ions& ions, pb::GridFunc*, const int, const bool flag); + const Ions& ions, pb::GridFunc*, const int, const bool flag); double getValIonState(const int gid, const int st) const { return (*kbpsimat_).get_value(gid, st); @@ -127,7 +124,7 @@ class KBPsiMatrixSparse : public KBPsiMatrixInterface const Ions& ions, ProjectedMatricesInterface* proj_matrices) const; template - void computeAll(Ions& ions, T& orbitals); + void computeAll(const Ions& ions, T& orbitals); void setup(const Ions& ions); double getTraceDM( diff --git a/src/LDAFunctional.cc b/src/LDAFunctional.cc index 4b60b51e..89f5ee5f 100644 --- a/src/LDAFunctional.cc +++ b/src/LDAFunctional.cc @@ -120,8 +120,7 @@ double LDAFunctional::computeRhoDotExc() const if (rc != MPI_SUCCESS) { (*MPIdata::sout) << "MPI_Allreduce double sum failed!!!" << endl; - Control& ct = *(Control::instance()); - ct.global_exit(2); + mmpi.abort(); } exc = (POTDTYPE)sum; return exc; diff --git a/src/LocalizationRegions.cc b/src/LocalizationRegions.cc index 60f28301..f0f96412 100644 --- a/src/LocalizationRegions.cc +++ b/src/LocalizationRegions.cc @@ -492,7 +492,7 @@ void LocalizationRegions::bcastLRs() cerr << "ERROR!!!! LocalizationRegions::bcast(), Failure in MPI_Bcast " "of 'nglobal_'!!!" << endl; - MPI_Abort(comm, 0); + MPI_Abort(comm, EXIT_FAILURE); } if (nglobal_ == 0) return; @@ -522,7 +522,7 @@ void LocalizationRegions::bcastLRs() cerr << "ERROR!!!! LocalizationRegions::bcast(), Failure in " "MPI_Bcast of 'centers'!!!" << endl; - MPI_Abort(comm, 0); + MPI_Abort(comm, EXIT_FAILURE); } vector::iterator it = all_regions_.begin(); int i = 0; @@ -554,7 +554,7 @@ void LocalizationRegions::bcastLRs() cerr << "ERROR!!!! LocalizationRegions::bcast(), Failure in " "MPI_Bcast of 'radius'!!!" << endl; - MPI_Abort(comm, 0); + MPI_Abort(comm, EXIT_FAILURE); } it = all_regions_.begin(); i = 0; @@ -642,7 +642,7 @@ void LocalizationRegions::bcastLRs() cerr << "ERROR!!!! LocalizationRegions::bcast(), Failure in MPI_Bcast " "of 'volume'!!!" 
<< endl; - MPI_Abort(comm, 0); + MPI_Abort(comm, EXIT_FAILURE); } if (ct.verbose > 0) printWithTimeStamp("LocalizationRegions::bcast() done...", cout); @@ -915,19 +915,9 @@ void LocalizationRegions::setupLocalRegionsFromOverlapRegions() cerr << "ERROR in distribution of localization centers: count=" << count << endl; cerr << "global number of regions=" << nglobal_ << endl; - MPI_Abort(MPI_COMM_WORLD, 1); + MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); } -#if 0 - //if(onpe0) - if( count!=nglobal_ ) - { - cerr<<"ERROR in distribution of localization centers: count="<::initial() mmpi.barrier(); if (ct.verbose > 0) current_orbitals_->printChromaticNumber(os_); - pot.initBackground(*ions_); - // Random initialization of the wavefunctions if (ct.restart_info <= 2) { @@ -360,8 +358,8 @@ int MGmol::initial() } // Initialize the nuclear local potential and the compensating charges - if (ct.verbose > 0) printWithTimeStamp("initNuc()...", os_); - initNuc(*ions_); + if (ct.verbose > 0) printWithTimeStamp("setupPotentials()...", os_); + setupPotentials(*ions_); // initialize Rho if (ct.verbose > 0) printWithTimeStamp("Initialize Rho...", os_); @@ -402,7 +400,6 @@ int MGmol::initial() current_orbitals_->setDataWithGhosts(); current_orbitals_->trade_boundaries(); - // if(ct.restart_info <= 1)pot.initWithVnuc(); // initialize matrices S and invB if (ct.numst > 0) @@ -418,9 +415,6 @@ int MGmol::initial() current_orbitals_->checkCond(100000., ct.AtomsMove()); } - if (ct.verbose > 0) printWithTimeStamp("Setup kbpsi...", os_); - g_kbpsi_->setup(*ions_); - if (ct.restart_info == 0) { if (ct.verbose > 0) printWithTimeStamp("update_pot...", os_); @@ -730,12 +724,6 @@ void MGmol::write_header() if (ct.isLocMode() && ct.verbose > 3) lrs_->printAllRegions(os_); } -template -void MGmol::global_exit(int i) -{ - MPI_Abort(comm_, i); -} - template void MGmol::check_anisotropy() { @@ -750,7 +738,8 @@ void MGmol::check_anisotropy() << ", hmin=" << mygrid.hmin() << std::endl; (*MPIdata::serr) << "init: Anisotropy too large: " << mygrid.anisotropy() << std::endl; - global_exit(2); + MGmol_MPI& mmpi = *(MGmol_MPI::instance()); + mmpi.abort(); } } @@ -825,7 +814,7 @@ double get_trilinval(const double xc, const double yc, const double zc, #endif template -void MGmol::initNuc(Ions& ions) +void MGmol::setupPotentials(Ions& ions) { init_nuc_tm_.start(); @@ -834,26 +823,15 @@ void MGmol::initNuc(Ions& ions) Potentials& pot = hamiltonian_->potential(); + // initialize poentials based on ionic positions and their species pot.initialize(ions); - // Check compensating charges - double comp_rho = getCharge(pot.rho_comp()); - - if (onpe0 && ct.verbose > 1) - { - os_ << std::setprecision(8) << std::fixed - << " Charge of rhoc: " << comp_rho << std::endl; - } - -#if 1 - pot.rescaleRhoComp(); -#endif - - pot.addBackgroundToRhoComp(); + if (ct.verbose > 0) printWithTimeStamp("Setup kbpsi...", os_); + g_kbpsi_->setup(*ions_); electrostat_->setupRhoc(pot.rho_comp()); - if (onpe0 && ct.verbose > 3) os_ << " initNuc done" << std::endl; + if (onpe0 && ct.verbose > 3) os_ << " setupPotentials done" << std::endl; init_nuc_tm_.stop(); } @@ -1062,7 +1040,8 @@ void MGmol::setup() total_tm_.start(); setup_tm_.start(); - Control& ct = *(Control::instance()); + Control& ct = *(Control::instance()); + MGmol_MPI& mmpi = *(MGmol_MPI::instance()); if (ct.verbose > 0) printWithTimeStamp("MGmol::setup()...", os_); @@ -1080,7 +1059,8 @@ void MGmol::setup() #else int ierr = initial(); #endif - if (ierr < 0) global_exit(0); + + if (ierr < 0) mmpi.abort(); // Write 
header to stdout write_header(); @@ -1112,7 +1092,6 @@ void MGmol::dumpRestart() // create restart file std::string filename(std::string(ct.out_restart_file)); - if (ct.out_restart_file_naming_strategy) filename += "0"; HDFrestart h5restartfile( filename, myPEenv, gdim, ct.out_restart_file_type); @@ -1411,14 +1390,14 @@ void MGmol::update_pot(const Ions& ions) const bool flag_mixing = (fabs(ct.mix_pot - 1.) > 1.e-3); - // evaluate potential correction + // update total potential if (flag_mixing) { - pot.delta_v(rho_->rho_); - pot.update(ct.mix_pot); + pot.computeDeltaV(rho_->rho_); + pot.updateVtot(ct.mix_pot); } else - pot.update(rho_->rho_); + pot.updateVtot(rho_->rho_); } template @@ -1461,7 +1440,7 @@ double MGmol::evaluateEnergyAndForces(Orbitals* orbitals, ions_->setPositions(tau, atnumbers); - moveVnuc(*ions_); + setupPotentials(*ions_); double eks = 0.; OrbitalsType* dorbitals = dynamic_cast(orbitals); @@ -1483,7 +1462,7 @@ double MGmol::evaluateDMandEnergyAndForces(Orbitals* orbitals, ions_->setPositions(tau, atnumbers); - moveVnuc(*ions_); + setupPotentials(*ions_); // initialize electronic density rho_->update(*dorbitals); diff --git a/src/MGmol.h b/src/MGmol.h index 315a0ff9..43df67f2 100644 --- a/src/MGmol.h +++ b/src/MGmol.h @@ -153,7 +153,6 @@ class MGmol : public MGmolInterface void initialMasks(); int setupLRsFromInput(const std::string filename); - void setup(); int setupLRs(const std::string input_file) override; int setupFromInput(const std::string input_file) override; int setupConstraintsFromInput(const std::string input_file) override; @@ -180,6 +179,8 @@ class MGmol : public MGmolInterface ~MGmol() override; + void setup(); + /* access functions */ OrbitalsType* getOrbitals() { return current_orbitals_; } std::shared_ptr> getHamiltonian() @@ -223,10 +224,9 @@ class MGmol : public MGmolInterface void getAtomicNumbers(std::vector& an); - void initNuc(Ions& ions); + void setupPotentials(Ions& ions); void initKBR(); - void global_exit(int i); void printEigAndOcc(); int readCoordinates(std::ifstream* tfile, const bool cell_relative); @@ -345,6 +345,11 @@ class MGmol : public MGmolInterface { forces_->force(orbitals, ions); } + void setPositions(const std::vector& positions, + const std::vector& atnumbers) + { + ions_->setPositions(positions, atnumbers); + } /* * simply dump current state diff --git a/src/MGmolInterface.h b/src/MGmolInterface.h index 9a9bf8a6..dc54a8e7 100644 --- a/src/MGmolInterface.h +++ b/src/MGmolInterface.h @@ -40,6 +40,9 @@ class MGmolInterface virtual void getAtomicPositions(std::vector& tau) = 0; virtual void getAtomicNumbers(std::vector& an) = 0; + virtual void setPositions(const std::vector& positions, + const std::vector& atnumbers) + = 0; virtual std::shared_ptr getProjectedMatrices() = 0; virtual void dumpRestart() = 0; diff --git a/src/NonOrthoDMStrategy.cc b/src/NonOrthoDMStrategy.cc index 5452592f..3835b039 100644 --- a/src/NonOrthoDMStrategy.cc +++ b/src/NonOrthoDMStrategy.cc @@ -46,7 +46,7 @@ int NonOrthoDMStrategy::update(OrbitalsType& orbitals) { std::cerr << "NonOrthoDMStrategy, Invalid mixing value: " << mix_ << std::endl; - MPI_Abort(mmpi.commSameSpin(), 0); + MPI_Abort(mmpi.commSameSpin(), EXIT_FAILURE); } if (mmpi.PE0() && ct.verbose > 2) diff --git a/src/PBEFunctional.cc b/src/PBEFunctional.cc index 4a878c3c..29151a14 100644 --- a/src/PBEFunctional.cc +++ b/src/PBEFunctional.cc @@ -167,8 +167,7 @@ double PBEFunctional::computeRhoDotExc() const if (rc != MPI_SUCCESS) { (*MPIdata::sout) << "MPI_Allreduce double sum failed!!!" 
<< std::endl; - Control& ct = *(Control::instance()); - ct.global_exit(2); + mmpi.abort(); } exc = sum; return exc; diff --git a/src/Poisson.h b/src/Poisson.h index 14716cda..4bdfeeab 100644 --- a/src/Poisson.h +++ b/src/Poisson.h @@ -71,7 +71,10 @@ class Poisson : public PoissonInterface virtual void set_rhod(pb::GridFunc* /*rhod*/){}; void set_vh(const pb::GridFunc& vh) { (*vh_) = vh; }; - void set_vh(const POTDTYPE* const vh) { vh_->assign(vh, 'd'); }; + void set_vh(const std::vector& vh) + { + vh_->assign(vh.data(), 'd'); + }; void resetVh() { vh_->resetData(); } void set_vepsilon(const POTDTYPE* const vepsilon) { diff --git a/src/Potentials.cc b/src/Potentials.cc index 9916c021..a620cdeb 100644 --- a/src/Potentials.cc +++ b/src/Potentials.cc @@ -25,7 +25,6 @@ #include "mputils.h" #include -using namespace std; // unit conversion factor Ha -> Ry const double ha2ry = 2.; @@ -88,41 +87,24 @@ Potentials::Potentials() #endif } -void Potentials::initWithVnuc() -{ - assert(size_ > 0); - if (verbosity_level_ > 2 && onpe0) - (*MPIdata::sout) << "Potentials::initWithVnuc()" << endl; - itindex_vxc_ = 0; - itindex_vh_ = 0; - int ione = 1; - Tcopy(&size_, &v_nuc_[0], &ione, &vtot_[0], &ione); - double one = 1.; - LinearAlgebraUtils::MPaxpy( - size_, one, &v_ext_[0], &vtot_[0]); - // factor ha2ry to get total potential in [Ry] for calculations - LinearAlgebraUtils::MPscal(size_, ha2ry, &vtot_[0]); -} - double Potentials::max() const { - Mesh* mymesh = Mesh::instance(); - const pb::PEenv& myPEenv = mymesh->peenv(); - double vmax = (*max_element(vtot_.begin(), vtot_.end())); - vmax = myPEenv.double_max_all(vmax); + MGmol_MPI& mmpi = *(MGmol_MPI::instance()); + double vmax = (*max_element(vtot_.begin(), vtot_.end())); + vmax = mmpi.allreduce(&vmax, 1, MPI_MAX); return vmax; } double Potentials::min() const { - Mesh* mymesh = Mesh::instance(); - const pb::PEenv& myPEenv = mymesh->peenv(); - double vmin = -(*min_element(vtot_.begin(), vtot_.end())); - vmin = -myPEenv.double_max_all(vmin); + MGmol_MPI& mmpi = *(MGmol_MPI::instance()); + double vmin = -(*min_element(vtot_.begin(), vtot_.end())); + vmin = -mmpi.allreduce(&vmin, 1, MPI_MAX); return vmin; } -void Potentials::evalNormDeltaVtotRho(const vector>& rho) +void Potentials::evalNormDeltaVtotRho( + const std::vector>& rho) { Mesh* mymesh = Mesh::instance(); const pb::Grid& mygrid = mymesh->grid(); @@ -141,17 +123,16 @@ void Potentials::evalNormDeltaVtotRho(const vector>& rho) mmpi.allreduce(&scf_dvrho_, 1, MPI_SUM); } -double Potentials::update(const vector>& rho) +double Potentials::updateVtot(const std::vector>& rho) { assert(itindex_vxc_ >= 0); assert(itindex_vh_ >= 0); assert(itindex_vxc_ == itindex_vh_); if (verbosity_level_ > 2 && onpe0) - (*MPIdata::sout) << "Potentials::update(rho)" << endl; - int ione = 1; - Mesh* mymesh = Mesh::instance(); - const pb::PEenv& myPEenv = mymesh->peenv(); + (*MPIdata::sout) << "Potentials::update(rho)" << std::endl; + int ione = 1; + MGmol_MPI& mmpi = *(MGmol_MPI::instance()); // save old potentials Tcopy(&size_, &vtot_[0], &ione, &vtot_old_[0], &ione); @@ -181,28 +162,27 @@ double Potentials::update(const vector>& rho) = LinearAlgebraUtils::MPdot(size_, &dv_[0], &dv_[0]); double sum = 0.; - int rc - = MPI_Allreduce(&dvdot, &sum, 1, MPI_DOUBLE, MPI_SUM, myPEenv.comm()); + int rc = mmpi.allreduce(&dvdot, &sum, 1, MPI_SUM); if (rc != MPI_SUCCESS) { - cout << "MPI_Allreduce double sum failed!!!" << endl; - MPI_Abort(myPEenv.comm(), 2); + std::cerr << "MPI_Allreduce double sum failed!!!" 
<< std::endl; + mmpi.abort(); } dvdot = sum; scf_dv_ = 0.5 * sqrt(dvdot); - const double gsize = (double)size_ * (double)myPEenv.n_mpi_tasks(); + const double gsize = (double)size_ * (double)mmpi.size(); scf_dv_ /= gsize; return scf_dv_; } -void Potentials::update(const double mix) +void Potentials::updateVtot(const double mix) { assert(itindex_vxc_ == itindex_vh_); #ifdef DEBUG - if (onpe0) (*MPIdata::sout) << "Potentials::update(mix)" << endl; + if (onpe0) (*MPIdata::sout) << "Potentials::update(mix)" << std::endl; #endif // int ione=1; double potmix = mix; @@ -210,17 +190,16 @@ void Potentials::update(const double mix) size_, potmix, &dv_[0], &vtot_[0]); } -double Potentials::delta_v(const vector>& rho) +double Potentials::computeDeltaV(const std::vector>& rho) { assert(itindex_vxc_ == itindex_vh_); assert(size_ > 0); if (verbosity_level_ > 2 && onpe0) - (*MPIdata::sout) << "Potentials::delta_v()" << endl; + (*MPIdata::sout) << "Potentials::delta_v()" << std::endl; - int ione = 1; - Mesh* mymesh = Mesh::instance(); - const pb::PEenv& myPEenv = mymesh->peenv(); + int ione = 1; + MGmol_MPI& mmpi = *(MGmol_MPI::instance()); // save old potentials Tcopy(&size_, &vtot_[0], &ione, &vtot_old_[0], &ione); @@ -249,24 +228,23 @@ double Potentials::delta_v(const vector>& rho) = LinearAlgebraUtils::MPdot(size_, &dv_[0], &dv_[0]); double sum = 0.; - int rc - = MPI_Allreduce(&dvdot, &sum, 1, MPI_DOUBLE, MPI_SUM, myPEenv.comm()); + int rc = mmpi.allreduce(&dvdot, &sum, 1, MPI_SUM); if (rc != MPI_SUCCESS) { - cout << "MPI_Allreduce double sum failed!!!" << endl; - MPI_Abort(myPEenv.comm(), 2); + std::cerr << "MPI_Allreduce double sum failed!!!" << std::endl; + mmpi.abort(); } dvdot = sum; scf_dv_ = 0.5 * sqrt(dvdot); - const double gsize = (double)size_ * (double)myPEenv.n_mpi_tasks(); + const double gsize = (double)size_ * (double)mmpi.size(); scf_dv_ /= gsize; return scf_dv_; } // in Ry -void Potentials::getVofRho(vector& vrho) const +void Potentials::getVofRho(std::vector& vrho) const { vrho.resize(size_); int ione = 1; @@ -283,7 +261,7 @@ void Potentials::getVofRho(vector& vrho) const // type: // 2->text // 3->binary -void Potentials::readExternalPot(const string filename, const char type) +void Potentials::readExternalPot(const std::string filename, const char type) { assert(type == 2 || type == 3); @@ -304,27 +282,27 @@ void Potentials::readExternalPot(const string filename, const char type) { from->seekg(0, ios::end); const int length = from->tellg(); - (*MPIdata::sout) << "Length file = " << length << endl; + (*MPIdata::sout) << "Length file = " << length << std::endl; from->seekg(0, ios::beg); if (length <= 0) { (*MPIdata::serr) - << "ERROR Potential: file length <=0!!!!" << endl; + << "ERROR Potential: file length <=0!!!!" << std::endl; mmpi.abort(); } } } if (!from) { - (*MPIdata::serr) << " Cannot open file " << filename << endl; + (*MPIdata::serr) << " Cannot open file " << filename << std::endl; mmpi.abort(); } if (onpe0) { (*MPIdata::sout) << "Potentials::read_ExternalPot(), filename=" - << filename << endl; - if (type == 2) (*MPIdata::sout) << "text file..." << endl; - if (type == 3) (*MPIdata::sout) << "binary file..." << endl; + << filename << std::endl; + if (type == 2) (*MPIdata::sout) << "text file..." << std::endl; + if (type == 3) (*MPIdata::sout) << "binary file..." 
<< std::endl; } // read origin and end of cell (to check compatibility) @@ -348,29 +326,32 @@ void Potentials::readExternalPot(const string filename, const char type) if (onpe0) for (short d = 0; d < 3; d++) { - (*MPIdata::sout) << setprecision(8); + (*MPIdata::sout) << std::setprecision(8); if (fabs(origin[d] - mygrid.origin(d)) > 1.e-3) { (*MPIdata::serr) << "ERROR Potential: Incompatible cell origin in direction " - << d << endl; - (*MPIdata::serr) << "Potential origin=" << origin[d] << endl; - (*MPIdata::serr) << "MGmol origin=" << mygrid.origin(d) << endl; + << d << std::endl; + (*MPIdata::serr) + << "Potential origin=" << origin[d] << std::endl; + (*MPIdata::serr) + << "MGmol origin=" << mygrid.origin(d) << std::endl; (*MPIdata::serr) << "Difference=" << fabs(origin[d] - mygrid.origin(d)) - << endl; + << std::endl; mmpi.abort(); } if (fabs(ll[d] - mygrid.ll(d)) > 1.e-3) { (*MPIdata::serr) << "ERROR Potential: Incompatible cell " "dimension in direction " - << d << endl; - (*MPIdata::serr) << "Potential cell end=" << end[d] << endl; + << d << std::endl; (*MPIdata::serr) - << "Potential cell dimension=" << ll[d] << endl; + << "Potential cell end=" << end[d] << std::endl; (*MPIdata::serr) - << "MGmol cell dimension=" << mygrid.ll(d) << endl; + << "Potential cell dimension=" << ll[d] << std::endl; + (*MPIdata::serr) + << "MGmol cell dimension=" << mygrid.ll(d) << std::endl; mmpi.abort(); } } @@ -392,9 +373,9 @@ void Potentials::readExternalPot(const string filename, const char type) if (nxyz[i] != gdim_[i]) { (*MPIdata::serr) << "Potentials::read_ExternalPot(): dimension " - << i << " incompatible with Grid!!!" << endl; + << i << " incompatible with Grid!!!" << std::endl; (*MPIdata::serr) - << "n=" << nxyz[i] << ", gdim_=" << gdim_[i] << endl; + << "n=" << nxyz[i] << ", gdim_=" << gdim_[i] << std::endl; mmpi.abort(); } @@ -434,7 +415,6 @@ void Potentials::readExternalPot(const string filename, const char type) { assert(index < size_); (*from) >> v_ext_[index]; - //(*MPIdata::sout)<grid(); double hmin = mygrid.hmin(); if (verbosity_level_ > 2 && onpe0) - (*MPIdata::sout) << "hmin= " << hmin << endl; + (*MPIdata::sout) << "hmin= " << hmin << std::endl; - vector::const_iterator it_filename = pot_filenames_.begin(); - int isp = 0; + std::vector::const_iterator it_filename + = pot_filenames_.begin(); + int isp = 0; while (it_filename != pot_filenames_.end()) { if (pot_types_[isp] == 'n' || pot_types_[isp] == 's' @@ -556,13 +538,14 @@ void Potentials::readAll(vector& sp) #else (*MPIdata::sout) << "ERROR: cannot read external potential " - << " -> need to compile with Tricubic library" << endl; + << " -> need to compile with Tricubic library" << std::endl; #endif } it_filename++; isp++; } } + template void Potentials::setVxc(const T* const vxc, const int iterativeIndex) { @@ -571,13 +554,6 @@ void Potentials::setVxc(const T* const vxc, const int iterativeIndex) itindex_vxc_ = iterativeIndex; MPcpy(&vxc_rho_[0], vxc, size_); } -void Potentials::setVh(const POTDTYPE* const vh, const int iterativeIndex) -{ - assert(iterativeIndex >= 0); - int ione = 1; - itindex_vh_ = iterativeIndex; - Tcopy(&size_, vh, &ione, &vh_rho_[0], &ione); -} void Potentials::setVh( const pb::GridFunc& vh, const int iterativeIndex) @@ -594,11 +570,6 @@ void Potentials::axpVcompToVh(const double alpha) size_, alpha, &v_comp_[0], &vh_rho_[0]); } -void Potentials::axpVcomp(POTDTYPE* v, const double alpha) -{ - LinearAlgebraUtils::MPaxpy(size_, alpha, &v_comp_[0], v); -} - void Potentials::backupVh() { 
memcpy(vh_rho_backup_.data(), vh_rho_.data(), size_ * sizeof(POTDTYPE)); @@ -792,9 +763,12 @@ void Potentials::initialize(Ions& ions) const pb::Grid& mygrid = mymesh->grid(); const int numpt = mygrid.size(); - memset(&v_comp_[0], 0, numpt * sizeof(POTDTYPE)); - memset(&rho_comp_[0], 0, numpt * sizeof(RHODTYPE)); - memset(&v_nuc_[0], 0, numpt * sizeof(RHODTYPE)); + memset(v_comp_.data(), 0, numpt * sizeof(POTDTYPE)); + memset(rho_comp_.data(), 0, numpt * sizeof(RHODTYPE)); + memset(v_nuc_.data(), 0, numpt * sizeof(RHODTYPE)); + + // Count up the total ionic charge + ionic_charge_ = ions.computeIonicCharge(); char flag_filter = pot_type(0); @@ -805,6 +779,7 @@ void Potentials::initialize(Ions& ions) Vector3D position(ion->position(0), ion->position(1), ion->position(2)); + // initialize rho_comp_, v_comp_, v_nuc_ if (flag_filter == 's') { const int sampleRate = 3; @@ -820,6 +795,13 @@ void Potentials::initialize(Ions& ions) initializeRadialDataOnMesh(position, sp); } } + + // rescale rho_comp_ due to finite mesh effects + rescaleRhoComp(); + + initBackground(); + + addBackgroundToRhoComp(); } void Potentials::rescaleRhoComp() @@ -831,24 +813,33 @@ void Potentials::rescaleRhoComp() const pb::Grid& mygrid = mymesh->grid(); // Check compensating charges - double comp_rho = getCharge(&rho_comp_[0]); + double comp_rho = getCharge(rho_comp_.data()); + if (onpe0 && ct.verbose > 1) + { + std::cout << std::setprecision(8) << std::fixed + << " Charge of rhoc: " << comp_rho << std::endl; + } if (onpe0 && ct.verbose > 1) { - cout << " Rescaling rhoc" << endl; + std::cout << " Rescaling rhoc" << std::endl; } + + // rescale rho_comp_ (initialized by sampling on mesh) + // so that its integral exactly matches ionic_charge_ if (ionic_charge_ > 0.) { const int numpt = mygrid.size(); double t = ionic_charge_ / comp_rho; - LinearAlgebraUtils::MPscal(numpt, t, &rho_comp_[0]); + LinearAlgebraUtils::MPscal( + numpt, t, rho_comp_.data()); // Check new compensating charges - comp_rho = getCharge(&rho_comp_[0]); + comp_rho = getCharge(rho_comp_.data()); } if (onpe0 && ct.verbose > 1) - cout << " Rescaled compensating charges: " << setprecision(8) << fixed - << comp_rho << endl; + std::cout << " Rescaled compensating charges: " << std::setprecision(8) + << std::fixed << comp_rho << std::endl; if (comp_rho < 0.) 
mmpi.abort(); } @@ -868,26 +859,23 @@ void Potentials::addBackgroundToRhoComp() { if (onpe0) { - cout << setprecision(12) << scientific - << "Add background charge " << background << " to rhoc " - << endl; + std::cout << std::setprecision(12) << std::scientific + << "Add background charge " << background + << " to rhoc " << std::endl; } for (int i = 0; i < numpt; i++) rho_comp_[i] += background; // Check new compensating charges - getCharge(&rho_comp_[0]); + getCharge(rho_comp_.data()); } } } -void Potentials::initBackground(Ions& ions) +void Potentials::initBackground() { Control& ct = *(Control::instance()); - // Count up the total ionic charge - ionic_charge_ = ions.computeIonicCharge(); - // calculation the compensating background charge // for charged supercell calculations background_charge_ = 0.; @@ -898,9 +886,9 @@ void Potentials::initBackground(Ions& ions) } if (onpe0 && ct.verbose > 0) { - cout << "N electrons= " << ct.getNel() << endl; - cout << "ionic charge= " << ionic_charge_ << endl; - cout << "background charge=" << background_charge_ << endl; + std::cout << "N electrons= " << ct.getNel() << std::endl; + std::cout << "ionic charge= " << ionic_charge_ << std::endl; + std::cout << "background charge=" << background_charge_ << std::endl; } if (fabs(background_charge_) < 1.e-10) background_charge_ = 0.; @@ -918,8 +906,8 @@ void Potentials::evalIonDensityOnSamplePts( { if (onpe0) { - cout << "Potentials::evalIonDensityOnSamplePts - flag_filter s is not supported" - << endl; + std::cout << "Potentials::evalIonDensityOnSamplePts - flag_filter s is not supported" + << std::endl; } mmpi.abort(); } @@ -997,6 +985,75 @@ void Potentials::initializeRadialDataOnSampledPts( return; } +int Potentials::read(HDFrestart& h5f_file) +{ + Control& ct = *(Control::instance()); + + // Read total potential + h5f_file.read_1func_hdf5(vtot_.data(), "Vtotal"); + + // Read the hartree potential + h5f_file.read_1func_hdf5(vh_rho_.data(), "Hartree"); + itindex_vh_ = 0; + + // Read dielectric potential + if (ct.diel) + { + h5f_file.read_1func_hdf5(vepsilon_.data(), "VDielectric"); + } + + std::string datasetname("Preceding_Hartree"); + if (h5f_file.checkDataExists(datasetname)) + { + h5f_file.read_1func_hdf5(vh_rho_backup_.data(), datasetname); + } + + return 0; +} + +int Potentials::write(HDFrestart& h5f_file) +{ + Control& ct = *(Control::instance()); + + Mesh* mymesh = Mesh::instance(); + const pb::Grid& mygrid = mymesh->grid(); + + double ll[3] = { mygrid.ll(0), mygrid.ll(1), mygrid.ll(2) }; + double origin[3] = { mygrid.origin(0), mygrid.origin(1), mygrid.origin(2) }; + + // Write total potential + int ierr + = h5f_file.write_1func_hdf5(vtot_.data(), "Vtotal", &ll[0], &origin[0]); + if (ierr < 0) return ierr; + + // Write the hartree potential + ierr = h5f_file.write_1func_hdf5( + vh_rho_.data(), "Hartree", &ll[0], &origin[0]); + if (ierr < 0) return ierr; + + if (ct.AtomsDynamic() == AtomsDynamicType::MD) + { + // Write hartree potential before extrapolation + ierr = h5f_file.write_1func_hdf5( + vh_rho_backup_.data(), "Preceding_Hartree", &ll[0], &origin[0]); + if (ierr < 0) return ierr; + } + + // Write + if (ct.diel) + { + ierr = h5f_file.write_1func_hdf5( + vepsilon_.data(), "VDielectric", &ll[0], &origin[0]); + } + if (ierr < 0) return ierr; + + // Write external potential + ierr = h5f_file.write_1func_hdf5(v_ext_.data(), "Vext", &ll[0], &origin[0]); + if (ierr < 0) return ierr; + + return ierr; +} + template void Potentials::setVxc( const double* const vxc, const int iterativeIndex); template 
void Potentials::setVxc( diff --git a/src/Potentials.h b/src/Potentials.h index f0f762b3..6803edc3 100644 --- a/src/Potentials.h +++ b/src/Potentials.h @@ -10,6 +10,7 @@ #ifndef MGMOL_POTENTIALS_H #define MGMOL_POTENTIALS_H +#include "HDFrestart.h" #include "Rho.h" #include "TriCubic.h" @@ -25,7 +26,6 @@ class GridFunc; class Potentials { - int size_; int gdim_[3]; int dim_[3]; @@ -103,6 +103,12 @@ class Potentials void initializeRadialDataOnSampledPts( const Vector3D& position, const Species& sp, const std::vector &local_idx, std::vector &sampled_rhoc); + void rescaleRhoComp(); + + void addBackgroundToRhoComp(); + + void initBackground(); + public: Potentials(); @@ -142,54 +148,46 @@ class Potentials void turnOnDiel() { diel_ = true; } + int write(HDFrestart& h5f_file); + int read(HDFrestart& h5f_file); + int size() const { return size_; } double scf_dvrho(void) const { return scf_dvrho_; } double scf_dv(void) const { return scf_dv_; } POTDTYPE* vtot() { return vtot_.data(); } - POTDTYPE* vh_rho() { return vh_rho_.data(); } RHODTYPE* rho_comp() { return rho_comp_.data(); } const std::vector& vnuc() const { return v_nuc_; } - POTDTYPE* vnuc() { return v_nuc_.data(); } - POTDTYPE* vext() { return v_ext_.data(); } + const std::vector& vh_rho() const { return vh_rho_; } + POTDTYPE* vepsilon() { return vepsilon_.data(); } - POTDTYPE* vh_rho_backup() { return vh_rho_backup_.data(); } void axpVcompToVh(const double alpha); - void axpVcomp(POTDTYPE* v, const double alpha); - - POTDTYPE vtot(const int i) { return vtot_[i]; } - POTDTYPE vh_rho(const int i) { return vh_rho_[i]; } - POTDTYPE vxc_rho(const int i) { return vxc_rho_[i]; } - POTDTYPE vepsilon(const int i) { return vepsilon_[i]; } bool diel() const { return diel_; } double getChargeInCell() const { return charge_in_cell_; } - /*! - * initialize total potential as local pseudopotential - */ - void initWithVnuc(); + const double getBackgroundCharge() const { return background_charge_; } void getVofRho(std::vector& vrho) const; /*! * evaluate potential correction associated with a new rho */ - double delta_v(const std::vector>& rho); + double computeDeltaV(const std::vector>& rho); /*! - * update potentials based on argument rho + * update total potential with updated components */ - double update(const std::vector>& rho); + double updateVtot(const std::vector>& rho); /*! * update potentials based on potential correction delta v and mixing * parameter */ - void update(const double mix); + void updateVtot(const double mix); double max() const; double min() const; @@ -197,19 +195,17 @@ class Potentials template void setVxc(const T* const vxc, const int iterativeIndex); - void setVh(const POTDTYPE* const vh, const int iterativeIndex); void setVh(const pb::GridFunc& vh, const int iterativeIndex); void initialize(Ions& ions); - void rescaleRhoComp(); - void initBackground(Ions& ions); - void addBackgroundToRhoComp(); /*! 
* Save current Hartree potential into backup array */ void backupVh(); + void resetVhRho2Backup() { vh_rho_ = vh_rho_backup_; } + void evalIonDensityOnSamplePts(Ions& ions, const std::vector &local_idx, std::vector &sampled_rhoc); #ifdef HAVE_TRICUBIC diff --git a/src/ProjectedMatricesSparse.cc b/src/ProjectedMatricesSparse.cc index 8b328b1c..15ad6d6c 100644 --- a/src/ProjectedMatricesSparse.cc +++ b/src/ProjectedMatricesSparse.cc @@ -354,7 +354,7 @@ double ProjectedMatricesSparse::dotProductSimple( << std::endl; MGmol_MPI& mmpi = *(MGmol_MPI::instance()); - MPI_Abort(mmpi.commSameSpin(), 0); + MPI_Abort(mmpi.commSameSpin(), EXIT_FAILURE); return -1.; } diff --git a/src/main.cc b/src/main.cc index 3fcbf791..3c6830a0 100644 --- a/src/main.cc +++ b/src/main.cc @@ -45,7 +45,7 @@ int main(int argc, char** argv) if (mpirc != MPI_SUCCESS) { std::cerr << "MPI Initialization failed!!!" << std::endl; - MPI_Abort(MPI_COMM_WORLD, 0); + MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); } MPI_Comm comm = MPI_COMM_WORLD; diff --git a/src/md.cc b/src/md.cc index 1d5ee937..f21fe542 100644 --- a/src/md.cc +++ b/src/md.cc @@ -59,7 +59,7 @@ void MGmol::moveVnuc(Ions& ions) // Update items that change when the ionic coordinates change pot.axpVcompToVh(1.); - initNuc(ions); + setupPotentials(ions); pot.axpVcompToVh(-1.); proj_matrices_->setHiterativeIndex(-1, -1); @@ -242,9 +242,12 @@ int MGmol::dumpMDrestartFile(OrbitalsType& orbitals, Ions& ions, std::string filename(std::string(ct.out_restart_file)); // add an integer corresponding to attempt number/count // to allow several attempts at creating and writing file - std::stringstream s; - s << count; - filename += s.str(); + if (count > 0) + { + std::stringstream s; + s << count; + filename += s.str(); + } HDFrestart h5file(filename, myPEenv, gdim, ct.out_restart_file_type); @@ -737,11 +740,29 @@ void MGmol::loadRestartFile(const std::string filename) if (ierr < 0) { if (onpe0) - (*MPIdata::serr) - << "loadRestartFile: failed to read the restart file." - << std::endl; + std::cerr << "loadRestartFile: failed to read the restart file." + << std::endl; - global_exit(0); + mmpi.abort(); + } + if (!ct.fullyOccupied()) + { + // overwrite DM with restart data in dataset Density_Matrix_WF + if (h5file.checkDataExists("Density_Matrix_WF")) + ierr = proj_matrices_->readWFDM(h5file); + } + + if (h5file.checkDataExists("Preceding_Hartree")) + { + ions_->readRestartPreviousPositions(h5file); + ions_->resetPositionsToPrevious(); + ions_->setup(); + + Potentials& pot = hamiltonian_->potential(); + pot.initialize(*ions_); + if (onpe0) std::cout << "Reset VhRho to backup..." 
<< std::endl; + pot.resetVhRho2Backup(); + electrostat_->setupRhoc(pot.rho_comp()); } if (!ct.fullyOccupied()) { diff --git a/src/mgmol_memory.cc b/src/mgmol_memory.cc index a1360c99..b1be12d6 100644 --- a/src/mgmol_memory.cc +++ b/src/mgmol_memory.cc @@ -62,7 +62,7 @@ void addTrack(long addr, long asize) if (nPos > MAXNUMALLOCATIONS) { printf("ERROR: Not enough memory slots!!!"); - MPI_Abort(MPI_COMM_WORLD, 1); + MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); } // if(onpe0)printf("nPos=%d, addr=%ld, size=%ld\n",nPos,addr,asize); } diff --git a/src/mgmol_run.cc b/src/mgmol_run.cc index a5de1ad5..468d61bd 100644 --- a/src/mgmol_run.cc +++ b/src/mgmol_run.cc @@ -96,7 +96,7 @@ int mgmol_check() { std::cerr << "Code should be called with " << myPEenv.n_mpi_tasks() << " MPI tasks only" << std::endl; - ct.global_exit(2); + ct.global_exit(); } assert(ct.getMGlevels() >= -1); diff --git a/src/pb/Lap.h b/src/pb/Lap.h index 33d90706..460e36c1 100644 --- a/src/pb/Lap.h +++ b/src/pb/Lap.h @@ -35,7 +35,7 @@ class Lap : public FDoper virtual void applyWithPot(GridFunc&, const double* const, T*) { std::cerr << "ERROR: Lap::applyWithPot() not implemented" << std::endl; - MPI_Abort(MPI_COMM_WORLD, 0); + MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); } std::string name() const { return name_; } diff --git a/src/pb/PEenv.cc b/src/pb/PEenv.cc index d5ff78f3..3e2fb777 100644 --- a/src/pb/PEenv.cc +++ b/src/pb/PEenv.cc @@ -197,7 +197,7 @@ PEenv::~PEenv() if (mpirc != MPI_SUCCESS) { std::cerr << "MPI_Comm_free failed!" << std::endl; - MPI_Abort(comm_, 2); + MPI_Abort(comm_, EXIT_FAILURE); } } if (cart_comm_ != MPI_COMM_NULL) MPI_Comm_free(&cart_comm_); @@ -277,7 +277,7 @@ void PEenv::task2xyz() if (rc != MPI_SUCCESS) { std::cerr << " error in MPI_Cart_coords()!!!" << std::endl; - MPI_Abort(comm_, 1); + MPI_Abort(comm_, EXIT_FAILURE); } #else mytask_dir_[2] = mytask_ % n_mpi_tasks_dir_[2]; @@ -650,7 +650,7 @@ void PEenv::split_comm(const int nx, const int ny, const int nz, const int bias) { std::cerr << "MPI_Comm_split failed!, my color_=" << color_ << std::endl; - MPI_Abort(comm_, 0); + MPI_Abort(comm_, EXIT_FAILURE); } MPI_Comm_size(comm_active_, &n_mpi_tasks_); #ifndef NDEBUG @@ -696,7 +696,7 @@ void PEenv::printPEnames(std::ostream& os) const { std::cerr << "PEenv::printPEnames, MPI_Recv() failed!!!" << std::endl; - MPI_Abort(comm_, 0); + MPI_Abort(comm_, EXIT_FAILURE); } } else if (ip == mytask_) @@ -708,7 +708,7 @@ void PEenv::printPEnames(std::ostream& os) const { std::cerr << "PEenv::printPEnames, MPI_Send() failed!!!" 
<< std::endl; - MPI_Abort(comm_, 0); + MPI_Abort(comm_, EXIT_FAILURE); } } if (mytask_ == 0) @@ -717,7 +717,7 @@ void PEenv::printPEnames(std::ostream& os) const if (mytask_ == 0) os << std::endl; } -void PEenv::globalExit() const { MPI_Abort(comm_, 2); } +void PEenv::globalExit() const { MPI_Abort(comm_, EXIT_FAILURE); } void PEenv::bcast(int* val, const int n) const { diff --git a/src/pb/PEenv.h b/src/pb/PEenv.h index bc4751a5..9782ddb9 100644 --- a/src/pb/PEenv.h +++ b/src/pb/PEenv.h @@ -166,7 +166,7 @@ class PEenv { std::cerr << "ERROR in PEenv::maxXdir()" << std::endl; sleep(5); - MPI_Abort(comm_, 0); + MPI_Abort(comm_, EXIT_FAILURE); } delete[] sendbuf; } @@ -181,7 +181,7 @@ class PEenv { std::cerr << "ERROR in PEenv::maxYdir()" << std::endl; sleep(5); - MPI_Abort(comm_, 0); + MPI_Abort(comm_, EXIT_FAILURE); } delete[] sendbuf; } @@ -196,7 +196,7 @@ class PEenv { std::cerr << "ERROR in PEenv::maxZdir()" << std::endl; sleep(5); - MPI_Abort(comm_, 0); + MPI_Abort(comm_, EXIT_FAILURE); } delete[] sendbuf; } diff --git a/src/quench.cc b/src/quench.cc index 59a16d50..073798f6 100644 --- a/src/quench.cc +++ b/src/quench.cc @@ -545,7 +545,6 @@ int MGmol::quench(OrbitalsType& orbitals, Ions& ions, // get actual indexes of stored functions const std::vector>& gids(orbitals.getOverlappingGids()); - g_kbpsi_->setup(*ions_); electrostat_->setup(ct.vh_its); rho_->setup(ct.getOrthoType(), gids); diff --git a/src/radial/RadialMeshFunction.cc b/src/radial/RadialMeshFunction.cc index 499e482a..79b32518 100644 --- a/src/radial/RadialMeshFunction.cc +++ b/src/radial/RadialMeshFunction.cc @@ -98,7 +98,7 @@ void RadialMeshFunction::bcast(MPI_Comm comm, const int root) { (*MPIdata::sout) << "RadialMeshFunction::bcast() failed!!!" << std::endl; - MPI_Abort(comm, 0); + MPI_Abort(comm, EXIT_FAILURE); } for (int i = 0; i < nn[1]; i++) @@ -110,7 +110,7 @@ void RadialMeshFunction::bcast(MPI_Comm comm, const int root) { (*MPIdata::sout) << "RadialMeshFunction::bcast() failed!!!" 
<< std::endl; - MPI_Abort(comm, 0); + MPI_Abort(comm, EXIT_FAILURE); } } } diff --git a/src/restart.cc b/src/restart.cc index 30bdad73..532043af 100644 --- a/src/restart.cc +++ b/src/restart.cc @@ -43,27 +43,11 @@ int MGmol::read_rho_and_pot_hdf5( os_ << "Try to read density and potentials" << std::endl; Potentials& pot = hamiltonian_->potential(); + pot.read(file); - Mesh* mymesh = Mesh::instance(); - const pb::Grid& mygrid = mymesh->grid(); - POTDTYPE* tmp = new POTDTYPE[mygrid.size()]; - - // Read total potential - file.read_1func_hdf5(pot.vtot(), "Vtotal"); - - // Read the hartree potential - file.read_1func_hdf5(tmp, "Hartree"); - pot.setVh(tmp, 0); - - // Read dielectric potential - if (ct.diel) - { - file.read_1func_hdf5(pot.vepsilon(), "VDielectric"); - } // Read the Density rho.readRestart(file); - delete[] tmp; return 0; } @@ -118,6 +102,8 @@ int MGmol::write_hdf5(HDFrestart& h5f_file, ions.writeAtomicNLprojIDs(h5f_file); ions.writePositions(h5f_file); if (ct.LangevinThermostat()) ions.writeRandomStates(h5f_file); + if (ct.AtomsDynamic() == AtomsDynamicType::MD) + ions.writePreviousPositions(h5f_file); ions.writeVelocities(h5f_file); ions.writeForces(h5f_file); @@ -131,41 +117,12 @@ int MGmol::write_hdf5(HDFrestart& h5f_file, if (ct.out_restart_info > 1) { - // Write total potential - int ierr = h5f_file.write_1func_hdf5( - pot.vtot(), "Vtotal", &ll[0], &origin[0]); - if (ierr < 0) return ierr; - - // Write the hartree potential - ierr = h5f_file.write_1func_hdf5( - pot.vh_rho(), "Hartree", &ll[0], &origin[0]); - if (ierr < 0) return ierr; - - if (ct.AtomsDynamic() == AtomsDynamicType::MD) - { - // Write hartree potential before extrapolation - ierr = h5f_file.write_1func_hdf5( - pot.vh_rho_backup(), "Preceding_Hartree", &ll[0], &origin[0]); - if (ierr < 0) return ierr; - } - - // Write - if (ct.diel) - { - ierr = h5f_file.write_1func_hdf5( - pot.vepsilon(), "VDielectric", &ll[0], &origin[0]); - } - if (ierr < 0) return ierr; + int ierr = pot.write(h5f_file); // Write the Density ierr = h5f_file.write_1func_hdf5( &rho[0][0], "Density", &ll[0], &origin[0]); if (ierr < 0) return ierr; - - // Write external potential - ierr - = h5f_file.write_1func_hdf5(pot.vext(), "Vext", &ll[0], &origin[0]); - if (ierr < 0) return ierr; } // Write wavefunctions and old centers. 
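Note: the inline reads and writes of the "Vtotal", "Hartree", "Preceding_Hartree", "VDielectric" and "Vext" datasets removed above are consolidated into Potentials::read() and Potentials::write(); those member functions are not shown in this diff. A minimal sketch of what write() presumably covers, reusing only the datasets and write_1func_hdf5() calls visible in the removed hunk (the signature, the vh_rho_/vh_rho_backup_ data access, and the getCellDimensions() helper are assumptions):

// Sketch only: assumed shape of the Potentials::write() addition in
// src/Potentials.cc, which is not part of the hunks shown here.
int Potentials::write(HDFrestart& h5f_file)
{
    Control& ct = *(Control::instance());

    // Cell dimensions and origin, as previously passed to write_1func_hdf5()
    // from MGmol::write_hdf5(); how Potentials obtains them is an assumption.
    double ll[3];
    double origin[3];
    getCellDimensions(ll, origin); // hypothetical helper

    // Total potential
    int ierr = h5f_file.write_1func_hdf5(vtot(), "Vtotal", &ll[0], &origin[0]);
    if (ierr < 0) return ierr;

    // Hartree potential
    ierr = h5f_file.write_1func_hdf5(
        vh_rho_.data(), "Hartree", &ll[0], &origin[0]);
    if (ierr < 0) return ierr;

    if (ct.AtomsDynamic() == AtomsDynamicType::MD)
    {
        // Hartree potential before extrapolation, read back as
        // "Preceding_Hartree" by MGmol::loadRestartFile()
        ierr = h5f_file.write_1func_hdf5(
            vh_rho_backup_.data(), "Preceding_Hartree", &ll[0], &origin[0]);
        if (ierr < 0) return ierr;
    }

    // Dielectric potential
    if (ct.diel)
    {
        ierr = h5f_file.write_1func_hdf5(
            vepsilon(), "VDielectric", &ll[0], &origin[0]);
        if (ierr < 0) return ierr;
    }

    // External potential
    return h5f_file.write_1func_hdf5(vext(), "Vext", &ll[0], &origin[0]);
}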
diff --git a/src/rom_workflows.cc b/src/rom_workflows.cc index 52ad6e47..8fbd4cb3 100644 --- a/src/rom_workflows.cc +++ b/src/rom_workflows.cc @@ -103,7 +103,7 @@ void readRestartFiles(MGmolInterface *mgmol_) break; default: - ct.global_exit(-1); + ct.global_exit(); break; } @@ -129,7 +129,8 @@ void readRestartFiles(MGmolInterface *mgmol_) case ROMVariable::POTENTIAL: basis_prefix += "_potential"; /* we save hartree potential */ - basis_generator.takeSample(pot.vh_rho()); + // TODO: consider create overloaded takeSample with const + basis_generator.takeSample(const_cast(pot.vh_rho().data())); break; } } diff --git a/src/setup.cc b/src/setup.cc index 9963bfc5..2ff6d79e 100644 --- a/src/setup.cc +++ b/src/setup.cc @@ -140,7 +140,7 @@ int MGmol::setupLRsFromInput(const std::string filename) if (!tfile->is_open()) { std::cerr << " Unable to open file " << filename << std::endl; - global_exit(0); + mmpi.abort(); } else { @@ -173,7 +173,7 @@ int MGmol::setupConstraintsFromInput(const std::string filename) if (!tfile->is_open()) { std::cerr << " Unable to open file " << filename << std::endl; - global_exit(0); + mmpi.abort(); } else { diff --git a/src/sparse_linear_algebra/DataDistribution.cc b/src/sparse_linear_algebra/DataDistribution.cc index d4b26d93..67bb1319 100644 --- a/src/sparse_linear_algebra/DataDistribution.cc +++ b/src/sparse_linear_algebra/DataDistribution.cc @@ -288,7 +288,7 @@ void DataDistribution::distributeLocalDataWithCommOvlp(const int nsteps, std::cout << "ERROR: " << name_ << ", dir=" << dir << ", remote_size=" << remote_size << ", bsiz=" << bsiz << std::endl; - MPI_Abort(cart_comm_, 0); + MPI_Abort(cart_comm_, EXIT_FAILURE); } // string stamp="DataDistribution ("+name_+"), buffer size checked..."; // printWithTimeStamp(stamp,cout); @@ -302,14 +302,14 @@ void DataDistribution::distributeLocalDataWithCommOvlp(const int nsteps, if (mpircv != MPI_SUCCESS) { std::cout << "ERROR in MPI_Irecv, code=" << mpircv << std::endl; - MPI_Abort(cart_comm_, 0); + MPI_Abort(cart_comm_, EXIT_FAILURE); } int mpisnd = MPI_Isend(packed_buffer.sendBuffer(), siz, MPI_CHAR, dest, 0, cart_comm_, &request[1]); if (mpisnd != MPI_SUCCESS) { std::cout << "ERROR in MPI_Isend, code=" << mpisnd << std::endl; - MPI_Abort(cart_comm_, 0); + MPI_Abort(cart_comm_, EXIT_FAILURE); } /* wait to complete communication */ MPI_Waitall(2, request, MPI_STATUSES_IGNORE); diff --git a/src/tools.cc b/src/tools.cc index 95790c59..aff34933 100644 --- a/src/tools.cc +++ b/src/tools.cc @@ -29,7 +29,7 @@ void noMoreMemory() std::cerr << "Unable to satisfy request for memory for MPI task " << mype << std::endl; Control& ct = *(Control::instance()); - ct.global_exit(3); + ct.global_exit(); } // an atom name should start with a capital letter and end with a number @@ -229,12 +229,12 @@ void printWithTimeStamp(const std::string& string2print, std::ostream& os) if( mpierr!=MPI_SUCCESS ) { cerr << " Error in MPI!!!" << std::endl; - MPI_Abort(mmpi.commGlobal(),1); + MPI_Abort(mmpi.commGlobal(),EXIT_FAILURE); } if( r!=mmpi.size()*s && onpe0 ) { cerr << " Error in barrier: "< #include #include +#include #include // full lower triangular part of symmetric matrix with compact storage diff --git a/src/tools/Vector3D.cc b/src/tools/Vector3D.cc index 445f8787..2fe9a0e4 100644 --- a/src/tools/Vector3D.cc +++ b/src/tools/Vector3D.cc @@ -80,7 +80,7 @@ void bcastvv3d(std::vector& vv, MPI_Comm comm) std::cerr << "ERROR!!!! bcastvv3d(), Failure in MPI_Bcast of 'radii_'!!!" 
<< std::endl; - MPI_Abort(comm, 0); + MPI_Abort(comm, EXIT_FAILURE); } for (int j = 0; j < n; j++) for (short i = 0; i < 3; i++) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 6b5514d2..5141a887 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -250,6 +250,8 @@ add_executable(testDensityMatrix ${CMAKE_SOURCE_DIR}/src/ReplicatedWorkSpace.cc ${CMAKE_SOURCE_DIR}/src/hdf_tools.cc ${CMAKE_SOURCE_DIR}/tests/ut_magma_main.cc) +add_executable(testRhoVhRestart + ${CMAKE_SOURCE_DIR}/tests/RhoVhRestart/testRhoVhRestart.cc) add_executable(testEnergyAndForces ${CMAKE_SOURCE_DIR}/tests/EnergyAndForces/testEnergyAndForces.cc) add_executable(testWFEnergyAndForces @@ -392,6 +394,17 @@ add_test(NAME testPinnedH2O_3DOF ${CMAKE_CURRENT_SOURCE_DIR}/PinnedH2O_3DOF/coords.in ${CMAKE_CURRENT_SOURCE_DIR}/PinnedH2O_3DOF/lrs.in ${CMAKE_CURRENT_SOURCE_DIR}/../potentials) +add_test(NAME testRhoVhRestart + COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/RhoVhRestart/test.py + ${MPIEXEC} ${MPIEXEC_NUMPROC_FLAG} 4 ${MPIEXEC_PREFLAGS} + ${CMAKE_CURRENT_BINARY_DIR}/../src/mgmol-opt + ${CMAKE_CURRENT_BINARY_DIR}/testRhoVhRestart + ${CMAKE_CURRENT_SOURCE_DIR}/RhoVhRestart/mgmol.cfg + ${CMAKE_CURRENT_SOURCE_DIR}/RhoVhRestart/md.cfg + ${CMAKE_CURRENT_SOURCE_DIR}/RhoVhRestart/restart.cfg + ${CMAKE_CURRENT_SOURCE_DIR}/RhoVhRestart/h2o.xyz + ${CMAKE_CURRENT_SOURCE_DIR}/../potentials) + if(${MAGMA_FOUND}) add_test(NAME testOpenmpOffload @@ -587,6 +600,7 @@ target_include_directories(testDensityMatrix PRIVATE ${Boost_INCLUDE_DIRS} ${HDF target_include_directories(testGramMatrix PRIVATE ${Boost_INCLUDE_DIRS}) target_include_directories(testAndersonMix PRIVATE ${Boost_INCLUDE_DIRS}) target_include_directories(testIons PRIVATE ${Boost_INCLUDE_DIRS} ${HDF5_INCLUDE_DIRS}) +target_include_directories(testRhoVhRestart PRIVATE ${Boost_INCLUDE_DIRS}) target_link_libraries(testMPI PRIVATE MPI::MPI_CXX) target_link_libraries(testBlacsContext PRIVATE ${SCALAPACK_LIBRARIES} @@ -600,6 +614,7 @@ target_link_libraries(testRestartEnergyAndForces PRIVATE mgmol_src) target_link_libraries(testPinnedH2O_3DOF PRIVATE mgmol_src) target_link_libraries(testIons PRIVATE mgmol_src) target_link_libraries(testDensityMatrix PRIVATE ${HDF5_LIBRARIES}) +target_link_libraries(testRhoVhRestart mgmol_src) if(${MAGMA_FOUND}) target_link_libraries(testDistVector PRIVATE ${SCALAPACK_LIBRARIES} diff --git a/tests/DMandEnergyAndForces/mgmol.cfg b/tests/DMandEnergyAndForces/mgmol.cfg index 5278f40b..e7712706 100644 --- a/tests/DMandEnergyAndForces/mgmol.cfg +++ b/tests/DMandEnergyAndForces/mgmol.cfg @@ -2,16 +2,16 @@ verbosity=2 xcFunctional=LDA FDtype=4th [Mesh] -nx=64 -ny=64 -nz=64 +nx=48 +ny=48 +nz=48 [Domain] -ox=-6. -oy=-6. -oz=-6. -lx=12. -ly=12. -lz=12. +ox=-4.5 +oy=-4.5 +oz=-4.5 +lx=9. +ly=9. +lz=9. [Potentials] pseudopotential=pseudo.N_ONCVPSP_LDA [Run] diff --git a/tests/EnergyAndForces/mgmol.cfg b/tests/EnergyAndForces/mgmol.cfg index cf23889e..d1299db9 100644 --- a/tests/EnergyAndForces/mgmol.cfg +++ b/tests/EnergyAndForces/mgmol.cfg @@ -2,16 +2,16 @@ verbosity=1 xcFunctional=LDA FDtype=Mehrstellen [Mesh] -nx=64 -ny=64 -nz=64 +nx=48 +ny=48 +nz=48 [Domain] -ox=-6. -oy=-6. -oz=-6. -lx=12. -ly=12. -lz=12. +ox=-4.5 +oy=-4.5 +oz=-4.5 +lx=9. +ly=9. +lz=9. 
[Potentials] pseudopotential=pseudo.N_ONCVPSP_LDA [Run] diff --git a/tests/HDF5single/md.cfg b/tests/HDF5single/md.cfg index 1ff2adab..8d6b21e2 100644 --- a/tests/HDF5single/md.cfg +++ b/tests/HDF5single/md.cfg @@ -25,10 +25,10 @@ max_steps=24 atol=1.e-8 [Restart] input_level=4 -input_filename=WF +input_filename=wf.h5 input_type=single_file output_level=4 -output_filename=WF_MD +output_filename=wf_md.h5 output_type=single_file [Coloring] scope=global diff --git a/tests/HDF5single/mgmol.cfg b/tests/HDF5single/mgmol.cfg index 4dba942a..e8f01a29 100644 --- a/tests/HDF5single/mgmol.cfg +++ b/tests/HDF5single/mgmol.cfg @@ -25,7 +25,7 @@ initial_type=Random initial_width=1.5 [Restart] output_level=4 -output_filename=WF +output_filename=wf.h5 output_type=single_file [Coloring] scope=global diff --git a/tests/HDF5single/test.py b/tests/HDF5single/test.py index 080ee0ba..e2f0596a 100755 --- a/tests/HDF5single/test.py +++ b/tests/HDF5single/test.py @@ -47,7 +47,7 @@ output = subprocess.check_output(command,shell=True) lines=output.split(b'\n') -os.remove('WF') +os.remove('wf.h5') print("Check energy conservation...") tol = 1.e-4 @@ -71,5 +71,7 @@ print("ERROR needs 4 energy values for checking conservation!") sys.exit(1) +os.remove('wf_md.h5') + print("Test SUCCESSFUL!") sys.exit(0) diff --git a/tests/MD_D72/test.py b/tests/MD_D72/test.py index 17b70de2..3f98b2bd 100755 --- a/tests/MD_D72/test.py +++ b/tests/MD_D72/test.py @@ -42,7 +42,7 @@ print(line) #run MD -command = "ls -ld snapshot0* | awk '{ print $9 }' | tail -n1" +command = "ls -ld snapshot* | awk '{ print $9 }' | tail -n1" print(command) restart_file = subprocess.check_output(command,shell=True) restart_file=str(restart_file[:-1],'utf-8') diff --git a/tests/MD_MVP/md.cfg b/tests/MD_MVP/md.cfg index 83d5aa4c..e30a8807 100644 --- a/tests/MD_MVP/md.cfg +++ b/tests/MD_MVP/md.cfg @@ -34,6 +34,6 @@ solver=MVP nb_inner_it=1 mixing=1. 
[Restart] -input_filename=wave.out +input_filename=snapshotMVP input_level=3 output_level=3 diff --git a/tests/MD_MVP/test.py b/tests/MD_MVP/test.py index 489c9c8c..47b5462d 100755 --- a/tests/MD_MVP/test.py +++ b/tests/MD_MVP/test.py @@ -42,20 +42,20 @@ #run MD for i in range(2): - command = "ls -ld snapshot0* | awk '{ print $9 }' | tail -n1" + command = "ls -ld snapshot* | awk '{ print $9 }' | tail -n1" print(command) restart_file = subprocess.check_output(command,shell=True) restart_file=str(restart_file[:-1],'utf-8') print(restart_file) - os.rename(restart_file, 'wave.out') + os.rename(restart_file, 'snapshotMVP') #run MGmol command = "{} {} -c {} -i {}".format(mpicmd,exe,inp2,coords) output2 = subprocess.check_output(command,shell=True) #remove used restart files - shutil.rmtree('wave.out') + shutil.rmtree('snapshotMVP') #analyse mgmol standard output lines=output2.split(b'\n') @@ -81,7 +81,7 @@ sys.exit(1) #remove last restart files -command = "ls -ld snapshot0* | awk '{ print $9 }' | tail -n1" +command = "ls -ld snapshot* | awk '{ print $9 }' | tail -n1" restart_file = subprocess.check_output(command,shell=True) restart_file=str(restart_file[:-1],'utf-8') shutil.rmtree(restart_file) diff --git a/tests/RestartEnergyAndForces/h2o.xyz b/tests/RestartEnergyAndForces/h2o.xyz index cdc906f6..1616f79c 100644 --- a/tests/RestartEnergyAndForces/h2o.xyz +++ b/tests/RestartEnergyAndForces/h2o.xyz @@ -1,6 +1,5 @@ 3 -https://pubchem.ncbi.nlm.nih.gov/compound/Water -O 2.5369 -0.1550 0.0 -H 3.0739 0.1550 0.0 -H 2.0000 0.1550 0.0 +O 0.00 0.00 0.00 +H -0.76 0.59 0.00 +H 0.76 0.59 0.00 diff --git a/tests/RestartEnergyAndForces/mgmol.cfg b/tests/RestartEnergyAndForces/mgmol.cfg index e590f810..b96f3d9c 100644 --- a/tests/RestartEnergyAndForces/mgmol.cfg +++ b/tests/RestartEnergyAndForces/mgmol.cfg @@ -2,16 +2,16 @@ verbosity=2 xcFunctional=PBE FDtype=4th [Mesh] -nx=64 -ny=64 -nz=64 +nx=48 +ny=48 +nz=48 [Domain] -ox=-3.4 -oy=-6.4 -oz=-6.4 -lx=12.8 -ly=12.8 -lz=12.8 +ox=-4.5 +oy=-4.5 +oz=-4.5 +lx=9. +ly=9. +lz=9. [Potentials] pseudopotential=pseudo.O_ONCV_PBE_SG15 pseudopotential=pseudo.H_ONCV_PBE_SG15 diff --git a/tests/RestartEnergyAndForces/restart.cfg b/tests/RestartEnergyAndForces/restart.cfg index 99bc77d8..280778e9 100644 --- a/tests/RestartEnergyAndForces/restart.cfg +++ b/tests/RestartEnergyAndForces/restart.cfg @@ -2,16 +2,16 @@ verbosity=2 xcFunctional=PBE FDtype=4th [Mesh] -nx=64 -ny=64 -nz=64 +nx=48 +ny=48 +nz=48 [Domain] -ox=-3.4 -oy=-6.4 -oz=-6.4 -lx=12.8 -ly=12.8 -lz=12.8 +ox=-4.5 +oy=-4.5 +oz=-4.5 +lx=9. +ly=9. +lz=9. 
[Potentials] pseudopotential=pseudo.O_ONCV_PBE_SG15 pseudopotential=pseudo.H_ONCV_PBE_SG15 diff --git a/tests/RestartEnergyAndForces/test.py b/tests/RestartEnergyAndForces/test.py index b62d39f8..585722f0 100755 --- a/tests/RestartEnergyAndForces/test.py +++ b/tests/RestartEnergyAndForces/test.py @@ -64,7 +64,13 @@ shutil.rmtree('WF') test_energy=1.e18 +l=-1 for line in lines: + if line.count(b'Positions'): + l=0 + if l>=0 and l<4: + print(line) + l=l+1 if line.count(b'%%'): print(line) words=line.split() diff --git a/tests/RestartEnergyAndForces/testRestartEnergyAndForces.cc b/tests/RestartEnergyAndForces/testRestartEnergyAndForces.cc index e323afbc..3027ab55 100644 --- a/tests/RestartEnergyAndForces/testRestartEnergyAndForces.cc +++ b/tests/RestartEnergyAndForces/testRestartEnergyAndForces.cc @@ -124,6 +124,8 @@ int main(int argc, char** argv) } } + mgmol->setPositions(positions, anumbers); + Mesh* mymesh = Mesh::instance(); const pb::Grid& mygrid = mymesh->grid(); const pb::PEenv& myPEenv = mymesh->peenv(); @@ -165,6 +167,36 @@ int main(int argc, char** argv) projmatrices->setDMuniform(ct.getNelSpin(), 0); projmatrices->printDM(std::cout); + // swap H and O to make sure order of atoms in list does not matter + double x = positions[0]; + double y = positions[1]; + double z = positions[2]; + positions[0] = positions[3]; + positions[1] = positions[4]; + positions[2] = positions[5]; + positions[3] = x; + positions[4] = y; + positions[5] = z; + short tmp = anumbers[0]; + anumbers[0] = anumbers[1]; + anumbers[1] = tmp; + if (MPIdata::onpe0) + { + std::cout << "Positions:" << std::endl; + std::vector::iterator ita = anumbers.begin(); + for (std::vector::iterator it = positions.begin(); + it != positions.end(); it += 3) + { + std::cout << *ita; + for (int i = 0; i < 3; i++) + std::cout << " " << *(it + i); + std::cout << std::endl; + ita++; + } + } + + mgmol->setPositions(positions, anumbers); + // // evaluate energy and forces with wavefunctions just read // diff --git a/tests/RhoVhRestart/h2o.xyz b/tests/RhoVhRestart/h2o.xyz new file mode 100644 index 00000000..d5171c8b --- /dev/null +++ b/tests/RhoVhRestart/h2o.xyz @@ -0,0 +1,6 @@ +3 + +O 0.00 0.00 0.00 +H -0.76 0.59 0.00 +H 0.76 0.59 0.00 + diff --git a/tests/RhoVhRestart/md.cfg b/tests/RhoVhRestart/md.cfg new file mode 100644 index 00000000..2b8a378b --- /dev/null +++ b/tests/RhoVhRestart/md.cfg @@ -0,0 +1,30 @@ +verbosity=2 +xcFunctional=PBE +FDtype=4th +[Mesh] +nx=48 +ny=48 +nz=48 +[Domain] +ox=-4.5 +oy=-4.5 +oz=-4.5 +lx=9. +ly=9. +lz=9. +[Potentials] +pseudopotential=pseudo.O_ONCV_PBE_SG15 +pseudopotential=pseudo.H_ONCV_PBE_SG15 +[Run] +type=MD +[MD] +num_steps=5 +dt=40. +[Quench] +max_steps=24 +atol=1.e-8 +[Restart] +input_level=4 +input_filename=WF +output_level=4 +output_filename=WF_MD diff --git a/tests/RhoVhRestart/mgmol.cfg b/tests/RhoVhRestart/mgmol.cfg new file mode 100644 index 00000000..eee7f11c --- /dev/null +++ b/tests/RhoVhRestart/mgmol.cfg @@ -0,0 +1,28 @@ +verbosity=2 +xcFunctional=PBE +FDtype=4th +[Mesh] +nx=48 +ny=48 +nz=48 +[Domain] +ox=-4.5 +oy=-4.5 +oz=-4.5 +lx=9. +ly=9. +lz=9. 
+[Potentials] +pseudopotential=pseudo.O_ONCV_PBE_SG15 +pseudopotential=pseudo.H_ONCV_PBE_SG15 +[Run] +type=QUENCH +[Quench] +max_steps=120 +atol=1.e-8 +[Orbitals] +initial_type=Random +initial_width=1.5 +[Restart] +output_level=4 +output_filename=WF diff --git a/tests/RhoVhRestart/restart.cfg b/tests/RhoVhRestart/restart.cfg new file mode 100644 index 00000000..20f0293a --- /dev/null +++ b/tests/RhoVhRestart/restart.cfg @@ -0,0 +1,25 @@ +verbosity=2 +xcFunctional=PBE +FDtype=4th +[Mesh] +nx=48 +ny=48 +nz=48 +[Domain] +ox=-4.5 +oy=-4.5 +oz=-4.5 +lx=9. +ly=9. +lz=9. +[Potentials] +pseudopotential=pseudo.O_ONCV_PBE_SG15 +pseudopotential=pseudo.H_ONCV_PBE_SG15 +[Run] +type=QUENCH +[Quench] +max_steps=24 +atol=1.e-8 +[Restart] +input_level=4 +input_filename=WF_MD diff --git a/tests/RhoVhRestart/test.py b/tests/RhoVhRestart/test.py new file mode 100755 index 00000000..a34b962f --- /dev/null +++ b/tests/RhoVhRestart/test.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python +import sys +import os +import subprocess +import string + +print("Test test_rho_restart...") + +nargs=len(sys.argv) + +mpicmd = sys.argv[1]+" "+sys.argv[2]+" "+sys.argv[3] +for i in range(4,nargs-7): + mpicmd = mpicmd + " "+sys.argv[i] +print("MPI run command: {}".format(mpicmd)) + +mgmol_exe = sys.argv[nargs-7] +test_exe = sys.argv[nargs-6] +input1 = sys.argv[nargs-5] +input2 = sys.argv[nargs-4] +input3 = sys.argv[nargs-3] +coords = sys.argv[nargs-2] +print("coordinates file: %s"%coords) + +#create links to potentials files +dst1 = 'pseudo.H_ONCV_PBE_SG15' +src1 = sys.argv[-1] + '/' + dst1 + +dst2 = 'pseudo.O_ONCV_PBE_SG15' +src2 = sys.argv[-1] + '/' + dst2 + +if not os.path.exists(dst1): + print("Create link to %s"%dst1) + os.symlink(src1, dst1) + +if not os.path.exists(dst2): + print("Create link to %s"%dst2) + os.symlink(src2, dst2) + +#run mgmol to generate initial ground state +command = "{} {} -c {} -i {}".format(mpicmd,mgmol_exe,input1,coords) +print("Run command: {}".format(command)) + +output = subprocess.check_output(command,shell=True) +lines=output.split(b'\n') + +flag=0 +for line in lines: + if line.count(b'Run ended'): + flag=1 + +if flag==0: + print("Initial quench failed to complete!") + sys.exit(1) + +#run MD +command = "{} {} -c {} -i {}".format(mpicmd,mgmol_exe,input2,coords) +print("Run command: {}".format(command)) +output = subprocess.check_output(command,shell=True) +lines=output.split(b'\n') + +flag=0 +for line in lines: + if line.count(b'Run ended'): + flag=1 + +if flag==0: + print("MD failed to complete!") + sys.exit(1) + +#run test +command = "{} {} -c {} -i {}".format(mpicmd,test_exe,input3,coords) +print("Run command: {}".format(command)) +output = subprocess.check_output(command,shell=True) +lines=output.split(b'\n') +for line in lines: + print(line) + +print("Test SUCCESSFUL!") +sys.exit(0) diff --git a/tests/RhoVhRestart/testRhoVhRestart.cc b/tests/RhoVhRestart/testRhoVhRestart.cc new file mode 100644 index 00000000..09da3320 --- /dev/null +++ b/tests/RhoVhRestart/testRhoVhRestart.cc @@ -0,0 +1,220 @@ +// Copyright (c) 2017, Lawrence Livermore National Security, LLC and +// UT-Battelle, LLC. +// Produced at the Lawrence Livermore National Laboratory and the Oak Ridge +// National Laboratory. +// LLNL-CODE-743438 +// All rights reserved. +// This file is part of MGmol. For details, see https://github.com/llnl/mgmol. 
+// Please also read this link https://github.com/llnl/mgmol/LICENSE
+
+#include "Control.h"
+#include "Electrostatic.h"
+#include "ExtendedGridOrbitals.h"
+#include "LocGridOrbitals.h"
+#include "MGmol.h"
+#include "MGmol_MPI.h"
+#include "MPIdata.h"
+#include "Poisson.h"
+#include "Potentials.h"
+#include "mgmol_run.h"
+
+#include <cmath>
+#include <cstdio>
+#include <cstdlib>
+#include <ctime>
+#include <iostream>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <boost/program_options.hpp>
+namespace po = boost::program_options;
+
+template <class OrbitalsType>
+int testRhoRestart(MGmolInterface* mgmol_)
+{
+    MGmol_MPI& mmpi = *(MGmol_MPI::instance());
+    const int rank = mmpi.mypeGlobal();
+
+    MGmol<OrbitalsType>* mgmol = static_cast<MGmol<OrbitalsType>*>(mgmol_);
+    std::shared_ptr<Rho<OrbitalsType>> rho = mgmol->getRho();
+
+    /* save density from the restart file to elsewhere */
+    std::vector<RHODTYPE> rho0(rho->rho_[0].size());
+    rho0 = rho->rho_[0];
+
+    /* recompute rho from the orbital */
+    rho->update(*mgmol->getOrbitals());
+
+    /* check if the recomputed density is the same */
+    for (int d = 0; d < (int)rho0.size(); d++)
+    {
+        double error = std::abs(rho0[d] - rho->rho_[0][d]) / std::abs(rho0[d]);
+        if (error > 1e-10)
+        {
+            printf("rank %d, rho[%d]=%.15e, rho0[%d]=%.15e\n", rank, d,
+                rho->rho_[0][d], d, rho0[d]);
+            std::cerr << "Density is inconsistent!!!" << std::endl;
+            return -1;
+        }
+    }
+    if (rank == 0) std::cout << "Density is consistent..." << std::endl;
+
+    return 0;
+}
+
+template <class OrbitalsType>
+int testPotRestart(MGmolInterface* mgmol_)
+{
+    Control& ct = *(Control::instance());
+
+    Mesh* mymesh = Mesh::instance();
+    const pb::Grid& mygrid = mymesh->grid();
+
+    MGmol_MPI& mmpi = *(MGmol_MPI::instance());
+    const int rank = mmpi.mypeGlobal();
+
+    MGmol<OrbitalsType>* mgmol = static_cast<MGmol<OrbitalsType>*>(mgmol_);
+    Potentials& pot = mgmol->getHamiltonian()->potential();
+    Poisson* poisson = mgmol->electrostat_->getPoissonSolver();
+    std::shared_ptr<Rho<OrbitalsType>> rho = mgmol->getRho();
+
+    /* GridFunc initialization inputs */
+    short bc[3];
+    for (int d = 0; d < 3; d++)
+        bc[d] = ct.bcPoisson[d];
+
+    /* save potential from the restart file to elsewhere */
+    pb::GridFunc<POTDTYPE> vh0_gf(mygrid, bc[0], bc[1], bc[2]);
+    vh0_gf.assign((pot.vh_rho()).data(), 'd');
+    double n = vh0_gf.norm2();
+    std::cout << "Norm2 of vh = " << n << std::endl;
+
+    std::vector<POTDTYPE> vh0(pot.size());
+    const std::vector<POTDTYPE>& d_vhrho(pot.vh_rho());
+    for (int d = 0; d < (int)vh0.size(); d++)
+        vh0[d] = d_vhrho[d];
+
+    /* recompute potential */
+    pb::GridFunc<RHODTYPE> grho(mygrid, bc[0], bc[1], bc[2]);
+    grho.assign(&rho->rho_[0][0]);
+    pb::GridFunc<RHODTYPE>* grhoc = mgmol->electrostat_->getRhoc();
+
+    poisson->solve(grho, *grhoc);
+    const pb::GridFunc<POTDTYPE>& vh(poisson->vh());
+
+    pb::GridFunc<POTDTYPE> error_gf(vh0_gf);
+    error_gf -= vh;
+
+    double rel_error = error_gf.norm2() / vh0_gf.norm2();
+    if (rank == 0)
+    {
+        printf("FOM potential relative error: %.3e\n", rel_error);
+    }
+    if (rel_error > 1e-9)
+    {
+        if (rank == 0)
+        {
+            std::cerr << "Potential is inconsistent!!!" << std::endl;
+        }
+        return -1;
+    }
+    if (rank == 0) std::cout << "Potential is consistent..." << std::endl;
+
+    return 0;
+}
+
+int main(int argc, char** argv)
+{
+    int mpirc = MPI_Init(&argc, &argv);
+    if (mpirc != MPI_SUCCESS)
+    {
+        std::cerr << "MPI Initialization failed!!!" << std::endl;
+        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
+    }
+
+    MPI_Comm comm = MPI_COMM_WORLD;
+
+    /*
+     * Initialize general things, like magma, openmp, IO, ...
+ */ + mgmol_init(comm); + + /* + * read runtime parameters + */ + std::string input_filename(""); + std::string lrs_filename; + std::string constraints_filename(""); + + float total_spin = 0.; + bool with_spin = false; + + po::variables_map vm; + + // read from PE0 only + if (MPIdata::onpe0) + { + read_config(argc, argv, vm, input_filename, lrs_filename, + constraints_filename, total_spin, with_spin); + } + + MGmol_MPI::setup(comm, std::cout, with_spin); + MGmol_MPI& mmpi = *(MGmol_MPI::instance()); + MPI_Comm global_comm = mmpi.commGlobal(); + + /* + * Setup control struct with run time parameters + */ + Control::setup(global_comm, with_spin, total_spin); + Control& ct = *(Control::instance()); + + ct.setOptions(vm); + + int ret = ct.checkOptions(); + if (ret < 0) return ret; + + mmpi.bcastGlobal(input_filename); + mmpi.bcastGlobal(lrs_filename); + + int status = 0; + + // Enter main scope + { + MGmolInterface* mgmol = new MGmol(global_comm, + *MPIdata::sout, input_filename, lrs_filename, constraints_filename); + + mgmol->setup(); + + /* load a restart file */ + MGmol* mgmol_ext + = dynamic_cast*>(mgmol); + mgmol_ext->loadRestartFile(ct.restart_file); + + if (MPIdata::onpe0) + std::cout << "=============================" << std::endl; + if (MPIdata::onpe0) std::cout << "testRhoRestart..." << std::endl; + status = testRhoRestart(mgmol); + if (status < 0) return status; + + if (MPIdata::onpe0) + std::cout << "=============================" << std::endl; + if (MPIdata::onpe0) std::cout << "testPotRestart..." << std::endl; + status = testPotRestart(mgmol); + if (status < 0) return status; + + delete mgmol; + + } // close main scope + + mgmol_finalize(); + + mpirc = MPI_Finalize(); + if (mpirc != MPI_SUCCESS) + { + std::cerr << "MPI Finalize failed!!!" 
<< std::endl; + } + + time_t tt; + time(&tt); + if (onpe0) std::cout << " Run ended at " << ctime(&tt) << std::endl; + + return 0; +} diff --git a/tests/ShortSighted/test.py b/tests/ShortSighted/test.py index 27da7f7d..68a617aa 100755 --- a/tests/ShortSighted/test.py +++ b/tests/ShortSighted/test.py @@ -42,7 +42,7 @@ print(line) #run MD -command = "ls -ld snapshot0* | awk '{ print $9 }' | tail -n1" +command = "ls -ld snapshot* | awk '{ print $9 }' | tail -n1" print(command) restart_file = subprocess.check_output(command,shell=True) restart_file=str(restart_file[:-1],'utf-8') diff --git a/tests/testIons.cc b/tests/testIons.cc index 30b2ad0c..e23e0e24 100644 --- a/tests/testIons.cc +++ b/tests/testIons.cc @@ -8,6 +8,8 @@ int main(int argc, char** argv) { + int status = 0; + int mpirc = MPI_Init(&argc, &argv); MPI_Comm comm = MPI_COMM_WORLD; @@ -41,7 +43,7 @@ int main(int argc, char** argv) // read species info from pseudopotential file std::string file_path = argv[1]; std::string filename(file_path + "/pseudo.C_ONCV_PBE_SG15"); - std::cout << "Potential = " << filename << std::endl; + if (myrank == 0) std::cout << "Potential = " << filename << std::endl; sp.read_1species(filename); sp.set_dim_nl(h[0]); @@ -80,25 +82,110 @@ int main(int argc, char** argv) ions.setup(); - std::vector& new_local_ions(ions.local_ions()); + // verify sum of local ions adds up to total number of ions + { + std::vector& new_local_ions(ions.local_ions()); + + int nlocal = new_local_ions.size(); + std::cout << "PE " << myrank << ", nlocal = " << nlocal << std::endl; + + int ntotal = 0; + MPI_Allreduce(&nlocal, &ntotal, 1, MPI_INT, MPI_SUM, comm); + if (ntotal != na) + { + std::cout << "ntotal = " << ntotal << std::endl; + status = 1; + } + } + MPI_Barrier(MPI_COMM_WORLD); - int nlocal = new_local_ions.size(); - std::cout << "PE " << myrank << ", nlocal = " << nlocal << std::endl; + // verify some functionalities of class Ions + { + std::vector positions; + std::vector anumbers; + ions.getPositions(positions); + ions.getAtomicNumbers(anumbers); + if (myrank == 0) + { + int i = 0; + for (auto& position : positions) + { + std::cout << position; + if (i % 3 == 2) + std::cout << std::endl; + else + std::cout << " "; + i++; + } + } + MPI_Barrier(MPI_COMM_WORLD); + + // swap x and z + for (size_t i = 0; i < positions.size() - 2; i++) + { + double x = positions[i]; + double z = positions[i + 2]; + positions[i] = z; + positions[i + 2] = x; + } + + ions.setPositions(positions, anumbers); + } - int ntotal = 0; - MPI_Allreduce(&nlocal, &ntotal, 1, MPI_INT, MPI_SUM, comm); - mpirc = MPI_Finalize(); - if (mpirc != MPI_SUCCESS) + MPI_Barrier(MPI_COMM_WORLD); { - std::cerr << "MPI Finalize failed!!!" << std::endl; - return 1; + std::vector& new_local_ions(ions.local_ions()); + + int nlocal = new_local_ions.size(); + std::cout << "PE " << myrank << ", nlocal = " << nlocal << std::endl; + + int ntotal = 0; + MPI_Allreduce(&nlocal, &ntotal, 1, MPI_INT, MPI_SUM, comm); + if (ntotal != na) + { + std::cerr << "ntotal = " << ntotal << std::endl; + MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); + } } - if (ntotal != na) + // get the names of all the ions + std::vector names; + ions.getNames(names); + if (myrank == 0) + for (auto& name : names) + std::cout << "name = " << name << std::endl; + if (names.size() != na) { - std::cout << "ntotal = " << ntotal << std::endl; - return 1; + std::cerr << "Incorrect count of names..." 
<< std::endl;
+        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
+    }
+    MPI_Barrier(MPI_COMM_WORLD);
+
+    std::vector<double> forces(3 * na);
+    // arbitrary value
+    const double fval = 1.12;
+    for (auto& f : forces)
+        f = fval;
+    ions.setLocalForces(forces, names);
+
+    int nlocal = ions.getNumLocIons();
+    std::vector<double> lforces(3 * nlocal);
+    ions.getLocalForces(lforces);
+    for (auto& f : lforces)
+    {
+        if (std::abs(f - fval) > 1.e-14)
+        {
+            std::cerr << "f = " << f << std::endl;
+            MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
+        }
+    }
+
+    mpirc = MPI_Finalize();
+    if (mpirc != MPI_SUCCESS)
+    {
+        std::cerr << "MPI Finalize failed!!!" << std::endl;
+        status = 1;
     }
-    return 0;
+    return status;
 }
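A closing note on the comparison pattern used by the new tests above (testRhoVhRestart and the extended testIons): recomputed quantities are compared against stored values with explicit tolerances, and those comparisons must use std::abs from <cmath> on floating-point values; the C abs() truncates its argument to int and silently hides small discrepancies. A self-contained illustration of the relative-error check, with names and tolerance chosen here only for illustration:

#include <cmath>
#include <cstdio>
#include <vector>

// Returns true if every entry of 'computed' matches 'reference' to within a
// relative tolerance 'tol' (reference entries are assumed to be non-zero).
bool checkRelativeError(const std::vector<double>& reference,
    const std::vector<double>& computed, const double tol)
{
    for (size_t i = 0; i < reference.size(); i++)
    {
        const double err
            = std::abs(reference[i] - computed[i]) / std::abs(reference[i]);
        if (err > tol)
        {
            std::printf("entry %zu: reference=%.15e, computed=%.15e\n", i,
                reference[i], computed[i]);
            return false;
        }
    }
    return true;
}

int main()
{
    const std::vector<double> reference = { 1.0, 2.0, 3.0 };
    const std::vector<double> computed  = { 1.0, 2.0 + 1.e-12, 3.0 };
    return checkRelativeError(reference, computed, 1.e-10) ? 0 : 1;
}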