Skip to content

Commit

Permalink
Merged in extendBandParallelization (pull request #161)
Browse files Browse the repository at this point in the history
Extended band parallelization to XtHX, filling overlap matrix, subspace rotation and compute rho. Added relevant ctests. ctests passed

Approved-by: Phani Motamarri <[email protected]>
  • Loading branch information
dsambit committed Jun 22, 2018
2 parents 586274b + b048961 commit 5aaeaf9
Show file tree
Hide file tree
Showing 23 changed files with 904 additions and 400 deletions.
11 changes: 6 additions & 5 deletions include/dft.h
Original file line number Diff line number Diff line change
Expand Up @@ -296,12 +296,13 @@ namespace dftfe {
void computeNodalRhoFromQuadData();

/**
* sums rho cell quadrature data from all kpoint pools
* sums rho cell quadrature data from inter communicator
*/
void sumRhoDataKPointPools(std::map<dealii::CellId, std::vector<double> > * rhoValues,
std::map<dealii::CellId, std::vector<double> > * gradRhoValues,
std::map<dealii::CellId, std::vector<double> > * rhoValuesSpinPolarized,
std::map<dealii::CellId, std::vector<double> > * gradRhoValuesSpinPolarized);
void sumRhoData(std::map<dealii::CellId, std::vector<double> > * rhoValues,
std::map<dealii::CellId, std::vector<double> > * gradRhoValues,
std::map<dealii::CellId, std::vector<double> > * rhoValuesSpinPolarized,
std::map<dealii::CellId, std::vector<double> > * gradRhoValuesSpinPolarized,
const MPI_Comm &interComm);

/**
* resize and allocate table storage for rho cell quadrature data
Expand Down
1 change: 1 addition & 0 deletions include/dftParameters.h
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,7 @@ namespace dftfe {
extern bool enableSwitchToGS;
extern unsigned int nbandGrps;
extern bool computeEnergyEverySCF;
extern unsigned int scalapackParalProcs;

/**
* Declare parameters.
Expand Down
14 changes: 13 additions & 1 deletion include/dftUtils.h
Original file line number Diff line number Diff line change
Expand Up @@ -53,14 +53,26 @@ namespace dftfe {
*
* @param dataOut DataOut class object
* @param intrapoolcomm mpi communicator of domain decomposition inside each pool
* @param interpoolcomm mpi communicator across pools
* @param interpoolcomm mpi communicator across k point pools
* @param interBandGroupComm mpi communicator across band groups
* @param fileName
*/
void writeDataVTUParallelLowestPoolId(const dealii::DataOut<3> & dataOut,
const MPI_Comm & intrapoolcomm,
const MPI_Comm & interpoolcomm,
const MPI_Comm &interBandGroupComm,
const std::string & fileName);

/** @brief Create index vector which is used for band parallelization
*
* @param[in] interBandGroupComm mpi communicator across band groups
* @param[in] numBands
* @param[out] bandGroupLowHighPlusOneIndices
*/
void createBandParallelizationIndices(const MPI_Comm &interBandGroupComm,
const unsigned int numBands,
std::vector<unsigned int> & bandGroupLowHighPlusOneIndices);

/**
* A class to split the given communicator into a number of pools
*/
Expand Down
6 changes: 5 additions & 1 deletion include/linearAlgebraOperations.h
Original file line number Diff line number Diff line change
Expand Up @@ -149,12 +149,14 @@ namespace dftfe
* @param[in,out] X Given subspace as flattened array of multi-vectors.
* In-place update of the given subspace
* @param[in] numberComponents Number of multiple-fields
* @param[in] interBandGroupComm inter band group communicator for parallelization over band groups
*
* @return flag indicating success/failure. 1 for failure, 0 for success
*/
template<typename T>
unsigned int pseudoGramSchmidtOrthogonalization(dealii::parallel::distributed::Vector<T> & X,
const unsigned int numberComponents);
const unsigned int numberComponents,
const MPI_Comm &interBandGroupComm);

/** @brief Compute Rayleigh-Ritz projection
*
Expand All @@ -176,12 +178,14 @@ namespace dftfe
* @param[in,out] X Given subspace as flattened array of multi-vectors.
* In-place rotated subspace
* @param[in] numberComponents Number of multiple-fields
* @param[in] interBandGroupComm inter band group communicator for parallelization over band groups
* @param[out] eigenValues of the Projected Hamiltonian
*/
template<typename T>
void rayleighRitz(operatorDFTClass & operatorMatrix,
dealii::parallel::distributed::Vector<T> & X,
const unsigned int numberComponents,
const MPI_Comm &interBandGroupComm,
std::vector<double> & eigenValues);

/** @brief Compute Compute residual norm associated with eigenValue problem of the given operator
Expand Down
13 changes: 12 additions & 1 deletion include/linearAlgebraOperationsInternal.h
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,6 @@ namespace dftfe
*/
void createProcessGridSquareMatrix(const MPI_Comm & mpi_communicator,
const unsigned size,
const unsigned int rowsBlockSize,
std::shared_ptr< const dealii::Utilities::MPI::ProcessGrid> & processGrid);


Expand All @@ -50,6 +49,16 @@ namespace dftfe
std::map<unsigned int, unsigned int> & globalToLocalRowIdMap,
std::map<unsigned int, unsigned int> & globalToLocalColumnIdMap);


/** @brief MPI all-reduce of ScaLAPACKMat across a given inter communicator.
* Used for band parallelization.
*
*/
template<typename T>
void sumAcrossInterCommScaLAPACKMat(const std::shared_ptr< const dealii::Utilities::MPI::ProcessGrid> & processGrid,
dealii::ScaLAPACKMatrix<T> & mat,
const MPI_Comm &interComm);

/** @brief Computes S=X^{T}*X and stores in a parallel ScaLAPACK matrix.
* X^{T} is the subspaceVectorsArray in the column major format. S is the
* overlapMatPar.
Expand All @@ -63,6 +72,7 @@ namespace dftfe
void fillParallelOverlapMatrix(const dealii::parallel::distributed::Vector<T> & X,
const unsigned int numberVectors,
const std::shared_ptr< const dealii::Utilities::MPI::ProcessGrid> & processGrid,
const MPI_Comm &interBandGroupComm,
dealii::ScaLAPACKMatrix<T> & overlapMatPar);

/** @brief Computes X^{T}=Q*X^{T} inplace. X^{T} is the subspaceVectorsArray in the column major
Expand All @@ -77,6 +87,7 @@ namespace dftfe
void subspaceRotation(dealii::parallel::distributed::Vector<T> & subspaceVectorsArray,
const unsigned int numberSubspaceVectors,
const std::shared_ptr< const dealii::Utilities::MPI::ProcessGrid> & processGrid,
const MPI_Comm &interBandGroupComm,
const dealii::ScaLAPACKMatrix<T> & rotationMatPar,
const bool rotationMatTranspose=false);

Expand Down
16 changes: 12 additions & 4 deletions include/triangulationManager.h
Original file line number Diff line number Diff line change
Expand Up @@ -157,13 +157,17 @@ namespace dftfe {
* @param [input]nComponents number of components of the dofHandler on which solution
* vectors are based upon
* @param [input]solutionVectors vector of parallel distributed solution vectors to be serialized
* @param [input]interpoolComm interpool communicator to ensure serialization happens only in pool
* @param [input]interpoolComm This communicator is used to ensure serialization
* happens only in k point pool
* @param [input]interBandGroupComm This communicator is used to ensure serialization
* happens only in the band group
*/
void saveTriangulationsSolutionVectors
(const unsigned int feOrder,
const unsigned int nComponents,
const std::vector< const dealii::parallel::distributed::Vector<double> * > & solutionVectors,
const MPI_Comm & interpoolComm);
const MPI_Comm & interpoolComm,
const MPI_Comm &interBandGroupComm);

/**
* @brief de-serialize the triangulations and the associated solution vectors
Expand All @@ -183,11 +187,15 @@ namespace dftfe {
* @brief serialize the triangulations and the associated cell quadrature data container
*
* @param [input]cellQuadDataContainerIn container of input cell quadrature data to be serialized
* @param [input]interpoolComm interpool communicator to ensure serialization happens only in pool
* @param [input]interpoolComm This communicator is used to ensure serialization
* happens only in k point pool
* @param [input]interBandGroupComm This communicator is used to ensure serialization
* happens only in the band group
*/
void saveTriangulationsCellQuadData
(const std::vector<const std::map<dealii::CellId, std::vector<double> > *> & cellQuadDataContainerIn,
const MPI_Comm & interpoolComm);
const MPI_Comm & interpoolComm,
const MPI_Comm &interBandGroupComm);

/**
* @brief de-serialize the triangulations and the associated cell quadrature data container
Expand Down
Loading

0 comments on commit 5aaeaf9

Please sign in to comment.