Commit MetaInfo

Revision: 493bc212a1af46fcb3e7391c0b95af40dea73a2a (tree)
Time: 2013-10-04 19:26:37
Author: Mikiya Fujii <mikiya.fujii@gmai...>
Committer: Mikiya Fujii

Log Message

MPI use in ZindoS is refactored. #31814

git-svn-id: https://svn.sourceforge.jp/svnroot/molds/trunk@1535 1136aad2-a195-0410-b898-f5ea1d11b9d8
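
Every hunk in the diff below applies the same refactoring: the arguments of the asynchronous gather calls (buffer pointer, element count, peer rank, tag) are hoisted into named locals (buff, num, source, dest), asyncCommunicator.SetRecvedVector/SetSentVector collapse onto one line each, and the final MpiProcess::Broadcast likewise takes pre-computed buff and num variables. The overall communication pattern, gather the MPI-distributed rows on mpiHeadRank and then broadcast the assembled matrix to all ranks, can be sketched with plain MPI calls as follows. The sketch is illustrative only: it uses raw MPI_Isend/MPI_Irecv/MPI_Bcast instead of MolDS's AsyncCommunicator and MpiProcess wrappers, and the row-cyclic work distribution (calcRank = mu % mpiSize) is an assumption, not something taken from the diff.

// Illustrative sketch only (not MolDS code): gather-then-broadcast with raw MPI.
// Names such as fockMatrix, totalNumberAOs and mpiHeadRank mirror the diff below;
// the row distribution calcRank = mu % mpiSize is an assumption for this example.
#include <mpi.h>
#include <vector>

void GatherAndBroadcastFockRows(double** fockMatrix, int totalNumberAOs,
                                int mpiRank, int mpiSize, int mpiHeadRank) {
   std::vector<MPI_Request> requests;
   requests.reserve(totalNumberAOs);
   for (int mu = 0; mu < totalNumberAOs; mu++) {
      int calcRank = mu % mpiSize;          // assumed owner of row mu
      int tag      = mu;                    // one tag per row, as in the diff
      double* buff = &fockMatrix[mu][mu];   // upper-triangular part of row mu
      int num      = totalNumberAOs - mu;
      if (mpiRank == mpiHeadRank && mpiRank != calcRank) {
         // head rank posts a non-blocking receive for a row computed elsewhere
         requests.push_back(MPI_Request());
         MPI_Irecv(buff, num, MPI_DOUBLE, calcRank, tag, MPI_COMM_WORLD, &requests.back());
      }
      if (mpiRank != mpiHeadRank && mpiRank == calcRank) {
         // owning rank sends its row to the head rank without blocking
         requests.push_back(MPI_Request());
         MPI_Isend(buff, num, MPI_DOUBLE, mpiHeadRank, tag, MPI_COMM_WORLD, &requests.back());
      }
   }
   // wait for all pending transfers (the role played by communicationThread.join())
   MPI_Waitall(static_cast<int>(requests.size()), requests.data(), MPI_STATUSES_IGNORE);
   // head rank now holds the full matrix; broadcast it to every rank
   double* buff = &fockMatrix[0][0];        // assumes one contiguous allocation
   int num      = totalNumberAOs * totalNumberAOs;
   MPI_Bcast(buff, num, MPI_DOUBLE, mpiHeadRank, MPI_COMM_WORLD);
}

In the actual code the sends and receives are serviced by a dedicated communication thread while the compute loop keeps running, which is why the diff only registers buffers with SetSentVector/SetRecvedVector and joins communicationThread afterwards instead of calling MPI directly.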

Change Summary

Incremental Difference

--- a/src/cndo/Cndo2.cpp
+++ b/src/cndo/Cndo2.cpp
@@ -1437,26 +1437,24 @@ void Cndo2::CalcFockMatrix(double** fockMatrix,
          } // end of if(mpiRank == calcRank)

          // set data to gather in mpiHeadRank with asynchronous MPI
-         int tag = mu;
-         int source = calcRank;
-         int dest = mpiHeadRank;
+         int tag = mu;
+         int source = calcRank;
+         int dest = mpiHeadRank;
+         double* buff = &fockMatrix[mu][mu];
+         MolDS_mpi::molds_mpi_int num = totalNumberAOs-mu;
          if(mpiRank == mpiHeadRank && mpiRank != calcRank){
-            asyncCommunicator.SetRecvedVector(&fockMatrix[mu][mu],
-                                              totalNumberAOs-mu,
-                                              source,
-                                              tag);
+            asyncCommunicator.SetRecvedVector(buff, num, source, tag);
          }
          if(mpiRank != mpiHeadRank && mpiRank == calcRank){
-            asyncCommunicator.SetSentVector(&fockMatrix[mu][mu],
-                                            totalNumberAOs-mu,
-                                            dest,
-                                            tag);
+            asyncCommunicator.SetSentVector(buff, num, dest, tag);
          }
       } // end of loop mu parallelized with MPI
    } // end of loop A
    // Delete the communication thread.
    communicationThread.join();
-   MolDS_mpi::MpiProcess::GetInstance()->Broadcast(&fockMatrix[0][0], totalNumberAOs*totalNumberAOs, mpiHeadRank);
+   double* buff = &fockMatrix[0][0];
+   MolDS_mpi::molds_mpi_int num = totalNumberAOs*totalNumberAOs;
+   MolDS_mpi::MpiProcess::GetInstance()->Broadcast(buff, num, mpiHeadRank);

    /*
    this->OutputLog("fock matrix\n");
@@ -1669,24 +1667,22 @@ void Cndo2::CalcGammaAB(double** gammaAB, const Molecule& molecule) const{
       } // end of if(mpiRank==calcRank)

       // set data to gater in mpiHeadRank with asynchronous MPI
-      int tag = A;
-      int source = calcRank;
-      int dest = mpiHeadRank;
+      int tag = A;
+      int source = calcRank;
+      int dest = mpiHeadRank;
+      double* buff = &gammaAB[A][A];
+      MolDS_mpi::molds_mpi_int num = totalAtomNumber-A;
       if(mpiRank == mpiHeadRank && mpiRank != calcRank){
-         asyncCommunicator.SetRecvedVector(&gammaAB[A][A],
-                                           totalAtomNumber-A,
-                                           source,
-                                           tag);
+         asyncCommunicator.SetRecvedVector(buff, num, source, tag);
       }
       if(mpiRank != mpiHeadRank && mpiRank == calcRank){
-         asyncCommunicator.SetSentVector(&gammaAB[A][A],
-                                         totalAtomNumber-A,
-                                         dest,
-                                         tag);
+         asyncCommunicator.SetSentVector(buff, num, dest, tag);
       }
    } // end of loop A prallelized by MPI
    communicationThread.join();
-   MolDS_mpi::MpiProcess::GetInstance()->Broadcast(&gammaAB[0][0], totalAtomNumber*totalAtomNumber, mpiHeadRank);
+   double* buff = &gammaAB[0][0];
+   MolDS_mpi::molds_mpi_int num = totalAtomNumber*totalAtomNumber;
+   MolDS_mpi::MpiProcess::GetInstance()->Broadcast(buff, num, mpiHeadRank);

    #pragma omp parallel for schedule(auto)
    for(int A=0; A<totalAtomNumber; A++){
@@ -1836,43 +1832,31 @@ void Cndo2::CalcCartesianMatrixByGTOExpansion(double*** cartesianMatrix,
       } // end lof if(mpiRank == calcRank)

       // set data to gater in mpiHeadRank with asynchronous MPI
-      int tagX = A* CartesianType_end + XAxis;
-      int tagY = A* CartesianType_end + YAxis;
-      int tagZ = A* CartesianType_end + ZAxis;
-      int source = calcRank;
-      int dest = mpiHeadRank;
+      int tagX = A* CartesianType_end + XAxis;
+      int tagY = A* CartesianType_end + YAxis;
+      int tagZ = A* CartesianType_end + ZAxis;
+      int source = calcRank;
+      int dest = mpiHeadRank;
+      double* buffX = &cartesianMatrix[XAxis][firstAOIndexA][0];
+      double* buffY = &cartesianMatrix[YAxis][firstAOIndexA][0];
+      double* buffZ = &cartesianMatrix[ZAxis][firstAOIndexA][0];
+      MolDS_mpi::molds_mpi_int num = numValenceAOsA*totalAONumber;
       if(mpiRank == mpiHeadRank && mpiRank != calcRank){
-         asyncCommunicator.SetRecvedVector(&cartesianMatrix[XAxis][firstAOIndexA][0],
-                                           numValenceAOsA*totalAONumber,
-                                           source,
-                                           tagX);
-         asyncCommunicator.SetRecvedVector(&cartesianMatrix[YAxis][firstAOIndexA][0],
-                                           numValenceAOsA*totalAONumber,
-                                           source,
-                                           tagY);
-         asyncCommunicator.SetRecvedVector(&cartesianMatrix[ZAxis][firstAOIndexA][0],
-                                           numValenceAOsA*totalAONumber,
-                                           source,
-                                           tagZ);
+         asyncCommunicator.SetRecvedVector(buffX, num, source, tagX);
+         asyncCommunicator.SetRecvedVector(buffY, num, source, tagY);
+         asyncCommunicator.SetRecvedVector(buffZ, num, source, tagZ);
       }
       if(mpiRank != mpiHeadRank && mpiRank == calcRank){
-         asyncCommunicator.SetSentVector(&cartesianMatrix[XAxis][firstAOIndexA][0],
-                                         numValenceAOsA*totalAONumber,
-                                         dest,
-                                         tagX);
-         asyncCommunicator.SetSentVector(&cartesianMatrix[YAxis][firstAOIndexA][0],
-                                         numValenceAOsA*totalAONumber,
-                                         dest,
-                                         tagY);
-         asyncCommunicator.SetSentVector(&cartesianMatrix[ZAxis][firstAOIndexA][0],
-                                         numValenceAOsA*totalAONumber,
-                                         dest,
-                                         tagZ);
+         asyncCommunicator.SetSentVector(buffX, num, dest, tagX);
+         asyncCommunicator.SetSentVector(buffY, num, dest, tagY);
+         asyncCommunicator.SetSentVector(buffZ, num, dest, tagZ);
       }
    } // end of loop for int A with MPI
    // Delete the communication thread.
    communicationThread.join();
-   MolDS_mpi::MpiProcess::GetInstance()->Broadcast(&cartesianMatrix[0][0][0], CartesianType_end*totalAONumber*totalAONumber, mpiHeadRank);
+   double* buff = &cartesianMatrix[0][0][0];
+   MolDS_mpi::molds_mpi_int num = CartesianType_end*totalAONumber*totalAONumber;
+   MolDS_mpi::MpiProcess::GetInstance()->Broadcast(buff, num, mpiHeadRank);

    /*
    // communication to collect all matrix data on head-rank
@@ -3976,24 +3960,22 @@ void Cndo2::CalcOverlapAOs(double** overlapAOs, const Molecule& molecule) const{
       } // end of if(mpiRank == calcRnak)

       // set data to gather in mpiHeadRank with asynchronous MPI
-      int tag = A;
-      int source = calcRank;
-      int dest = mpiHeadRank;
+      int tag = A;
+      int source = calcRank;
+      int dest = mpiHeadRank;
+      double* buff = overlapAOs[firstAOIndexA];
+      MolDS_mpi::molds_mpi_int num = totalAONumber*numValenceAOs;
       if(mpiRank == mpiHeadRank && mpiRank != calcRank){
-         asyncCommunicator.SetRecvedVector(overlapAOs[firstAOIndexA],
-                                           totalAONumber*numValenceAOs,
-                                           source,
-                                           tag);
+         asyncCommunicator.SetRecvedVector(buff, num, source, tag);
       }
       if(mpiRank != mpiHeadRank && mpiRank == calcRank){
-         asyncCommunicator.SetSentVector(overlapAOs[firstAOIndexA],
-                                         totalAONumber*numValenceAOs,
-                                         dest,
-                                         tag);
+         asyncCommunicator.SetSentVector(buff, num, dest, tag);
       }
    } // end of loop A parallelized with MPI
    communicationThread.join();
-   MolDS_mpi::MpiProcess::GetInstance()->Broadcast(&overlapAOs[0][0], totalAONumber*totalAONumber, mpiHeadRank);
+   double* buff = &overlapAOs[0][0];
+   MolDS_mpi::molds_mpi_int num = totalAONumber*totalAONumber;
+   MolDS_mpi::MpiProcess::GetInstance()->Broadcast(buff, num, mpiHeadRank);

    #pragma omp parallel for schedule(auto)
    for(int mu=0; mu<totalAONumber; mu++){
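
A side note on the single Broadcast calls above: passing &fockMatrix[0][0], &gammaAB[0][0], &cartesianMatrix[0][0][0] or &overlapAOs[0][0] together with the full element count is only valid when the double**/double*** matrices are backed by one contiguous block of memory, which MolDS's matrix allocation is assumed to guarantee here. A minimal sketch of such a layout (an illustration, not the project's allocator):

// Illustrative sketch only: a double** view over one contiguous block, so that
// &m[0][0] with n*n elements is a valid buffer for a single broadcast.
#include <vector>

class ContiguousMatrix {
public:
   explicit ContiguousMatrix(int n)
      : storage(static_cast<size_t>(n) * n, 0.0), rows(n) {
      for (int i = 0; i < n; i++) {
         rows[i] = storage.data() + static_cast<size_t>(i) * n;  // row i starts right after row i-1
      }
   }
   double** data() { return rows.data(); }  // usable wherever a double** is expected
private:
   std::vector<double>  storage;  // the single flat allocation holding all n*n values
   std::vector<double*> rows;     // per-row pointers into that allocation
};

With that layout, &matrix[0][0] addresses all n*n doubles in order, matching the element counts (totalNumberAOs*totalNumberAOs, totalAtomNumber*totalAtomNumber, and so on) passed to Broadcast in the hunks above.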