
Commit MetaInfo

Revision: 8c5c2c662e821c33edad4f9f17a1abc96c25950c (tree)
Time: 2013-10-09 14:15:29
Author: Mikiya Fujii <mikiya.fujii@gmai...>
Committer: Mikiya Fujii

Log Message

AsyncCommunicator is refactored: Rename of methods. #31814

git-svn-id: https://svn.sourceforge.jp/svnroot/molds/trunk@1538 1136aad2-a195-0410-b898-f5ea1d11b9d8

Change Summary

   src/cndo/Cndo2.cpp
   src/mndo/Mndo.cpp
   src/mpi/AsyncCommunicator.h
   src/zindo/ZindoS.cpp

Incremental Difference

--- a/src/cndo/Cndo2.cpp
+++ b/src/cndo/Cndo2.cpp
@@ -1444,10 +1444,10 @@ void Cndo2::CalcFockMatrix(double** fockMatrix,
          double* buff = &fockMatrix[mu][mu];
          MolDS_mpi::molds_mpi_int num = totalNumberAOs-mu;
          if(mpiRank == mpiHeadRank && mpiRank != calcRank){
-            asyncCommunicator.SetRecvedVector(buff, num, source, tag);
+            asyncCommunicator.SetRecvedMessage(buff, num, source, tag);
          }
          if(mpiRank != mpiHeadRank && mpiRank == calcRank){
-            asyncCommunicator.SetSentVector(buff, num, dest, tag);
+            asyncCommunicator.SetSentMessage(buff, num, dest, tag);
          }
       } // end of loop mu parallelized with MPI
    } // end of loop A
@@ -1672,10 +1672,10 @@ void Cndo2::CalcGammaAB(double** gammaAB, const Molecule& molecule) const{
          double* buff = &gammaAB[A][A];
          MolDS_mpi::molds_mpi_int num = totalAtomNumber-A;
          if(mpiRank == mpiHeadRank && mpiRank != calcRank){
-            asyncCommunicator.SetRecvedVector(buff, num, source, tag);
+            asyncCommunicator.SetRecvedMessage(buff, num, source, tag);
          }
          if(mpiRank != mpiHeadRank && mpiRank == calcRank){
-            asyncCommunicator.SetSentVector(buff, num, dest, tag);
+            asyncCommunicator.SetSentMessage(buff, num, dest, tag);
          }
       } // end of loop A prallelized by MPI
       communicationThread.join();
@@ -1841,14 +1841,14 @@ void Cndo2::CalcCartesianMatrixByGTOExpansion(double*** cartesianMatrix,
          double* buffZ = &cartesianMatrix[ZAxis][firstAOIndexA][0];
          MolDS_mpi::molds_mpi_int num = numValenceAOsA*totalAONumber;
          if(mpiRank == mpiHeadRank && mpiRank != calcRank){
-            asyncCommunicator.SetRecvedVector(buffX, num, source, tagX);
-            asyncCommunicator.SetRecvedVector(buffY, num, source, tagY);
-            asyncCommunicator.SetRecvedVector(buffZ, num, source, tagZ);
+            asyncCommunicator.SetRecvedMessage(buffX, num, source, tagX);
+            asyncCommunicator.SetRecvedMessage(buffY, num, source, tagY);
+            asyncCommunicator.SetRecvedMessage(buffZ, num, source, tagZ);
          }
          if(mpiRank != mpiHeadRank && mpiRank == calcRank){
-            asyncCommunicator.SetSentVector(buffX, num, dest, tagX);
-            asyncCommunicator.SetSentVector(buffY, num, dest, tagY);
-            asyncCommunicator.SetSentVector(buffZ, num, dest, tagZ);
+            asyncCommunicator.SetSentMessage(buffX, num, dest, tagX);
+            asyncCommunicator.SetSentMessage(buffY, num, dest, tagY);
+            asyncCommunicator.SetSentMessage(buffZ, num, dest, tagZ);
          }
       } // end of loop for int A with MPI
       // Delete the communication thread.
@@ -3965,10 +3965,10 @@ void Cndo2::CalcOverlapAOs(double** overlapAOs, const Molecule& molecule) const{
          double* buff = overlapAOs[firstAOIndexA];
          MolDS_mpi::molds_mpi_int num = totalAONumber*numValenceAOs;
          if(mpiRank == mpiHeadRank && mpiRank != calcRank){
-            asyncCommunicator.SetRecvedVector(buff, num, source, tag);
+            asyncCommunicator.SetRecvedMessage(buff, num, source, tag);
          }
          if(mpiRank != mpiHeadRank && mpiRank == calcRank){
-            asyncCommunicator.SetSentVector(buff, num, dest, tag);
+            asyncCommunicator.SetSentMessage(buff, num, dest, tag);
          }
       } // end of loop A parallelized with MPI
       communicationThread.join();
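
For reference, the call sites changed in Cndo2.cpp all follow the same head/worker hand-off, sketched below with the renamed methods. This is a minimal illustration that assumes the surrounding MolDS objects (asyncCommunicator, communicationThread, fockMatrix, the rank variables, and the source/dest/tag values set up by code not shown in this diff); the loop and the work assignment are hypothetical placeholders, not the actual Cndo2 code.

// Illustrative sketch only -- loop bounds and the calcRank assignment are assumed.
for(int mu = 0; mu < totalNumberAOs; mu++){
   int calcRank = mu % mpiSize;                        // hypothetical work assignment
   double* buff = &fockMatrix[mu][mu];
   MolDS_mpi::molds_mpi_int num = totalNumberAOs - mu;
   if(mpiRank == mpiHeadRank && mpiRank != calcRank){
      // The head rank queues a receive for a row computed on another rank.
      asyncCommunicator.SetRecvedMessage(buff, num, source, tag);
   }
   if(mpiRank != mpiHeadRank && mpiRank == calcRank){
      // The rank that computed the row queues the matching send to the head rank.
      asyncCommunicator.SetSentMessage(buff, num, dest, tag);
   }
}
communicationThread.join();   // let the communication thread drain the queued messages
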
--- a/src/mndo/Mndo.cpp
+++ b/src/mndo/Mndo.cpp
@@ -3515,7 +3515,7 @@ void Mndo::CalcTwoElecTwoCore(double****** twoElecTwoCore,
          OrbitalType twoElecLimit = dxy;
          int numBuff = (twoElecLimit+1)*twoElecLimit/2;
          int num = (totalNumberAtoms-b)*numBuff*numBuff;
-         asyncCommunicator.SetBroadcastedVector(&this->twoElecTwoCoreMpiBuff[a][b][0][0], num, calcRank);
+         asyncCommunicator.SetBroadcastedMessage(&this->twoElecTwoCoreMpiBuff[a][b][0][0], num, calcRank);
       }
    } // end of loop a parallelized with MPI
    communicationThread.join();
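
The Mndo.cpp hunk is the broadcast variant of the same rename. A minimal sketch of the call, assuming (as the signature shown in AsyncCommunicator.h below suggests) that calcRank is passed as the broadcast root and that the buffer and count come from the surrounding code:

// Illustrative sketch only -- buffer and size taken from the hunk above.
double* buff = &this->twoElecTwoCoreMpiBuff[a][b][0][0];
asyncCommunicator.SetBroadcastedMessage(buff, num, calcRank);   // calcRank acts as the root
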
--- a/src/mpi/AsyncCommunicator.h
+++ b/src/mpi/AsyncCommunicator.h
@@ -33,23 +33,23 @@ public:
       while(0<passingTimes){
          boost::mutex::scoped_lock lk(this->stateGuard);
          try{
-            DataInfo dInfo = this->dataQueue.FrontPop();
-            if(dInfo.mpiFuncType == MolDS_base::Send){
-               MolDS_mpi::MpiProcess::GetInstance()->Send(dInfo.dest,
-                                                          dInfo.tag,
-                                                          reinterpret_cast<T*>(dInfo.vectorPtr),
-                                                          dInfo.num);
+            MessageInfo mInfo = this->messageQueue.FrontPop();
+            if(mInfo.mpiFuncType == MolDS_base::Send){
+               MolDS_mpi::MpiProcess::GetInstance()->Send(mInfo.dest,
+                                                          mInfo.tag,
+                                                          reinterpret_cast<T*>(mInfo.vectorPtr),
+                                                          mInfo.num);
             }
-            else if(dInfo.mpiFuncType == MolDS_base::Recv){
-               MolDS_mpi::MpiProcess::GetInstance()->Recv(dInfo.source,
-                                                          dInfo.tag,
-                                                          reinterpret_cast<T*>(dInfo.vectorPtr),
-                                                          dInfo.num);
+            else if(mInfo.mpiFuncType == MolDS_base::Recv){
+               MolDS_mpi::MpiProcess::GetInstance()->Recv(mInfo.source,
+                                                          mInfo.tag,
+                                                          reinterpret_cast<T*>(mInfo.vectorPtr),
+                                                          mInfo.num);
             }
-            else if(dInfo.mpiFuncType == MolDS_base::Broadcast){
-               MolDS_mpi::MpiProcess::GetInstance()->Broadcast(reinterpret_cast<T*>(dInfo.vectorPtr),
-                                                               dInfo.num,
-                                                               dInfo.source);
+            else if(mInfo.mpiFuncType == MolDS_base::Broadcast){
+               MolDS_mpi::MpiProcess::GetInstance()->Broadcast(reinterpret_cast<T*>(mInfo.vectorPtr),
+                                                               mInfo.num,
+                                                               mInfo.source);
             }
             else{
                std::stringstream ss;
@@ -72,51 +72,51 @@ public:
       }
    }
 
-   template<typename T> void SetSentVector(T* vector,
-                                           molds_mpi_int num,
-                                           int dest,
-                                           int tag){
+   template<typename T> void SetSentMessage(T* vector,
+                                            molds_mpi_int num,
+                                            int dest,
+                                            int tag){
       int source = NON_USED;
       MolDS_base::MpiFunctionType mpiFuncType = MolDS_base::Send;
-      this->SetVector(vector, num, source, dest, tag, mpiFuncType);
+      this->SetMessage(vector, num, source, dest, tag, mpiFuncType);
    }
 
-   template<typename T> void SetRecvedVector(T* vector,
-                                             molds_mpi_int num,
-                                             int source,
-                                             int tag){
+   template<typename T> void SetRecvedMessage(T* vector,
+                                              molds_mpi_int num,
+                                              int source,
+                                              int tag){
       int dest = NON_USED;
       MolDS_base::MpiFunctionType mpiFuncType = MolDS_base::Recv;
-      this->SetVector(vector, num, source, dest, tag, mpiFuncType);
+      this->SetMessage(vector, num, source, dest, tag, mpiFuncType);
    }
 
-   template<typename T> void SetBroadcastedVector(T* vector, molds_mpi_int num, int root){
+   template<typename T> void SetBroadcastedMessage(T* vector, molds_mpi_int num, int root){
       int source = root;
       int dest = NON_USED;
       int tag = NON_USED;
      MolDS_base::MpiFunctionType mpiFuncType = MolDS_base::Broadcast;
-      this->SetVector(vector, num, source, dest, tag, mpiFuncType);
+      this->SetMessage(vector, num, source, dest, tag, mpiFuncType);
    }
 
 private:
-   struct DataInfo{intptr_t vectorPtr;
-                   molds_mpi_int num;
-                   int source;
-                   int dest;
-                   int tag;
-                   MolDS_base::MpiFunctionType mpiFuncType;};
+   struct MessageInfo{intptr_t vectorPtr;
+                      molds_mpi_int num;
+                      int source;
+                      int dest;
+                      int tag;
+                      MolDS_base::MpiFunctionType mpiFuncType;};
    boost::mutex stateGuard;
    boost::condition stateChange;
-   MolDS_base_containers::ThreadSafeQueue<DataInfo> dataQueue;
-   template<typename T> void SetVector(T* vector,
-                                       molds_mpi_int num,
-                                       int source,
-                                       int dest,
-                                       int tag,
-                                       MolDS_base::MpiFunctionType mpiFuncType){
+   MolDS_base_containers::ThreadSafeQueue<MessageInfo> messageQueue;
+   template<typename T> void SetMessage(T* vector,
+                                        molds_mpi_int num,
+                                        int source,
+                                        int dest,
+                                        int tag,
+                                        MolDS_base::MpiFunctionType mpiFuncType){
       boost::mutex::scoped_lock lk(this->stateGuard);
-      DataInfo dInfo = {reinterpret_cast<intptr_t>(vector), num, source, dest, tag, mpiFuncType};
-      this->dataQueue.Push(dInfo);
+      MessageInfo mInfo = {reinterpret_cast<intptr_t>(vector), num, source, dest, tag, mpiFuncType};
+      this->messageQueue.Push(mInfo);
       this->stateChange.notify_all();
    }
 };
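
The header change is a pure rename (DataInfo becomes MessageInfo, dataQueue becomes messageQueue, SetVector becomes SetMessage, and the three public Set*Vector methods become Set*Message); the queue-plus-worker-thread structure is untouched. As a self-contained illustration of that structure, here is a simplified analogue that uses the C++ standard library instead of Boost and printf instead of real MPI calls. Every name in it (MiniCommunicator, FuncType, and so on) is hypothetical and only mirrors the shape of the class shown above, not the actual MolDS implementation.

// mini_async_communicator.cpp -- simplified analogue of the pattern above.
// Build with: g++ -std=c++11 -pthread mini_async_communicator.cpp
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <queue>
#include <thread>

enum class FuncType { Send, Recv, Broadcast };

struct MessageInfo {            // mirrors the renamed struct in the header
   double*  buffer;
   int      num;
   int      peer;               // dest, source, or root, depending on type
   FuncType type;
};

class MiniCommunicator {
public:
   // Worker loop: pops queued messages and "handles" them (here it only prints
   // what a Send/Recv would do, instead of calling MPI).
   void Run(int passingTimes) {
      while (passingTimes-- > 0) {
         std::unique_lock<std::mutex> lk(guard);
         change.wait(lk, [this]{ return !queue_.empty(); });
         MessageInfo m = queue_.front();
         queue_.pop();
         std::printf("handled message: type=%d num=%d peer=%d\n",
                     static_cast<int>(m.type), m.num, m.peer);
      }
   }
   void SetSentMessage(double* buf, int num, int dest)   { Push({buf, num, dest, FuncType::Send}); }
   void SetRecvedMessage(double* buf, int num, int src)  { Push({buf, num, src,  FuncType::Recv}); }
private:
   void Push(MessageInfo m) {
      std::lock_guard<std::mutex> lk(guard);
      queue_.push(m);
      change.notify_all();       // wake the worker, like stateChange.notify_all()
   }
   std::mutex              guard;    // plays the role of stateGuard
   std::condition_variable change;   // plays the role of stateChange
   std::queue<MessageInfo> queue_;   // plays the role of messageQueue
};

int main() {
   MiniCommunicator comm;
   double data[4] = {0.0, 1.0, 2.0, 3.0};
   std::thread worker(&MiniCommunicator::Run, &comm, 2);  // like communicationThread
   comm.SetSentMessage(data, 4, /*dest=*/1);
   comm.SetRecvedMessage(data, 4, /*source=*/1);
   worker.join();
   return 0;
}
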
--- a/src/zindo/ZindoS.cpp
+++ b/src/zindo/ZindoS.cpp
@@ -2418,10 +2418,10 @@ void ZindoS::CalcCISMatrix(double** matrixCIS) const{
          int num = this->matrixCISdimension - k;
          double* buff = &this->matrixCIS[k][k];
          if(mpiRank == mpiHeadRank && mpiRank != calcRank){
-            asyncCommunicator.SetRecvedVector(buff, num, source, tag);
+            asyncCommunicator.SetRecvedMessage(buff, num, source, tag);
          }
          if(mpiRank != mpiHeadRank && mpiRank == calcRank){
-            asyncCommunicator.SetSentVector(buff, num, dest, tag);
+            asyncCommunicator.SetSentMessage(buff, num, dest, tag);
          }
       } // end of k-loop which is MPI-parallelized
       communicationThread.join();
@@ -3361,10 +3361,10 @@ void ZindoS::CalcGammaNRMinusKNRMatrix(double** gammaNRMinusKNR, const vector<Mo
          int num = nonRedundantQIndecesSize - i;
          double* buff = &gammaNRMinusKNR[i][i];
          if(mpiRank == mpiHeadRank && mpiRank != calcRank){
-            asyncCommunicator.SetRecvedVector(buff, num, source, tag);
+            asyncCommunicator.SetRecvedMessage(buff, num, source, tag);
          }
          if(mpiRank != mpiHeadRank && mpiRank == calcRank){
-            asyncCommunicator.SetSentVector(buff, num, dest, tag);
+            asyncCommunicator.SetSentMessage(buff, num, dest, tag);
          }
       } // end of loop-i parallelized with MPI
       communicationThread.join();
@@ -3425,10 +3425,10 @@ void ZindoS::CalcKRDagerGammaRInvMatrix(double** kRDagerGammaRInv,
          int num = redundantQIndecesSize;
          double* buff = &kRDagerGammaRInv[i][0];
          if(mpiRank == mpiHeadRank && mpiRank != calcRank){
-            asyncCommunicator.SetRecvedVector(buff, num, source, tag);
+            asyncCommunicator.SetRecvedMessage(buff, num, source, tag);
          }
          if(mpiRank != mpiHeadRank && mpiRank == calcRank){
-            asyncCommunicator.SetSentVector(buff, num, dest, tag);
+            asyncCommunicator.SetSentMessage(buff, num, dest, tag);
          }
       } // end of loop-i parallelized with MPI
       communicationThread.join();