| Revision | 1dcb9a22eaf6cdaab95f10fac5d962eca59e03ad (tree) |
|---|---|
| Time | 2013-10-18 10:54:22 |
| Author | Mikiya Fujii <mikiya.fujii@gmai...> |
| Committer | Mikiya Fujii |
Bug in handling of MolDSException in OpenMP/MPI regions is fixed. #32309
git-svn-id: https://svn.sourceforge.jp/svnroot/molds/trunk@1551 1136aad2-a195-0410-b898-f5ea1d11b9d8
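
The fix is the same in every hunk below: previously each OpenMP-parallelized inner loop had its own `ompErrors` stream and threw `MolDSException::Deserialize(ompErrors)` immediately, inside the MPI-distributed outer loop and before `asyncCommunicator.Finalize()` and `communicationThread.join()`, so an exception raised by a worker thread abandoned the still-running communication thread. Each function now uses a single function-scope `errorStream`, skips scheduling the asynchronous MPI messages for an iteration once an error has been recorded, and deserializes and rethrows only after the communication thread has been joined (the matrix initialization is also moved ahead of the thread launch, and the MPI-distributed loops now run forward). Below is a minimal, self-contained sketch of that pattern using only standard C++ and OpenMP; the names `calcSomething`, `communicationLoop`, `data`, and `n` are illustrative, and `std::runtime_error`/`std::thread` stand in for `MolDSException` and `boost::thread`.

```cpp
// Minimal sketch (not MolDS code) of the deferred-rethrow pattern introduced here.
// Errors from OpenMP worker threads are only recorded into a shared stream; the
// per-iteration communication step is skipped once an error exists; the exception
// is rethrown only after the background communication thread has been joined.
#include <sstream>
#include <stdexcept>
#include <thread>

static void communicationLoop() { /* stands in for AsyncCommunicator::Run<double> */ }

void calcSomething(double* data, int n) {
   std::stringstream errorStream;                   // function scope, shared by all iterations
   std::thread communicationThread(communicationLoop);

   for (int a = 0; a < n; a++) {                    // MPI-distributed loop in MolDS
#pragma omp parallel for schedule(static)
      for (int b = a; b < n; b++) {                 // OpenMP-parallelized inner loop
         try {
            data[b] += 1.0;                         // placeholder for the real work
            if (b < 0) throw std::runtime_error("unreachable in this sketch");
         }
         catch (const std::exception& ex) {
#pragma omp critical
            errorStream << ex.what() << '\n';       // record, never throw across the omp region
         }
      }
      if (errorStream.str().empty()) {
         // schedule the asynchronous send/recv here (SetSentMessage/SetRecvedMessage
         // in MolDS); skipped once an error has been recorded, as in the patched code
      }
   }

   communicationThread.join();                      // Finalize() + join() in MolDS
   if (!errorStream.str().empty()) {
      throw std::runtime_error(errorStream.str());  // deferred rethrow, now safe
   }
}
```

Compile with `-fopenmp`; the design point is simply that no exception escapes an OpenMP region or is thrown while the communication thread is still alive.
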
@@ -1389,24 +1389,23 @@ void Cndo2::CalcFockMatrix(double** fockMatrix, | ||
1389 | 1389 | bool isGuess) const{ |
1390 | 1390 | int totalNumberAOs = molecule.GetTotalNumberAOs(); |
1391 | 1391 | int totalNumberAtoms = molecule.GetNumberAtoms(); |
1392 | + MallocerFreer::GetInstance()->Initialize<double>(fockMatrix, totalNumberAOs, totalNumberAOs); | |
1392 | 1393 | |
1393 | 1394 | // MPI setting of each rank |
1394 | 1395 | int mpiRank = MolDS_mpi::MpiProcess::GetInstance()->GetRank(); |
1395 | 1396 | int mpiSize = MolDS_mpi::MpiProcess::GetInstance()->GetSize(); |
1396 | 1397 | int mpiHeadRank = MolDS_mpi::MpiProcess::GetInstance()->GetHeadRank(); |
1398 | + stringstream errorStream; | |
1397 | 1399 | MolDS_mpi::AsyncCommunicator asyncCommunicator; |
1398 | - boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>, | |
1399 | - &asyncCommunicator) ); | |
1400 | + boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>, &asyncCommunicator) ); | |
1400 | 1401 | |
1401 | - MallocerFreer::GetInstance()->Initialize<double>(fockMatrix, totalNumberAOs, totalNumberAOs); | |
1402 | - for(int A=totalNumberAtoms-1; 0<=A; A--){ | |
1402 | + for(int A=0; A<totalNumberAtoms; A++){ | |
1403 | 1403 | const Atom& atomA = *molecule.GetAtom(A); |
1404 | 1404 | int firstAOIndexA = atomA.GetFirstAOIndex(); |
1405 | 1405 | int lastAOIndexA = atomA.GetLastAOIndex(); |
1406 | 1406 | for(int mu=firstAOIndexA; mu<=lastAOIndexA; mu++){ |
1407 | 1407 | int calcRank = mu%mpiSize; |
1408 | 1408 | if(mpiRank == calcRank){ |
1409 | - stringstream ompErrors; | |
1410 | 1409 | #pragma omp parallel for schedule(auto) |
1411 | 1410 | for(int B=A; B<totalNumberAtoms; B++){ |
1412 | 1411 | try{ |
@@ -1444,36 +1443,34 @@ void Cndo2::CalcFockMatrix(double** fockMatrix, | ||
1444 | 1443 | else{ |
1445 | 1444 | // lower left part (not calculated) |
1446 | 1445 | } |
1447 | - } // end of loop nu | |
1448 | - } // end of try | |
1446 | + } | |
1447 | + } | |
1449 | 1448 | catch(MolDSException ex){ |
1450 | -#pragma omp critical | |
1451 | - ex.Serialize(ompErrors); | |
1449 | +#pragma omp critical | |
1450 | + ex.Serialize(errorStream); | |
1452 | 1451 | } |
1453 | - } // end of loop B parallelized with openMP | |
1454 | - // Exception throwing for omp-region | |
1455 | - if(!ompErrors.str().empty()){ | |
1456 | - throw MolDSException::Deserialize(ompErrors); | |
1457 | 1452 | } |
1458 | - } // end of if(mpiRank == calcRank) | |
1459 | - | |
1460 | - // set data to gather in mpiHeadRank with asynchronous MPI | |
1461 | - int tag = mu; | |
1462 | - int source = calcRank; | |
1463 | - int dest = mpiHeadRank; | |
1464 | - double* buff = &fockMatrix[mu][mu]; | |
1465 | - MolDS_mpi::molds_mpi_int num = totalNumberAOs-mu; | |
1466 | - if(mpiRank == mpiHeadRank && mpiRank != calcRank){ | |
1467 | - asyncCommunicator.SetRecvedMessage(buff, num, source, tag); | |
1468 | 1453 | } |
1469 | - if(mpiRank != mpiHeadRank && mpiRank == calcRank){ | |
1470 | - asyncCommunicator.SetSentMessage(buff, num, dest, tag); | |
1454 | + if(errorStream.str().empty()){ | |
1455 | + int tag = mu; | |
1456 | + int source = calcRank; | |
1457 | + int dest = mpiHeadRank; | |
1458 | + double* buff = &fockMatrix[mu][mu]; | |
1459 | + MolDS_mpi::molds_mpi_int num = totalNumberAOs-mu; | |
1460 | + if(mpiRank == mpiHeadRank && mpiRank != calcRank){ | |
1461 | + asyncCommunicator.SetRecvedMessage(buff, num, source, tag); | |
1462 | + } | |
1463 | + if(mpiRank != mpiHeadRank && mpiRank == calcRank){ | |
1464 | + asyncCommunicator.SetSentMessage(buff, num, dest, tag); | |
1465 | + } | |
1471 | 1466 | } |
1472 | - } // end of loop mu parallelized with MPI | |
1473 | - } // end of loop A | |
1474 | - // Delete the communication thread. | |
1467 | + } | |
1468 | + } | |
1475 | 1469 | asyncCommunicator.Finalize(); |
1476 | 1470 | communicationThread.join(); |
1471 | + if(!errorStream.str().empty()){ | |
1472 | + throw MolDSException::Deserialize(errorStream); | |
1473 | + } | |
1477 | 1474 | double* buff = &fockMatrix[0][0]; |
1478 | 1475 | MolDS_mpi::molds_mpi_int num = totalNumberAOs*totalNumberAOs; |
1479 | 1476 | MolDS_mpi::MpiProcess::GetInstance()->Broadcast(buff, num, mpiHeadRank); |
@@ -1609,11 +1606,10 @@ void Cndo2::CalcGammaAB(double** gammaAB, const Molecule& molecule) const{ | ||
1609 | 1606 | int mpiRank = MolDS_mpi::MpiProcess::GetInstance()->GetRank(); |
1610 | 1607 | int mpiSize = MolDS_mpi::MpiProcess::GetInstance()->GetSize(); |
1611 | 1608 | int mpiHeadRank = MolDS_mpi::MpiProcess::GetInstance()->GetHeadRank(); |
1609 | + stringstream errorStream; | |
1612 | 1610 | MolDS_mpi::AsyncCommunicator asyncCommunicator; |
1613 | - boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>, | |
1614 | - &asyncCommunicator) ); | |
1611 | + boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>, &asyncCommunicator) ); | |
1615 | 1612 | |
1616 | - // This loop (A) is parallelized by MPI | |
1617 | 1613 | for(int A=0; A<totalAtomNumber; A++){ |
1618 | 1614 | int calcRank = A%mpiSize; |
1619 | 1615 | if(mpiRank == calcRank){ |
@@ -1621,7 +1617,6 @@ void Cndo2::CalcGammaAB(double** gammaAB, const Molecule& molecule) const{ | ||
1621 | 1617 | int na = atomA.GetValenceShellType() + 1; |
1622 | 1618 | double orbitalExponentA = atomA.GetOrbitalExponent( |
1623 | 1619 | atomA.GetValenceShellType(), s, this->theory); |
1624 | - stringstream ompErrors; | |
1625 | 1620 | #pragma omp parallel for schedule(auto) |
1626 | 1621 | for(int B=A; B<totalAtomNumber; B++){ |
1627 | 1622 | try{ |
@@ -1675,30 +1670,29 @@ void Cndo2::CalcGammaAB(double** gammaAB, const Molecule& molecule) const{ | ||
1675 | 1670 | } |
1676 | 1671 | catch(MolDSException ex){ |
1677 | 1672 | #pragma omp critical |
1678 | - ex.Serialize(ompErrors); | |
1673 | + ex.Serialize(errorStream); | |
1679 | 1674 | } |
1680 | - } // end of loop B parallelized by openMP | |
1681 | - // Exception throwing for omp-region | |
1682 | - if(!ompErrors.str().empty()){ | |
1683 | - throw MolDSException::Deserialize(ompErrors); | |
1684 | 1675 | } |
1685 | - } // end of if(mpiRank==calcRank) | |
1686 | - | |
1687 | - // set data to gater in mpiHeadRank with asynchronous MPI | |
1688 | - int tag = A; | |
1689 | - int source = calcRank; | |
1690 | - int dest = mpiHeadRank; | |
1691 | - double* buff = &gammaAB[A][A]; | |
1692 | - MolDS_mpi::molds_mpi_int num = totalAtomNumber-A; | |
1693 | - if(mpiRank == mpiHeadRank && mpiRank != calcRank){ | |
1694 | - asyncCommunicator.SetRecvedMessage(buff, num, source, tag); | |
1695 | 1676 | } |
1696 | - if(mpiRank != mpiHeadRank && mpiRank == calcRank){ | |
1697 | - asyncCommunicator.SetSentMessage(buff, num, dest, tag); | |
1677 | + if(errorStream.str().empty()){ | |
1678 | + int tag = A; | |
1679 | + int source = calcRank; | |
1680 | + int dest = mpiHeadRank; | |
1681 | + double* buff = &gammaAB[A][A]; | |
1682 | + MolDS_mpi::molds_mpi_int num = totalAtomNumber-A; | |
1683 | + if(mpiRank == mpiHeadRank && mpiRank != calcRank){ | |
1684 | + asyncCommunicator.SetRecvedMessage(buff, num, source, tag); | |
1685 | + } | |
1686 | + if(mpiRank != mpiHeadRank && mpiRank == calcRank){ | |
1687 | + asyncCommunicator.SetSentMessage(buff, num, dest, tag); | |
1688 | + } | |
1698 | 1689 | } |
1699 | - } // end of loop A prallelized by MPI | |
1690 | + } | |
1700 | 1691 | asyncCommunicator.Finalize(); |
1701 | 1692 | communicationThread.join(); |
1693 | + if(!errorStream.str().empty()){ | |
1694 | + throw MolDSException::Deserialize(errorStream); | |
1695 | + } | |
1702 | 1696 | double* buff = &gammaAB[0][0]; |
1703 | 1697 | MolDS_mpi::molds_mpi_int num = totalAtomNumber*totalAtomNumber; |
1704 | 1698 | MolDS_mpi::MpiProcess::GetInstance()->Broadcast(buff, num, mpiHeadRank); |
@@ -1807,11 +1801,10 @@ void Cndo2::CalcCartesianMatrixByGTOExpansion(double*** cartesianMatrix, | ||
1807 | 1801 | int mpiRank = MolDS_mpi::MpiProcess::GetInstance()->GetRank(); |
1808 | 1802 | int mpiSize = MolDS_mpi::MpiProcess::GetInstance()->GetSize(); |
1809 | 1803 | int mpiHeadRank = MolDS_mpi::MpiProcess::GetInstance()->GetHeadRank(); |
1804 | + stringstream errorStream; | |
1810 | 1805 | MolDS_mpi::AsyncCommunicator asyncCommunicator; |
1811 | - boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>, | |
1812 | - &asyncCommunicator) ); | |
1806 | + boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>, &asyncCommunicator) ); | |
1813 | 1807 | |
1814 | - // This loop (A and mu) is parallelized by MPI | |
1815 | 1808 | for(int A=0; A<totalAtomNumber; A++){ |
1816 | 1809 | const Atom& atomA = *molecule.GetAtom(A); |
1817 | 1810 | int firstAOIndexA = atomA.GetFirstAOIndex(); |
@@ -1820,7 +1813,6 @@ void Cndo2::CalcCartesianMatrixByGTOExpansion(double*** cartesianMatrix, | ||
1820 | 1813 | if(mpiRank == calcRank){ |
1821 | 1814 | for(int a=0; a<numValenceAOsA; a++){ |
1822 | 1815 | int mu = firstAOIndexA + a; |
1823 | - stringstream ompErrors; | |
1824 | 1816 | #pragma omp parallel for schedule(auto) |
1825 | 1817 | for(int B=0; B<totalAtomNumber; B++){ |
1826 | 1818 | try{ |
@@ -1837,40 +1829,38 @@ void Cndo2::CalcCartesianMatrixByGTOExpansion(double*** cartesianMatrix, | ||
1837 | 1829 | } |
1838 | 1830 | catch(MolDSException ex){ |
1839 | 1831 | #pragma omp critical |
1840 | - ex.Serialize(ompErrors); | |
1832 | + ex.Serialize(errorStream); | |
1841 | 1833 | } |
1842 | - }// end of loop for int B with openMP | |
1843 | - // Exception throwing for omp-region | |
1844 | - if(!ompErrors.str().empty()){ | |
1845 | - throw MolDSException::Deserialize(ompErrors); | |
1846 | - } | |
1847 | - } | |
1848 | - } // end lof if(mpiRank == calcRank) | |
1849 | - | |
1850 | - // set data to gater in mpiHeadRank with asynchronous MPI | |
1851 | - int tagX = A* CartesianType_end + XAxis; | |
1852 | - int tagY = A* CartesianType_end + YAxis; | |
1853 | - int tagZ = A* CartesianType_end + ZAxis; | |
1854 | - int source = calcRank; | |
1855 | - int dest = mpiHeadRank; | |
1856 | - double* buffX = &cartesianMatrix[XAxis][firstAOIndexA][0]; | |
1857 | - double* buffY = &cartesianMatrix[YAxis][firstAOIndexA][0]; | |
1858 | - double* buffZ = &cartesianMatrix[ZAxis][firstAOIndexA][0]; | |
1859 | - MolDS_mpi::molds_mpi_int num = numValenceAOsA*totalAONumber; | |
1860 | - if(mpiRank == mpiHeadRank && mpiRank != calcRank){ | |
1861 | - asyncCommunicator.SetRecvedMessage(buffX, num, source, tagX); | |
1862 | - asyncCommunicator.SetRecvedMessage(buffY, num, source, tagY); | |
1863 | - asyncCommunicator.SetRecvedMessage(buffZ, num, source, tagZ); | |
1864 | - } | |
1865 | - if(mpiRank != mpiHeadRank && mpiRank == calcRank){ | |
1866 | - asyncCommunicator.SetSentMessage(buffX, num, dest, tagX); | |
1867 | - asyncCommunicator.SetSentMessage(buffY, num, dest, tagY); | |
1868 | - asyncCommunicator.SetSentMessage(buffZ, num, dest, tagZ); | |
1869 | - } | |
1870 | - } // end of loop for int A with MPI | |
1871 | - // Delete the communication thread. | |
1834 | + } | |
1835 | + } | |
1836 | + } | |
1837 | + if(errorStream.str().empty()){ | |
1838 | + int tagX = A* CartesianType_end + XAxis; | |
1839 | + int tagY = A* CartesianType_end + YAxis; | |
1840 | + int tagZ = A* CartesianType_end + ZAxis; | |
1841 | + int source = calcRank; | |
1842 | + int dest = mpiHeadRank; | |
1843 | + double* buffX = &cartesianMatrix[XAxis][firstAOIndexA][0]; | |
1844 | + double* buffY = &cartesianMatrix[YAxis][firstAOIndexA][0]; | |
1845 | + double* buffZ = &cartesianMatrix[ZAxis][firstAOIndexA][0]; | |
1846 | + MolDS_mpi::molds_mpi_int num = numValenceAOsA*totalAONumber; | |
1847 | + if(mpiRank == mpiHeadRank && mpiRank != calcRank){ | |
1848 | + asyncCommunicator.SetRecvedMessage(buffX, num, source, tagX); | |
1849 | + asyncCommunicator.SetRecvedMessage(buffY, num, source, tagY); | |
1850 | + asyncCommunicator.SetRecvedMessage(buffZ, num, source, tagZ); | |
1851 | + } | |
1852 | + if(mpiRank != mpiHeadRank && mpiRank == calcRank){ | |
1853 | + asyncCommunicator.SetSentMessage(buffX, num, dest, tagX); | |
1854 | + asyncCommunicator.SetSentMessage(buffY, num, dest, tagY); | |
1855 | + asyncCommunicator.SetSentMessage(buffZ, num, dest, tagZ); | |
1856 | + } | |
1857 | + } | |
1858 | + } | |
1872 | 1859 | asyncCommunicator.Finalize(); |
1873 | 1860 | communicationThread.join(); |
1861 | + if(!errorStream.str().empty()){ | |
1862 | + throw MolDSException::Deserialize(errorStream); | |
1863 | + } | |
1874 | 1864 | double* buff = &cartesianMatrix[0][0][0]; |
1875 | 1865 | MolDS_mpi::molds_mpi_int num = CartesianType_end*totalAONumber*totalAONumber; |
1876 | 1866 | MolDS_mpi::MpiProcess::GetInstance()->Broadcast(buff, num, mpiHeadRank); |
@@ -3904,27 +3894,22 @@ void Cndo2::CalcOverlapESsWithAnotherElectronicStructure(double** overlapESs, | ||
3904 | 3894 | void Cndo2::CalcOverlapAOs(double** overlapAOs, const Molecule& molecule) const{ |
3905 | 3895 | int totalAONumber = molecule.GetTotalNumberAOs(); |
3906 | 3896 | int totalAtomNumber = molecule.GetNumberAtoms(); |
3897 | + MallocerFreer::GetInstance()->Initialize<double>(overlapAOs, totalAONumber, totalAONumber); | |
3907 | 3898 | |
3908 | 3899 | // MPI setting of each rank |
3909 | 3900 | int mpiRank = MolDS_mpi::MpiProcess::GetInstance()->GetRank(); |
3910 | 3901 | int mpiSize = MolDS_mpi::MpiProcess::GetInstance()->GetSize(); |
3911 | 3902 | int mpiHeadRank = MolDS_mpi::MpiProcess::GetInstance()->GetHeadRank(); |
3903 | + stringstream errorStream; | |
3912 | 3904 | MolDS_mpi::AsyncCommunicator asyncCommunicator; |
3913 | - boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>, | |
3914 | - &asyncCommunicator) ); | |
3915 | - | |
3916 | - MallocerFreer::GetInstance()->Initialize<double>(overlapAOs, | |
3917 | - totalAONumber, | |
3918 | - totalAONumber); | |
3905 | + boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>, &asyncCommunicator) ); | |
3919 | 3906 | |
3920 | - // This loop A is parallelized with MPI | |
3921 | - for(int A=totalAtomNumber-1; 0<=A; A--){ | |
3907 | + for(int A=0; A<totalAtomNumber; A++){ | |
3922 | 3908 | const Atom& atomA = *molecule.GetAtom(A); |
3923 | 3909 | int firstAOIndexA = atomA.GetFirstAOIndex(); |
3924 | 3910 | int numValenceAOs = atomA.GetValenceSize(); |
3925 | 3911 | int calcRank = A%mpiSize; |
3926 | 3912 | if(mpiRank == calcRank){ |
3927 | - stringstream ompErrors; | |
3928 | 3913 | #pragma omp parallel |
3929 | 3914 | { |
3930 | 3915 | double** diatomicOverlapAOs = NULL; |
@@ -3959,12 +3944,11 @@ void Cndo2::CalcOverlapAOs(double** overlapAOs, const Molecule& molecule) const{ | ||
3959 | 3944 | this->CalcRotatingMatrix(rotatingMatrix, atomA, atomB); |
3960 | 3945 | this->RotateDiatmicOverlapAOsToSpaceFrame(diatomicOverlapAOs, rotatingMatrix, tmpDiatomicOverlapAOs, tmpOldDiatomicOverlapAOs, tmpMatrixBC, tmpVectorBC); |
3961 | 3946 | this->SetOverlapAOsElement(overlapAOs, diatomicOverlapAOs, atomA, atomB, symmetrize); |
3962 | - } // end of loop B parallelized with openMP | |
3963 | - | |
3964 | - } // end of try | |
3947 | + } | |
3948 | + } | |
3965 | 3949 | catch(MolDSException ex){ |
3966 | 3950 | #pragma omp critical |
3967 | - ex.Serialize(ompErrors); | |
3951 | + ex.Serialize(errorStream); | |
3968 | 3952 | } |
3969 | 3953 | this->FreeDiatomicOverlapAOsAndRotatingMatrix(&diatomicOverlapAOs, &rotatingMatrix); |
3970 | 3954 | MallocerFreer::GetInstance()->Free<double>(&tmpDiatomicOverlapAOs, |
@@ -3977,28 +3961,27 @@ void Cndo2::CalcOverlapAOs(double** overlapAOs, const Molecule& molecule) const{ | ||
3977 | 3961 | OrbitalType_end); |
3978 | 3962 | MallocerFreer::GetInstance()->Free<double>(&tmpVectorBC, |
3979 | 3963 | OrbitalType_end*OrbitalType_end); |
3980 | - } // end of omp-parallelized region | |
3981 | - // Exception throwing for omp-region | |
3982 | - if(!ompErrors.str().empty()){ | |
3983 | - throw MolDSException::Deserialize(ompErrors); | |
3984 | 3964 | } |
3985 | - } // end of if(mpiRank == calcRnak) | |
3986 | - | |
3987 | - // set data to gather in mpiHeadRank with asynchronous MPI | |
3988 | - int tag = A; | |
3989 | - int source = calcRank; | |
3990 | - int dest = mpiHeadRank; | |
3991 | - double* buff = overlapAOs[firstAOIndexA]; | |
3992 | - MolDS_mpi::molds_mpi_int num = totalAONumber*numValenceAOs; | |
3993 | - if(mpiRank == mpiHeadRank && mpiRank != calcRank){ | |
3994 | - asyncCommunicator.SetRecvedMessage(buff, num, source, tag); | |
3995 | - } | |
3996 | - if(mpiRank != mpiHeadRank && mpiRank == calcRank){ | |
3997 | - asyncCommunicator.SetSentMessage(buff, num, dest, tag); | |
3965 | + } | |
3966 | + if(errorStream.str().empty()){ | |
3967 | + int tag = A; | |
3968 | + int source = calcRank; | |
3969 | + int dest = mpiHeadRank; | |
3970 | + double* buff = overlapAOs[firstAOIndexA]; | |
3971 | + MolDS_mpi::molds_mpi_int num = totalAONumber*numValenceAOs; | |
3972 | + if(mpiRank == mpiHeadRank && mpiRank != calcRank){ | |
3973 | + asyncCommunicator.SetRecvedMessage(buff, num, source, tag); | |
3974 | + } | |
3975 | + if(mpiRank != mpiHeadRank && mpiRank == calcRank){ | |
3976 | + asyncCommunicator.SetSentMessage(buff, num, dest, tag); | |
3977 | + } | |
3998 | 3978 | } |
3999 | - } // end of loop A parallelized with MPI | |
3979 | + } | |
4000 | 3980 | asyncCommunicator.Finalize(); |
4001 | 3981 | communicationThread.join(); |
3982 | + if(!errorStream.str().empty()){ | |
3983 | + throw MolDSException::Deserialize(errorStream); | |
3984 | + } | |
4002 | 3985 | double* buff = &overlapAOs[0][0]; |
4003 | 3986 | MolDS_mpi::molds_mpi_int num = totalAONumber*totalAONumber; |
4004 | 3987 | MolDS_mpi::MpiProcess::GetInstance()->Broadcast(buff, num, mpiHeadRank); |
@@ -3487,30 +3487,28 @@ double Mndo::GetAuxiliaryKNRKRElement(int moI, int moJ, int moK, int moL) const{ | ||
3487 | 3487 | |
3488 | 3488 | void Mndo::CalcTwoElecTwoCore(double****** twoElecTwoCore, |
3489 | 3489 | const Molecule& molecule) const{ |
3490 | - int totalNumberAtoms = molecule.GetNumberAtoms(); | |
3491 | - | |
3492 | - // MPI setting of each rank | |
3493 | - int mpiRank = MolDS_mpi::MpiProcess::GetInstance()->GetRank(); | |
3494 | - int mpiSize = MolDS_mpi::MpiProcess::GetInstance()->GetSize(); | |
3495 | - int mpiHeadRank = MolDS_mpi::MpiProcess::GetInstance()->GetHeadRank(); | |
3496 | - MolDS_mpi::AsyncCommunicator asyncCommunicator; | |
3497 | - boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>, | |
3498 | - &asyncCommunicator) ); | |
3499 | 3490 | #ifdef MOLDS_DBG |
3500 | 3491 | if(twoElecTwoCore == NULL){ |
3501 | 3492 | throw MolDSException(this->errorMessageCalcTwoElecTwoCoreNullMatrix); |
3502 | 3493 | } |
3503 | 3494 | #endif |
3495 | + int totalNumberAtoms = molecule.GetNumberAtoms(); | |
3504 | 3496 | MallocerFreer::GetInstance()->Initialize<double>(twoElecTwoCore, |
3505 | 3497 | totalNumberAtoms, |
3506 | 3498 | totalNumberAtoms, |
3507 | 3499 | dxy, dxy, dxy, dxy); |
3508 | 3500 | |
3509 | - // this loop-a is MPI-parallelized | |
3510 | - for(int a=totalNumberAtoms-1; 0<=a; a--){ | |
3501 | + // MPI setting of each rank | |
3502 | + int mpiRank = MolDS_mpi::MpiProcess::GetInstance()->GetRank(); | |
3503 | + int mpiSize = MolDS_mpi::MpiProcess::GetInstance()->GetSize(); | |
3504 | + int mpiHeadRank = MolDS_mpi::MpiProcess::GetInstance()->GetHeadRank(); | |
3505 | + stringstream errorStream; | |
3506 | + MolDS_mpi::AsyncCommunicator asyncCommunicator; | |
3507 | + boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>, &asyncCommunicator) ); | |
3508 | + | |
3509 | + for(int a=0; a<totalNumberAtoms; a++){ | |
3511 | 3510 | int calcRank = a%mpiSize; |
3512 | 3511 | if(mpiRank == calcRank){ |
3513 | - stringstream ompErrors; | |
3514 | 3512 | #pragma omp parallel |
3515 | 3513 | { |
3516 | 3514 | double**** diatomicTwoElecTwoCore = NULL; |
@@ -3533,14 +3531,12 @@ void Mndo::CalcTwoElecTwoCore(double****** twoElecTwoCore, | ||
3533 | 3531 | tmpMatrixBC, |
3534 | 3532 | tmpVectorBC, |
3535 | 3533 | a, b); |
3536 | - | |
3537 | 3534 | int i=0; |
3538 | 3535 | for(int mu=0; mu<dxy; mu++){ |
3539 | 3536 | for(int nu=mu; nu<dxy; nu++){ |
3540 | 3537 | int j=0; |
3541 | 3538 | for(int lambda=0; lambda<dxy; lambda++){ |
3542 | 3539 | for(int sigma=lambda; sigma<dxy; sigma++){ |
3543 | - //double value = diatomicTwoElecTwoCore[mu][nu][lambda][sigma]; | |
3544 | 3540 | this->twoElecTwoCoreMpiBuff[a][b][i][j] |
3545 | 3541 | = diatomicTwoElecTwoCore[mu][nu][lambda][sigma]; |
3546 | 3542 | j++; |
@@ -3549,36 +3545,34 @@ void Mndo::CalcTwoElecTwoCore(double****** twoElecTwoCore, | ||
3549 | 3545 | i++; |
3550 | 3546 | } |
3551 | 3547 | } |
3552 | - | |
3553 | - } // end of loop b parallelized with MPI | |
3554 | - | |
3555 | - } // end of try | |
3548 | + } | |
3549 | + } | |
3556 | 3550 | catch(MolDSException ex){ |
3557 | 3551 | #pragma omp critical |
3558 | - ex.Serialize(ompErrors); | |
3552 | + ex.Serialize(errorStream); | |
3559 | 3553 | } |
3560 | 3554 | MallocerFreer::GetInstance()->Free<double>(&diatomicTwoElecTwoCore, dxy, dxy, dxy, dxy); |
3561 | 3555 | MallocerFreer::GetInstance()->Free<double>(&tmpDiatomicTwoElecTwoCore, dxy*dxy*dxy*dxy); |
3562 | 3556 | MallocerFreer::GetInstance()->Free<double>(&tmpRotMat, OrbitalType_end, OrbitalType_end); |
3563 | 3557 | MallocerFreer::GetInstance()->Free<double>(&tmpMatrixBC, dxy*dxy, dxy*dxy); |
3564 | 3558 | MallocerFreer::GetInstance()->Free<double>(&tmpVectorBC, dxy*dxy*dxy*dxy); |
3565 | - } // end of omp-parallelized region | |
3566 | - // Exception throwing for omp-region | |
3567 | - if(!ompErrors.str().empty()){ | |
3568 | - throw MolDSException::Deserialize(ompErrors); | |
3569 | 3559 | } |
3570 | - } // end of if(mpiRnak == calcRank) | |
3571 | - // set data to gather in mpiHeadRank with asynchronous MPI | |
3572 | - if(a<totalNumberAtoms-1){ | |
3573 | - int b = a+1; | |
3574 | - OrbitalType twoElecLimit = dxy; | |
3575 | - int numBuff = (twoElecLimit+1)*twoElecLimit/2; | |
3576 | - int num = (totalNumberAtoms-b)*numBuff*numBuff; | |
3577 | - asyncCommunicator.SetBroadcastedMessage(&this->twoElecTwoCoreMpiBuff[a][b][0][0], num, calcRank); | |
3578 | 3560 | } |
3579 | - } // end of loop a parallelized with MPI | |
3561 | + if(errorStream.str().empty()){ | |
3562 | + if(a<totalNumberAtoms-1){ | |
3563 | + int b = a+1; | |
3564 | + OrbitalType twoElecLimit = dxy; | |
3565 | + int numBuff = (twoElecLimit+1)*twoElecLimit/2; | |
3566 | + int num = (totalNumberAtoms-b)*numBuff*numBuff; | |
3567 | + asyncCommunicator.SetBroadcastedMessage(&this->twoElecTwoCoreMpiBuff[a][b][0][0], num, calcRank); | |
3568 | + } | |
3569 | + } | |
3570 | + } | |
3580 | 3571 | asyncCommunicator.Finalize(); |
3581 | 3572 | communicationThread.join(); |
3573 | + if(!errorStream.str().empty()){ | |
3574 | + throw MolDSException::Deserialize(errorStream); | |
3575 | + } | |
3582 | 3576 | |
3583 | 3577 | #pragma omp parallel for schedule(auto) |
3584 | 3578 | for(int a=0; a<totalNumberAtoms; a++){ |
@@ -2353,18 +2353,16 @@ void ZindoS::CalcCISMatrix(double** matrixCIS) const{ | ||
2353 | 2353 | int mpiRank = MolDS_mpi::MpiProcess::GetInstance()->GetRank(); |
2354 | 2354 | int mpiSize = MolDS_mpi::MpiProcess::GetInstance()->GetSize(); |
2355 | 2355 | int mpiHeadRank = MolDS_mpi::MpiProcess::GetInstance()->GetHeadRank(); |
2356 | + stringstream errorStream; | |
2356 | 2357 | MolDS_mpi::AsyncCommunicator asyncCommunicator; |
2357 | - boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>, | |
2358 | - &asyncCommunicator) ); | |
2358 | + boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>, &asyncCommunicator) ); | |
2359 | 2359 | |
2360 | - // this loop-a is MPI-parallelized | |
2361 | - for(int k=this->matrixCISdimension-1; 0<=k; k--){ | |
2360 | + for(int k=0; k<this->matrixCISdimension; k++){ | |
2362 | 2361 | int calcRank = k%mpiSize; |
2363 | 2362 | if(calcRank == mpiRank){ |
2364 | 2363 | // single excitation from I-th (occupied)MO to A-th (virtual)MO |
2365 | 2364 | int moI = this->GetActiveOccIndex(*this->molecule, k); |
2366 | 2365 | int moA = this->GetActiveVirIndex(*this->molecule, k); |
2367 | - stringstream ompErrors; | |
2368 | 2366 | #pragma omp parallel for schedule(auto) |
2369 | 2367 | for(int l=k; l<this->matrixCISdimension; l++){ |
2370 | 2368 | try{ |
@@ -2401,30 +2399,29 @@ void ZindoS::CalcCISMatrix(double** matrixCIS) const{ | ||
2401 | 2399 | } |
2402 | 2400 | catch(MolDSException ex){ |
2403 | 2401 | #pragma omp critical |
2404 | - ex.Serialize(ompErrors); | |
2402 | + ex.Serialize(errorStream); | |
2405 | 2403 | } |
2406 | - } // end of l-loop | |
2407 | - // Exception throwing for omp-region | |
2408 | - if(!ompErrors.str().empty()){ | |
2409 | - throw MolDSException::Deserialize(ompErrors); | |
2410 | 2404 | } |
2411 | - } // end of if(calcRank == mpiRank) | |
2412 | - // Send data to head rank | |
2413 | - int tag = k; | |
2414 | - int source = calcRank; | |
2415 | - int dest = mpiHeadRank; | |
2416 | - int num = this->matrixCISdimension - k; | |
2417 | - double* buff = &this->matrixCIS[k][k]; | |
2418 | - if(mpiRank == mpiHeadRank && mpiRank != calcRank){ | |
2419 | - asyncCommunicator.SetRecvedMessage(buff, num, source, tag); | |
2420 | 2405 | } |
2421 | - if(mpiRank != mpiHeadRank && mpiRank == calcRank){ | |
2422 | - asyncCommunicator.SetSentMessage(buff, num, dest, tag); | |
2406 | + if(errorStream.str().empty()){ | |
2407 | + int tag = k; | |
2408 | + int source = calcRank; | |
2409 | + int dest = mpiHeadRank; | |
2410 | + int num = this->matrixCISdimension - k; | |
2411 | + double* buff = &this->matrixCIS[k][k]; | |
2412 | + if(mpiRank == mpiHeadRank && mpiRank != calcRank){ | |
2413 | + asyncCommunicator.SetRecvedMessage(buff, num, source, tag); | |
2414 | + } | |
2415 | + if(mpiRank != mpiHeadRank && mpiRank == calcRank){ | |
2416 | + asyncCommunicator.SetSentMessage(buff, num, dest, tag); | |
2417 | + } | |
2423 | 2418 | } |
2424 | - } // end of k-loop which is MPI-parallelized | |
2419 | + } | |
2425 | 2420 | asyncCommunicator.Finalize(); |
2426 | 2421 | communicationThread.join(); |
2427 | - // Broadcast data to all rank | |
2422 | + if(!errorStream.str().empty()){ | |
2423 | + throw MolDSException::Deserialize(errorStream); | |
2424 | + } | |
2428 | 2425 | for(int k=0; k<this->matrixCISdimension; k++){ |
2429 | 2426 | int num = this->matrixCISdimension - k; |
2430 | 2427 | double* buff = &this->matrixCIS[k][k]; |
@@ -3319,20 +3316,19 @@ void ZindoS::CalcAuxiliaryVector(double* y, | ||
3319 | 3316 | // Note taht K_{NR} is not calculated. |
3320 | 3317 | void ZindoS::CalcGammaNRMinusKNRMatrix(double** gammaNRMinusKNR, const vector<MoIndexPair>& nonRedundantQIndeces) const{ |
3321 | 3318 | int nonRedundantQIndecesSize = nonRedundantQIndeces.size(); |
3322 | - //MPI setting of each rank | |
3319 | + // MPI setting of each rank | |
3323 | 3320 | int mpiRank = MolDS_mpi::MpiProcess::GetInstance()->GetRank(); |
3324 | 3321 | int mpiSize = MolDS_mpi::MpiProcess::GetInstance()->GetSize(); |
3325 | 3322 | int mpiHeadRank = MolDS_mpi::MpiProcess::GetInstance()->GetHeadRank(); |
3323 | + stringstream errorStream; | |
3326 | 3324 | MolDS_mpi::AsyncCommunicator asyncCommunicator; |
3327 | - boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>, | |
3328 | - &asyncCommunicator) ); | |
3329 | - // this loop-i is MPI-parallelized | |
3330 | - for(int i=nonRedundantQIndecesSize-1; 0<=i; i--){ | |
3325 | + boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>, &asyncCommunicator) ); | |
3326 | + | |
3327 | + for(int i=0; i<nonRedundantQIndecesSize; i++){ | |
3331 | 3328 | int calcRank = i%mpiSize; |
3332 | 3329 | if(mpiRank == calcRank){ |
3333 | 3330 | int moI = nonRedundantQIndeces[i].moI; |
3334 | 3331 | int moJ = nonRedundantQIndeces[i].moJ; |
3335 | - stringstream ompErrors; | |
3336 | 3332 | #pragma omp parallel for schedule(auto) |
3337 | 3333 | for(int j=i; j<nonRedundantQIndecesSize; j++){ |
3338 | 3334 | try{ |
@@ -3343,30 +3339,29 @@ void ZindoS::CalcGammaNRMinusKNRMatrix(double** gammaNRMinusKNR, const vector<Mo | ||
3343 | 3339 | } // end of try |
3344 | 3340 | catch(MolDSException ex){ |
3345 | 3341 | #pragma omp critical |
3346 | - ex.Serialize(ompErrors); | |
3342 | + ex.Serialize(errorStream); | |
3347 | 3343 | } |
3348 | - } //end of loop j parallelized with openMP | |
3349 | - // Exception throwing for omp-region | |
3350 | - if(!ompErrors.str().empty()){ | |
3351 | - throw MolDSException::Deserialize(ompErrors); | |
3352 | - } | |
3353 | - } /// end of if(mpiRnak == calcRank) | |
3354 | - // Send data to head rank | |
3355 | - int tag = i; | |
3356 | - int source = calcRank; | |
3357 | - int dest = mpiHeadRank; | |
3358 | - int num = nonRedundantQIndecesSize - i; | |
3359 | - double* buff = &gammaNRMinusKNR[i][i]; | |
3360 | - if(mpiRank == mpiHeadRank && mpiRank != calcRank){ | |
3361 | - asyncCommunicator.SetRecvedMessage(buff, num, source, tag); | |
3344 | + } | |
3362 | 3345 | } |
3363 | - if(mpiRank != mpiHeadRank && mpiRank == calcRank){ | |
3364 | - asyncCommunicator.SetSentMessage(buff, num, dest, tag); | |
3346 | + if(errorStream.str().empty()){ | |
3347 | + int tag = i; | |
3348 | + int source = calcRank; | |
3349 | + int dest = mpiHeadRank; | |
3350 | + int num = nonRedundantQIndecesSize - i; | |
3351 | + double* buff = &gammaNRMinusKNR[i][i]; | |
3352 | + if(mpiRank == mpiHeadRank && mpiRank != calcRank){ | |
3353 | + asyncCommunicator.SetRecvedMessage(buff, num, source, tag); | |
3354 | + } | |
3355 | + if(mpiRank != mpiHeadRank && mpiRank == calcRank){ | |
3356 | + asyncCommunicator.SetSentMessage(buff, num, dest, tag); | |
3357 | + } | |
3365 | 3358 | } |
3366 | - } // end of loop-i parallelized with MPI | |
3359 | + } | |
3367 | 3360 | asyncCommunicator.Finalize(); |
3368 | 3361 | communicationThread.join(); |
3369 | - // broadcast data to all rank | |
3362 | + if(!errorStream.str().empty()){ | |
3363 | + throw MolDSException::Deserialize(errorStream); | |
3364 | + } | |
3370 | 3365 | for(int i=0; i<nonRedundantQIndecesSize; i++){ |
3371 | 3366 | int num = nonRedundantQIndecesSize - i; |
3372 | 3367 | double* buff = &gammaNRMinusKNR[i][i]; |
@@ -3382,20 +3377,19 @@ void ZindoS::CalcKRDagerGammaRInvMatrix(double** kRDagerGammaRInv, | ||
3382 | 3377 | const vector<MoIndexPair>& redundantQIndeces) const{ |
3383 | 3378 | int nonRedundantQIndecesSize = nonRedundantQIndeces.size(); |
3384 | 3379 | int redundantQIndecesSize = redundantQIndeces.size(); |
3385 | - //MPI setting of each rank | |
3380 | + // MPI setting of each rank | |
3386 | 3381 | int mpiRank = MolDS_mpi::MpiProcess::GetInstance()->GetRank(); |
3387 | 3382 | int mpiSize = MolDS_mpi::MpiProcess::GetInstance()->GetSize(); |
3388 | 3383 | int mpiHeadRank = MolDS_mpi::MpiProcess::GetInstance()->GetHeadRank(); |
3384 | + stringstream errorStream; | |
3389 | 3385 | MolDS_mpi::AsyncCommunicator asyncCommunicator; |
3390 | - boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>, | |
3391 | - &asyncCommunicator) ); | |
3392 | - // this loop-i is MPI-parallelized | |
3386 | + boost::thread communicationThread( boost::bind(&MolDS_mpi::AsyncCommunicator::Run<double>, &asyncCommunicator) ); | |
3387 | + | |
3393 | 3388 | for(int i=0; i<nonRedundantQIndecesSize; i++){ |
3394 | 3389 | int calcRank = i%mpiSize; |
3395 | 3390 | if(mpiRank == calcRank){ |
3396 | 3391 | int moI = nonRedundantQIndeces[i].moI; |
3397 | 3392 | int moJ = nonRedundantQIndeces[i].moJ; |
3398 | - stringstream ompErrors; | |
3399 | 3393 | #pragma omp parallel for schedule(auto) |
3400 | 3394 | for(int j=0; j<redundantQIndecesSize; j++){ |
3401 | 3395 | try{ |
@@ -3406,30 +3400,29 @@ void ZindoS::CalcKRDagerGammaRInvMatrix(double** kRDagerGammaRInv, | ||
3406 | 3400 | } // end of try |
3407 | 3401 | catch(MolDSException ex){ |
3408 | 3402 | #pragma omp critical |
3409 | - ex.Serialize(ompErrors); | |
3403 | + ex.Serialize(errorStream); | |
3410 | 3404 | } |
3411 | - } // end of loop-j parallelized with openMP | |
3412 | - // Exception throwing for omp-region | |
3413 | - if(!ompErrors.str().empty()){ | |
3414 | - throw MolDSException::Deserialize(ompErrors); | |
3415 | 3405 | } |
3416 | - } // // end of if(mpiRnak == calcRank) | |
3417 | - // Send data to head rank | |
3418 | - int tag = i; | |
3419 | - int source = calcRank; | |
3420 | - int dest = mpiHeadRank; | |
3421 | - int num = redundantQIndecesSize; | |
3422 | - double* buff = &kRDagerGammaRInv[i][0]; | |
3423 | - if(mpiRank == mpiHeadRank && mpiRank != calcRank){ | |
3424 | - asyncCommunicator.SetRecvedMessage(buff, num, source, tag); | |
3425 | 3406 | } |
3426 | - if(mpiRank != mpiHeadRank && mpiRank == calcRank){ | |
3427 | - asyncCommunicator.SetSentMessage(buff, num, dest, tag); | |
3407 | + if(errorStream.str().empty()){ | |
3408 | + int tag = i; | |
3409 | + int source = calcRank; | |
3410 | + int dest = mpiHeadRank; | |
3411 | + int num = redundantQIndecesSize; | |
3412 | + double* buff = &kRDagerGammaRInv[i][0]; | |
3413 | + if(mpiRank == mpiHeadRank && mpiRank != calcRank){ | |
3414 | + asyncCommunicator.SetRecvedMessage(buff, num, source, tag); | |
3415 | + } | |
3416 | + if(mpiRank != mpiHeadRank && mpiRank == calcRank){ | |
3417 | + asyncCommunicator.SetSentMessage(buff, num, dest, tag); | |
3418 | + } | |
3428 | 3419 | } |
3429 | - } // end of loop-i parallelized with MPI | |
3420 | + } | |
3430 | 3421 | asyncCommunicator.Finalize(); |
3431 | 3422 | communicationThread.join(); |
3432 | - // broadcast data to all rank | |
3423 | + if(!errorStream.str().empty()){ | |
3424 | + throw MolDSException::Deserialize(errorStream); | |
3425 | + } | |
3433 | 3426 | for(int i=0; i<nonRedundantQIndecesSize; i++){ |
3434 | 3427 | int num = redundantQIndecesSize; |
3435 | 3428 | double* buff = &kRDagerGammaRInv[i][0]; |