MODULE ED_HAMILTONIAN
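!>Glue module for the sector Hamiltonian: builds and deletes the sector
!>representation and exposes the matrix-vector products, either from the
!>stored sparse matrix or computed on-the-fly, in serial and MPI variants.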
USE ED_HAMILTONIAN_COMMON
USE ED_HAMILTONIAN_SPARSE_HxV
USE ED_HAMILTONIAN_DIRECT_HxV
!
implicit none
private
!>Build the sparse Hamiltonian of the sector
public :: build_Hv_sector
public :: delete_Hv_sector
public :: vecDim_Hv_sector
!>Sparse Mat-Vec product using the stored sparse matrix
public :: spMatVec_main
#ifdef _MPI
public :: spMatVec_MPI_main
#endif
!>Mat-Vec product computed directly on-the-fly, without storing the matrix
public :: directMatVec_main
#ifdef _MPI
public :: directMatVec_MPI_main
#endif
contains
!####################################################################
! MAIN ROUTINES: BUILD/DELETE SECTOR
!####################################################################
subroutine build_Hv_sector(isector,Hmat)
integer :: isector,SectorDim
complex(8),dimension(:,:),optional :: Hmat
integer :: irank,ierr
integer :: i,iup,idw
integer :: j,jup,jdw
!
Hsector=isector
Hstatus=.true.
!
allocate(Hs(2*Ns_Ud))
allocate(DimUps(Ns_Ud))
allocate(DimDws(Ns_Ud))
call build_sector(isector,Hs)
!
call get_DimUp(isector,DimUps)
call get_DimDw(isector,DimDws)
DimUp = product(DimUps)
DimDw = product(DimDws)
Dim = getDim(isector)
!
mpiAllThreads=.true.
!>PREAMBLE: check that the DW split uses the minimum number of CPUs: no idle CPUs (holding zero elements) are allowed
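!If there are more ranks than DW states, build a sub-communicator holding
!only the first DimDw ranks via Mpi_Group_Incl/Mpi_Comm_create; the ranks
!left out receive MPI_COMM_NULL and are excluded from the sector workload.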
#ifdef _MPI
if(MpiStatus)then
if(DimDw < MpiSize)then
if(MpiMaster.AND.ed_verbose>4)write(*,*)"Reducing N_cpu to DimDw:",DimDw,MpiSize-DimDw
allocate(MpiMembers(0:DimDw-1))
forall(irank=0:DimDw-1)MpiMembers(irank)=irank
call Mpi_Group_Incl(MpiGroup_Global,DimDw,MpiMembers,MpiGroup,ierr)
call Mpi_Comm_create(MpiComm_Global,MpiGroup,MpiComm,ierr)
deallocate(MpiMembers)
mpiAllThreads=.false.
call Barrier_MPI(MpiComm_Global)
#ifdef _DEBUG
if(ed_verbose>4)then
if(MpiMaster)write(LOGfile,*)&
" mpiRank, MpiComm, Comm_Global, Comm_World, Comm_Null, Undefined"
do i=0,MpiSize-1
call Barrier_MPI(MpiComm_Global)
if(MpiRank==i)write(*,*)i,MpiComm,MpiComm_Global,Mpi_Comm_World,Mpi_comm_null,Mpi_Undefined
enddo
call Barrier_MPI(MpiComm_Global)
endif
#endif
endif
if( MpiComm /= MPI_COMM_NULL )then
MpiRank = Get_Rank_MPI(MpiComm)
MpiSize = Get_Size_MPI(MpiComm)
endif
endif
#endif
!
!Dw split:
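!Each rank takes mpiQdw=DimDw/MpiSize states; the first mod(DimDw,MpiSize)
!ranks absorb one extra state each (their offset mpiRdw is reset to zero,
!the remaining ranks shift by the full remainder). Example: DimDw=10 on
!MpiSize=4 gives ranks 0,1 three DW states each and ranks 2,3 two each,
!i.e. a contiguous partition of the 10 states.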
mpiQdw = DimDw/MpiSize
mpiRdw = mod(DimDw,MpiSize)
if(MpiRank < mod(DimDw,MpiSize) ) then
mpiRdw = 0
MpiQdw = MpiQdw+1
endif
!
!Total split: split DW \times UP
mpiQ = DimUp*mpiQdw
mpiR = DimUp*mpiRdw
mpiIstart = 1 + MpiRank*mpiQ+mpiR
mpiIend = (MpiRank+1)*mpiQ+mpiR
mpiIshift = MpiRank*mpiQ+mpiR
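!The UP dimension is never split: each rank owns all DimUp states for its
!local DW share, so the local block has length mpiQ=DimUp*mpiQdw and
![mpiIstart:mpiIend] is the contiguous slice of the sector owned here.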
!
!
#ifdef _MPI
#ifdef _DEBUG
if(MpiStatus.AND.ed_verbose>4.AND.(MpiComm/=Mpi_Comm_Null).AND.MpiSize>=1)then
if(MpiMaster)write(LOGfile,*)&
" mpiRank, mpi_Q, mpi_R, mpi_Qdw, mpiR_dw, mpi_Istart, mpi_Iend, Iend-Istart, Comm, Comm_Global"
do irank=0,MpiSize-1
call Barrier_MPI(MpiComm)
if(MpiRank==irank)write(*,*)MpiRank,MpiQ,MpiR,mpiQdw,MpiRdw,MpiIstart,MpiIend,MpiIend-MpiIstart+1,MpiComm,MpiComm_Global
enddo
call Barrier_MPI(MpiComm)
endif
#endif
#endif
!
!
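!If Hmat is present, build the full dense Hamiltonian of the sector into
!it and return: no H*v procedure pointer is associated in this case.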
if(present(Hmat))then
spHtimesV_p => null()
call ed_buildh_main(isector,Hmat)
return
endif
!
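!Select the H*v kernel: with ed_sparse_H=.true. the sparse matrix is built
!and stored once, then applied by spMatVec_*; with .false. the matrix
!elements are recomputed on-the-fly at each application by directMatVec_*.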
select case (ed_sparse_H)
case (.true.)
spHtimesV_p => spMatVec_main
#ifdef _MPI
if(MpiStatus)spHtimesV_p => spMatVec_MPI_main
#endif
call ed_buildh_main(isector)
case (.false.)
spHtimesV_p => directMatVec_main
#ifdef _MPI
if(MpiStatus)spHtimesV_p => directMatVec_MPI_main
#endif
end select
!
end subroutine build_Hv_sector
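!A minimal usage sketch of the public interface above. The (Nloc,vin,vout)
!interface of spHtimesV_p and the complex(8) vector kind are assumptions
!taken from the signatures in this module; vin/vout are hypothetical names:
!
!   vecDim = vecDim_Hv_sector(isector)     !MPI-local vector length
!   allocate(vin(vecDim),vout(vecDim))     !complex(8) work vectors
!   call build_Hv_sector(isector)          !build maps, split, set H*v pointer
!   call spHtimesV_p(vecDim,vin,vout)      !apply H: vout = H*vin
!   call delete_Hv_sector()                !free maps, matrices, communicator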
subroutine delete_Hv_sector()
integer :: iud,ierr,i
call delete_sector(Hsector,Hs)
deallocate(Hs)
deallocate(DimUps)
deallocate(DimDws)
Hsector=0
Hstatus=.false.
!
!There is no difference here between the MPI and serial versions, as the local part was removed.
#ifdef _MPI
if(MpiStatus)then
call sp_delete_matrix(MpiComm,spH0d)
if(Jhflag)call sp_delete_matrix(MpiComm,spH0nd)
else
call sp_delete_matrix(spH0d)
if(Jhflag)call sp_delete_matrix(spH0nd)
endif
#else
call sp_delete_matrix(spH0d)
if(Jhflag)call sp_delete_matrix(spH0nd)
#endif
do iud=1,Ns_Ud
call sp_delete_matrix(spH0ups(iud))
call sp_delete_matrix(spH0dws(iud))
enddo
!
spHtimesV_p => null()
!
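!Free the reduced group/communicator possibly created in build_Hv_sector
!and restore the global communicator together with its size and rank.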
#ifdef _MPI
if(MpiStatus)then
if(MpiGroup/=Mpi_Group_Null)call Mpi_Group_free(MpiGroup,ierr)
if(MpiComm/=Mpi_Comm_Null.AND.MpiComm/=Mpi_Comm_World)call Mpi_Comm_Free(MpiComm,ierr)
MpiComm = MpiComm_Global
MpiSize = get_Size_MPI(MpiComm_Global)
MpiRank = get_Rank_MPI(MpiComm_Global)
!call Mpi_Comm_Dup(MpiComm_Global,MpiComm,ierr)
endif
#endif
iter=0
!
end subroutine delete_Hv_sector
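!>Return the MPI-local vector length for sector isector: the full UP
!>dimension times this rank's share of the DW states, mirroring the
!>split performed in build_Hv_sector.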
function vecDim_Hv_sector(isector) result(vecDim)
integer :: isector
integer :: vecDim
integer :: mpiQdw
integer :: DimUps(Ns_Ud),DimUp
integer :: DimDws(Ns_Ud),DimDw
!
call get_DimUp(isector,DimUps) ; DimUp = product(DimUps)
call get_DimDw(isector,DimDws) ; DimDw = product(DimDws)
!
#ifdef _MPI
if(MpiStatus)then
!Dw split:
mpiQdw = DimDw/MpiSize
if(MpiRank < mod(DimDw,MpiSize) ) MpiQdw = MpiQdw+1
else
mpiQdw = DimDw
endif
#else
mpiQdw = DimDw
#endif
!
vecDim=DimUp*mpiQdw
!
end function vecDim_Hv_sector
end MODULE ED_HAMILTONIAN