CheMPS2
Heff.cpp
1 /*
2  CheMPS2: a spin-adapted implementation of DMRG for ab initio quantum chemistry
3  Copyright (C) 2013-2016 Sebastian Wouters
4 
5  This program is free software; you can redistribute it and/or modify
6  it under the terms of the GNU General Public License as published by
7  the Free Software Foundation; either version 2 of the License, or
8  (at your option) any later version.
9 
10  This program is distributed in the hope that it will be useful,
11  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13  GNU General Public License for more details.
14 
15  You should have received a copy of the GNU General Public License along
16  with this program; if not, write to the Free Software Foundation, Inc.,
17  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19 
20 #include <math.h>
21 #include <stdlib.h>
22 #include <iostream>
23 #include <algorithm>
24 #include <assert.h>
25 
26 #include "Heff.h"
27 #include "Davidson.h"
28 #include "Lapack.h"
29 #include "MPIchemps2.h"
30 
31 CheMPS2::Heff::Heff(const SyBookkeeper * denBKIn, const Problem * ProbIn, const double dvdson_rtol_in){
32 
33  denBK = denBKIn;
34  Prob = ProbIn;
35  dvdson_rtol = dvdson_rtol_in;
36 
37 }
38 
39 CheMPS2::Heff::~Heff(){
40 
41 }
42 
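/*
   makeHeff applies the effective Hamiltonian to the two-site object denS at boundary indexS:
   the input wavefunction memS is read and the result is accumulated in memHeff, one symmetry
   block (ikappa) at a time, diagram by diagram. Renormalized operators to the left of the
   two sites are skipped when atLeft holds, and those to the right when atRight holds. The
   block loop is parallelized with OpenMP, each thread using its own scratch arrays, and
   under CHEMPS2_MPI_COMPILATION the owner_* functions decide which rank adds which diagram,
   so the full matrix-vector product is only obtained after the per-rank contributions are
   reduced onto the master (see SolveDAVIDSON_main and SolveDAVIDSON_help below).
*/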
43 void CheMPS2::Heff::makeHeff(double * memS, double * memHeff, const Sobject * denS, TensorL *** Ltensors, TensorOperator **** Atensors, TensorOperator **** Btensors, TensorOperator **** Ctensors, TensorOperator **** Dtensors, TensorS0 **** S0tensors, TensorS1 **** S1tensors, TensorF0 **** F0tensors, TensorF1 **** F1tensors, TensorQ *** Qtensors, TensorX ** Xtensors, int nLower, double ** VeffTilde) const{
44 
45  const int indexS = denS->gIndex();
46  const bool atLeft = (indexS==0)?true:false;
47  const bool atRight = (indexS==Prob->gL()-2)?true:false;
48  const int DIM = std::max(denBK->gMaxDimAtBound(indexS), denBK->gMaxDimAtBound(indexS+2));
49  #ifdef CHEMPS2_MPI_COMPILATION
50  const int MPIRANK = MPIchemps2::mpi_rank();
51  #endif
52 
53  //PARALLEL
54  #pragma omp parallel
55  {
56 
57  double * temp = new double[DIM*DIM];
58  double * temp2 = new double[DIM*DIM];
59 
60  #pragma omp for schedule(dynamic)
61  for (int ikappaBIS=0; ikappaBIS<denS->gNKappa(); ikappaBIS++){
62 
63  const int ikappa = denS->gReorder(ikappaBIS);
64  for (int cnt=denS->gKappa2index(ikappa); cnt<denS->gKappa2index(ikappa+1); cnt++){ memHeff[cnt] = 0.0; }
65 
66  #ifdef CHEMPS2_MPI_COMPILATION
67  if ( MPIchemps2::owner_1cd2d3eh() == MPIRANK )
68  #endif
69  {
70  addDiagram1C(ikappa, memS,memHeff,denS,Prob->gMxElement(indexS,indexS,indexS,indexS));
71  addDiagram1D(ikappa, memS,memHeff,denS,Prob->gMxElement(indexS+1,indexS+1,indexS+1,indexS+1));
72  addDiagram2dall(ikappa, memS, memHeff, denS);
73  addDiagram3Eand3H(ikappa, memS, memHeff, denS);
74  }
75  addDiagramExcitations(ikappa, memS, memHeff, denS, nLower, VeffTilde); //The MPI check occurs in this function
76 
77  if (!atLeft){
78 
79  /*********************
80  * Diagrams group 1 *
81  *********************/
82  #ifdef CHEMPS2_MPI_COMPILATION
83  if ( MPIchemps2::owner_x() == MPIRANK )
84  #endif
85  { addDiagram1A(ikappa, memS, memHeff, denS, Xtensors[indexS-1]); }
86 
87  /*********************
88  * Diagrams group 2 *
89  *********************/
90  #ifdef CHEMPS2_MPI_COMPILATION
91  if ( MPIchemps2::owner_absigma( indexS, indexS ) == MPIRANK )
92  #endif
93  { addDiagram2b1and2b2(ikappa, memS, memHeff, denS, Atensors[indexS-1][0][0]); }
94  #ifdef CHEMPS2_MPI_COMPILATION
95  if ( MPIchemps2::owner_absigma( indexS+1, indexS+1 ) == MPIRANK )
96  #endif
97  { addDiagram2c1and2c2(ikappa, memS, memHeff, denS, Atensors[indexS-1][0][1]); }
98  #ifdef CHEMPS2_MPI_COMPILATION
99  if ( MPIchemps2::owner_cdf( Prob->gL(), indexS, indexS ) == MPIRANK )
100  #endif
101  { addDiagram2b3spin0(ikappa, memS, memHeff, denS, Ctensors[indexS-1][0][0]);
102  addDiagram2b3spin1(ikappa, memS, memHeff, denS, Dtensors[indexS-1][0][0]); }
103  #ifdef CHEMPS2_MPI_COMPILATION
104  if ( MPIchemps2::owner_cdf( Prob->gL(), indexS+1, indexS+1 ) == MPIRANK )
105  #endif
106  { addDiagram2c3spin0(ikappa, memS, memHeff, denS, Ctensors[indexS-1][0][1]);
107  addDiagram2c3spin1(ikappa, memS, memHeff, denS, Dtensors[indexS-1][0][1]); }
108 
109  /*********************
110  * Diagrams group 3 *
111  *********************/
112  #ifdef CHEMPS2_MPI_COMPILATION
113  if ( MPIchemps2::owner_q( Prob->gL(), indexS ) == MPIRANK )
114  #endif
115  { addDiagram3Aand3D(ikappa, memS, memHeff, denS, Qtensors[indexS-1][0], Ltensors[indexS-1], temp); }
116  #ifdef CHEMPS2_MPI_COMPILATION
117  if ( MPIchemps2::owner_q( Prob->gL(), indexS+1 ) == MPIRANK )
118  #endif
119  { addDiagram3Band3I(ikappa, memS, memHeff, denS, Qtensors[indexS-1][1], Ltensors[indexS-1], temp); }
120 
121  /*********************
122  * Diagrams group 4 *
123  *********************/
124  #ifdef CHEMPS2_MPI_COMPILATION
125  if ( MPIchemps2::owner_absigma( indexS, indexS+1 ) == MPIRANK )
126  #endif
127  { addDiagram4A1and4A2spin0(ikappa, memS, memHeff, denS, Atensors[indexS-1][1][0]);
128  addDiagram4A1and4A2spin1(ikappa, memS, memHeff, denS, Btensors[indexS-1][1][0]); }
129  #ifdef CHEMPS2_MPI_COMPILATION
130  if ( MPIchemps2::owner_cdf( Prob->gL(), indexS, indexS+1 ) == MPIRANK )
131  #endif
132  { addDiagram4A3and4A4spin0(ikappa, memS, memHeff, denS, Ctensors[indexS-1][1][0]);
133  addDiagram4A3and4A4spin1(ikappa, memS, memHeff, denS, Dtensors[indexS-1][1][0]); }
134  addDiagram4D(ikappa, memS, memHeff, denS, Ltensors[indexS-1], temp); //The MPI check occurs in this function
135  addDiagram4I(ikappa, memS, memHeff, denS, Ltensors[indexS-1], temp); //The MPI check occurs in this function
136 
137  }
138 
139  if (!atRight){
140 
141  /*********************
142  * Diagrams group 1 *
143  *********************/
144  #ifdef CHEMPS2_MPI_COMPILATION
145  if ( MPIchemps2::owner_x() == MPIRANK )
146  #endif
147  { addDiagram1B(ikappa, memS, memHeff, denS, Xtensors[indexS+1]); }
148 
149  /*********************
150  * Diagrams group 2 *
151  *********************/
152  #ifdef CHEMPS2_MPI_COMPILATION
153  if ( MPIchemps2::owner_absigma( indexS, indexS ) == MPIRANK )
154  #endif
155  { addDiagram2e1and2e2(ikappa, memS, memHeff, denS, Atensors[indexS+1][0][1]); }
156  #ifdef CHEMPS2_MPI_COMPILATION
157  if ( MPIchemps2::owner_absigma( indexS+1, indexS+1 ) == MPIRANK )
158  #endif
159  { addDiagram2f1and2f2(ikappa, memS, memHeff, denS, Atensors[indexS+1][0][0]); }
160  #ifdef CHEMPS2_MPI_COMPILATION
161  if ( MPIchemps2::owner_cdf( Prob->gL(), indexS, indexS ) == MPIRANK )
162  #endif
163  { addDiagram2e3spin0(ikappa, memS, memHeff, denS, Ctensors[indexS+1][0][1]);
164  addDiagram2e3spin1(ikappa, memS, memHeff, denS, Dtensors[indexS+1][0][1]); }
165  #ifdef CHEMPS2_MPI_COMPILATION
166  if ( MPIchemps2::owner_cdf( Prob->gL(), indexS+1, indexS+1 ) == MPIRANK )
167  #endif
168  { addDiagram2f3spin0(ikappa, memS, memHeff, denS, Ctensors[indexS+1][0][0]);
169  addDiagram2f3spin1(ikappa, memS, memHeff, denS, Dtensors[indexS+1][0][0]); }
170 
171  /*********************
172  * Diagrams group 3 *
173  *********************/
174  #ifdef CHEMPS2_MPI_COMPILATION
175  if ( MPIchemps2::owner_q( Prob->gL(), indexS ) == MPIRANK )
176  #endif
177  { addDiagram3Kand3F(ikappa, memS, memHeff, denS, Qtensors[indexS+1][1], Ltensors[indexS+1], temp); }
178  #ifdef CHEMPS2_MPI_COMPILATION
179  if ( MPIchemps2::owner_q( Prob->gL(), indexS+1 ) == MPIRANK )
180  #endif
181  { addDiagram3Land3G(ikappa, memS, memHeff, denS, Qtensors[indexS+1][0], Ltensors[indexS+1], temp); }
182 
183  /*********************
184  * Diagrams group 4 *
185  *********************/
186  #ifdef CHEMPS2_MPI_COMPILATION
187  if ( MPIchemps2::owner_absigma( indexS, indexS+1 ) == MPIRANK )
188  #endif
189  { addDiagram4J1and4J2spin0(ikappa, memS, memHeff, denS, Atensors[indexS+1][1][0]);
190  addDiagram4J1and4J2spin1(ikappa, memS, memHeff, denS, Btensors[indexS+1][1][0]); }
191  #ifdef CHEMPS2_MPI_COMPILATION
192  if ( MPIchemps2::owner_cdf( Prob->gL(), indexS, indexS+1 ) == MPIRANK )
193  #endif
194  { addDiagram4J3and4J4spin0(ikappa, memS, memHeff, denS, Ctensors[indexS+1][1][0]);
195  addDiagram4J3and4J4spin1(ikappa, memS, memHeff, denS, Dtensors[indexS+1][1][0]); }
196  addDiagram4F(ikappa, memS, memHeff, denS, Ltensors[indexS+1], temp); //The MPI check occurs in this function
197  addDiagram4G(ikappa, memS, memHeff, denS, Ltensors[indexS+1], temp); //The MPI check occurs in this function
198 
199  }
200 
201  if ((!atLeft) && (!atRight)){
202 
203  addDiagram2a1spin0(ikappa, memS, memHeff, denS, Atensors, S0tensors, temp); //The MPI check occurs in this function
204  addDiagram2a2spin0(ikappa, memS, memHeff, denS, Atensors, S0tensors, temp); //The MPI check occurs in this function
205  addDiagram2a1spin1(ikappa, memS, memHeff, denS, Btensors, S1tensors, temp); //The MPI check occurs in this function
206  addDiagram2a2spin1(ikappa, memS, memHeff, denS, Btensors, S1tensors, temp); //The MPI check occurs in this function
207  addDiagram2a3spin0(ikappa, memS, memHeff, denS, Ctensors, F0tensors, temp); //The MPI check occurs in this function
208  addDiagram2a3spin1(ikappa, memS, memHeff, denS, Dtensors, F1tensors, temp); //The MPI check occurs in this function
209 
210  addDiagram3C(ikappa, memS, memHeff, denS, Qtensors[indexS-1], Ltensors[indexS+1], temp); //The MPI check occurs in this function
211  addDiagram3J(ikappa, memS, memHeff, denS, Qtensors[indexS+1], Ltensors[indexS-1], temp); //The MPI check occurs in this function
212 
213  addDiagram4B1and4B2spin0(ikappa, memS, memHeff, denS, Atensors[indexS-1], Ltensors[indexS+1], temp); //The MPI check occurs in this function
214  addDiagram4B1and4B2spin1(ikappa, memS, memHeff, denS, Btensors[indexS-1], Ltensors[indexS+1], temp); //The MPI check occurs in this function
215  addDiagram4B3and4B4spin0(ikappa, memS, memHeff, denS, Ctensors[indexS-1], Ltensors[indexS+1], temp); //The MPI check occurs in this function
216  addDiagram4B3and4B4spin1(ikappa, memS, memHeff, denS, Dtensors[indexS-1], Ltensors[indexS+1], temp); //The MPI check occurs in this function
217  addDiagram4C1and4C2spin0(ikappa, memS, memHeff, denS, Atensors[indexS-1], Ltensors[indexS+1], temp); //The MPI check occurs in this function
218  addDiagram4C1and4C2spin1(ikappa, memS, memHeff, denS, Btensors[indexS-1], Ltensors[indexS+1], temp); //The MPI check occurs in this function
219  addDiagram4C3and4C4spin0(ikappa, memS, memHeff, denS, Ctensors[indexS-1], Ltensors[indexS+1], temp); //The MPI check occurs in this function
220  addDiagram4C3and4C4spin1(ikappa, memS, memHeff, denS, Dtensors[indexS-1], Ltensors[indexS+1], temp); //The MPI check occurs in this function
221  addDiagram4E(ikappa, memS, memHeff, denS, Ltensors[indexS-1], Ltensors[indexS+1], temp, temp2); //The MPI check occurs in this function
222  addDiagram4H(ikappa, memS, memHeff, denS, Ltensors[indexS-1], Ltensors[indexS+1], temp, temp2); //The MPI check occurs in this function
223  addDiagram4K1and4K2spin0(ikappa, memS, memHeff, denS, Ltensors[indexS-1], Atensors[indexS+1], temp); //The MPI check occurs in this function
224  addDiagram4L1and4L2spin0(ikappa, memS, memHeff, denS, Ltensors[indexS-1], Atensors[indexS+1], temp); //The MPI check occurs in this function
225  addDiagram4K1and4K2spin1(ikappa, memS, memHeff, denS, Ltensors[indexS-1], Btensors[indexS+1], temp); //The MPI check occurs in this function
226  addDiagram4L1and4L2spin1(ikappa, memS, memHeff, denS, Ltensors[indexS-1], Btensors[indexS+1], temp); //The MPI check occurs in this function
227  addDiagram4K3and4K4spin0(ikappa, memS, memHeff, denS, Ltensors[indexS-1], Ctensors[indexS+1], temp); //The MPI check occurs in this function
228  addDiagram4L3and4L4spin0(ikappa, memS, memHeff, denS, Ltensors[indexS-1], Ctensors[indexS+1], temp); //The MPI check occurs in this function
229  addDiagram4K3and4K4spin1(ikappa, memS, memHeff, denS, Ltensors[indexS-1], Dtensors[indexS+1], temp); //The MPI check occurs in this function
230  addDiagram4L3and4L4spin1(ikappa, memS, memHeff, denS, Ltensors[indexS-1], Dtensors[indexS+1], temp); //The MPI check occurs in this function
231 
232  addDiagram5A(ikappa, memS, memHeff, denS, Ltensors[indexS-1], Ltensors[indexS+1], temp, temp2); //The MPI check occurs in this function
233  addDiagram5B(ikappa, memS, memHeff, denS, Ltensors[indexS-1], Ltensors[indexS+1], temp, temp2); //The MPI check occurs in this function
234  addDiagram5C(ikappa, memS, memHeff, denS, Ltensors[indexS-1], Ltensors[indexS+1], temp, temp2); //The MPI check occurs in this function
235  addDiagram5D(ikappa, memS, memHeff, denS, Ltensors[indexS-1], Ltensors[indexS+1], temp, temp2); //The MPI check occurs in this function
236  addDiagram5E(ikappa, memS, memHeff, denS, Ltensors[indexS-1], Ltensors[indexS+1], temp, temp2); //The MPI check occurs in this function
237  addDiagram5F(ikappa, memS, memHeff, denS, Ltensors[indexS-1], Ltensors[indexS+1], temp, temp2); //The MPI check occurs in this function
238 
239  }
240 
241  }
242 
243  delete [] temp;
244  delete [] temp2;
245 
246  }
247 
248 }
249 
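/*
   fillHeffDiag evaluates only the diagonal of the effective Hamiltonian and stores it in
   memHeffDiag, block by block, with the same atLeft/atRight logic and MPI ownership checks
   as makeHeff but restricted to the terms that contribute to the diagonal. The result is
   the array handed to the Davidson solver at instruction 'A' and used for preconditioning.
*/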
250 void CheMPS2::Heff::fillHeffDiag(double * memHeffDiag, const Sobject * denS, TensorOperator **** Ctensors, TensorOperator **** Dtensors, TensorF0 **** F0tensors, TensorF1 **** F1tensors, TensorX ** Xtensors, int nLower, double ** VeffTilde) const{
251 
252  const int indexS = denS->gIndex();
253  const bool atLeft = (indexS==0)?true:false;
254  const bool atRight = (indexS==Prob->gL()-2)?true:false;
255  #ifdef CHEMPS2_MPI_COMPILATION
256  const int MPIRANK = MPIchemps2::mpi_rank();
257  #endif
258 
259  //PARALLEL
260  #pragma omp parallel for schedule(dynamic)
261  for (int ikappaBIS=0; ikappaBIS<denS->gNKappa(); ikappaBIS++){
262 
263  const int ikappa = denS->gReorder(ikappaBIS);
264  for (int cnt=denS->gKappa2index(ikappa); cnt<denS->gKappa2index(ikappa+1); cnt++){ memHeffDiag[cnt] = 0.0; }
265 
266  #ifdef CHEMPS2_MPI_COMPILATION
267  if ( MPIchemps2::owner_1cd2d3eh() == MPIRANK )
268  #endif
269  { addDiagonal1C(ikappa, memHeffDiag,denS,Prob->gMxElement(indexS,indexS,indexS,indexS));
270  addDiagonal1D(ikappa, memHeffDiag,denS,Prob->gMxElement(indexS+1,indexS+1,indexS+1,indexS+1));
271  addDiagonal2d3all(ikappa, memHeffDiag, denS); }
272  if (nLower>0){ addDiagonalExcitations(ikappa, memHeffDiag, denS, nLower, VeffTilde); } //The MPI check occurs in this function
273 
274  if (!atLeft){
275  #ifdef CHEMPS2_MPI_COMPILATION
276  if ( MPIchemps2::owner_x() == MPIRANK )
277  #endif
278  { addDiagonal1A(ikappa, memHeffDiag, denS, Xtensors[indexS-1]); }
279  #ifdef CHEMPS2_MPI_COMPILATION
280  if ( MPIchemps2::owner_cdf( Prob->gL(), indexS, indexS ) == MPIRANK )
281  #endif
282  { addDiagonal2b3spin0(ikappa, memHeffDiag, denS, Ctensors[indexS-1][0][0]);
283  addDiagonal2b3spin1(ikappa, memHeffDiag, denS, Dtensors[indexS-1][0][0]); }
284  #ifdef CHEMPS2_MPI_COMPILATION
285  if ( MPIchemps2::owner_cdf( Prob->gL(), indexS+1, indexS+1 ) == MPIRANK )
286  #endif
287  { addDiagonal2c3spin0(ikappa, memHeffDiag, denS, Ctensors[indexS-1][0][1]);
288  addDiagonal2c3spin1(ikappa, memHeffDiag, denS, Dtensors[indexS-1][0][1]); }
289  }
290 
291  if (!atRight){
292  #ifdef CHEMPS2_MPI_COMPILATION
293  if ( MPIchemps2::owner_x() == MPIRANK )
294  #endif
295  { addDiagonal1B(ikappa, memHeffDiag, denS, Xtensors[indexS+1]); }
296  #ifdef CHEMPS2_MPI_COMPILATION
297  if ( MPIchemps2::owner_cdf( Prob->gL(), indexS, indexS ) == MPIRANK )
298  #endif
299  { addDiagonal2e3spin0(ikappa, memHeffDiag, denS, Ctensors[indexS+1][0][1]);
300  addDiagonal2e3spin1(ikappa, memHeffDiag, denS, Dtensors[indexS+1][0][1]); }
301  #ifdef CHEMPS2_MPI_COMPILATION
302  if ( MPIchemps2::owner_cdf( Prob->gL(), indexS+1, indexS+1 ) == MPIRANK )
303  #endif
304  { addDiagonal2f3spin0(ikappa, memHeffDiag, denS, Ctensors[indexS+1][0][0]);
305  addDiagonal2f3spin1(ikappa, memHeffDiag, denS, Dtensors[indexS+1][0][0]); }
306  }
307 
308  if ((!atLeft) && (!atRight)){
309  addDiagonal2a3spin0(ikappa, memHeffDiag, denS, Ctensors, F0tensors); //The MPI check occurs in this function
310  addDiagonal2a3spin1(ikappa, memHeffDiag, denS, Dtensors, F1tensors); //The MPI check occurs in this function
311  }
312 
313  }
314 
315 }
316 
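/*
   SolveDAVIDSON dispatches the work: without MPI, or on the master rank, the Davidson
   iteration itself runs in SolveDAVIDSON_main; all other ranks enter SolveDAVIDSON_help,
   where they wait for broadcast instructions and contribute their share of each
   matrix-vector product. The returned eigenvalue is identical on every rank.
*/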
317 double CheMPS2::Heff::SolveDAVIDSON(Sobject * denS, TensorL *** Ltensors, TensorOperator **** Atensors, TensorOperator **** Btensors, TensorOperator **** Ctensors, TensorOperator **** Dtensors, TensorS0 **** S0tensors, TensorS1 **** S1tensors, TensorF0 **** F0tensors, TensorF1 **** F1tensors, TensorQ *** Qtensors, TensorX ** Xtensors, int nLower, double ** VeffTilde) const{
318 
319  #ifdef CHEMPS2_MPI_COMPILATION
320  if ( MPIchemps2::mpi_rank() == MPI_CHEMPS2_MASTER ){
321  return SolveDAVIDSON_main(denS, Ltensors, Atensors, Btensors, Ctensors, Dtensors, S0tensors, S1tensors, F0tensors, F1tensors, Qtensors, Xtensors, nLower, VeffTilde);
322  } else {
323  return SolveDAVIDSON_help(denS, Ltensors, Atensors, Btensors, Ctensors, Dtensors, S0tensors, S1tensors, F0tensors, F1tensors, Qtensors, Xtensors, nLower, VeffTilde);
324  }
325  #else
326  return SolveDAVIDSON_main(denS, Ltensors, Atensors, Btensors, Ctensors, Dtensors, S0tensors, S1tensors, F0tensors, F1tensors, Qtensors, Xtensors, nLower, VeffTilde);
327  #endif
328 
329 }
330 
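/*
   SolveDAVIDSON_main drives the Davidson solver through its instruction protocol:
   'A' = provide the starting vector (the current Sobject, converted to symmetric
         conventions) in whichpointers[0] and the diagonal of Heff in whichpointers[1];
   'B' = a matrix-vector product is requested: read whichpointers[0] and write the result
         to whichpointers[1] (with MPI: broadcast the vector, let every rank call makeHeff,
         and reduce the partial results onto the master);
   'C' = converged: whichpointers[0] holds the eigenvector and whichpointers[1][0] the
         eigenvalue, which is finally broadcast to the helper processes.
*/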
331 double CheMPS2::Heff::SolveDAVIDSON_main(Sobject * denS, TensorL *** Ltensors, TensorOperator **** Atensors, TensorOperator **** Btensors, TensorOperator **** Ctensors, TensorOperator **** Dtensors, TensorS0 **** S0tensors, TensorS1 **** S1tensors, TensorF0 **** F0tensors, TensorF1 **** F1tensors, TensorQ *** Qtensors, TensorX ** Xtensors, int nLower, double ** VeffTilde) const{
332 
333  int inc1 = 1;
334  int veclength = denS->gKappa2index( denS->gNKappa() );
335 
336  Davidson deBoskabouter( veclength, CheMPS2::DAVIDSON_NUM_VEC,
337  CheMPS2::DAVIDSON_NUM_VEC_KEEP,
338  // CheMPS2::DAVIDSON_DMRG_RTOL,
339  dvdson_rtol,
340  CheMPS2::DAVIDSON_PRECOND_CUTOFF, CheMPS2::HEFF_debugPrint );
341  double ** whichpointers = new double*[2];
342 
343  char instruction = deBoskabouter.FetchInstruction( whichpointers );
344  assert( instruction == 'A' );
345  denS->prog2symm(); // Convert mem of Sobject to symmetric conventions
346  dcopy_(&veclength, denS->gStorage(), &inc1, whichpointers[0], &inc1); // Starting vector for Davidson is the current state of the Sobject in symmetric conventions
347  #ifdef CHEMPS2_MPI_COMPILATION
348  double * workspace = new double[ veclength ];
349  fillHeffDiag(workspace, denS, Ctensors, Dtensors, F0tensors, F1tensors, Xtensors, nLower, VeffTilde);
350  MPIchemps2::reduce_array_double( workspace, whichpointers[1], veclength, MPI_CHEMPS2_MASTER );
351  #else
352  fillHeffDiag(whichpointers[1], denS, Ctensors, Dtensors, F0tensors, F1tensors, Xtensors, nLower, VeffTilde);
353  #endif
354 
355  instruction = deBoskabouter.FetchInstruction( whichpointers );
356  while ( instruction == 'B' ){
357 
358  #ifdef CHEMPS2_MPI_COMPILATION
359  {
360  int mpi_instruction = 2;
361  MPIchemps2::broadcast_array_int( &mpi_instruction, 1, MPI_CHEMPS2_MASTER );
362  MPIchemps2::broadcast_array_double( whichpointers[0], veclength, MPI_CHEMPS2_MASTER );
363  makeHeff(whichpointers[0], workspace, denS, Ltensors, Atensors, Btensors, Ctensors, Dtensors, S0tensors, S1tensors, F0tensors, F1tensors, Qtensors, Xtensors, nLower, VeffTilde);
364  MPIchemps2::reduce_array_double( workspace, whichpointers[1], veclength, MPI_CHEMPS2_MASTER );
365  }
366  #else
367  makeHeff(whichpointers[0], whichpointers[1], denS, Ltensors, Atensors, Btensors, Ctensors, Dtensors, S0tensors, S1tensors, F0tensors, F1tensors, Qtensors, Xtensors, nLower, VeffTilde);
368  #endif
369  instruction = deBoskabouter.FetchInstruction( whichpointers );
370  }
371 
372  assert( instruction == 'C' );
373  dcopy_( &veclength, whichpointers[0], &inc1, denS->gStorage(), &inc1 ); // Copy the solution in symmetric conventions back
374  denS->symm2prog(); // Convert mem of Sobject to program conventions
375  double eigenvalue = whichpointers[1][0];
376  if (CheMPS2::HEFF_debugPrint){ std::cout << " Stats: nIt(DAVIDSON) = " << deBoskabouter.GetNumMultiplications() << std::endl; }
377  delete [] whichpointers;
378  #ifdef CHEMPS2_MPI_COMPILATION
379  delete [] workspace;
380  int mpi_instruction = 3;
381  MPIchemps2::broadcast_array_int( &mpi_instruction, 1, MPI_CHEMPS2_MASTER );
382  MPIchemps2::broadcast_array_double( &eigenvalue, 1, MPI_CHEMPS2_MASTER );
383  #endif
384  return eigenvalue;
385 
386 }
387 
388 #ifdef CHEMPS2_MPI_COMPILATION
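/*
   SolveDAVIDSON_help is the helper-rank counterpart of SolveDAVIDSON_main: after
   contributing to the diagonal, it waits for the broadcast mpi_instruction
   (2 = perform a matrix-vector product with makeHeff, 3 = receive the converged energy)
   and participates in the corresponding broadcasts and reductions.
*/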
389 double CheMPS2::Heff::SolveDAVIDSON_help(Sobject * denS, TensorL *** Ltensors, TensorOperator **** Atensors, TensorOperator **** Btensors, TensorOperator **** Ctensors, TensorOperator **** Dtensors, TensorS0 **** S0tensors, TensorS1 **** S1tensors, TensorF0 **** F0tensors, TensorF1 **** F1tensors, TensorQ *** Qtensors, TensorX ** Xtensors, int nLower, double ** VeffTilde) const{
390 
391  int veclength = denS->gKappa2index( denS->gNKappa() );
392  double * vecin = new double[ veclength ];
393  double * vecout = new double[ veclength ];
394  int mpi_instruction = -1;
395 
396  fillHeffDiag( vecout, denS, Ctensors, Dtensors, F0tensors, F1tensors, Xtensors, nLower, VeffTilde );
397  MPIchemps2::reduce_array_double( vecout, vecin, veclength, MPI_CHEMPS2_MASTER );
398  MPIchemps2::broadcast_array_int( &mpi_instruction, 1, MPI_CHEMPS2_MASTER );
399 
400  while ( mpi_instruction == 2 ){ // Mat Vec
401 
402  MPIchemps2::broadcast_array_double( vecin, veclength, MPI_CHEMPS2_MASTER );
403  makeHeff(vecin, vecout, denS, Ltensors, Atensors, Btensors, Ctensors, Dtensors, S0tensors, S1tensors, F0tensors, F1tensors, Qtensors, Xtensors, nLower, VeffTilde);
404  MPIchemps2::reduce_array_double( vecout, vecin, veclength, MPI_CHEMPS2_MASTER );
405  MPIchemps2::broadcast_array_int( &mpi_instruction, 1, MPI_CHEMPS2_MASTER );
406 
407  }
408 
409  assert( mpi_instruction == 3 ); // Receive energy
410  double eigenvalue = 0.0;
411  MPIchemps2::broadcast_array_double( &eigenvalue, 1, MPI_CHEMPS2_MASTER );
412  delete [] vecin;
413  delete [] vecout;
414 
415  return eigenvalue; // The eigenvalue is correct on every process; denS is only updated on the master process
416 
417 }
418 #endif
419 
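The Davidson instruction protocol used above ('A' to supply the start vector and diagonal, 'B' for each requested matrix-vector product, 'C' on convergence) is independent of DMRG. Below is a minimal, illustrative sketch, not part of Heff.cpp, of how the same CheMPS2::Davidson driver could be exercised on an explicit dense symmetric matrix; the function name, the column-major matrix layout, and the residual tolerance are assumptions for the example only.

#include <assert.h>
#include "Davidson.h"

// Illustrative only: lowest eigenvalue of a dense symmetric matrix "mat"
// (column-major, dimension "size"), driven exactly like SolveDAVIDSON_main.
double lowest_eigenvalue_sketch( const double * mat, const int size ){

   CheMPS2::Davidson solver( size, CheMPS2::DAVIDSON_NUM_VEC, CheMPS2::DAVIDSON_NUM_VEC_KEEP,
                             1e-10, CheMPS2::DAVIDSON_PRECOND_CUTOFF, false );
   double ** ptr = new double*[ 2 ];

   char instruction = solver.FetchInstruction( ptr );
   assert( instruction == 'A' ); // 'A': supply guess and diagonal
   for ( int i = 0; i < size; i++ ){ ptr[ 0 ][ i ] = ( i == 0 ) ? 1.0 : 0.0; }  // start vector
   for ( int i = 0; i < size; i++ ){ ptr[ 1 ][ i ] = mat[ i * ( size + 1 ) ]; } // diagonal

   instruction = solver.FetchInstruction( ptr );
   while ( instruction == 'B' ){ // 'B': matrix-vector product requested
      for ( int row = 0; row < size; row++ ){
         double sum = 0.0;
         for ( int col = 0; col < size; col++ ){ sum += mat[ row + size * col ] * ptr[ 0 ][ col ]; }
         ptr[ 1 ][ row ] = sum;
      }
      instruction = solver.FetchInstruction( ptr );
   }

   assert( instruction == 'C' ); // 'C': ptr[0] = eigenvector, ptr[1][0] = eigenvalue
   const double eigenvalue = ptr[ 1 ][ 0 ];
   delete [] ptr;
   return eigenvalue;

}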
420 