CheMPS2
DMRGoperators.cpp
1 /*
2  CheMPS2: a spin-adapted implementation of DMRG for ab initio quantum chemistry
3  Copyright (C) 2013-2016 Sebastian Wouters
4 
5  This program is free software; you can redistribute it and/or modify
6  it under the terms of the GNU General Public License as published by
7  the Free Software Foundation; either version 2 of the License, or
8  (at your option) any later version.
9 
10  This program is distributed in the hope that it will be useful,
11  but WITHOUT ANY WARRANTY; without even the implied warranty of
12  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13  GNU General Public License for more details.
14 
15  You should have received a copy of the GNU General Public License along
16  with this program; if not, write to the Free Software Foundation, Inc.,
17  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19 
20 #include <stdlib.h>
21 #include <string.h>
22 #include <iostream>
23 #include <sstream>
24 #include <sys/stat.h>
25 #include <sys/time.h>
26 #include <assert.h>
27 
28 #include "DMRG.h"
29 #include "Lapack.h"
30 #include "MPIchemps2.h"
31 #include "Special.h"
32 
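// Bookkeeping convention for isAllocated[], as used by the updateMoving*Safe* routines below:
//    isAllocated[ cnt ] == 0 : no renormalized operators allocated in slot cnt
//    isAllocated[ cnt ] == 1 : operators in slot cnt allocated for a right-moving sweep ( allocateTensors( cnt, true  ) )
//    isAllocated[ cnt ] == 2 : operators in slot cnt allocated for a left-moving sweep  ( allocateTensors( cnt, false ) )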
33 void CheMPS2::DMRG::updateMovingRightSafeFirstTime(const int cnt){
34 
35  if (isAllocated[cnt]==2){
36  deleteTensors(cnt, false);
37  isAllocated[cnt]=0;
38  }
39  if (isAllocated[cnt]==0){
40  allocateTensors(cnt, true);
41  isAllocated[cnt]=1;
42  }
43  updateMovingRight(cnt);
44 
45  if (CheMPS2::DMRG_storeRenormOptrOnDisk){
46  if (cnt>0){
47  if (isAllocated[cnt-1]==1){
48  OperatorsOnDisk(cnt-1, true, true);
49  deleteTensors(cnt-1, true);
50  isAllocated[cnt-1]=0;
51  }
52  }
53  }
54 
55 }
56 
57 void CheMPS2::DMRG::updateMovingLeftSafeFirstTime(const int cnt){
58 
59  if (isAllocated[cnt]==1){
60  deleteTensors(cnt, true);
61  isAllocated[cnt]=0;
62  }
63  if (isAllocated[cnt]==0){
64  allocateTensors(cnt, false);
65  isAllocated[cnt]=2;
66  }
67  updateMovingLeft(cnt);
68 
69  if (CheMPS2::DMRG_storeRenormOptrOnDisk){
70  if (cnt+1<L-1){
71  if (isAllocated[cnt+1]==2){
72  OperatorsOnDisk(cnt+1, false, true);
73  deleteTensors(cnt+1, false);
74  isAllocated[cnt+1]=0;
75  }
76  }
77  }
78 
79 }
80 
81 void CheMPS2::DMRG::updateMovingRightSafe(const int cnt){
82 
83  if (isAllocated[cnt]==2){
84  deleteTensors(cnt, false);
85  isAllocated[cnt]=0;
86  }
87  if (isAllocated[cnt]==0){
88  allocateTensors(cnt, true);
89  isAllocated[cnt]=1;
90  }
91  updateMovingRight(cnt);
92 
93  if (CheMPS2::DMRG_storeRenormOptrOnDisk){
94  if (cnt>0){
95  if (isAllocated[cnt-1]==1){
96  OperatorsOnDisk(cnt-1, true, true);
97  deleteTensors(cnt-1, true);
98  isAllocated[cnt-1]=0;
99  }
100  }
101  if (cnt+1<L-1){
102  if (isAllocated[cnt+1]==2){
103  deleteTensors(cnt+1, false);
104  isAllocated[cnt+1]=0;
105  }
106  }
107  if (cnt+2<L-1){
108  if (isAllocated[cnt+2]==1){
109  deleteTensors(cnt+2, true);
110  isAllocated[cnt+2]=0;
111  }
112  if (isAllocated[cnt+2]==0){
113  allocateTensors(cnt+2, false);
114  isAllocated[cnt+2]=2;
115  }
116  OperatorsOnDisk(cnt+2, false, false);
117  }
118  }
119 
120 }
121 
122 void CheMPS2::DMRG::updateMovingLeftSafe(const int cnt){
123 
124  if (isAllocated[cnt]==1){
125  deleteTensors(cnt, true);
126  isAllocated[cnt]=0;
127  }
128  if (isAllocated[cnt]==0){
129  allocateTensors(cnt, false);
130  isAllocated[cnt]=2;
131  }
132  updateMovingLeft(cnt);
133 
134  if (CheMPS2::DMRG_storeRenormOptrOnDisk){
135  if (cnt+1<L-1){
136  if (isAllocated[cnt+1]==2){
137  OperatorsOnDisk(cnt+1, false, true);
138  deleteTensors(cnt+1, false);
139  isAllocated[cnt+1]=0;
140  }
141  }
142  if (cnt-1>=0){
143  if (isAllocated[cnt-1]==1){
144  deleteTensors(cnt-1, true);
145  isAllocated[cnt-1]=0;
146  }
147  }
148  if (cnt-2>=0){
149  if (isAllocated[cnt-2]==2){
150  deleteTensors(cnt-2, false);
151  isAllocated[cnt-2]=0;
152  }
153  if (isAllocated[cnt-2]==0){
154  allocateTensors(cnt-2, true);
155  isAllocated[cnt-2]=1;
156  }
157  OperatorsOnDisk(cnt-2, true, false);
158  }
159  }
160 
161 }
162 
163 void CheMPS2::DMRG::updateMovingRightSafe2DM(const int cnt){
164 
165  if (isAllocated[cnt]==2){
166  deleteTensors(cnt, false);
167  isAllocated[cnt]=0;
168  }
169  if (isAllocated[cnt]==0){
170  allocateTensors(cnt, true);
171  isAllocated[cnt]=1;
172  }
173  updateMovingRight(cnt);
174 
175  if (CheMPS2::DMRG_storeRenormOptrOnDisk){
176  if (cnt>0){
177  if (isAllocated[cnt-1]==1){
178  OperatorsOnDisk(cnt-1, true, true);
179  deleteTensors(cnt-1, true);
180  isAllocated[cnt-1]=0;
181  }
182  }
183  if (cnt+1<L-1){
184  if (isAllocated[cnt+1]==1){
185  deleteTensors(cnt+1, true);
186  isAllocated[cnt+1]=0;
187  }
188  if (isAllocated[cnt+1]==0){
189  allocateTensors(cnt+1, false);
190  isAllocated[cnt+1]=2;
191  }
192  OperatorsOnDisk(cnt+1, false, false);
193  }
194  }
195 
196 }
197 
198 void CheMPS2::DMRG::updateMovingLeftSafe2DM(const int cnt){
199 
200  if (isAllocated[cnt]==1){
201  deleteTensors(cnt, true);
202  isAllocated[cnt]=0;
203  }
204  if (isAllocated[cnt]==0){
205  allocateTensors(cnt, false);
206  isAllocated[cnt]=2;
207  }
208  updateMovingLeft(cnt);
209 
210  if (CheMPS2::DMRG_storeRenormOptrOnDisk){
211  if (cnt+1<L-1){
212  if (isAllocated[cnt+1]==2){
213  OperatorsOnDisk(cnt+1, false, true);
214  deleteTensors(cnt+1, false);
215  isAllocated[cnt+1]=0;
216  }
217  }
218  if (cnt-1>=0){
219  if (isAllocated[cnt-1]==2){
220  deleteTensors(cnt-1, false);
221  isAllocated[cnt-1]=0;
222  }
223  if (isAllocated[cnt-1]==0){
224  allocateTensors(cnt-1, true);
225  isAllocated[cnt-1]=1;
226  }
227  OperatorsOnDisk(cnt-1, true, false);
228  }
229  }
230 
231 }
232 
233 void CheMPS2::DMRG::deleteAllBoundaryOperators(){
234 
235  for (int cnt=0; cnt<L-1; cnt++){
236  if (isAllocated[cnt]==1){ deleteTensors(cnt, true); }
237  if (isAllocated[cnt]==2){ deleteTensors(cnt, false); }
238  isAllocated[cnt] = 0;
239  }
240 
241 }
242 
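// updateMovingRight( index ) rebuilds all renormalized operators at the boundary to the right of
// site 'index' from the operators one boundary to the left and from MPS[ index ]:
//    - Ltensors ( owned by every MPI process )
//    - F0/F1/S0/S1 two-operator tensors, A/B/C/D complementary tensors, Q and X tensors, and the
//      overlap tensors with lower-lying excitations ( distributed over the MPI processes )
// The wall time spent here is accumulated in timings[ CHEMPS2_TIME_TENS_CALC ].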
243 void CheMPS2::DMRG::updateMovingRight( const int index ){
244 
245  struct timeval start, end;
246  gettimeofday( &start, NULL );
247 
248  const int dimL = denBK->gMaxDimAtBound( index );
249  const int dimR = denBK->gMaxDimAtBound( index + 1 );
250  #ifdef CHEMPS2_MPI_COMPILATION
251  const int MPIRANK = MPIchemps2::mpi_rank();
252  #endif
253 
254  #pragma omp parallel
255  {
256 
257  double * workmem = new double[ dimL * dimR ];
258 
259  //Ltensors : all processes own all Ltensors
260  #pragma omp for schedule(static) nowait
261  for ( int cnt2 = 0; cnt2 < index + 1; cnt2++ ){
262  if ( cnt2 == 0 ){
263  Ltensors[ index ][ cnt2 ]->create( MPS[ index ] );
264  } else {
265  Ltensors[ index ][ cnt2 ]->update( Ltensors[ index - 1 ][ cnt2 - 1 ], MPS[ index ], MPS[ index ], workmem );
266  }
267  }
268 
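// The two-operator and complementary loops below flatten the triangle of ( cnt2, cnt3 ) pairs into a
// single index 'global' in [ 0, k * ( k + 1 ) / 2 ), so that one omp for loop covers all pairs. A
// minimal sketch of inverting such a row-by-row enumeration is given here for clarity; the actual
// Special::invert_triangle_two may use a closed-form inversion, and only the way result[ 0 ] and
// result[ 1 ] are consumed below is taken from this file:
//
//    void invert_triangle_sketch( const int global, int * result ){
//       int row = 0;
//       int remainder = global;
//       while ( remainder > row ){ remainder -= row + 1; row++; } // row r holds r + 1 entries
//       result[ 1 ] = row;       // which row of the triangle
//       result[ 0 ] = remainder; // position within that row
//    }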
269  // Two-operator tensors : certain processes own certain two-operator tensors
270  const int k1 = index + 1;
271  const int upperbound1 = ( k1 * ( k1 + 1 ) ) / 2;
272  int result[ 2 ];
273  // No 'nowait' on the following for-loop: F0,F1,S0,S1[ index ][ cnt2 ][ cnt3 == 0 ] are required afterwards for the complementary operators
274  #ifdef CHEMPS2_MPI_COMPILATION
275  #pragma omp for schedule(dynamic)
276  #else
277  #pragma omp for schedule(static)
278  #endif
279  for ( int global = 0; global < upperbound1; global++ ){
280  Special::invert_triangle_two( global, result );
281  const int cnt2 = index - result[ 1 ];
282  const int cnt3 = result[ 0 ];
283  #ifdef CHEMPS2_MPI_COMPILATION
284  const int siteindex1 = index - cnt3 - cnt2;
285  const int siteindex2 = index - cnt3;
286  #endif
287  if ( cnt3 == 0 ){ // Every MPI process owns the Operator[ index ][ cnt2 ][ cnt3 == 0 ]
288  if ( cnt2 == 0 ){
289  F0tensors[ index ][ cnt2 ][ cnt3 ]->makenew( MPS[ index ] );
290  F1tensors[ index ][ cnt2 ][ cnt3 ]->makenew( MPS[ index ] );
291  S0tensors[ index ][ cnt2 ][ cnt3 ]->makenew( MPS[ index ] );
292  // S1[ index ][ 0 ][ cnt3 ] doesn't exist
293  } else {
294  F0tensors[ index ][ cnt2 ][ cnt3 ]->makenew( Ltensors[ index - 1 ][ cnt2 - 1 ], MPS[ index ], workmem );
295  F1tensors[ index ][ cnt2 ][ cnt3 ]->makenew( Ltensors[ index - 1 ][ cnt2 - 1 ], MPS[ index ], workmem );
296  S0tensors[ index ][ cnt2 ][ cnt3 ]->makenew( Ltensors[ index - 1 ][ cnt2 - 1 ], MPS[ index ], workmem );
297  S1tensors[ index ][ cnt2 ][ cnt3 ]->makenew( Ltensors[ index - 1 ][ cnt2 - 1 ], MPS[ index ], workmem );
298  }
299  } else {
300  #ifdef CHEMPS2_MPI_COMPILATION
301  if ( MPIchemps2::owner_cdf( L, siteindex1, siteindex2 ) == MPIRANK )
302  #endif
303  {
304  F0tensors[ index ][ cnt2 ][ cnt3 ]->update( F0tensors[ index - 1 ][ cnt2 ][ cnt3 - 1 ], MPS[ index ], MPS[ index ], workmem );
305  F1tensors[ index ][ cnt2 ][ cnt3 ]->update( F1tensors[ index - 1 ][ cnt2 ][ cnt3 - 1 ], MPS[ index ], MPS[ index ], workmem );
306  }
307  #ifdef CHEMPS2_MPI_COMPILATION
308  if ( MPIchemps2::owner_absigma( siteindex1, siteindex2 ) == MPIRANK )
309  #endif
310  {
311  S0tensors[ index ][ cnt2 ][ cnt3 ]->update( S0tensors[ index - 1 ][ cnt2 ][ cnt3 - 1 ], MPS[ index ], MPS[ index ], workmem );
312  if ( cnt2 > 0 ){ S1tensors[ index ][ cnt2 ][ cnt3 ]->update( S1tensors[ index - 1 ][ cnt2 ][ cnt3 - 1 ], MPS[ index ], MPS[ index ], workmem ); }
313  }
314  }
315  }
316 
317  // Complementary two-operator tensors : certain processes own certain complementary two-operator tensors
318  const int k2 = L - 1 - index;
319  const int upperbound2 = ( k2 * ( k2 + 1 ) ) / 2;
320  #ifdef CHEMPS2_MPI_COMPILATION
321  #pragma omp for schedule(dynamic)
322  #else
323  #pragma omp for schedule(static) nowait
324  #endif
325  for ( int global = 0; global < upperbound2; global++ ){
326  Special::invert_triangle_two( global, result );
327  const int cnt2 = k2 - 1 - result[ 1 ];
328  const int cnt3 = result[ 0 ];
329  const int siteindex1 = index + 1 + cnt3;
330  const int siteindex2 = index + 1 + cnt2 + cnt3;
331  const int irrep_prod = Irreps::directProd( denBK->gIrrep( siteindex1 ), denBK->gIrrep( siteindex2 ) );
332  #ifdef CHEMPS2_MPI_COMPILATION
333  const bool do_absigma = ( MPIchemps2::owner_absigma( siteindex1, siteindex2 ) == MPIRANK );
334  const bool do_cdf = ( MPIchemps2::owner_cdf( L, siteindex1, siteindex2 ) == MPIRANK );
335  #endif
336  if ( index == 0 ){
337  #ifdef CHEMPS2_MPI_COMPILATION
338  if ( do_absigma )
339  #endif
340  {
341  Atensors[ index ][ cnt2 ][ cnt3 ]->clear();
342  if ( cnt2 > 0 ){ Btensors[ index ][ cnt2 ][ cnt3 ]->clear(); }
343  }
344  #ifdef CHEMPS2_MPI_COMPILATION
345  if ( do_cdf )
346  #endif
347  {
348  Ctensors[ index ][ cnt2 ][ cnt3 ]->clear();
349  Dtensors[ index ][ cnt2 ][ cnt3 ]->clear();
350  }
351  } else {
352  #ifdef CHEMPS2_MPI_COMPILATION
353  if ( do_absigma )
354  #endif
355  {
356  Atensors[ index ][ cnt2 ][ cnt3 ]->update( Atensors[ index - 1 ][ cnt2 ][ cnt3 + 1 ], MPS[ index ], MPS[ index ], workmem );
357  if ( cnt2 > 0 ){ Btensors[ index ][ cnt2 ][ cnt3 ]->update( Btensors[ index - 1 ][ cnt2 ][ cnt3 + 1 ], MPS[ index ], MPS[ index ], workmem ); }
358  }
359  #ifdef CHEMPS2_MPI_COMPILATION
360  if ( do_cdf )
361  #endif
362  {
363  Ctensors[ index ][ cnt2 ][ cnt3 ]->update( Ctensors[ index - 1 ][ cnt2 ][ cnt3 + 1 ], MPS[ index ], MPS[ index ], workmem );
364  Dtensors[ index ][ cnt2 ][ cnt3 ]->update( Dtensors[ index - 1 ][ cnt2 ][ cnt3 + 1 ], MPS[ index ], MPS[ index ], workmem );
365  }
366  }
367  for ( int num = 0; num < index + 1; num++ ){
368  if ( irrep_prod == S0tensors[ index ][ num ][ 0 ]->get_irrep() ){ // Then the matrix elements are not 0 due to symm.
369  #ifdef CHEMPS2_MPI_COMPILATION
370  if ( do_absigma )
371  #endif
372  {
373  double alpha = Prob->gMxElement( index - num, index, siteindex1, siteindex2 );
374  if (( cnt2 == 0 ) && ( num == 0 )){ alpha *= 0.5; }
375  if (( cnt2 > 0 ) && ( num > 0 )){ alpha += Prob->gMxElement( index - num, index, siteindex2, siteindex1 ); }
376  Atensors[ index ][ cnt2 ][ cnt3 ]->daxpy( alpha, S0tensors[ index ][ num ][ 0 ] );
377 
378  if (( num > 0 ) && ( cnt2 > 0 )){
379  alpha = Prob->gMxElement( index - num, index, siteindex1, siteindex2 )
380  - Prob->gMxElement( index - num, index, siteindex2, siteindex1 );
381  Btensors[ index ][ cnt2 ][ cnt3 ]->daxpy( alpha, S1tensors[ index ][ num ][ 0 ]);
382  }
383  }
384  #ifdef CHEMPS2_MPI_COMPILATION
385  if ( do_cdf )
386  #endif
387  {
388  double alpha = 2 * Prob->gMxElement( index - num, siteindex1, index, siteindex2 )
389  - Prob->gMxElement( index - num, siteindex1, siteindex2, index );
390  Ctensors[ index ][ cnt2 ][ cnt3 ]->daxpy( alpha, F0tensors[ index ][ num ][ 0 ] );
391 
392  alpha = - Prob->gMxElement( index - num, siteindex1, siteindex2, index ); // Second line for Ctensors
393  Dtensors[ index ][ cnt2 ][ cnt3 ]->daxpy( alpha, F1tensors[ index ][ num ][ 0 ] );
394 
395  if ( num > 0 ){
396  alpha = 2 * Prob->gMxElement( index - num, siteindex2, index, siteindex1 )
397  - Prob->gMxElement( index - num, siteindex2, siteindex1, index );
398  Ctensors[ index ][ cnt2 ][ cnt3 ]->daxpy_transpose_tensorCD( alpha, F0tensors[ index ][ num ][ 0 ] );
399 
400  alpha = - Prob->gMxElement( index - num, siteindex2, siteindex1, index ); // Second line for Ctensors
401  Dtensors[ index ][ cnt2 ][ cnt3 ]->daxpy_transpose_tensorCD( alpha, F1tensors[ index ][ num ][ 0 ] );
402  }
403  }
404  }
405  }
406  }
407 
408  // Qtensors : certain processes own certain Qtensors --- You don't want to locally parallelize when sending and receiving buffers!
409  #ifdef CHEMPS2_MPI_COMPILATION
410  #pragma omp single
411  #else
412  #pragma omp for schedule(static) nowait
413  #endif
414  for ( int cnt2 = 0; cnt2 < L - 1 - index; cnt2++ ){
415 
416  #ifdef CHEMPS2_MPI_COMPILATION
417  const int siteindex = index + 1 + cnt2; // Corresponds to this site
418  const int owner_q = MPIchemps2::owner_q( L, siteindex );
419  #endif
420  if ( index == 0 ){
421 
422  #ifdef CHEMPS2_MPI_COMPILATION
423  if ( owner_q == MPIRANK )
424  #endif
425  {
426  Qtensors[ index ][ cnt2 ]->clear();
427  Qtensors[ index ][ cnt2 ]->AddTermSimple( MPS[ index ] );
428  }
429 
430  } else {
431 
432  #ifdef CHEMPS2_MPI_COMPILATION
433  const int owner_absigma = MPIchemps2::owner_absigma( index, siteindex );
434  const int owner_cdf = MPIchemps2::owner_cdf( L, index, siteindex );
435  if (( owner_q == owner_absigma ) && ( owner_q == owner_cdf ) && ( owner_q == MPIRANK )){ // No MPI needed
436  #endif
437 
438  double * workmemBIS = new double[ dimL * dimL ];
439  Qtensors[ index ][ cnt2 ]->update( Qtensors[ index - 1 ][ cnt2 + 1 ], MPS[ index ], MPS[ index ], workmem );
440  Qtensors[ index ][ cnt2 ]->AddTermSimple( MPS[ index ] );
441  Qtensors[ index ][ cnt2 ]->AddTermsL( Ltensors[ index - 1 ], MPS[ index ], workmemBIS, workmem );
442  Qtensors[ index ][ cnt2 ]->AddTermsAB( Atensors[ index - 1 ][ cnt2 + 1 ][ 0 ], Btensors[ index - 1 ][ cnt2 + 1 ][ 0 ], MPS[ index ], workmemBIS, workmem );
443  Qtensors[ index ][ cnt2 ]->AddTermsCD( Ctensors[ index - 1 ][ cnt2 + 1 ][ 0 ], Dtensors[ index - 1 ][ cnt2 + 1 ][ 0 ], MPS[ index ], workmemBIS, workmem );
444  delete [] workmemBIS;
445 
446  #ifdef CHEMPS2_MPI_COMPILATION
447  } else { // There's going to have to be some communication
448 
449  if (( owner_q == MPIRANK ) || ( owner_absigma == MPIRANK ) || ( owner_cdf == MPIRANK )){
450 
451  TensorQ * tempQ = new TensorQ( index + 1, denBK->gIrrep( siteindex ), true, denBK, Prob, siteindex );
452  tempQ->clear();
453 
454  // Everyone creates his/her piece
455  double * workmemBIS = new double[ dimL * dimL ];
456  if ( owner_q == MPIRANK ){
457  tempQ->update( Qtensors[ index - 1 ][ cnt2 + 1 ], MPS[ index ], MPS[ index ], workmem );
458  tempQ->AddTermSimple( MPS[ index ] );
459  tempQ->AddTermsL( Ltensors[ index - 1 ], MPS[ index ], workmemBIS, workmem );
460  }
461  if ( owner_absigma == MPIRANK ){
462  tempQ->AddTermsAB( Atensors[ index - 1 ][ cnt2 + 1 ][ 0 ], Btensors[ index - 1 ][ cnt2 + 1 ][ 0 ], MPS[ index ], workmemBIS, workmem );
463  }
464  if ( owner_cdf == MPIRANK ){
465  tempQ->AddTermsCD( Ctensors[ index - 1 ][ cnt2 + 1 ][ 0 ], Dtensors[ index - 1 ][ cnt2 + 1 ][ 0 ], MPS[ index ], workmemBIS, workmem );
466  }
467  delete [] workmemBIS;
468 
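// The dcopy_ / daxpy_ calls below are standard BLAS level-1 routines ( presumably declared through the
// included Lapack.h header ); together with the two sendreceive_tensor calls they form a small
// hand-rolled reduction of the partial Q contributions onto the rank owner_q.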
469  // Add everything to owner_q's Qtensors[index][cnt2]: replace later with custom communication group?
470  int inc = 1;
471  int arraysize = tempQ->gKappa2index( tempQ->gNKappa() );
472  double alpha = 1.0;
473  if ( owner_q == MPIRANK ){ dcopy_( &arraysize, tempQ->gStorage(), &inc, Qtensors[ index ][ cnt2 ]->gStorage(), &inc ); }
474  if ( owner_q != owner_absigma ){
475  MPIchemps2::sendreceive_tensor( tempQ, owner_absigma, owner_q, 2 * siteindex );
476  if ( owner_q == MPIRANK ){ daxpy_( &arraysize, &alpha, tempQ->gStorage(), &inc, Qtensors[ index ][ cnt2 ]->gStorage(), &inc ); }
477  }
478  if (( owner_q != owner_cdf ) && ( owner_absigma != owner_cdf )){
479  MPIchemps2::sendreceive_tensor( tempQ, owner_cdf, owner_q, 2 * siteindex + 1 );
480  if ( owner_q == MPIRANK ){ daxpy_( &arraysize, &alpha, tempQ->gStorage(), &inc, Qtensors[ index ][ cnt2 ]->gStorage(), &inc ); }
481  }
482  delete tempQ;
483 
484  }
485  }
486  #endif
487  }
488  }
489 
490  delete [] workmem;
491 
492  }
493 
494  //Xtensors
495  #ifdef CHEMPS2_MPI_COMPILATION
496  const int owner_x = MPIchemps2::owner_x();
497  #endif
498  if ( index == 0 ){
499 
500  #ifdef CHEMPS2_MPI_COMPILATION
501  if ( owner_x == MPIRANK )
502  #endif
503  { Xtensors[ index ]->update( MPS[ index ] ); }
504 
505  } else {
506 
507  #ifdef CHEMPS2_MPI_COMPILATION
508  //Make sure that owner_x has all required tensors to construct X. Not as optimal as the Q-tensor case, but an easier hack.
509  const int owner_q = MPIchemps2::owner_q( L, index );
510  const int owner_absigma = MPIchemps2::owner_absigma( index, index );
511  const int owner_cdf = MPIchemps2::owner_cdf( L, index, index );
512  const int Idiff = 0; // Irreps::directProd( denBK->gIrrep( index ), denBK->gIrrep( index ) );
513 
514  if ( owner_x != owner_q ){
515  if ( owner_x == MPIRANK ){ Qtensors[ index - 1 ][ 0 ] = new TensorQ( index, denBK->gIrrep( index ), true, denBK, Prob, index ); }
516  if (( owner_x == MPIRANK ) || ( owner_q == MPIRANK )){ MPIchemps2::sendreceive_tensor( Qtensors[ index - 1 ][ 0 ], owner_q, owner_x, 3 * L + 3 ); }
517  }
518 
519  if ( owner_x != owner_absigma ){
520  if ( owner_x == MPIRANK ){ Atensors[ index - 1 ][ 0 ][ 0 ] = new TensorOperator( index, 0, 2, Idiff, true, true, false, denBK, denBK ); }
521  if (( owner_x == MPIRANK ) || ( owner_absigma == MPIRANK )){ MPIchemps2::sendreceive_tensor( Atensors[ index - 1 ][ 0 ][ 0 ], owner_absigma, owner_x, 3 * L + 4 ); }
522  }
523 
524  if ( owner_x != owner_cdf ){
525  if ( owner_x == MPIRANK ){
526  Ctensors[ index - 1 ][ 0 ][ 0 ] = new TensorOperator( index, 0, 0, Idiff, true, true, false, denBK, denBK );
527  Dtensors[ index - 1 ][ 0 ][ 0 ] = new TensorOperator( index, 2, 0, Idiff, true, true, false, denBK, denBK );
528  }
529  if (( owner_x == MPIRANK ) || ( owner_cdf == MPIRANK )){
530  MPIchemps2::sendreceive_tensor( Ctensors[ index - 1 ][ 0 ][ 0 ], owner_cdf, owner_x, 3 * L + 5 );
531  MPIchemps2::sendreceive_tensor( Dtensors[ index - 1 ][ 0 ][ 0 ], owner_cdf, owner_x, 3 * L + 6 );
532  }
533  }
534 
535  if ( owner_x == MPIRANK ){
536  #endif
537 
538  Xtensors[ index ]->update( MPS[ index ], Ltensors[ index - 1 ],
539  Xtensors[ index - 1 ],
540  Qtensors[ index - 1 ][ 0 ],
541  Atensors[ index - 1 ][ 0 ][ 0 ],
542  Ctensors[ index - 1 ][ 0 ][ 0 ],
543  Dtensors[ index - 1 ][ 0 ][ 0 ] );
544 
545  #ifdef CHEMPS2_MPI_COMPILATION
546  if ( owner_x != owner_q ){ delete Qtensors[ index - 1 ][ 0 ]; }
547  if ( owner_x != owner_absigma ){ delete Atensors[ index - 1 ][ 0 ][ 0 ]; }
548  if ( owner_x != owner_cdf ){ delete Ctensors[ index - 1 ][ 0 ][ 0 ];
549  delete Dtensors[ index - 1 ][ 0 ][ 0 ]; }
550  }
551  #endif
552 
553  }
554 
555  //Otensors : certain processes own certain excitations
556  if ( Exc_activated ){
557  for ( int state = 0; state < nStates-1; state++ ){
558  #ifdef CHEMPS2_MPI_COMPILATION
559  if ( MPIchemps2::owner_specific_excitation( L, state ) == MPIRANK )
560  #endif
561  {
562  if ( index == 0 ){
563  Exc_Overlaps[ state ][ index ]->create( MPS[ index ], Exc_MPSs[ state ][ index ] );
564  } else {
565  Exc_Overlaps[ state ][ index ]->update_ownmem( MPS[ index ], Exc_MPSs[ state ][ index ], Exc_Overlaps[ state ][ index - 1 ] );
566  }
567  }
568  }
569  }
570 
571  gettimeofday( &end, NULL );
572  timings[ CHEMPS2_TIME_TENS_CALC ] += ( end.tv_sec - start.tv_sec ) + 1e-6 * ( end.tv_usec - start.tv_usec );
573 
574 }
575 
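// updateMovingLeft( index ) mirrors updateMovingRight( index ): the same operator families are
// rebuilt, but now from MPS[ index + 1 ] and from the previously computed tensors stored in
// slot index + 1 of the operator arrays ( one boundary further to the right ).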
576 void CheMPS2::DMRG::updateMovingLeft( const int index ){
577 
578  struct timeval start, end;
579  gettimeofday( &start, NULL );
580 
581  const int dimL = denBK->gMaxDimAtBound( index + 1 );
582  const int dimR = denBK->gMaxDimAtBound( index + 2 );
583  #ifdef CHEMPS2_MPI_COMPILATION
584  const int MPIRANK = MPIchemps2::mpi_rank();
585  #endif
586 
587  #pragma omp parallel
588  {
589 
590  double * workmem = new double[ dimL * dimR ];
591 
592  // Ltensors : all processes own all Ltensors
593  #pragma omp for schedule(static) nowait
594  for ( int cnt2 = 0; cnt2 < L - 1 - index; cnt2++ ){
595  if ( cnt2 == 0 ){
596  Ltensors[ index ][ cnt2 ]->create( MPS[ index + 1 ] );
597  } else {
598  Ltensors[ index ][ cnt2 ]->update( Ltensors[ index + 1 ][ cnt2 - 1 ], MPS[ index + 1 ], MPS[ index + 1 ], workmem );
599  }
600  }
601 
602  // Two-operator tensors : certain processes own certain two-operator tensors
603  const int k1 = L - 1 - index;
604  const int upperbound1 = ( k1 * ( k1 + 1 ) ) / 2;
605  int result[ 2 ];
606  // No 'nowait' on the following for-loop: F0,F1,S0,S1[ index ][ cnt2 ][ cnt3 == 0 ] are required afterwards for the complementary operators
607  #ifdef CHEMPS2_MPI_COMPILATION
608  #pragma omp for schedule(dynamic)
609  #else
610  #pragma omp for schedule(static)
611  #endif
612  for ( int global = 0; global < upperbound1; global++ ){
613  Special::invert_triangle_two( global, result );
614  const int cnt2 = k1 - 1 - result[ 1 ];
615  const int cnt3 = result[ 0 ];
616  #ifdef CHEMPS2_MPI_COMPILATION
617  const int siteindex1 = index + 1 + cnt3;
618  const int siteindex2 = index + 1 + cnt2 + cnt3;
619  #endif
620  if ( cnt3 == 0 ){ // Every MPI process owns the Operator[ index ][ cnt2 ][ cnt3==0 ]
621  if ( cnt2 == 0 ){
622  F0tensors[ index ][ cnt2 ][ cnt3 ]->makenew( MPS[ index + 1 ] );
623  F1tensors[ index ][ cnt2 ][ cnt3 ]->makenew( MPS[ index + 1 ] );
624  S0tensors[ index ][ cnt2 ][ cnt3 ]->makenew( MPS[ index + 1 ] );
625  //S1[index][0] doesn't exist
626  } else {
627  F0tensors[ index ][ cnt2 ][ cnt3 ]->makenew( Ltensors[ index + 1 ][ cnt2 - 1 ], MPS[ index + 1 ], workmem );
628  F1tensors[ index ][ cnt2 ][ cnt3 ]->makenew( Ltensors[ index + 1 ][ cnt2 - 1 ], MPS[ index + 1 ], workmem );
629  S0tensors[ index ][ cnt2 ][ cnt3 ]->makenew( Ltensors[ index + 1 ][ cnt2 - 1 ], MPS[ index + 1 ], workmem );
630  S1tensors[ index ][ cnt2 ][ cnt3 ]->makenew( Ltensors[ index + 1 ][ cnt2 - 1 ], MPS[ index + 1 ], workmem );
631  }
632  } else {
633  #ifdef CHEMPS2_MPI_COMPILATION
634  if ( MPIchemps2::owner_cdf( L, siteindex1, siteindex2 ) == MPIRANK )
635  #endif
636  {
637  F0tensors[ index ][ cnt2 ][ cnt3 ]->update( F0tensors[ index + 1 ][ cnt2 ][ cnt3 - 1 ], MPS[ index + 1 ], MPS[ index + 1 ], workmem );
638  F1tensors[ index ][ cnt2 ][ cnt3 ]->update( F1tensors[ index + 1 ][ cnt2 ][ cnt3 - 1 ], MPS[ index + 1 ], MPS[ index + 1 ], workmem );
639  }
640  #ifdef CHEMPS2_MPI_COMPILATION
641  if ( MPIchemps2::owner_absigma( siteindex1, siteindex2 ) == MPIRANK )
642  #endif
643  {
644  S0tensors[ index ][ cnt2 ][ cnt3 ]->update( S0tensors[ index + 1 ][ cnt2 ][ cnt3 - 1 ], MPS[ index + 1 ], MPS[ index + 1 ], workmem );
645  if ( cnt2 > 0 ){ S1tensors[ index ][ cnt2 ][ cnt3 ]->update( S1tensors[ index + 1 ][ cnt2 ][ cnt3 - 1 ], MPS[ index + 1 ], MPS[ index + 1 ], workmem ); }
646  }
647  }
648  }
649 
650  // Complementary two-operator tensors : certain processes own certain complementary two-operator tensors
651  const int k2 = index + 1;
652  const int upperbound2 = ( k2 * ( k2 + 1 ) ) / 2;
653  #ifdef CHEMPS2_MPI_COMPILATION
654  #pragma omp for schedule(dynamic)
655  #else
656  #pragma omp for schedule(static) nowait
657  #endif
658  for ( int global = 0; global < upperbound2; global++ ){
659  Special::invert_triangle_two( global, result );
660  const int cnt2 = k2 - 1 - result[ 1 ];
661  const int cnt3 = result[ 0 ];
662  const int siteindex1 = index - cnt3 - cnt2;
663  const int siteindex2 = index - cnt3;
664  const int irrep_prod = Irreps::directProd( denBK->gIrrep( siteindex1 ), denBK->gIrrep( siteindex2 ) );
665  #ifdef CHEMPS2_MPI_COMPILATION
666  const bool do_absigma = ( MPIchemps2::owner_absigma( siteindex1, siteindex2 ) == MPIRANK );
667  const bool do_cdf = ( MPIchemps2::owner_cdf( L, siteindex1, siteindex2 ) == MPIRANK );
668  #endif
669  if ( index == L - 2 ){
670  #ifdef CHEMPS2_MPI_COMPILATION
671  if ( do_absigma )
672  #endif
673  {
674  Atensors[ index ][ cnt2 ][ cnt3 ]->clear();
675  if ( cnt2 > 0 ){ Btensors[ index ][ cnt2 ][ cnt3 ]->clear(); }
676  }
677  #ifdef CHEMPS2_MPI_COMPILATION
678  if ( do_cdf )
679  #endif
680  {
681  Ctensors[ index ][ cnt2 ][ cnt3 ]->clear();
682  Dtensors[ index ][ cnt2 ][ cnt3 ]->clear();
683  }
684  } else {
685  #ifdef CHEMPS2_MPI_COMPILATION
686  if ( do_absigma )
687  #endif
688  {
689  Atensors[ index ][ cnt2 ][ cnt3 ]->update( Atensors[ index + 1 ][ cnt2 ][ cnt3 + 1 ], MPS[ index + 1 ], MPS[ index + 1 ], workmem );
690  if ( cnt2 > 0 ){ Btensors[ index ][ cnt2 ][ cnt3 ]->update( Btensors[ index + 1 ][ cnt2 ][ cnt3 + 1 ], MPS[ index + 1 ], MPS[ index + 1 ], workmem ); }
691  }
692  #ifdef CHEMPS2_MPI_COMPILATION
693  if ( do_cdf )
694  #endif
695  {
696  Ctensors[ index ][ cnt2 ][ cnt3 ]->update( Ctensors[ index + 1 ][ cnt2 ][ cnt3 + 1 ], MPS[ index + 1 ], MPS[ index + 1 ], workmem );
697  Dtensors[ index ][ cnt2 ][ cnt3 ]->update( Dtensors[ index + 1 ][ cnt2 ][ cnt3 + 1 ], MPS[ index + 1 ], MPS[ index + 1 ], workmem );
698  }
699  }
700  for ( int num = 0; num < L - index - 1; num++ ){
701  if ( irrep_prod == S0tensors[ index ][ num ][ 0 ]->get_irrep() ){ // Then the matrix elements are not 0 due to symm.
702  #ifdef CHEMPS2_MPI_COMPILATION
703  if ( do_absigma )
704  #endif
705  {
706  double alpha = Prob->gMxElement( siteindex1, siteindex2, index + 1, index + 1 + num );
707  if (( cnt2 == 0 ) && ( num == 0 )) alpha *= 0.5;
708  if (( cnt2 > 0 ) && ( num > 0 )) alpha += Prob->gMxElement( siteindex1, siteindex2, index + 1 + num, index + 1 );
709  Atensors[ index ][ cnt2 ][ cnt3 ]->daxpy( alpha, S0tensors[ index ][ num ][ 0 ]);
710 
711  if (( num > 0 ) && ( cnt2 > 0 )){
712  alpha = Prob->gMxElement( siteindex1, siteindex2, index + 1, index + 1 + num )
713  - Prob->gMxElement( siteindex1, siteindex2, index + 1 + num, index + 1 );
714  Btensors[ index ][ cnt2 ][ cnt3 ]->daxpy( alpha, S1tensors[ index ][ num ][ 0 ] );
715  }
716  }
717  #ifdef CHEMPS2_MPI_COMPILATION
718  if ( do_cdf )
719  #endif
720  {
721  double alpha = 2 * Prob->gMxElement( siteindex1, index + 1, siteindex2, index + 1 + num )
722  - Prob->gMxElement( siteindex1, index + 1, index + 1 + num, siteindex2 );
723  Ctensors[ index ][ cnt2 ][ cnt3 ]->daxpy( alpha, F0tensors[ index ][ num ][ 0 ]);
724 
725  alpha = - Prob->gMxElement( siteindex1, index + 1, index + 1 + num, siteindex2 ); // Second line for Ctensors
726  Dtensors[ index ][ cnt2 ][ cnt3 ]->daxpy( alpha, F1tensors[ index ][ num ][ 0 ]);
727 
728  if ( num > 0 ){
729  alpha = 2 * Prob->gMxElement( siteindex1, index + 1 + num, siteindex2, index + 1 )
730  - Prob->gMxElement( siteindex1, index + 1 + num, index + 1, siteindex2 );
731  Ctensors[ index ][ cnt2 ][ cnt3 ]->daxpy_transpose_tensorCD( alpha, F0tensors[ index ][ num ][ 0 ] );
732 
733  alpha = - Prob->gMxElement( siteindex1, index + 1 + num, index + 1, siteindex2 ); // Second line for Ctensors
734  Dtensors[ index ][ cnt2 ][ cnt3 ]->daxpy_transpose_tensorCD( alpha, F1tensors[ index ][ num ][ 0 ] );
735  }
736  }
737  }
738  }
739  }
740 
741  // Qtensors : certain processes own certain Qtensors --- You don't want to locally parallelize when sending and receiving buffers!
742  #ifdef CHEMPS2_MPI_COMPILATION
743  #pragma omp single
744  #else
745  #pragma omp for schedule(static) nowait
746  #endif
747  for ( int cnt2 = 0; cnt2 < index + 1; cnt2++ ){
748 
749  #ifdef CHEMPS2_MPI_COMPILATION
750  const int siteindex = index - cnt2; // Corresponds to this site
751  const int owner_q = MPIchemps2::owner_q( L, siteindex );
752  #endif
753  if ( index == L - 2 ){
754 
755  #ifdef CHEMPS2_MPI_COMPILATION
756  if ( owner_q == MPIRANK )
757  #endif
758  {
759  Qtensors[ index ][ cnt2 ]->clear();
760  Qtensors[ index ][ cnt2 ]->AddTermSimple( MPS[ index + 1 ] );
761  }
762 
763  } else {
764 
765  #ifdef CHEMPS2_MPI_COMPILATION
766  const int owner_absigma = MPIchemps2::owner_absigma( siteindex, index + 1 );
767  const int owner_cdf = MPIchemps2::owner_cdf( L, siteindex, index + 1 );
768  if (( owner_q == owner_absigma ) && ( owner_q == owner_cdf ) && ( owner_q == MPIRANK )){ // No MPI needed
769  #endif
770 
771  double * workmemBIS = new double[ dimR * dimR ];
772  Qtensors[ index ][ cnt2 ]->update( Qtensors[ index + 1 ][ cnt2 + 1 ], MPS[ index + 1 ], MPS[ index + 1 ], workmem );
773  Qtensors[ index ][ cnt2 ]->AddTermSimple( MPS[ index + 1 ] );
774  Qtensors[ index ][ cnt2 ]->AddTermsL( Ltensors[ index + 1 ], MPS[ index + 1 ], workmemBIS, workmem );
775  Qtensors[ index ][ cnt2 ]->AddTermsAB( Atensors[ index + 1 ][ cnt2 + 1 ][ 0 ], Btensors[ index + 1 ][ cnt2 + 1 ][ 0 ], MPS[ index + 1 ], workmemBIS, workmem );
776  Qtensors[ index ][ cnt2 ]->AddTermsCD( Ctensors[ index + 1 ][ cnt2 + 1 ][ 0 ], Dtensors[ index + 1 ][ cnt2 + 1 ][ 0 ], MPS[ index + 1 ], workmemBIS, workmem );
777  delete [] workmemBIS;
778 
779  #ifdef CHEMPS2_MPI_COMPILATION
780  } else { // There's going to have to be some communication
781 
782  if (( owner_q == MPIRANK ) || ( owner_absigma == MPIRANK ) || ( owner_cdf == MPIRANK )){
783 
784  TensorQ * tempQ = new TensorQ( index + 1, denBK->gIrrep( siteindex ), false, denBK, Prob, siteindex );
785  tempQ->clear();
786 
787  // Everyone creates his/her piece
788  double * workmemBIS = new double[ dimR * dimR ];
789  if ( owner_q == MPIRANK ){
790  tempQ->update( Qtensors[ index + 1 ][ cnt2 + 1 ], MPS[ index + 1 ], MPS[ index + 1 ], workmem );
791  tempQ->AddTermSimple( MPS[ index + 1 ] );
792  tempQ->AddTermsL( Ltensors[ index + 1 ], MPS[ index + 1 ], workmemBIS, workmem );
793  }
794  if ( owner_absigma == MPIRANK ){
795  tempQ->AddTermsAB( Atensors[ index + 1 ][ cnt2 + 1 ][ 0 ], Btensors[ index + 1 ][ cnt2 + 1 ][ 0 ], MPS[ index + 1 ], workmemBIS, workmem );
796  }
797  if ( owner_cdf == MPIRANK ){
798  tempQ->AddTermsCD( Ctensors[ index + 1 ][ cnt2 + 1 ][ 0 ], Dtensors[ index + 1 ][ cnt2 + 1 ][ 0 ], MPS[ index + 1 ], workmemBIS, workmem );
799  }
800  delete [] workmemBIS;
801 
802  // Add everything to owner_q's Qtensors[index][cnt2]: replace later with custom communication group?
803  int inc = 1;
804  int arraysize = tempQ->gKappa2index( tempQ->gNKappa() );
805  double alpha = 1.0;
806  if ( owner_q == MPIRANK ){ dcopy_( &arraysize, tempQ->gStorage(), &inc, Qtensors[index][cnt2]->gStorage(), &inc ); }
807  if ( owner_q != owner_absigma ){
808  MPIchemps2::sendreceive_tensor( tempQ, owner_absigma, owner_q, 2 * siteindex );
809  if ( owner_q == MPIRANK ){ daxpy_( &arraysize, &alpha, tempQ->gStorage(), &inc, Qtensors[ index ][ cnt2 ]->gStorage(), &inc ); }
810  }
811  if (( owner_q != owner_cdf ) && ( owner_absigma != owner_cdf )){
812  MPIchemps2::sendreceive_tensor( tempQ, owner_cdf, owner_q, 2 * siteindex + 1 );
813  if ( owner_q == MPIRANK ){ daxpy_( &arraysize, &alpha, tempQ->gStorage(), &inc, Qtensors[ index ][ cnt2 ]->gStorage(), &inc ); }
814  }
815  delete tempQ;
816 
817  }
818  }
819  #endif
820  }
821  }
822 
823  delete [] workmem;
824 
825  }
826 
827  //Xtensors
828  #ifdef CHEMPS2_MPI_COMPILATION
829  const int owner_x = MPIchemps2::owner_x();
830  #endif
831  if ( index == L - 2 ){
832 
833  #ifdef CHEMPS2_MPI_COMPILATION
834  if ( owner_x == MPIRANK )
835  #endif
836  { Xtensors[ index ]->update( MPS[ index + 1 ] ); }
837 
838  } else {
839 
840  #ifdef CHEMPS2_MPI_COMPILATION
841  //Make sure that owner_x has all required tensors to construct X. Not as optimal as the Q-tensor case, but an easier hack.
842  const int owner_q = MPIchemps2::owner_q( L, index + 1 );
843  const int owner_absigma = MPIchemps2::owner_absigma( index + 1, index + 1 );
844  const int owner_cdf = MPIchemps2::owner_cdf( L, index + 1, index + 1 );
845  const int Idiff = 0; // Irreps::directProd( denBK->gIrrep( index + 1 ), denBK->gIrrep( index + 1 ) );
846 
847  if ( owner_x != owner_q ){
848  if ( owner_x == MPIRANK ){ Qtensors[ index + 1 ][ 0 ] = new TensorQ( index + 2, denBK->gIrrep( index + 1 ), false, denBK, Prob, index + 1 ); }
849  if (( owner_x == MPIRANK ) || ( owner_q == MPIRANK )){ MPIchemps2::sendreceive_tensor( Qtensors[ index + 1 ][ 0 ], owner_q, owner_x, 3 * L + 3 ); }
850  }
851 
852  if ( owner_x != owner_absigma ){
853  if ( owner_x == MPIRANK ){ Atensors[ index + 1 ][ 0 ][ 0 ] = new TensorOperator( index + 2, 0, 2, Idiff, false, true, false, denBK, denBK ); }
854  if (( owner_x == MPIRANK ) || ( owner_absigma == MPIRANK )){ MPIchemps2::sendreceive_tensor( Atensors[ index + 1 ][ 0 ][ 0 ], owner_absigma, owner_x, 3 * L + 4 ); }
855  }
856 
857  if ( owner_x != owner_cdf ){
858  if ( owner_x == MPIRANK ){
859  Ctensors[ index + 1 ][ 0 ][ 0 ] = new TensorOperator( index + 2, 0, 0, Idiff, false, true, false, denBK, denBK );
860  Dtensors[ index + 1 ][ 0 ][ 0 ] = new TensorOperator( index + 2, 2, 0, Idiff, false, false, false, denBK, denBK );
861  }
862  if (( owner_x == MPIRANK ) || ( owner_cdf == MPIRANK )){
863  MPIchemps2::sendreceive_tensor( Ctensors[ index + 1 ][ 0 ][ 0 ], owner_cdf, owner_x, 3 * L + 5 );
864  MPIchemps2::sendreceive_tensor( Dtensors[ index + 1 ][ 0 ][ 0 ], owner_cdf, owner_x, 3 * L + 6 );
865  }
866  }
867 
868  if ( owner_x == MPIRANK ){
869  #endif
870 
871  Xtensors[ index ]->update( MPS[ index + 1 ], Ltensors[ index + 1 ],
872  Xtensors[ index + 1 ],
873  Qtensors[ index + 1 ][ 0 ],
874  Atensors[ index + 1 ][ 0 ][ 0 ],
875  Ctensors[ index + 1 ][ 0 ][ 0 ],
876  Dtensors[ index + 1 ][ 0 ][ 0 ] );
877 
878  #ifdef CHEMPS2_MPI_COMPILATION
879  if ( owner_x != owner_q ){ delete Qtensors[ index + 1 ][ 0 ]; }
880  if ( owner_x != owner_absigma ){ delete Atensors[ index + 1 ][ 0 ][ 0 ]; }
881  if ( owner_x != owner_cdf ){ delete Ctensors[ index + 1 ][ 0 ][ 0 ];
882  delete Dtensors[ index + 1 ][ 0 ][ 0 ]; }
883  }
884  #endif
885 
886  }
887 
888  //Otensors : certain processes own certain excitations
889  if ( Exc_activated ){
890  for ( int state = 0; state < nStates - 1; state++ ){
891  #ifdef CHEMPS2_MPI_COMPILATION
892  if ( MPIchemps2::owner_specific_excitation( L, state ) == MPIRANK )
893  #endif
894  {
895  if ( index == L - 2 ){
896  Exc_Overlaps[ state ][ index ]->create( MPS[ index + 1 ], Exc_MPSs[ state ][ index + 1 ] );
897  } else {
898  Exc_Overlaps[ state ][ index ]->update_ownmem( MPS[ index + 1 ], Exc_MPSs[ state ][ index + 1 ], Exc_Overlaps[ state ][ index + 1 ] );
899  }
900  }
901  }
902  }
903 
904  gettimeofday( &end, NULL );
905  timings[ CHEMPS2_TIME_TENS_CALC ] += ( end.tv_sec - start.tv_sec ) + 1e-6 * ( end.tv_usec - start.tv_usec );
906 
907 }
908 
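// MPI ownership pattern used in allocateTensors / deleteTensors / OperatorsOnDisk:
//    - Ltensors                        : every process
//    - F0/F1 and C/D tensors           : the process MPIchemps2::owner_cdf( L, site1, site2 )
//    - S0/S1 and A/B tensors           : the process MPIchemps2::owner_absigma( site1, site2 )
//    - Qtensors                        : the process MPIchemps2::owner_q( L, site )
//    - Xtensors                        : the process MPIchemps2::owner_x()
//    - Otensors ( excitation overlaps ): the process MPIchemps2::owner_specific_excitation( L, state )
// Exception: every process keeps the cnt3 == 0 two-operator tensors. Tensors which a process does not
// own are stored as NULL pointers when compiled with MPI.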
909 void CheMPS2::DMRG::allocateTensors(const int index, const bool movingRight){
910 
911  struct timeval start, end;
912  gettimeofday(&start, NULL);
913 
914  #ifdef CHEMPS2_MPI_COMPILATION
915  const int MPIRANK = MPIchemps2::mpi_rank();
916  #endif
917 
918  if (movingRight){
919 
920  // Ltensors : all processes own all Ltensors
921  // To right: Ltens[cnt][cnt2] = operator on site cnt-cnt2; at boundary cnt+1
922  Ltensors[ index ] = new TensorL * [ index + 1 ];
923  for ( int cnt2 = 0; cnt2 < index + 1; cnt2++ ){ Ltensors[ index ][ cnt2 ] = new TensorL( index + 1, denBK->gIrrep( index - cnt2 ), movingRight, denBK, denBK ); }
924 
925  //Two-operator tensors : certain processes own certain two-operator tensors
926  //To right: F0tens[cnt][cnt2][cnt3] = operators on sites cnt-cnt3-cnt2 and cnt-cnt3; at boundary cnt+1
927  F0tensors[index] = new TensorF0 ** [index+1];
928  F1tensors[index] = new TensorF1 ** [index+1];
929  S0tensors[index] = new TensorS0 ** [index+1];
930  S1tensors[index] = new TensorS1 ** [index+1];
931  for (int cnt2=0; cnt2<(index+1); cnt2++){
932  F0tensors[index][cnt2] = new TensorF0 * [index-cnt2+1];
933  F1tensors[index][cnt2] = new TensorF1 * [index-cnt2+1];
934  S0tensors[index][cnt2] = new TensorS0 * [index-cnt2+1];
935  if (cnt2>0){ S1tensors[index][cnt2] = new TensorS1 * [index-cnt2+1]; }
936  for (int cnt3=0; cnt3<(index-cnt2+1); cnt3++){
937  const int Iprod = Irreps::directProd(denBK->gIrrep(index-cnt2-cnt3),denBK->gIrrep(index-cnt3));
938  #ifdef CHEMPS2_MPI_COMPILATION
939  if (( cnt3 == 0 ) || ( MPIchemps2::owner_cdf(L, index-cnt2-cnt3, index-cnt3) == MPIRANK )){
940  #endif
941  F0tensors[index][cnt2][cnt3] = new TensorF0(index+1,Iprod,movingRight,denBK);
942  F1tensors[index][cnt2][cnt3] = new TensorF1(index+1,Iprod,movingRight,denBK);
943  #ifdef CHEMPS2_MPI_COMPILATION
944  } else {
945  F0tensors[index][cnt2][cnt3] = NULL;
946  F1tensors[index][cnt2][cnt3] = NULL;
947  }
948  if (( cnt3 == 0 ) || ( MPIchemps2::owner_absigma(index-cnt2-cnt3, index-cnt3) == MPIRANK )){
949  #endif
950  S0tensors[index][cnt2][cnt3] = new TensorS0(index+1,Iprod,movingRight,denBK);
951  if (cnt2>0){ S1tensors[index][cnt2][cnt3] = new TensorS1(index+1,Iprod,movingRight,denBK); }
952  #ifdef CHEMPS2_MPI_COMPILATION
953  } else {
954  S0tensors[index][cnt2][cnt3] = NULL;
955  if (cnt2>0){ S1tensors[index][cnt2][cnt3] = NULL; }
956  }
957  #endif
958  }
959  }
960 
961  //Complementary two-operator tensors : certain processes own certain complementary two-operator tensors
962  //To right: Atens[cnt][cnt2][cnt3] = operators on sites cnt+1+cnt3 and cnt+1+cnt2+cnt3; at boundary cnt+1
963  Atensors[index] = new TensorOperator ** [L-1-index];
964  Btensors[index] = new TensorOperator ** [L-1-index];
965  Ctensors[index] = new TensorOperator ** [L-1-index];
966  Dtensors[index] = new TensorOperator ** [L-1-index];
967  for (int cnt2=0; cnt2<L-1-index; cnt2++){
968  Atensors[index][cnt2] = new TensorOperator * [L-1-index-cnt2];
969  if (cnt2>0){ Btensors[index][cnt2] = new TensorOperator * [L-1-index-cnt2]; }
970  Ctensors[index][cnt2] = new TensorOperator * [L-1-index-cnt2];
971  Dtensors[index][cnt2] = new TensorOperator * [L-1-index-cnt2];
972  for (int cnt3=0; cnt3<L-1-index-cnt2; cnt3++){
973  const int Idiff = Irreps::directProd(denBK->gIrrep(index+1+cnt2+cnt3),denBK->gIrrep(index+1+cnt3));
974  #ifdef CHEMPS2_MPI_COMPILATION
975  if ( MPIchemps2::owner_absigma(index+1+cnt3, index+1+cnt2+cnt3) == MPIRANK ){
976  #endif
977  Atensors[index][cnt2][cnt3] = new TensorOperator( index+1, 0, 2, Idiff, movingRight, true, false, denBK, denBK );
978  if (cnt2>0){ Btensors[index][cnt2][cnt3] = new TensorOperator( index+1, 2, 2, Idiff, movingRight, true, false, denBK, denBK ); }
979  #ifdef CHEMPS2_MPI_COMPILATION
980  } else {
981  Atensors[index][cnt2][cnt3] = NULL;
982  if (cnt2>0){ Btensors[index][cnt2][cnt3] = NULL; }
983  }
984  if ( MPIchemps2::owner_cdf(L, index+1+cnt3, index+1+cnt2+cnt3) == MPIRANK ){
985  #endif
986  Ctensors[index][cnt2][cnt3] = new TensorOperator( index+1, 0, 0, Idiff, movingRight, true, false, denBK, denBK );
987  Dtensors[index][cnt2][cnt3] = new TensorOperator( index+1, 2, 0, Idiff, movingRight, movingRight, false, denBK, denBK );
988  #ifdef CHEMPS2_MPI_COMPILATION
989  } else {
990  Ctensors[index][cnt2][cnt3] = NULL;
991  Dtensors[index][cnt2][cnt3] = NULL;
992  }
993  #endif
994  }
995  }
996 
997  //Qtensors
998  //To right: Qtens[cnt][cnt2] = operator on site cnt+1+cnt2; at boundary cnt+1
999  Qtensors[index] = new TensorQ * [L-1-index];
1000  for (int cnt2=0; cnt2<L-1-index; cnt2++){
1001  #ifdef CHEMPS2_MPI_COMPILATION
1002  if ( MPIchemps2::owner_q( L, index+1+cnt2 ) == MPIRANK ){
1003  #endif
1004  Qtensors[index][cnt2] = new TensorQ(index+1,denBK->gIrrep(index+1+cnt2),movingRight,denBK,Prob,index+1+cnt2);
1005  #ifdef CHEMPS2_MPI_COMPILATION
1006  } else { Qtensors[index][cnt2] = NULL; }
1007  #endif
1008  }
1009 
1010  //Xtensors : a certain process owns the Xtensors
1011  #ifdef CHEMPS2_MPI_COMPILATION
1012  if ( MPIchemps2::owner_x() == MPIRANK ){
1013  #endif
1014  Xtensors[index] = new TensorX(index+1,movingRight,denBK,Prob);
1015  #ifdef CHEMPS2_MPI_COMPILATION
1016  } else { Xtensors[index] = NULL; }
1017  #endif
1018 
1019  //Otensors : certain processes own certain excitations
1020  if (Exc_activated){
1021  for (int state=0; state<nStates-1; state++){
1022  #ifdef CHEMPS2_MPI_COMPILATION
1023  if ( MPIchemps2::owner_specific_excitation( L, state ) == MPIRANK )
1024  #endif
1025  { Exc_Overlaps[state][index] = new TensorO( index + 1, movingRight, denBK, Exc_BKs[ state ] ); }
1026  }
1027  }
1028 
1029  } else {
1030 
1031  // Ltensors : all processes own all Ltensors
1032  // To left: Ltens[cnt][cnt2] = operator on site cnt+1+cnt2; at boundary cnt+1
1033  Ltensors[ index ] = new TensorL * [ L - 1 - index ];
1034  for ( int cnt2 = 0; cnt2 < L - 1 - index; cnt2++ ){ Ltensors[ index ][ cnt2 ] = new TensorL( index + 1, denBK->gIrrep( index + 1 + cnt2 ), movingRight, denBK, denBK ); }
1035 
1036  //Two-operator tensors : certain processes own certain two-operator tensors
1037  //To left: F0tens[cnt][cnt2][cnt3] = operators on sites cnt+1+cnt3 and cnt+1+cnt3+cnt2; at boundary cnt+1
1038  F0tensors[index] = new TensorF0 ** [L-1-index];
1039  F1tensors[index] = new TensorF1 ** [L-1-index];
1040  S0tensors[index] = new TensorS0 ** [L-1-index];
1041  S1tensors[index] = new TensorS1 ** [L-1-index];
1042  for (int cnt2=0; cnt2<L-1-index; cnt2++){
1043  F0tensors[index][cnt2] = new TensorF0 * [L-1-index-cnt2];
1044  F1tensors[index][cnt2] = new TensorF1 * [L-1-index-cnt2];
1045  S0tensors[index][cnt2] = new TensorS0 * [L-1-index-cnt2];
1046  if (cnt2>0){ S1tensors[index][cnt2] = new TensorS1 * [L-1-index-cnt2]; }
1047  for (int cnt3=0; cnt3<L-1-index-cnt2; cnt3++){
1048  const int Iprod = Irreps::directProd(denBK->gIrrep(index+1+cnt3),denBK->gIrrep(index+1+cnt2+cnt3));
1049  #ifdef CHEMPS2_MPI_COMPILATION
1050  if (( cnt3 == 0 ) || ( MPIchemps2::owner_cdf(L, index+1+cnt3, index+1+cnt2+cnt3) == MPIRANK )){
1051  #endif
1052  F0tensors[index][cnt2][cnt3] = new TensorF0(index+1,Iprod,movingRight,denBK);
1053  F1tensors[index][cnt2][cnt3] = new TensorF1(index+1,Iprod,movingRight,denBK);
1054  #ifdef CHEMPS2_MPI_COMPILATION
1055  } else {
1056  F0tensors[index][cnt2][cnt3] = NULL;
1057  F1tensors[index][cnt2][cnt3] = NULL;
1058  }
1059  if (( cnt3 == 0 ) || ( MPIchemps2::owner_absigma(index+1+cnt3, index+1+cnt2+cnt3) == MPIRANK )){
1060  #endif
1061  S0tensors[index][cnt2][cnt3] = new TensorS0(index+1,Iprod,movingRight,denBK);
1062  if (cnt2>0){ S1tensors[index][cnt2][cnt3] = new TensorS1(index+1,Iprod,movingRight,denBK); }
1063  #ifdef CHEMPS2_MPI_COMPILATION
1064  } else {
1065  S0tensors[index][cnt2][cnt3] = NULL;
1066  if (cnt2>0){ S1tensors[index][cnt2][cnt3] = NULL; }
1067  }
1068  #endif
1069  }
1070  }
1071 
1072  //Complementary two-operator tensors : certain processes own certain complementary two-operator tensors
1073  //To left: Atens[cnt][cnt2][cnt3] = operators on sites cnt-cnt2-cnt3 and cnt-cnt3; at boundary cnt+1
1074  Atensors[index] = new TensorOperator ** [index+1];
1075  Btensors[index] = new TensorOperator ** [index+1];
1076  Ctensors[index] = new TensorOperator ** [index+1];
1077  Dtensors[index] = new TensorOperator ** [index+1];
1078  for (int cnt2=0; cnt2<index+1; cnt2++){
1079  Atensors[index][cnt2] = new TensorOperator * [index + 1 - cnt2];
1080  if (cnt2>0){ Btensors[index][cnt2] = new TensorOperator * [index + 1 - cnt2]; }
1081  Ctensors[index][cnt2] = new TensorOperator * [index + 1 - cnt2];
1082  Dtensors[index][cnt2] = new TensorOperator * [index + 1 - cnt2];
1083  for (int cnt3=0; cnt3<index+1-cnt2; cnt3++){
1084  const int Idiff = Irreps::directProd(denBK->gIrrep(index-cnt2-cnt3),denBK->gIrrep(index-cnt3));
1085  #ifdef CHEMPS2_MPI_COMPILATION
1086  if ( MPIchemps2::owner_absigma(index-cnt2-cnt3, index-cnt3) == MPIRANK ){
1087  #endif
1088  Atensors[index][cnt2][cnt3] = new TensorOperator( index+1, 0, 2, Idiff, movingRight, true, false, denBK, denBK );
1089  if (cnt2>0){ Btensors[index][cnt2][cnt3] = new TensorOperator( index+1, 2, 2, Idiff, movingRight, true, false, denBK, denBK ); }
1090  #ifdef CHEMPS2_MPI_COMPILATION
1091  } else {
1092  Atensors[index][cnt2][cnt3] = NULL;
1093  if (cnt2>0){ Btensors[index][cnt2][cnt3] = NULL; }
1094  }
1095  if ( MPIchemps2::owner_cdf(L, index-cnt2-cnt3, index-cnt3) == MPIRANK ){
1096  #endif
1097  Ctensors[index][cnt2][cnt3] = new TensorOperator( index+1, 0, 0, Idiff, movingRight, true, false, denBK, denBK );
1098  Dtensors[index][cnt2][cnt3] = new TensorOperator( index+1, 2, 0, Idiff, movingRight, movingRight, false, denBK, denBK );
1099  #ifdef CHEMPS2_MPI_COMPILATION
1100  } else {
1101  Ctensors[index][cnt2][cnt3] = NULL;
1102  Dtensors[index][cnt2][cnt3] = NULL;
1103  }
1104  #endif
1105  }
1106  }
1107 
1108  //Qtensors : certain processes own certain Qtensors
1109  //To left: Qtens[cnt][cnt2] = operator on site cnt-cnt2; at boundary cnt+1
1110  Qtensors[index] = new TensorQ*[index+1];
1111  for (int cnt2=0; cnt2<index+1; cnt2++){
1112  #ifdef CHEMPS2_MPI_COMPILATION
1113  if ( MPIchemps2::owner_q(L, index-cnt2) == MPIRANK ){
1114  #endif
1115  Qtensors[index][cnt2] = new TensorQ(index+1,denBK->gIrrep(index-cnt2),movingRight,denBK,Prob,index-cnt2);
1116  #ifdef CHEMPS2_MPI_COMPILATION
1117  } else { Qtensors[index][cnt2] = NULL; }
1118  #endif
1119  }
1120 
1121  //Xtensors : a certain process owns the Xtensors
1122  #ifdef CHEMPS2_MPI_COMPILATION
1123  if ( MPIchemps2::owner_x() == MPIRANK ){
1124  #endif
1125  Xtensors[index] = new TensorX(index+1,movingRight,denBK,Prob);
1126  #ifdef CHEMPS2_MPI_COMPILATION
1127  } else { Xtensors[index] = NULL; }
1128  #endif
1129 
1130  //Otensors : certain processes own certain excitations
1131  if ( Exc_activated ){
1132  for ( int state = 0; state < nStates - 1; state++ ){
1133  #ifdef CHEMPS2_MPI_COMPILATION
1134  if ( MPIchemps2::owner_specific_excitation( L, state ) == MPIRANK )
1135  #endif
1136  { Exc_Overlaps[ state ][ index ] = new TensorO( index + 1, movingRight, denBK, Exc_BKs[ state ] ); }
1137  }
1138  }
1139 
1140  }
1141 
1142  gettimeofday(&end, NULL);
1143  timings[ CHEMPS2_TIME_TENS_ALLOC ] += (end.tv_sec - start.tv_sec) + 1e-6 * (end.tv_usec - start.tv_usec);
1144 
1145 }
1146 
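// MY_HDF5_READ_BATCH / MY_HDF5_WRITE_BATCH (de)serialize a whole batch of tensors through a single
// 1-D double dataset "storage" inside the HDF5 group 'tag': each tensor occupies one hyperslab of
// gKappa2index( gNKappa() ) doubles at a running offset. Reading walks the batch in the same order
// as writing, so the offsets line up.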
1147 void CheMPS2::DMRG::MY_HDF5_READ_BATCH( const hid_t file_id, const int number, Tensor ** batch, const long long totalsize, const std::string tag ){
1148 
1149  const hid_t group_id = H5Gopen(file_id, tag.c_str(), H5P_DEFAULT);
1150  const hsize_t dimarray = totalsize;
1151  const hid_t dataspace_id = H5Screate_simple(1, &dimarray, NULL);
1152  const hid_t dataset_id = H5Dopen(group_id, "storage", H5P_DEFAULT);
1153 
1154  long long offset = 0;
1155  for (int cnt=0; cnt<number; cnt++){
1156  const int tensor_size = batch[cnt]->gKappa2index(batch[cnt]->gNKappa());
1157  if ( tensor_size > 0 ){
1158 
1159  const hsize_t start = offset;
1160  const hsize_t count = tensor_size;
1161  H5Sselect_hyperslab(dataspace_id, H5S_SELECT_SET, &start, NULL, &count, NULL);
1162  const hid_t memspace_id = H5Screate_simple(1, &count, NULL);
1163  H5Dread(dataset_id, H5T_NATIVE_DOUBLE, memspace_id, dataspace_id, H5P_DEFAULT, batch[cnt]->gStorage());
1164  H5Sclose(memspace_id);
1165 
1166  offset += tensor_size;
1167  }
1168  }
1169 
1170  H5Dclose(dataset_id);
1171  H5Sclose(dataspace_id);
1172  H5Gclose(group_id);
1173 
1174  assert( totalsize == offset );
1175  num_double_read_disk += totalsize;
1176 
1177 }
1178 
1179 void CheMPS2::DMRG::MY_HDF5_WRITE_BATCH( const hid_t file_id, const int number, Tensor ** batch, const long long totalsize, const std::string tag ){
1180 
1181  const hid_t group_id = H5Gcreate(file_id, tag.c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
1182  const hsize_t dimarray = totalsize;
1183  const hid_t dataspace_id = H5Screate_simple(1, &dimarray, NULL);
1184  const hid_t dataset_id = H5Dcreate(group_id, "storage", H5T_NATIVE_DOUBLE, dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
1185  /* Switch from H5T_IEEE_F64LE to H5T_NATIVE_DOUBLE to avoid datatype conversion of the doubles
1186  --> only the MPS checkpoint is reused between calculations anyway */
1187 
1188  long long offset = 0;
1189  for (int cnt=0; cnt<number; cnt++){
1190  const int tensor_size = batch[cnt]->gKappa2index(batch[cnt]->gNKappa());
1191  if ( tensor_size > 0 ){
1192 
1193  const hsize_t start = offset;
1194  const hsize_t count = tensor_size;
1195  H5Sselect_hyperslab(dataspace_id, H5S_SELECT_SET, &start, NULL, &count, NULL);
1196  const hid_t memspace_id = H5Screate_simple(1, &count, NULL);
1197  H5Dwrite(dataset_id, H5T_NATIVE_DOUBLE, memspace_id, dataspace_id, H5P_DEFAULT, batch[cnt]->gStorage());
1198  H5Sclose(memspace_id);
1199 
1200  offset += tensor_size;
1201  }
1202  }
1203 
1204  H5Dclose(dataset_id);
1205  H5Sclose(dataspace_id);
1206  H5Gclose(group_id);
1207 
1208  assert( totalsize == offset );
1209  num_double_write_disk += totalsize;
1210 
1211 }
1212 
1213 void CheMPS2::DMRG::OperatorsOnDisk(const int index, const bool movingRight, const bool store){
1214 
1215  /*
1216 
1217  By working with hyperslabs and batches of tensors, there
1218  are exactly 11 groups which need to be written to the file
1219  ( 12 when there are excitations ).
1220 
1221  */
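// In addition: the operators stored in slot 'index' of the operator arrays are written to
// ( store == true ) or read back from ( store == false ) the file
// <tempfolder>/<DMRG_OPERATOR_storage_prefix><PID>_index_<index>.h5, and 'movingRight' selects
// whether the right-moving or the left-moving operator layout is batched.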
1222 
1223  struct timeval start, end;
1224  gettimeofday(&start, NULL);
1225 
1226  const int Nbound = movingRight ? index+1 : L-1-index;
1227  const int Cbound = movingRight ? L-1-index : index+1;
1228  #ifdef CHEMPS2_MPI_COMPILATION
1229  const int MPIRANK = MPIchemps2::mpi_rank();
1230  #endif
1231 
1232  std::stringstream thefilename;
1233  //The PID is different for each MPI process
1234  thefilename << tempfolder << "/" << CheMPS2::DMRG_OPERATOR_storage_prefix << thePID << "_index_" << index << ".h5";
1235 
1236  //The hdf5 file
1237  const hid_t file_id = ( store ) ? H5Fcreate( thefilename.str().c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT )
1238  : H5Fopen( thefilename.str().c_str(), H5F_ACC_RDONLY, H5P_DEFAULT );
1239 
1240  //Ltensors : all processes own all Ltensors
1241  {
1242  long long totalsizeL = 0;
1243  Tensor ** batchL = new Tensor*[ Nbound ];
1244  for (int cnt2=0; cnt2<Nbound; cnt2++){
1245  totalsizeL += Ltensors[index][cnt2]->gKappa2index(Ltensors[index][cnt2]->gNKappa());
1246  batchL[cnt2] = Ltensors[index][cnt2];
1247  }
1248  if ( totalsizeL > 0 ){
1249  const std::string tag = "Ltensors";
1250  if ( store ){ MY_HDF5_WRITE_BATCH( file_id, Nbound, batchL, totalsizeL, tag ); }
1251  else{ MY_HDF5_READ_BATCH( file_id, Nbound, batchL, totalsizeL, tag ); }
1252  }
1253  delete [] batchL;
1254  }
1255 
1256  //Renormalized two-operator tensors : certain processes own certain two-operator tensors
1257  {
1258  long long totalsizeF0 = 0; int numF0 = 0; Tensor ** batchF0 = new Tensor*[ (Nbound*(Nbound + 1))/2 ];
1259  long long totalsizeF1 = 0; int numF1 = 0; Tensor ** batchF1 = new Tensor*[ (Nbound*(Nbound + 1))/2 ];
1260  long long totalsizeS0 = 0; int numS0 = 0; Tensor ** batchS0 = new Tensor*[ (Nbound*(Nbound + 1))/2 ];
1261  long long totalsizeS1 = 0; int numS1 = 0; Tensor ** batchS1 = new Tensor*[ (Nbound*(Nbound + 1))/2 ];
1262 
1263  for (int cnt2=0; cnt2<Nbound; cnt2++){
1264  for (int cnt3=0; cnt3<Nbound-cnt2; cnt3++){
1265  #ifdef CHEMPS2_MPI_COMPILATION
1266  const int siteindex1 = movingRight ? index - cnt2 - cnt3 : index + 1 + cnt3;
1267  const int siteindex2 = movingRight ? index - cnt3 : index + 1 + cnt2 + cnt3;
1268  if (( cnt3 == 0 ) || ( MPIchemps2::owner_cdf(L, siteindex1, siteindex2) == MPIRANK ))
1269  #endif
1270  {
1271  batchF0[numF0] = F0tensors[index][cnt2][cnt3]; totalsizeF0 += batchF0[numF0]->gKappa2index(batchF0[numF0]->gNKappa()); numF0++;
1272  batchF1[numF1] = F1tensors[index][cnt2][cnt3]; totalsizeF1 += batchF1[numF1]->gKappa2index(batchF1[numF1]->gNKappa()); numF1++;
1273  }
1274  #ifdef CHEMPS2_MPI_COMPILATION
1275  if (( cnt3 == 0 ) || ( MPIchemps2::owner_absigma(siteindex1, siteindex2) == MPIRANK ))
1276  #endif
1277  {
1278  batchS0[numS0] = S0tensors[index][cnt2][cnt3]; totalsizeS0 += batchS0[numS0]->gKappa2index(batchS0[numS0]->gNKappa()); numS0++;
1279  if (cnt2>0){ batchS1[numS1] = S1tensors[index][cnt2][cnt3]; totalsizeS1 += batchS1[numS1]->gKappa2index(batchS1[numS1]->gNKappa()); numS1++; }
1280  }
1281  }
1282  }
1283 
1284  if ( totalsizeF0 > 0 ){
1285  const std::string tag = "F0tensors";
1286  if ( store ){ MY_HDF5_WRITE_BATCH( file_id, numF0, batchF0, totalsizeF0, tag ); }
1287  else{ MY_HDF5_READ_BATCH( file_id, numF0, batchF0, totalsizeF0, tag ); }
1288  }
1289  if ( totalsizeF1 > 0 ){
1290  const std::string tag = "F1tensors";
1291  if ( store ){ MY_HDF5_WRITE_BATCH( file_id, numF1, batchF1, totalsizeF1, tag ); }
1292  else{ MY_HDF5_READ_BATCH( file_id, numF1, batchF1, totalsizeF1, tag ); }
1293  }
1294  if ( totalsizeS0 > 0 ){
1295  const std::string tag = "S0tensors";
1296  if ( store ){ MY_HDF5_WRITE_BATCH( file_id, numS0, batchS0, totalsizeS0, tag ); }
1297  else{ MY_HDF5_READ_BATCH( file_id, numS0, batchS0, totalsizeS0, tag ); }
1298  }
1299  if ( totalsizeS1 > 0 ){
1300  const std::string tag = "S1tensors";
1301  if ( store ){ MY_HDF5_WRITE_BATCH( file_id, numS1, batchS1, totalsizeS1, tag ); }
1302  else{ MY_HDF5_READ_BATCH( file_id, numS1, batchS1, totalsizeS1, tag ); }
1303  }
1304 
1305  delete [] batchF0;
1306  delete [] batchF1;
1307  delete [] batchS0;
1308  delete [] batchS1;
1309 
1310  }
1311 
1312  //Complementary two-operator tensors : certain processes own certain complementary two-operator tensors
1313  {
1314  long long totalsizeA = 0; int numA = 0; Tensor ** batchA = new Tensor*[ (Cbound*(Cbound + 1))/2 ];
1315  long long totalsizeB = 0; int numB = 0; Tensor ** batchB = new Tensor*[ (Cbound*(Cbound + 1))/2 ];
1316  long long totalsizeC = 0; int numC = 0; Tensor ** batchC = new Tensor*[ (Cbound*(Cbound + 1))/2 ];
1317  long long totalsizeD = 0; int numD = 0; Tensor ** batchD = new Tensor*[ (Cbound*(Cbound + 1))/2 ];
1318 
1319  for (int cnt2=0; cnt2<Cbound; cnt2++){
1320  for (int cnt3=0; cnt3<Cbound-cnt2; cnt3++){
1321  #ifdef CHEMPS2_MPI_COMPILATION
1322  const int siteindex1 = movingRight ? index + 1 + cnt3 : index - cnt2 - cnt3;
1323  const int siteindex2 = movingRight ? index + 1 + cnt2 + cnt3 : index - cnt3;
1324  if ( MPIchemps2::owner_absigma(siteindex1, siteindex2) == MPIRANK )
1325  #endif
1326  {
1327  batchA[numA] = Atensors[index][cnt2][cnt3]; totalsizeA += batchA[numA]->gKappa2index(batchA[numA]->gNKappa()); numA++;
1328  if (cnt2>0){ batchB[numB] = Btensors[index][cnt2][cnt3]; totalsizeB += batchB[numB]->gKappa2index(batchB[numB]->gNKappa()); numB++; }
1329  }
1330  #ifdef CHEMPS2_MPI_COMPILATION
1331  if ( MPIchemps2::owner_cdf(L, siteindex1, siteindex2) == MPIRANK )
1332  #endif
1333  {
1334  batchC[numC] = Ctensors[index][cnt2][cnt3]; totalsizeC += batchC[numC]->gKappa2index(batchC[numC]->gNKappa()); numC++;
1335  batchD[numD] = Dtensors[index][cnt2][cnt3]; totalsizeD += batchD[numD]->gKappa2index(batchD[numD]->gNKappa()); numD++;
1336  }
1337  }
1338  }
1339 
1340  if ( totalsizeA > 0 ){
1341  const std::string tag = "Atensors";
1342  if ( store ){ MY_HDF5_WRITE_BATCH( file_id, numA, batchA, totalsizeA, tag ); }
1343  else{ MY_HDF5_READ_BATCH( file_id, numA, batchA, totalsizeA, tag ); }
1344  }
1345  if ( totalsizeB > 0 ){
1346  const std::string tag = "Btensors";
1347  if ( store ){ MY_HDF5_WRITE_BATCH( file_id, numB, batchB, totalsizeB, tag ); }
1348  else{ MY_HDF5_READ_BATCH( file_id, numB, batchB, totalsizeB, tag ); }
1349  }
1350  if ( totalsizeC > 0 ){
1351  const std::string tag = "Ctensors";
1352  if ( store ){ MY_HDF5_WRITE_BATCH( file_id, numC, batchC, totalsizeC, tag ); }
1353  else{ MY_HDF5_READ_BATCH( file_id, numC, batchC, totalsizeC, tag ); }
1354  }
1355  if ( totalsizeD > 0 ){
1356  const std::string tag = "Dtensors";
1357  if ( store ){ MY_HDF5_WRITE_BATCH( file_id, numD, batchD, totalsizeD, tag ); }
1358  else{ MY_HDF5_READ_BATCH( file_id, numD, batchD, totalsizeD, tag ); }
1359  }
1360 
1361  delete [] batchA;
1362  delete [] batchB;
1363  delete [] batchC;
1364  delete [] batchD;
1365 
1366  }
1367 
1368  //Complementary Q-tensors : certain processes own certain complementary Q-tensors
1369  {
1370  long long totalsizeQ = 0;
1371  int numQ = 0;
1372  Tensor ** batchQ = new Tensor*[ Cbound ];
1373  for (int cnt2=0; cnt2<Cbound; cnt2++){
1374  #ifdef CHEMPS2_MPI_COMPILATION
1375  const int siteindex = movingRight ? index + 1 + cnt2 : index - cnt2;
1376  if ( MPIchemps2::owner_q(L, siteindex) == MPIRANK )
1377  #endif
1378  {
1379  batchQ[numQ] = Qtensors[index][cnt2]; totalsizeQ += batchQ[numQ]->gKappa2index(batchQ[numQ]->gNKappa()); numQ++;
1380  }
1381  }
1382  if ( totalsizeQ > 0 ){
1383  const std::string tag = "Qtensors";
1384  if ( store ){ MY_HDF5_WRITE_BATCH( file_id, numQ, batchQ, totalsizeQ, tag ); }
1385  else{ MY_HDF5_READ_BATCH( file_id, numQ, batchQ, totalsizeQ, tag ); }
1386  }
1387  delete [] batchQ;
1388  }
1389 
1390  //Complementary X-tensor : one process owns the X-tensors
1391  #ifdef CHEMPS2_MPI_COMPILATION
1392  if ( MPIchemps2::owner_x() == MPIRANK )
1393  #endif
1394  {
1395  Tensor ** batchX = new Tensor*[ 1 ];
1396  const long long totalsizeX = Xtensors[index]->gKappa2index(Xtensors[index]->gNKappa());
1397  batchX[0] = Xtensors[index];
1398  if ( totalsizeX > 0 ){
1399  const std::string tag = "Xtensors";
1400  if ( store ){ MY_HDF5_WRITE_BATCH( file_id, 1, batchX, totalsizeX, tag ); }
1401  else{ MY_HDF5_READ_BATCH( file_id, 1, batchX, totalsizeX, tag ); }
1402  }
1403  delete [] batchX;
1404  }
1405 
1406  //O-tensors : certain processes own certain excitations
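      //One overlap tensor per lower-lying state (nStates-1 in total) when excited states are targeted;
      //with MPI each excitation is handled by the rank returned by owner_specific_excitation.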
1407  if (Exc_activated){
1408  long long totalsizeO = 0;
1409  int numO = 0;
1410  Tensor ** batchO = new Tensor*[ nStates-1 ];
1411  for (int state=0; state<nStates-1; state++){
1412  #ifdef CHEMPS2_MPI_COMPILATION
1413  if ( MPIchemps2::owner_specific_excitation( L, state ) == MPIRANK )
1414  #endif
1415  {
1416  batchO[numO] = Exc_Overlaps[state][index]; totalsizeO += batchO[numO]->gKappa2index(batchO[numO]->gNKappa()); numO++;
1417  }
1418  }
1419  if ( totalsizeO > 0 ){
1420  const std::string tag = "Otensors";
1421  if ( store ){ MY_HDF5_WRITE_BATCH( file_id, numO, batchO, totalsizeO, tag ); }
1422  else{ MY_HDF5_READ_BATCH( file_id, numO, batchO, totalsizeO, tag ); }
1423  }
1424  delete [] batchO;
1425  }
1426 
1427  H5Fclose(file_id);
1428 
1429  gettimeofday(&end, NULL);
1430  if ( store ){ timings[ CHEMPS2_TIME_DISK_WRITE ] += (end.tv_sec - start.tv_sec) + 1e-6 * (end.tv_usec - start.tv_usec); }
1431  else { timings[ CHEMPS2_TIME_DISK_READ ] += (end.tv_sec - start.tv_sec) + 1e-6 * (end.tv_usec - start.tv_usec); }
1432 
1433 }
1434 
1435 void CheMPS2::DMRG::deleteTensors(const int index, const bool movingRight){
1436 
1437  struct timeval start, end;
1438  gettimeofday(&start, NULL);
1439 
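      //Nbound is the number of sites on the already-renormalized side of boundary 'index' (regular
      //operators), Cbound the number of sites on the other side (complementary operators); the loops
      //below mirror allocateTensors so that exactly the tensors allocated there are freed here.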
1440  const int Nbound = movingRight ? index+1 : L-1-index;
1441  const int Cbound = movingRight ? L-1-index : index+1;
1442  #ifdef CHEMPS2_MPI_COMPILATION
1443  const int MPIRANK = MPIchemps2::mpi_rank();
1444  #endif
1445 
1446  //Ltensors : all processes own all Ltensors
1447  for (int cnt2=0; cnt2<Nbound; cnt2++){ delete Ltensors[index][cnt2]; }
1448  delete [] Ltensors[index];
1449 
1450  //Two-operator tensors : certain processes own certain two-operator tensors
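      //Tensors with cnt3 == 0 are held by every process and hence deleted by every process; the others
      //only by their MPI owner (owner_cdf for F0/F1, owner_absigma for S0/S1). S1 only exists for
      //cnt2 > 0, i.e. when the two orbital indices differ.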
1451  for (int cnt2=0; cnt2<Nbound; cnt2++){
1452  for (int cnt3=0; cnt3<Nbound-cnt2; cnt3++){
1453  #ifdef CHEMPS2_MPI_COMPILATION
1454  const int siteindex1 = movingRight ? index - cnt2 - cnt3 : index + 1 + cnt3;
1455  const int siteindex2 = movingRight ? index - cnt3 : index + 1 + cnt2 + cnt3;
1456  if (( cnt3 == 0 ) || ( MPIchemps2::owner_cdf(L, siteindex1, siteindex2) == MPIRANK ))
1457  #endif
1458  {
1459  delete F0tensors[index][cnt2][cnt3];
1460  delete F1tensors[index][cnt2][cnt3];
1461  }
1462  #ifdef CHEMPS2_MPI_COMPILATION
1463  if (( cnt3 == 0 ) || ( MPIchemps2::owner_absigma(siteindex1, siteindex2) == MPIRANK ))
1464  #endif
1465  {
1466  delete S0tensors[index][cnt2][cnt3];
1467  if (cnt2>0){ delete S1tensors[index][cnt2][cnt3]; }
1468  }
1469  }
1470  delete [] F0tensors[index][cnt2];
1471  delete [] F1tensors[index][cnt2];
1472  delete [] S0tensors[index][cnt2];
1473  if (cnt2>0){ delete [] S1tensors[index][cnt2]; }
1474  }
1475  delete [] F0tensors[index];
1476  delete [] F1tensors[index];
1477  delete [] S0tensors[index];
1478  delete [] S1tensors[index];
1479 
1480  //Complementary two-operator tensors : certain processes own certain complementary two-operator tensors
1481  for (int cnt2=0; cnt2<Cbound; cnt2++){
1482  for (int cnt3=0; cnt3<Cbound-cnt2; cnt3++){
1483  #ifdef CHEMPS2_MPI_COMPILATION
1484  const int siteindex1 = movingRight ? index + 1 + cnt3 : index - cnt2 - cnt3;
1485  const int siteindex2 = movingRight ? index + 1 + cnt2 + cnt3 : index - cnt3;
1486  if ( MPIchemps2::owner_absigma(siteindex1, siteindex2) == MPIRANK )
1487  #endif
1488  {
1489  delete Atensors[index][cnt2][cnt3];
1490  if (cnt2>0){ delete Btensors[index][cnt2][cnt3]; }
1491  }
1492  #ifdef CHEMPS2_MPI_COMPILATION
1493  if ( MPIchemps2::owner_cdf(L, siteindex1, siteindex2) == MPIRANK )
1494  #endif
1495  {
1496  delete Ctensors[index][cnt2][cnt3];
1497  delete Dtensors[index][cnt2][cnt3];
1498  }
1499  }
1500  delete [] Atensors[index][cnt2];
1501  if (cnt2>0){ delete [] Btensors[index][cnt2]; }
1502  delete [] Ctensors[index][cnt2];
1503  delete [] Dtensors[index][cnt2];
1504  }
1505  delete [] Atensors[index];
1506  delete [] Btensors[index];
1507  delete [] Ctensors[index];
1508  delete [] Dtensors[index];
1509 
1510  //Qtensors : certain processes own certain Qtensors
1511  for (int cnt2=0; cnt2<Cbound; cnt2++){
1512  #ifdef CHEMPS2_MPI_COMPILATION
1513  const int siteindex = movingRight ? index + 1 + cnt2 : index - cnt2;
1514  if ( MPIchemps2::owner_q(L, siteindex) == MPIRANK )
1515  #endif
1516  { delete Qtensors[index][cnt2]; }
1517  }
1518  delete [] Qtensors[index];
1519 
1520  //Xtensors
1521  #ifdef CHEMPS2_MPI_COMPILATION
1522  if ( MPIchemps2::owner_x() == MPIRANK )
1523  #endif
1524  { delete Xtensors[index]; }
1525 
1526  //Otensors
1527  if (Exc_activated){
1528  for (int state=0; state<nStates-1; state++){
1529  #ifdef CHEMPS2_MPI_COMPILATION
1530  if ( MPIchemps2::owner_specific_excitation( L, state ) == MPIRANK )
1531  #endif
1532  { delete Exc_Overlaps[state][index]; }
1533  }
1534  }
1535 
1536  gettimeofday(&end, NULL);
1537  timings[ CHEMPS2_TIME_TENS_FREE ] += (end.tv_sec - start.tv_sec) + 1e-6 * (end.tv_usec - start.tv_usec);
1538 
1539 }
1540 
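      //Remove the renormalized-operator scratch HDF5 files (prefix CheMPS2::DMRG_OPERATOR_storage_prefix
      //plus the process ID) by shelling out "rm", and report the return value of system().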
1541 void CheMPS2::DMRG::deleteStoredOperators(){
1542 
1543  std::stringstream temp;
1544  temp << "rm " << tempfolder << "/" << CheMPS2::DMRG_OPERATOR_storage_prefix << thePID << "*.h5";
1545  int info = system(temp.str().c_str());
1546  std::cout << "Info on DMRG::operators rm call to system: " << info << std::endl;
1547 
1548 }
1549 