#include <stdlib.h>
#include <iostream>
#include <sstream>
#include <assert.h>
#include <sys/time.h>

#include "DMRG.h"
#include "Lapack.h"
#include "MPIchemps2.h"
#include "Special.h"

// Right-moving update for the very first sweep: prepare and update the boundary
// operators at boundary cnt for a right move, and afterwards flush the previous
// left boundary to disk if the renormalized operators are stored on disk.
void CheMPS2::DMRG::updateMovingRightSafeFirstTime( const int cnt ){

   if ( isAllocated[ cnt ] == 2 ){
      deleteTensors( cnt, false );
      isAllocated[ cnt ] = 0;
   }
   if ( isAllocated[ cnt ] == 0 ){
      allocateTensors( cnt, true );
      isAllocated[ cnt ] = 1;
   }
   updateMovingRight( cnt );

   if ( CheMPS2::DMRG_storeRenormOptrOnDisk ){
      if ( cnt > 0 ){
         if ( isAllocated[ cnt - 1 ] == 1 ){
            OperatorsOnDisk( cnt - 1, true, true );
            deleteTensors( cnt - 1, true );
            isAllocated[ cnt - 1 ] = 0;
         }
      }
   }

}
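
// Left-moving counterpart of updateMovingRightSafeFirstTime: prepare and update the
// boundary operators at boundary cnt for a left move, and afterwards flush the
// right neighbour to disk if the renormalized operators are stored on disk.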
void CheMPS2::DMRG::updateMovingLeftSafeFirstTime( const int cnt ){

   if ( isAllocated[ cnt ] == 1 ){
      deleteTensors( cnt, true );
      isAllocated[ cnt ] = 0;
   }
   if ( isAllocated[ cnt ] == 0 ){
      allocateTensors( cnt, false );
      isAllocated[ cnt ] = 2;
   }
   updateMovingLeft( cnt );

   if ( CheMPS2::DMRG_storeRenormOptrOnDisk ){
      if ( cnt + 1 < L - 1 ){
         if ( isAllocated[ cnt + 1 ] == 2 ){
            OperatorsOnDisk( cnt + 1, false, true );
            deleteTensors( cnt + 1, false );
            isAllocated[ cnt + 1 ] = 0;
         }
      }
   }

}
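
// Safe wrapper around updateMovingRight for the sweep routines. The isAllocated codes,
// as used throughout this file, appear to be: 0 = boundary operators not allocated,
// 1 = allocated for a right move, 2 = allocated for a left move. With disk storage
// enabled, the previous boundary is flushed to disk and the boundary two sites ahead
// is prefetched (allocated and read back in) for the upcoming left sweep.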
void CheMPS2::DMRG::updateMovingRightSafe( const int cnt ){

   if ( isAllocated[ cnt ] == 2 ){
      deleteTensors( cnt, false );
      isAllocated[ cnt ] = 0;
   }
   if ( isAllocated[ cnt ] == 0 ){
      allocateTensors( cnt, true );
      isAllocated[ cnt ] = 1;
   }
   updateMovingRight( cnt );

   if ( CheMPS2::DMRG_storeRenormOptrOnDisk ){
      if ( cnt > 0 ){
         if ( isAllocated[ cnt - 1 ] == 1 ){
            OperatorsOnDisk( cnt - 1, true, true );
            deleteTensors( cnt - 1, true );
            isAllocated[ cnt - 1 ] = 0;
         }
      }
      if ( cnt + 1 < L - 1 ){
         if ( isAllocated[ cnt + 1 ] == 2 ){
            deleteTensors( cnt + 1, false );
            isAllocated[ cnt + 1 ] = 0;
         }
      }
      if ( cnt + 2 < L - 1 ){
         if ( isAllocated[ cnt + 2 ] == 1 ){
            deleteTensors( cnt + 2, true );
            isAllocated[ cnt + 2 ] = 0;
         }
         if ( isAllocated[ cnt + 2 ] == 0 ){
            allocateTensors( cnt + 2, false );
            isAllocated[ cnt + 2 ] = 2;
         }
         OperatorsOnDisk( cnt + 2, false, false );
      }
   }

}
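
// Mirror image of updateMovingRightSafe: update boundary cnt for a left move, flush
// boundary cnt + 1 to disk, free boundary cnt - 1, and prefetch boundary cnt - 2 from
// disk for the ongoing left sweep.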
void CheMPS2::DMRG::updateMovingLeftSafe( const int cnt ){

   if ( isAllocated[ cnt ] == 1 ){
      deleteTensors( cnt, true );
      isAllocated[ cnt ] = 0;
   }
   if ( isAllocated[ cnt ] == 0 ){
      allocateTensors( cnt, false );
      isAllocated[ cnt ] = 2;
   }
   updateMovingLeft( cnt );

   if ( CheMPS2::DMRG_storeRenormOptrOnDisk ){
      if ( cnt + 1 < L - 1 ){
         if ( isAllocated[ cnt + 1 ] == 2 ){
            OperatorsOnDisk( cnt + 1, false, true );
            deleteTensors( cnt + 1, false );
            isAllocated[ cnt + 1 ] = 0;
         }
      }
      if ( cnt - 1 >= 0 ){
         if ( isAllocated[ cnt - 1 ] == 1 ){
            deleteTensors( cnt - 1, true );
            isAllocated[ cnt - 1 ] = 0;
         }
      }
      if ( cnt - 2 >= 0 ){
         if ( isAllocated[ cnt - 2 ] == 2 ){
            deleteTensors( cnt - 2, false );
            isAllocated[ cnt - 2 ] = 0;
         }
         if ( isAllocated[ cnt - 2 ] == 0 ){
            allocateTensors( cnt - 2, true );
            isAllocated[ cnt - 2 ] = 1;
         }
         OperatorsOnDisk( cnt - 2, true, false );
      }
   }

}
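
// Variant of updateMovingRightSafe used during the 2-RDM pass: the next boundary
// ( cnt + 1 rather than cnt + 2 ) is prefetched for a left move, presumably because
// the 2-RDM sweep needs both adjacent boundaries at every site.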
void CheMPS2::DMRG::updateMovingRightSafe2DM( const int cnt ){

   if ( isAllocated[ cnt ] == 2 ){
      deleteTensors( cnt, false );
      isAllocated[ cnt ] = 0;
   }
   if ( isAllocated[ cnt ] == 0 ){
      allocateTensors( cnt, true );
      isAllocated[ cnt ] = 1;
   }
   updateMovingRight( cnt );

   if ( CheMPS2::DMRG_storeRenormOptrOnDisk ){
      if ( cnt > 0 ){
         if ( isAllocated[ cnt - 1 ] == 1 ){
            OperatorsOnDisk( cnt - 1, true, true );
            deleteTensors( cnt - 1, true );
            isAllocated[ cnt - 1 ] = 0;
         }
      }
      if ( cnt + 1 < L - 1 ){
         if ( isAllocated[ cnt + 1 ] == 1 ){
            deleteTensors( cnt + 1, true );
            isAllocated[ cnt + 1 ] = 0;
         }
         if ( isAllocated[ cnt + 1 ] == 0 ){
            allocateTensors( cnt + 1, false );
            isAllocated[ cnt + 1 ] = 2;
         }
         OperatorsOnDisk( cnt + 1, false, false );
      }
   }

}
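
// Left-moving counterpart of updateMovingRightSafe2DM: boundary cnt - 1 is prefetched
// for a right move immediately after boundary cnt has been updated.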
void CheMPS2::DMRG::updateMovingLeftSafe2DM( const int cnt ){

   if ( isAllocated[ cnt ] == 1 ){
      deleteTensors( cnt, true );
      isAllocated[ cnt ] = 0;
   }
   if ( isAllocated[ cnt ] == 0 ){
      allocateTensors( cnt, false );
      isAllocated[ cnt ] = 2;
   }
   updateMovingLeft( cnt );

   if ( CheMPS2::DMRG_storeRenormOptrOnDisk ){
      if ( cnt + 1 < L - 1 ){
         if ( isAllocated[ cnt + 1 ] == 2 ){
            OperatorsOnDisk( cnt + 1, false, true );
            deleteTensors( cnt + 1, false );
            isAllocated[ cnt + 1 ] = 0;
         }
      }
      if ( cnt - 1 >= 0 ){
         if ( isAllocated[ cnt - 1 ] == 2 ){
            deleteTensors( cnt - 1, false );
            isAllocated[ cnt - 1 ] = 0;
         }
         if ( isAllocated[ cnt - 1 ] == 0 ){
            allocateTensors( cnt - 1, true );
            isAllocated[ cnt - 1 ] = 1;
         }
         OperatorsOnDisk( cnt - 1, true, false );
      }
   }

}
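
// Free every allocated boundary operator and reset the allocation bookkeeping.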
void CheMPS2::DMRG::deleteAllBoundaryOperators(){

   for ( int cnt = 0; cnt < L - 1; cnt++ ){
      if ( isAllocated[ cnt ] == 1 ){ deleteTensors( cnt, true );  }
      if ( isAllocated[ cnt ] == 2 ){ deleteTensors( cnt, false ); }
      isAllocated[ cnt ] = 0;
   }

}
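
// updateMovingRight( index ): recompute all renormalized operators at boundary index + 1
// from those at boundary index, after MPS[ index ] has changed in a right-moving sweep.
// The triangular ( cnt2, cnt3 ) loops below are flattened to a single "global" index so
// OpenMP can balance the work; Special::invert_triangle_two maps the flat index back to
// a pair. A minimal sketch of such an inversion, assuming the convention
// global = row * ( row + 1 ) / 2 + offset (the actual routine is declared in Special.h;
// this sketch is for illustration only):
//
//    void invert_triangle_two( const int global, int * result ){
//       int row = 0;
//       while ( (( row + 1 ) * ( row + 2 )) / 2 <= global ){ row++; } // first row whose block contains global
//       result[ 1 ] = row;                                           // triangle row
//       result[ 0 ] = global - ( row * ( row + 1 ) ) / 2;            // offset within the row
//    }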
void CheMPS2::DMRG::updateMovingRight( const int index ){

   struct timeval start, end;
   gettimeofday( &start, NULL );

   const int dimL = denBK->gMaxDimAtBound( index );
   const int dimR = denBK->gMaxDimAtBound( index + 1 );
   #ifdef CHEMPS2_MPI_COMPILATION
   const int MPIRANK = MPIchemps2::mpi_rank();
   #endif

   #pragma omp parallel
   {

      double * workmem = new double[ dimL * dimR ];

      // Ltensors : all processes own all Ltensors
      #pragma omp for schedule(static) nowait
      for ( int cnt2 = 0; cnt2 < index + 1; cnt2++ ){
         if ( cnt2 == 0 ){
            Ltensors[ index ][ cnt2 ]->create( MPS[ index ] );
         } else {
            Ltensors[ index ][ cnt2 ]->update( Ltensors[ index - 1 ][ cnt2 - 1 ], MPS[ index ], MPS[ index ], workmem );
         }
      }

      // Two-operator tensors : certain processes own certain two-operator tensors
      const int k1 = index + 1;
      const int upperbound1 = ( k1 * ( k1 + 1 ) ) / 2;
      int result[ 2 ];
      #ifdef CHEMPS2_MPI_COMPILATION
         #pragma omp for schedule(dynamic)
      #else
         #pragma omp for schedule(static)
      #endif
      for ( int global = 0; global < upperbound1; global++ ){
         Special::invert_triangle_two( global, result );
         const int cnt2 = index - result[ 1 ];
         const int cnt3 = result[ 0 ];
         #ifdef CHEMPS2_MPI_COMPILATION
         const int siteindex1 = index - cnt3 - cnt2;
         const int siteindex2 = index - cnt3;
         #endif
         if ( cnt3 == 0 ){ // Every process owns the cnt3 == 0 tensors
            if ( cnt2 == 0 ){
               F0tensors[ index ][ cnt2 ][ cnt3 ]->makenew( MPS[ index ] );
               F1tensors[ index ][ cnt2 ][ cnt3 ]->makenew( MPS[ index ] );
               S0tensors[ index ][ cnt2 ][ cnt3 ]->makenew( MPS[ index ] );
               // S1tensors[ index ][ 0 ][ cnt3 ] do not exist
            } else {
               F0tensors[ index ][ cnt2 ][ cnt3 ]->makenew( Ltensors[ index - 1 ][ cnt2 - 1 ], MPS[ index ], workmem );
               F1tensors[ index ][ cnt2 ][ cnt3 ]->makenew( Ltensors[ index - 1 ][ cnt2 - 1 ], MPS[ index ], workmem );
               S0tensors[ index ][ cnt2 ][ cnt3 ]->makenew( Ltensors[ index - 1 ][ cnt2 - 1 ], MPS[ index ], workmem );
               S1tensors[ index ][ cnt2 ][ cnt3 ]->makenew( Ltensors[ index - 1 ][ cnt2 - 1 ], MPS[ index ], workmem );
            }
         } else {
            #ifdef CHEMPS2_MPI_COMPILATION
            if ( MPIchemps2::owner_cdf( L, siteindex1, siteindex2 ) == MPIRANK )
            #endif
            {
               F0tensors[ index ][ cnt2 ][ cnt3 ]->update( F0tensors[ index - 1 ][ cnt2 ][ cnt3 - 1 ], MPS[ index ], MPS[ index ], workmem );
               F1tensors[ index ][ cnt2 ][ cnt3 ]->update( F1tensors[ index - 1 ][ cnt2 ][ cnt3 - 1 ], MPS[ index ], MPS[ index ], workmem );
            }
            #ifdef CHEMPS2_MPI_COMPILATION
            if ( MPIchemps2::owner_absigma( siteindex1, siteindex2 ) == MPIRANK )
            #endif
            {
               S0tensors[ index ][ cnt2 ][ cnt3 ]->update( S0tensors[ index - 1 ][ cnt2 ][ cnt3 - 1 ], MPS[ index ], MPS[ index ], workmem );
               if ( cnt2 > 0 ){ S1tensors[ index ][ cnt2 ][ cnt3 ]->update( S1tensors[ index - 1 ][ cnt2 ][ cnt3 - 1 ], MPS[ index ], MPS[ index ], workmem ); }
            }
         }
      }

      // Complementary two-operator tensors : certain processes own certain complementary tensors
      const int k2 = L - 1 - index;
      const int upperbound2 = ( k2 * ( k2 + 1 ) ) / 2;
      #ifdef CHEMPS2_MPI_COMPILATION
         #pragma omp for schedule(dynamic)
      #else
         #pragma omp for schedule(static) nowait
      #endif
      for ( int global = 0; global < upperbound2; global++ ){
         Special::invert_triangle_two( global, result );
         const int cnt2 = k2 - 1 - result[ 1 ];
         const int cnt3 = result[ 0 ];
         const int siteindex1 = index + 1 + cnt3;
         const int siteindex2 = index + 1 + cnt2 + cnt3;
         const int irrep_prod = Irreps::directProd( denBK->gIrrep( siteindex1 ), denBK->gIrrep( siteindex2 ) );
         #ifdef CHEMPS2_MPI_COMPILATION
         const bool do_absigma = ( MPIchemps2::owner_absigma( siteindex1, siteindex2 ) == MPIRANK );
         const bool do_cdf     = ( MPIchemps2::owner_cdf( L, siteindex1, siteindex2 ) == MPIRANK );
         #endif
         if ( index == 0 ){
            #ifdef CHEMPS2_MPI_COMPILATION
            if ( do_absigma )
            #endif
            {
               Atensors[ index ][ cnt2 ][ cnt3 ]->clear();
               if ( cnt2 > 0 ){ Btensors[ index ][ cnt2 ][ cnt3 ]->clear(); }
            }
            #ifdef CHEMPS2_MPI_COMPILATION
            if ( do_cdf )
            #endif
            {
               Ctensors[ index ][ cnt2 ][ cnt3 ]->clear();
               Dtensors[ index ][ cnt2 ][ cnt3 ]->clear();
            }
         } else {
            #ifdef CHEMPS2_MPI_COMPILATION
            if ( do_absigma )
            #endif
            {
               Atensors[ index ][ cnt2 ][ cnt3 ]->update( Atensors[ index - 1 ][ cnt2 ][ cnt3 + 1 ], MPS[ index ], MPS[ index ], workmem );
               if ( cnt2 > 0 ){ Btensors[ index ][ cnt2 ][ cnt3 ]->update( Btensors[ index - 1 ][ cnt2 ][ cnt3 + 1 ], MPS[ index ], MPS[ index ], workmem ); }
            }
            #ifdef CHEMPS2_MPI_COMPILATION
            if ( do_cdf )
            #endif
            {
               Ctensors[ index ][ cnt2 ][ cnt3 ]->update( Ctensors[ index - 1 ][ cnt2 ][ cnt3 + 1 ], MPS[ index ], MPS[ index ], workmem );
               Dtensors[ index ][ cnt2 ][ cnt3 ]->update( Dtensors[ index - 1 ][ cnt2 ][ cnt3 + 1 ], MPS[ index ], MPS[ index ], workmem );
            }
         }
         for ( int num = 0; num < index + 1; num++ ){
            if ( irrep_prod == S0tensors[ index ][ num ][ 0 ]->get_irrep() ){ // Else the matrix elements are zero by symmetry
               #ifdef CHEMPS2_MPI_COMPILATION
               if ( do_absigma )
               #endif
               {
                  double alpha = Prob->gMxElement( index - num, index, siteindex1, siteindex2 );
                  if (( cnt2 == 0 ) && ( num == 0 )){ alpha *= 0.5; }
                  if (( cnt2 > 0 ) && ( num > 0 )){ alpha += Prob->gMxElement( index - num, index, siteindex2, siteindex1 ); }
                  Atensors[ index ][ cnt2 ][ cnt3 ]->daxpy( alpha, S0tensors[ index ][ num ][ 0 ] );

                  if (( num > 0 ) && ( cnt2 > 0 )){
                     alpha = Prob->gMxElement( index - num, index, siteindex1, siteindex2 )
                           - Prob->gMxElement( index - num, index, siteindex2, siteindex1 );
                     Btensors[ index ][ cnt2 ][ cnt3 ]->daxpy( alpha, S1tensors[ index ][ num ][ 0 ] );
                  }
               }
               #ifdef CHEMPS2_MPI_COMPILATION
               if ( do_cdf )
               #endif
               {
                  double alpha = 2 * Prob->gMxElement( index - num, siteindex1, index, siteindex2 )
                                   - Prob->gMxElement( index - num, siteindex1, siteindex2, index );
                  Ctensors[ index ][ cnt2 ][ cnt3 ]->daxpy( alpha, F0tensors[ index ][ num ][ 0 ] );

                  alpha = - Prob->gMxElement( index - num, siteindex1, siteindex2, index );
                  Dtensors[ index ][ cnt2 ][ cnt3 ]->daxpy( alpha, F1tensors[ index ][ num ][ 0 ] );

                  if ( num > 0 ){
                     alpha = 2 * Prob->gMxElement( index - num, siteindex2, index, siteindex1 )
                               - Prob->gMxElement( index - num, siteindex2, siteindex1, index );
                     Ctensors[ index ][ cnt2 ][ cnt3 ]->daxpy_transpose_tensorCD( alpha, F0tensors[ index ][ num ][ 0 ] );

                     alpha = - Prob->gMxElement( index - num, siteindex2, siteindex1, index );
                     Dtensors[ index ][ cnt2 ][ cnt3 ]->daxpy_transpose_tensorCD( alpha, F1tensors[ index ][ num ][ 0 ] );
                  }
               }
            }
         }
      }

      // Qtensors : certain processes own certain Qtensors
      #ifdef CHEMPS2_MPI_COMPILATION
         #pragma omp single
      #else
         #pragma omp for schedule(static) nowait
      #endif
      for ( int cnt2 = 0; cnt2 < L - 1 - index; cnt2++ ){

         #ifdef CHEMPS2_MPI_COMPILATION
         const int siteindex = index + 1 + cnt2;
         const int owner_q = MPIchemps2::owner_q( L, siteindex );
         #endif
         if ( index == 0 ){

            #ifdef CHEMPS2_MPI_COMPILATION
            if ( owner_q == MPIRANK )
            #endif
            {
               Qtensors[ index ][ cnt2 ]->clear();
               Qtensors[ index ][ cnt2 ]->AddTermSimple( MPS[ index ] );
            }

         } else {

            #ifdef CHEMPS2_MPI_COMPILATION
            const int owner_absigma = MPIchemps2::owner_absigma( index, siteindex );
            const int owner_cdf     = MPIchemps2::owner_cdf( L, index, siteindex );
            if (( owner_q == owner_absigma ) && ( owner_q == owner_cdf ) && ( owner_q == MPIRANK )){ // No communication needed
            #endif

               double * workmemBIS = new double[ dimL * dimL ];
               Qtensors[ index ][ cnt2 ]->update( Qtensors[ index - 1 ][ cnt2 + 1 ], MPS[ index ], MPS[ index ], workmem );
               Qtensors[ index ][ cnt2 ]->AddTermSimple( MPS[ index ] );
               Qtensors[ index ][ cnt2 ]->AddTermsL( Ltensors[ index - 1 ], MPS[ index ], workmemBIS, workmem );
               Qtensors[ index ][ cnt2 ]->AddTermsAB( Atensors[ index - 1 ][ cnt2 + 1 ][ 0 ], Btensors[ index - 1 ][ cnt2 + 1 ][ 0 ], MPS[ index ], workmemBIS, workmem );
               Qtensors[ index ][ cnt2 ]->AddTermsCD( Ctensors[ index - 1 ][ cnt2 + 1 ][ 0 ], Dtensors[ index - 1 ][ cnt2 + 1 ][ 0 ], MPS[ index ], workmemBIS, workmem );
               delete [] workmemBIS;

            #ifdef CHEMPS2_MPI_COMPILATION
            } else { // Some communication is needed

               if (( owner_q == MPIRANK ) || ( owner_absigma == MPIRANK ) || ( owner_cdf == MPIRANK )){

                  TensorQ * tempQ = new TensorQ( index + 1, denBK->gIrrep( siteindex ), true, denBK, Prob, siteindex );
                  tempQ->clear();

                  // Each owner adds its own terms
                  double * workmemBIS = new double[ dimL * dimL ];
                  if ( owner_q == MPIRANK ){
                     tempQ->update( Qtensors[ index - 1 ][ cnt2 + 1 ], MPS[ index ], MPS[ index ], workmem );
                     tempQ->AddTermSimple( MPS[ index ] );
                     tempQ->AddTermsL( Ltensors[ index - 1 ], MPS[ index ], workmemBIS, workmem );
                  }
                  if ( owner_absigma == MPIRANK ){
                     tempQ->AddTermsAB( Atensors[ index - 1 ][ cnt2 + 1 ][ 0 ], Btensors[ index - 1 ][ cnt2 + 1 ][ 0 ], MPS[ index ], workmemBIS, workmem );
                  }
                  if ( owner_cdf == MPIRANK ){
                     tempQ->AddTermsCD( Ctensors[ index - 1 ][ cnt2 + 1 ][ 0 ], Dtensors[ index - 1 ][ cnt2 + 1 ][ 0 ], MPS[ index ], workmemBIS, workmem );
                  }
                  delete [] workmemBIS;

                  // All pieces are accumulated on owner_q
                  int inc = 1;
                  double alpha = 1.0;
                  int arraysize = tempQ->gKappa2index( tempQ->gNKappa() );
                  if ( owner_q == MPIRANK ){ dcopy_( &arraysize, tempQ->gStorage(), &inc, Qtensors[ index ][ cnt2 ]->gStorage(), &inc ); }
                  if ( owner_q != owner_absigma ){
                     MPIchemps2::sendreceive_tensor( tempQ, owner_absigma, owner_q, 2 * siteindex );
                     if ( owner_q == MPIRANK ){ daxpy_( &arraysize, &alpha, tempQ->gStorage(), &inc, Qtensors[ index ][ cnt2 ]->gStorage(), &inc ); }
                  }
                  if (( owner_q != owner_cdf ) && ( owner_absigma != owner_cdf )){
                     MPIchemps2::sendreceive_tensor( tempQ, owner_cdf, owner_q, 2 * siteindex + 1 );
                     if ( owner_q == MPIRANK ){ daxpy_( &arraysize, &alpha, tempQ->gStorage(), &inc, Qtensors[ index ][ cnt2 ]->gStorage(), &inc ); }
                  }
                  delete tempQ;

               }
            }
            #endif

         }
      }

      delete [] workmem;

   }

   // Xtensors : a single process owns the Xtensors
   #ifdef CHEMPS2_MPI_COMPILATION
   const int owner_x = MPIchemps2::owner_x();
   #endif
   if ( index == 0 ){

      #ifdef CHEMPS2_MPI_COMPILATION
      if ( owner_x == MPIRANK )
      #endif
      { Xtensors[ index ]->update( MPS[ index ] ); }

   } else {

      #ifdef CHEMPS2_MPI_COMPILATION
      // Make sure that owner_x has all the tensors required to construct X
      const int owner_q       = MPIchemps2::owner_q( L, index );
      const int owner_absigma = MPIchemps2::owner_absigma( index, index );
      const int owner_cdf     = MPIchemps2::owner_cdf( L, index, index );
      const int Idiff         = 0; // Both second-quantized operators act on the same site, so the irrep difference is trivial

      if ( owner_x != owner_q ){
         if ( owner_x == MPIRANK ){ Qtensors[ index - 1 ][ 0 ] = new TensorQ( index, denBK->gIrrep( index ), true, denBK, Prob, index ); }
         if (( owner_x == MPIRANK ) || ( owner_q == MPIRANK )){ MPIchemps2::sendreceive_tensor( Qtensors[ index - 1 ][ 0 ], owner_q, owner_x, 3 * L + 3 ); }
      }
      if ( owner_x != owner_absigma ){
         if ( owner_x == MPIRANK ){ Atensors[ index - 1 ][ 0 ][ 0 ] = new TensorOperator( index, 0, 2, Idiff, true, true, false, denBK, denBK ); }
         if (( owner_x == MPIRANK ) || ( owner_absigma == MPIRANK )){ MPIchemps2::sendreceive_tensor( Atensors[ index - 1 ][ 0 ][ 0 ], owner_absigma, owner_x, 3 * L + 4 ); }
      }
      if ( owner_x != owner_cdf ){
         if ( owner_x == MPIRANK ){
            Ctensors[ index - 1 ][ 0 ][ 0 ] = new TensorOperator( index, 0, 0, Idiff, true, true, false, denBK, denBK );
            Dtensors[ index - 1 ][ 0 ][ 0 ] = new TensorOperator( index, 2, 0, Idiff, true, true, false, denBK, denBK );
         }
         if (( owner_x == MPIRANK ) || ( owner_cdf == MPIRANK )){
            MPIchemps2::sendreceive_tensor( Ctensors[ index - 1 ][ 0 ][ 0 ], owner_cdf, owner_x, 3 * L + 5 );
            MPIchemps2::sendreceive_tensor( Dtensors[ index - 1 ][ 0 ][ 0 ], owner_cdf, owner_x, 3 * L + 6 );
         }
      }

      if ( owner_x == MPIRANK ){
      #endif

         Xtensors[ index ]->update( MPS[ index ], Ltensors[ index - 1 ],
                                    Xtensors[ index - 1 ],
                                    Qtensors[ index - 1 ][ 0 ],
                                    Atensors[ index - 1 ][ 0 ][ 0 ],
                                    Ctensors[ index - 1 ][ 0 ][ 0 ],
                                    Dtensors[ index - 1 ][ 0 ][ 0 ] );

      #ifdef CHEMPS2_MPI_COMPILATION
         if ( owner_x != owner_q       ){ delete Qtensors[ index - 1 ][ 0 ]; }
         if ( owner_x != owner_absigma ){ delete Atensors[ index - 1 ][ 0 ][ 0 ]; }
         if ( owner_x != owner_cdf     ){ delete Ctensors[ index - 1 ][ 0 ][ 0 ];
                                          delete Dtensors[ index - 1 ][ 0 ][ 0 ]; }
      }
      #endif

   }

   // Otensors : certain processes own certain excitation overlaps
   if ( Exc_activated ){
      for ( int state = 0; state < nStates - 1; state++ ){
         #ifdef CHEMPS2_MPI_COMPILATION
         if ( MPIchemps2::owner_specific_excitation( L, state ) == MPIRANK )
         #endif
         {
            if ( index == 0 ){
               Exc_Overlaps[ state ][ index ]->create( MPS[ index ], Exc_MPSs[ state ][ index ] );
            } else {
               Exc_Overlaps[ state ][ index ]->update_ownmem( MPS[ index ], Exc_MPSs[ state ][ index ], Exc_Overlaps[ state ][ index - 1 ] );
            }
         }
      }
   }

   gettimeofday( &end, NULL );
   timings[ CHEMPS2_TIME_TENS_CALC ] += ( end.tv_sec - start.tv_sec ) + 1e-6 * ( end.tv_usec - start.tv_usec );

}
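
// updateMovingLeft( index ): mirror image of updateMovingRight. Recompute all
// renormalized operators at boundary index + 1 from those at boundary index + 2,
// after MPS[ index + 1 ] has changed in a left-moving sweep.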
void CheMPS2::DMRG::updateMovingLeft( const int index ){

   struct timeval start, end;
   gettimeofday( &start, NULL );

   const int dimL = denBK->gMaxDimAtBound( index + 1 );
   const int dimR = denBK->gMaxDimAtBound( index + 2 );
   #ifdef CHEMPS2_MPI_COMPILATION
   const int MPIRANK = MPIchemps2::mpi_rank();
   #endif

   #pragma omp parallel
   {

      double * workmem = new double[ dimL * dimR ];

      // Ltensors : all processes own all Ltensors
      #pragma omp for schedule(static) nowait
      for ( int cnt2 = 0; cnt2 < L - 1 - index; cnt2++ ){
         if ( cnt2 == 0 ){
            Ltensors[ index ][ cnt2 ]->create( MPS[ index + 1 ] );
         } else {
            Ltensors[ index ][ cnt2 ]->update( Ltensors[ index + 1 ][ cnt2 - 1 ], MPS[ index + 1 ], MPS[ index + 1 ], workmem );
         }
      }

      // Two-operator tensors : certain processes own certain two-operator tensors
      const int k1 = L - 1 - index;
      const int upperbound1 = ( k1 * ( k1 + 1 ) ) / 2;
      int result[ 2 ];
      #ifdef CHEMPS2_MPI_COMPILATION
         #pragma omp for schedule(dynamic)
      #else
         #pragma omp for schedule(static)
      #endif
      for ( int global = 0; global < upperbound1; global++ ){
         Special::invert_triangle_two( global, result );
         const int cnt2 = k1 - 1 - result[ 1 ];
         const int cnt3 = result[ 0 ];
         #ifdef CHEMPS2_MPI_COMPILATION
         const int siteindex1 = index + 1 + cnt3;
         const int siteindex2 = index + 1 + cnt2 + cnt3;
         #endif
         if ( cnt3 == 0 ){ // Every process owns the cnt3 == 0 tensors
            if ( cnt2 == 0 ){
               F0tensors[ index ][ cnt2 ][ cnt3 ]->makenew( MPS[ index + 1 ] );
               F1tensors[ index ][ cnt2 ][ cnt3 ]->makenew( MPS[ index + 1 ] );
               S0tensors[ index ][ cnt2 ][ cnt3 ]->makenew( MPS[ index + 1 ] );
               // S1tensors[ index ][ 0 ][ cnt3 ] do not exist
            } else {
               F0tensors[ index ][ cnt2 ][ cnt3 ]->makenew( Ltensors[ index + 1 ][ cnt2 - 1 ], MPS[ index + 1 ], workmem );
               F1tensors[ index ][ cnt2 ][ cnt3 ]->makenew( Ltensors[ index + 1 ][ cnt2 - 1 ], MPS[ index + 1 ], workmem );
               S0tensors[ index ][ cnt2 ][ cnt3 ]->makenew( Ltensors[ index + 1 ][ cnt2 - 1 ], MPS[ index + 1 ], workmem );
               S1tensors[ index ][ cnt2 ][ cnt3 ]->makenew( Ltensors[ index + 1 ][ cnt2 - 1 ], MPS[ index + 1 ], workmem );
            }
         } else {
            #ifdef CHEMPS2_MPI_COMPILATION
            if ( MPIchemps2::owner_cdf( L, siteindex1, siteindex2 ) == MPIRANK )
            #endif
            {
               F0tensors[ index ][ cnt2 ][ cnt3 ]->update( F0tensors[ index + 1 ][ cnt2 ][ cnt3 - 1 ], MPS[ index + 1 ], MPS[ index + 1 ], workmem );
               F1tensors[ index ][ cnt2 ][ cnt3 ]->update( F1tensors[ index + 1 ][ cnt2 ][ cnt3 - 1 ], MPS[ index + 1 ], MPS[ index + 1 ], workmem );
            }
            #ifdef CHEMPS2_MPI_COMPILATION
            if ( MPIchemps2::owner_absigma( siteindex1, siteindex2 ) == MPIRANK )
            #endif
            {
               S0tensors[ index ][ cnt2 ][ cnt3 ]->update( S0tensors[ index + 1 ][ cnt2 ][ cnt3 - 1 ], MPS[ index + 1 ], MPS[ index + 1 ], workmem );
               if ( cnt2 > 0 ){ S1tensors[ index ][ cnt2 ][ cnt3 ]->update( S1tensors[ index + 1 ][ cnt2 ][ cnt3 - 1 ], MPS[ index + 1 ], MPS[ index + 1 ], workmem ); }
            }
         }
      }

      // Complementary two-operator tensors : certain processes own certain complementary tensors
      const int k2 = index + 1;
      const int upperbound2 = ( k2 * ( k2 + 1 ) ) / 2;
      #ifdef CHEMPS2_MPI_COMPILATION
         #pragma omp for schedule(dynamic)
      #else
         #pragma omp for schedule(static) nowait
      #endif
      for ( int global = 0; global < upperbound2; global++ ){
         Special::invert_triangle_two( global, result );
         const int cnt2 = k2 - 1 - result[ 1 ];
         const int cnt3 = result[ 0 ];
         const int siteindex1 = index - cnt2 - cnt3;
         const int siteindex2 = index - cnt3;
         const int irrep_prod = Irreps::directProd( denBK->gIrrep( siteindex1 ), denBK->gIrrep( siteindex2 ) );
         #ifdef CHEMPS2_MPI_COMPILATION
         const bool do_absigma = ( MPIchemps2::owner_absigma( siteindex1, siteindex2 ) == MPIRANK );
         const bool do_cdf     = ( MPIchemps2::owner_cdf( L, siteindex1, siteindex2 ) == MPIRANK );
         #endif
         if ( index == L - 2 ){
            #ifdef CHEMPS2_MPI_COMPILATION
            if ( do_absigma )
            #endif
            {
               Atensors[ index ][ cnt2 ][ cnt3 ]->clear();
               if ( cnt2 > 0 ){ Btensors[ index ][ cnt2 ][ cnt3 ]->clear(); }
            }
            #ifdef CHEMPS2_MPI_COMPILATION
            if ( do_cdf )
            #endif
            {
               Ctensors[ index ][ cnt2 ][ cnt3 ]->clear();
               Dtensors[ index ][ cnt2 ][ cnt3 ]->clear();
            }
         } else {
            #ifdef CHEMPS2_MPI_COMPILATION
            if ( do_absigma )
            #endif
            {
               Atensors[ index ][ cnt2 ][ cnt3 ]->update( Atensors[ index + 1 ][ cnt2 ][ cnt3 + 1 ], MPS[ index + 1 ], MPS[ index + 1 ], workmem );
               if ( cnt2 > 0 ){ Btensors[ index ][ cnt2 ][ cnt3 ]->update( Btensors[ index + 1 ][ cnt2 ][ cnt3 + 1 ], MPS[ index + 1 ], MPS[ index + 1 ], workmem ); }
            }
            #ifdef CHEMPS2_MPI_COMPILATION
            if ( do_cdf )
            #endif
            {
               Ctensors[ index ][ cnt2 ][ cnt3 ]->update( Ctensors[ index + 1 ][ cnt2 ][ cnt3 + 1 ], MPS[ index + 1 ], MPS[ index + 1 ], workmem );
               Dtensors[ index ][ cnt2 ][ cnt3 ]->update( Dtensors[ index + 1 ][ cnt2 ][ cnt3 + 1 ], MPS[ index + 1 ], MPS[ index + 1 ], workmem );
            }
         }
         for ( int num = 0; num < L - index - 1; num++ ){
            if ( irrep_prod == S0tensors[ index ][ num ][ 0 ]->get_irrep() ){ // Else the matrix elements are zero by symmetry
               #ifdef CHEMPS2_MPI_COMPILATION
               if ( do_absigma )
               #endif
               {
                  double alpha = Prob->gMxElement( siteindex1, siteindex2, index + 1, index + 1 + num );
                  if (( cnt2 == 0 ) && ( num == 0 )){ alpha *= 0.5; }
                  if (( cnt2 > 0 ) && ( num > 0 )){ alpha += Prob->gMxElement( siteindex1, siteindex2, index + 1 + num, index + 1 ); }
                  Atensors[ index ][ cnt2 ][ cnt3 ]->daxpy( alpha, S0tensors[ index ][ num ][ 0 ] );

                  if (( num > 0 ) && ( cnt2 > 0 )){
                     alpha = Prob->gMxElement( siteindex1, siteindex2, index + 1, index + 1 + num )
                           - Prob->gMxElement( siteindex1, siteindex2, index + 1 + num, index + 1 );
                     Btensors[ index ][ cnt2 ][ cnt3 ]->daxpy( alpha, S1tensors[ index ][ num ][ 0 ] );
                  }
               }
               #ifdef CHEMPS2_MPI_COMPILATION
               if ( do_cdf )
               #endif
               {
                  double alpha = 2 * Prob->gMxElement( siteindex1, index + 1, siteindex2, index + 1 + num )
                                   - Prob->gMxElement( siteindex1, index + 1, index + 1 + num, siteindex2 );
                  Ctensors[ index ][ cnt2 ][ cnt3 ]->daxpy( alpha, F0tensors[ index ][ num ][ 0 ] );

                  alpha = - Prob->gMxElement( siteindex1, index + 1, index + 1 + num, siteindex2 );
                  Dtensors[ index ][ cnt2 ][ cnt3 ]->daxpy( alpha, F1tensors[ index ][ num ][ 0 ] );

                  if ( num > 0 ){
                     alpha = 2 * Prob->gMxElement( siteindex1, index + 1 + num, siteindex2, index + 1 )
                               - Prob->gMxElement( siteindex1, index + 1 + num, index + 1, siteindex2 );
                     Ctensors[ index ][ cnt2 ][ cnt3 ]->daxpy_transpose_tensorCD( alpha, F0tensors[ index ][ num ][ 0 ] );

                     alpha = - Prob->gMxElement( siteindex1, index + 1 + num, index + 1, siteindex2 );
                     Dtensors[ index ][ cnt2 ][ cnt3 ]->daxpy_transpose_tensorCD( alpha, F1tensors[ index ][ num ][ 0 ] );
                  }
               }
            }
         }
      }

      // Qtensors : certain processes own certain Qtensors
      #ifdef CHEMPS2_MPI_COMPILATION
         #pragma omp single
      #else
         #pragma omp for schedule(static) nowait
      #endif
      for ( int cnt2 = 0; cnt2 < index + 1; cnt2++ ){

         #ifdef CHEMPS2_MPI_COMPILATION
         const int siteindex = index - cnt2;
         const int owner_q = MPIchemps2::owner_q( L, siteindex );
         #endif
         if ( index == L - 2 ){

            #ifdef CHEMPS2_MPI_COMPILATION
            if ( owner_q == MPIRANK )
            #endif
            {
               Qtensors[ index ][ cnt2 ]->clear();
               Qtensors[ index ][ cnt2 ]->AddTermSimple( MPS[ index + 1 ] );
            }

         } else {

            #ifdef CHEMPS2_MPI_COMPILATION
            const int owner_absigma = MPIchemps2::owner_absigma( siteindex, index + 1 );
            const int owner_cdf     = MPIchemps2::owner_cdf( L, siteindex, index + 1 );
            if (( owner_q == owner_absigma ) && ( owner_q == owner_cdf ) && ( owner_q == MPIRANK )){ // No communication needed
            #endif

               double * workmemBIS = new double[ dimR * dimR ];
               Qtensors[ index ][ cnt2 ]->update( Qtensors[ index + 1 ][ cnt2 + 1 ], MPS[ index + 1 ], MPS[ index + 1 ], workmem );
               Qtensors[ index ][ cnt2 ]->AddTermSimple( MPS[ index + 1 ] );
               Qtensors[ index ][ cnt2 ]->AddTermsL( Ltensors[ index + 1 ], MPS[ index + 1 ], workmemBIS, workmem );
               Qtensors[ index ][ cnt2 ]->AddTermsAB( Atensors[ index + 1 ][ cnt2 + 1 ][ 0 ], Btensors[ index + 1 ][ cnt2 + 1 ][ 0 ], MPS[ index + 1 ], workmemBIS, workmem );
               Qtensors[ index ][ cnt2 ]->AddTermsCD( Ctensors[ index + 1 ][ cnt2 + 1 ][ 0 ], Dtensors[ index + 1 ][ cnt2 + 1 ][ 0 ], MPS[ index + 1 ], workmemBIS, workmem );
               delete [] workmemBIS;

            #ifdef CHEMPS2_MPI_COMPILATION
            } else { // Some communication is needed

               if (( owner_q == MPIRANK ) || ( owner_absigma == MPIRANK ) || ( owner_cdf == MPIRANK )){

                  TensorQ * tempQ = new TensorQ( index + 1, denBK->gIrrep( siteindex ), false, denBK, Prob, siteindex );
                  tempQ->clear();

                  // Each owner adds its own terms
                  double * workmemBIS = new double[ dimR * dimR ];
                  if ( owner_q == MPIRANK ){
                     tempQ->update( Qtensors[ index + 1 ][ cnt2 + 1 ], MPS[ index + 1 ], MPS[ index + 1 ], workmem );
                     tempQ->AddTermSimple( MPS[ index + 1 ] );
                     tempQ->AddTermsL( Ltensors[ index + 1 ], MPS[ index + 1 ], workmemBIS, workmem );
                  }
                  if ( owner_absigma == MPIRANK ){
                     tempQ->AddTermsAB( Atensors[ index + 1 ][ cnt2 + 1 ][ 0 ], Btensors[ index + 1 ][ cnt2 + 1 ][ 0 ], MPS[ index + 1 ], workmemBIS, workmem );
                  }
                  if ( owner_cdf == MPIRANK ){
                     tempQ->AddTermsCD( Ctensors[ index + 1 ][ cnt2 + 1 ][ 0 ], Dtensors[ index + 1 ][ cnt2 + 1 ][ 0 ], MPS[ index + 1 ], workmemBIS, workmem );
                  }
                  delete [] workmemBIS;

                  // All pieces are accumulated on owner_q
                  int inc = 1;
                  double alpha = 1.0;
                  int arraysize = tempQ->gKappa2index( tempQ->gNKappa() );
                  if ( owner_q == MPIRANK ){ dcopy_( &arraysize, tempQ->gStorage(), &inc, Qtensors[ index ][ cnt2 ]->gStorage(), &inc ); }
                  if ( owner_q != owner_absigma ){
                     MPIchemps2::sendreceive_tensor( tempQ, owner_absigma, owner_q, 2 * siteindex );
                     if ( owner_q == MPIRANK ){ daxpy_( &arraysize, &alpha, tempQ->gStorage(), &inc, Qtensors[ index ][ cnt2 ]->gStorage(), &inc ); }
                  }
                  if (( owner_q != owner_cdf ) && ( owner_absigma != owner_cdf )){
                     MPIchemps2::sendreceive_tensor( tempQ, owner_cdf, owner_q, 2 * siteindex + 1 );
                     if ( owner_q == MPIRANK ){ daxpy_( &arraysize, &alpha, tempQ->gStorage(), &inc, Qtensors[ index ][ cnt2 ]->gStorage(), &inc ); }
                  }
                  delete tempQ;

               }
            }
            #endif

         }
      }

      delete [] workmem;

   }

   // Xtensors : a single process owns the Xtensors
   #ifdef CHEMPS2_MPI_COMPILATION
   const int owner_x = MPIchemps2::owner_x();
   #endif
   if ( index == L - 2 ){

      #ifdef CHEMPS2_MPI_COMPILATION
      if ( owner_x == MPIRANK )
      #endif
      { Xtensors[ index ]->update( MPS[ index + 1 ] ); }

   } else {

      #ifdef CHEMPS2_MPI_COMPILATION
      // Make sure that owner_x has all the tensors required to construct X
      const int owner_q       = MPIchemps2::owner_q( L, index + 1 );
      const int owner_absigma = MPIchemps2::owner_absigma( index + 1, index + 1 );
      const int owner_cdf     = MPIchemps2::owner_cdf( L, index + 1, index + 1 );
      const int Idiff         = 0; // Both second-quantized operators act on the same site, so the irrep difference is trivial

      if ( owner_x != owner_q ){
         if ( owner_x == MPIRANK ){ Qtensors[ index + 1 ][ 0 ] = new TensorQ( index + 2, denBK->gIrrep( index + 1 ), false, denBK, Prob, index + 1 ); }
         if (( owner_x == MPIRANK ) || ( owner_q == MPIRANK )){ MPIchemps2::sendreceive_tensor( Qtensors[ index + 1 ][ 0 ], owner_q, owner_x, 3 * L + 3 ); }
      }
      if ( owner_x != owner_absigma ){
         if ( owner_x == MPIRANK ){ Atensors[ index + 1 ][ 0 ][ 0 ] = new TensorOperator( index + 2, 0, 2, Idiff, false, true, false, denBK, denBK ); }
         if (( owner_x == MPIRANK ) || ( owner_absigma == MPIRANK )){ MPIchemps2::sendreceive_tensor( Atensors[ index + 1 ][ 0 ][ 0 ], owner_absigma, owner_x, 3 * L + 4 ); }
      }
      if ( owner_x != owner_cdf ){
         if ( owner_x == MPIRANK ){
            Ctensors[ index + 1 ][ 0 ][ 0 ] = new TensorOperator( index + 2, 0, 0, Idiff, false, true,  false, denBK, denBK );
            Dtensors[ index + 1 ][ 0 ][ 0 ] = new TensorOperator( index + 2, 2, 0, Idiff, false, false, false, denBK, denBK );
         }
         if (( owner_x == MPIRANK ) || ( owner_cdf == MPIRANK )){
            MPIchemps2::sendreceive_tensor( Ctensors[ index + 1 ][ 0 ][ 0 ], owner_cdf, owner_x, 3 * L + 5 );
            MPIchemps2::sendreceive_tensor( Dtensors[ index + 1 ][ 0 ][ 0 ], owner_cdf, owner_x, 3 * L + 6 );
         }
      }

      if ( owner_x == MPIRANK ){
      #endif

         Xtensors[ index ]->update( MPS[ index + 1 ], Ltensors[ index + 1 ],
                                    Xtensors[ index + 1 ],
                                    Qtensors[ index + 1 ][ 0 ],
                                    Atensors[ index + 1 ][ 0 ][ 0 ],
                                    Ctensors[ index + 1 ][ 0 ][ 0 ],
                                    Dtensors[ index + 1 ][ 0 ][ 0 ] );

      #ifdef CHEMPS2_MPI_COMPILATION
         if ( owner_x != owner_q       ){ delete Qtensors[ index + 1 ][ 0 ]; }
         if ( owner_x != owner_absigma ){ delete Atensors[ index + 1 ][ 0 ][ 0 ]; }
         if ( owner_x != owner_cdf     ){ delete Ctensors[ index + 1 ][ 0 ][ 0 ];
                                          delete Dtensors[ index + 1 ][ 0 ][ 0 ]; }
      }
      #endif

   }

   // Otensors : certain processes own certain excitation overlaps
   if ( Exc_activated ){
      for ( int state = 0; state < nStates - 1; state++ ){
         #ifdef CHEMPS2_MPI_COMPILATION
         if ( MPIchemps2::owner_specific_excitation( L, state ) == MPIRANK )
         #endif
         {
            if ( index == L - 2 ){
               Exc_Overlaps[ state ][ index ]->create( MPS[ index + 1 ], Exc_MPSs[ state ][ index + 1 ] );
            } else {
               Exc_Overlaps[ state ][ index ]->update_ownmem( MPS[ index + 1 ], Exc_MPSs[ state ][ index + 1 ], Exc_Overlaps[ state ][ index + 1 ] );
            }
         }
      }
   }

   gettimeofday( &end, NULL );
   timings[ CHEMPS2_TIME_TENS_CALC ] += ( end.tv_sec - start.tv_sec ) + 1e-6 * ( end.tv_usec - start.tv_usec );

}
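
// allocateTensors( index, movingRight ): allocate (but do not fill) all renormalized
// operators at boundary index + 1. In MPI runs each process only allocates the tensors
// it owns ( owner_absigma / owner_cdf / owner_q / owner_x ), plus the cnt3 == 0
// two-operator tensors, which every process apparently needs because the complementary
// operators in updateMovingRight/Left are built from them.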
void CheMPS2::DMRG::allocateTensors( const int index, const bool movingRight ){

   struct timeval start, end;
   gettimeofday( &start, NULL );

   #ifdef CHEMPS2_MPI_COMPILATION
   const int MPIRANK = MPIchemps2::mpi_rank();
   #endif

   if ( movingRight ){

      // Ltensors : all processes own all Ltensors
      // To right: Ltensors[ index ][ cnt2 ] acts on site index - cnt2
      Ltensors[ index ] = new TensorL * [ index + 1 ];
      for ( int cnt2 = 0; cnt2 < index + 1; cnt2++ ){ Ltensors[ index ][ cnt2 ] = new TensorL( index + 1, denBK->gIrrep( index - cnt2 ), movingRight, denBK, denBK ); }

      // Two-operator tensors : certain processes own certain two-operator tensors
      // To right: F0, F1, S0 and S1 tensors[ index ][ cnt2 ][ cnt3 ] act on sites index - cnt2 - cnt3 and index - cnt3
      F0tensors[ index ] = new TensorF0 ** [ index + 1 ];
      F1tensors[ index ] = new TensorF1 ** [ index + 1 ];
      S0tensors[ index ] = new TensorS0 ** [ index + 1 ];
      S1tensors[ index ] = new TensorS1 ** [ index + 1 ];
      for ( int cnt2 = 0; cnt2 < ( index + 1 ); cnt2++ ){
         F0tensors[ index ][ cnt2 ] = new TensorF0 * [ index - cnt2 + 1 ];
         F1tensors[ index ][ cnt2 ] = new TensorF1 * [ index - cnt2 + 1 ];
         S0tensors[ index ][ cnt2 ] = new TensorS0 * [ index - cnt2 + 1 ];
         if ( cnt2 > 0 ){ S1tensors[ index ][ cnt2 ] = new TensorS1 * [ index - cnt2 + 1 ]; }
         for ( int cnt3 = 0; cnt3 < ( index - cnt2 + 1 ); cnt3++ ){
            const int Iprod = Irreps::directProd( denBK->gIrrep( index - cnt2 - cnt3 ), denBK->gIrrep( index - cnt3 ) );
            #ifdef CHEMPS2_MPI_COMPILATION
            if (( cnt3 == 0 ) || ( MPIchemps2::owner_cdf( L, index - cnt2 - cnt3, index - cnt3 ) == MPIRANK )){
            #endif
               F0tensors[ index ][ cnt2 ][ cnt3 ] = new TensorF0( index + 1, Iprod, movingRight, denBK );
               F1tensors[ index ][ cnt2 ][ cnt3 ] = new TensorF1( index + 1, Iprod, movingRight, denBK );
            #ifdef CHEMPS2_MPI_COMPILATION
            } else {
               F0tensors[ index ][ cnt2 ][ cnt3 ] = NULL;
               F1tensors[ index ][ cnt2 ][ cnt3 ] = NULL;
            }
            if (( cnt3 == 0 ) || ( MPIchemps2::owner_absigma( index - cnt2 - cnt3, index - cnt3 ) == MPIRANK )){
            #endif
               S0tensors[ index ][ cnt2 ][ cnt3 ] = new TensorS0( index + 1, Iprod, movingRight, denBK );
               if ( cnt2 > 0 ){ S1tensors[ index ][ cnt2 ][ cnt3 ] = new TensorS1( index + 1, Iprod, movingRight, denBK ); }
            #ifdef CHEMPS2_MPI_COMPILATION
            } else {
               S0tensors[ index ][ cnt2 ][ cnt3 ] = NULL;
               if ( cnt2 > 0 ){ S1tensors[ index ][ cnt2 ][ cnt3 ] = NULL; }
            }
            #endif
         }
      }

      // Complementary two-operator tensors : certain processes own certain complementary tensors
      // To right: A, B, C and D tensors[ index ][ cnt2 ][ cnt3 ] act on sites index + 1 + cnt3 and index + 1 + cnt2 + cnt3
      Atensors[ index ] = new TensorOperator ** [ L - 1 - index ];
      Btensors[ index ] = new TensorOperator ** [ L - 1 - index ];
      Ctensors[ index ] = new TensorOperator ** [ L - 1 - index ];
      Dtensors[ index ] = new TensorOperator ** [ L - 1 - index ];
      for ( int cnt2 = 0; cnt2 < L - 1 - index; cnt2++ ){
         Atensors[ index ][ cnt2 ] = new TensorOperator * [ L - 1 - index - cnt2 ];
         if ( cnt2 > 0 ){ Btensors[ index ][ cnt2 ] = new TensorOperator * [ L - 1 - index - cnt2 ]; }
         Ctensors[ index ][ cnt2 ] = new TensorOperator * [ L - 1 - index - cnt2 ];
         Dtensors[ index ][ cnt2 ] = new TensorOperator * [ L - 1 - index - cnt2 ];
         for ( int cnt3 = 0; cnt3 < L - 1 - index - cnt2; cnt3++ ){
            const int Idiff = Irreps::directProd( denBK->gIrrep( index + 1 + cnt3 ), denBK->gIrrep( index + 1 + cnt2 + cnt3 ) );
            #ifdef CHEMPS2_MPI_COMPILATION
            if ( MPIchemps2::owner_absigma( index + 1 + cnt3, index + 1 + cnt2 + cnt3 ) == MPIRANK ){
            #endif
               Atensors[ index ][ cnt2 ][ cnt3 ] = new TensorOperator( index + 1, 0, 2, Idiff, movingRight, true, false, denBK, denBK );
               if ( cnt2 > 0 ){ Btensors[ index ][ cnt2 ][ cnt3 ] = new TensorOperator( index + 1, 2, 2, Idiff, movingRight, true, false, denBK, denBK ); }
            #ifdef CHEMPS2_MPI_COMPILATION
            } else {
               Atensors[ index ][ cnt2 ][ cnt3 ] = NULL;
               if ( cnt2 > 0 ){ Btensors[ index ][ cnt2 ][ cnt3 ] = NULL; }
            }
            if ( MPIchemps2::owner_cdf( L, index + 1 + cnt3, index + 1 + cnt2 + cnt3 ) == MPIRANK ){
            #endif
               Ctensors[ index ][ cnt2 ][ cnt3 ] = new TensorOperator( index + 1, 0, 0, Idiff, movingRight, true,        false, denBK, denBK );
               Dtensors[ index ][ cnt2 ][ cnt3 ] = new TensorOperator( index + 1, 2, 0, Idiff, movingRight, movingRight, false, denBK, denBK );
            #ifdef CHEMPS2_MPI_COMPILATION
            } else {
               Ctensors[ index ][ cnt2 ][ cnt3 ] = NULL;
               Dtensors[ index ][ cnt2 ][ cnt3 ] = NULL;
            }
            #endif
         }
      }

      // Qtensors : certain processes own certain Qtensors
      Qtensors[ index ] = new TensorQ * [ L - 1 - index ];
      for ( int cnt2 = 0; cnt2 < L - 1 - index; cnt2++ ){
         #ifdef CHEMPS2_MPI_COMPILATION
         if ( MPIchemps2::owner_q( L, index + 1 + cnt2 ) == MPIRANK ){
         #endif
            Qtensors[ index ][ cnt2 ] = new TensorQ( index + 1, denBK->gIrrep( index + 1 + cnt2 ), movingRight, denBK, Prob, index + 1 + cnt2 );
         #ifdef CHEMPS2_MPI_COMPILATION
         } else { Qtensors[ index ][ cnt2 ] = NULL; }
         #endif
      }

      // Xtensors : a single process owns the Xtensors
      #ifdef CHEMPS2_MPI_COMPILATION
      if ( MPIchemps2::owner_x() == MPIRANK ){
      #endif
         Xtensors[ index ] = new TensorX( index + 1, movingRight, denBK, Prob );
      #ifdef CHEMPS2_MPI_COMPILATION
      } else { Xtensors[ index ] = NULL; }
      #endif

      // Otensors : certain processes own certain excitation overlaps
      if ( Exc_activated ){
         for ( int state = 0; state < nStates - 1; state++ ){
            #ifdef CHEMPS2_MPI_COMPILATION
            if ( MPIchemps2::owner_specific_excitation( L, state ) == MPIRANK )
            #endif
            { Exc_Overlaps[ state ][ index ] = new TensorO( index + 1, movingRight, denBK, Exc_BKs[ state ] ); }
         }
      }

   } else {

      // Ltensors : all processes own all Ltensors
      // To left: Ltensors[ index ][ cnt2 ] acts on site index + 1 + cnt2
      Ltensors[ index ] = new TensorL * [ L - 1 - index ];
      for ( int cnt2 = 0; cnt2 < L - 1 - index; cnt2++ ){ Ltensors[ index ][ cnt2 ] = new TensorL( index + 1, denBK->gIrrep( index + 1 + cnt2 ), movingRight, denBK, denBK ); }

      // Two-operator tensors : certain processes own certain two-operator tensors
      // To left: F0, F1, S0 and S1 tensors[ index ][ cnt2 ][ cnt3 ] act on sites index + 1 + cnt3 and index + 1 + cnt2 + cnt3
      F0tensors[ index ] = new TensorF0 ** [ L - 1 - index ];
      F1tensors[ index ] = new TensorF1 ** [ L - 1 - index ];
      S0tensors[ index ] = new TensorS0 ** [ L - 1 - index ];
      S1tensors[ index ] = new TensorS1 ** [ L - 1 - index ];
      for ( int cnt2 = 0; cnt2 < L - 1 - index; cnt2++ ){
         F0tensors[ index ][ cnt2 ] = new TensorF0 * [ L - 1 - index - cnt2 ];
         F1tensors[ index ][ cnt2 ] = new TensorF1 * [ L - 1 - index - cnt2 ];
         S0tensors[ index ][ cnt2 ] = new TensorS0 * [ L - 1 - index - cnt2 ];
         if ( cnt2 > 0 ){ S1tensors[ index ][ cnt2 ] = new TensorS1 * [ L - 1 - index - cnt2 ]; }
         for ( int cnt3 = 0; cnt3 < L - 1 - index - cnt2; cnt3++ ){
            const int Iprod = Irreps::directProd( denBK->gIrrep( index + 1 + cnt3 ), denBK->gIrrep( index + 1 + cnt2 + cnt3 ) );
            #ifdef CHEMPS2_MPI_COMPILATION
            if (( cnt3 == 0 ) || ( MPIchemps2::owner_cdf( L, index + 1 + cnt3, index + 1 + cnt2 + cnt3 ) == MPIRANK )){
            #endif
               F0tensors[ index ][ cnt2 ][ cnt3 ] = new TensorF0( index + 1, Iprod, movingRight, denBK );
               F1tensors[ index ][ cnt2 ][ cnt3 ] = new TensorF1( index + 1, Iprod, movingRight, denBK );
            #ifdef CHEMPS2_MPI_COMPILATION
            } else {
               F0tensors[ index ][ cnt2 ][ cnt3 ] = NULL;
               F1tensors[ index ][ cnt2 ][ cnt3 ] = NULL;
            }
            if (( cnt3 == 0 ) || ( MPIchemps2::owner_absigma( index + 1 + cnt3, index + 1 + cnt2 + cnt3 ) == MPIRANK )){
            #endif
               S0tensors[ index ][ cnt2 ][ cnt3 ] = new TensorS0( index + 1, Iprod, movingRight, denBK );
               if ( cnt2 > 0 ){ S1tensors[ index ][ cnt2 ][ cnt3 ] = new TensorS1( index + 1, Iprod, movingRight, denBK ); }
            #ifdef CHEMPS2_MPI_COMPILATION
            } else {
               S0tensors[ index ][ cnt2 ][ cnt3 ] = NULL;
               if ( cnt2 > 0 ){ S1tensors[ index ][ cnt2 ][ cnt3 ] = NULL; }
            }
            #endif
         }
      }

      // Complementary two-operator tensors : certain processes own certain complementary tensors
      // To left: A, B, C and D tensors[ index ][ cnt2 ][ cnt3 ] act on sites index - cnt2 - cnt3 and index - cnt3
      Atensors[ index ] = new TensorOperator ** [ index + 1 ];
      Btensors[ index ] = new TensorOperator ** [ index + 1 ];
      Ctensors[ index ] = new TensorOperator ** [ index + 1 ];
      Dtensors[ index ] = new TensorOperator ** [ index + 1 ];
      for ( int cnt2 = 0; cnt2 < index + 1; cnt2++ ){
         Atensors[ index ][ cnt2 ] = new TensorOperator * [ index + 1 - cnt2 ];
         if ( cnt2 > 0 ){ Btensors[ index ][ cnt2 ] = new TensorOperator * [ index + 1 - cnt2 ]; }
         Ctensors[ index ][ cnt2 ] = new TensorOperator * [ index + 1 - cnt2 ];
         Dtensors[ index ][ cnt2 ] = new TensorOperator * [ index + 1 - cnt2 ];
         for ( int cnt3 = 0; cnt3 < index + 1 - cnt2; cnt3++ ){
            const int Idiff = Irreps::directProd( denBK->gIrrep( index - cnt2 - cnt3 ), denBK->gIrrep( index - cnt3 ) );
            #ifdef CHEMPS2_MPI_COMPILATION
            if ( MPIchemps2::owner_absigma( index - cnt2 - cnt3, index - cnt3 ) == MPIRANK ){
            #endif
               Atensors[ index ][ cnt2 ][ cnt3 ] = new TensorOperator( index + 1, 0, 2, Idiff, movingRight, true, false, denBK, denBK );
               if ( cnt2 > 0 ){ Btensors[ index ][ cnt2 ][ cnt3 ] = new TensorOperator( index + 1, 2, 2, Idiff, movingRight, true, false, denBK, denBK ); }
            #ifdef CHEMPS2_MPI_COMPILATION
            } else {
               Atensors[ index ][ cnt2 ][ cnt3 ] = NULL;
               if ( cnt2 > 0 ){ Btensors[ index ][ cnt2 ][ cnt3 ] = NULL; }
            }
            if ( MPIchemps2::owner_cdf( L, index - cnt2 - cnt3, index - cnt3 ) == MPIRANK ){
            #endif
               Ctensors[ index ][ cnt2 ][ cnt3 ] = new TensorOperator( index + 1, 0, 0, Idiff, movingRight, true,        false, denBK, denBK );
               Dtensors[ index ][ cnt2 ][ cnt3 ] = new TensorOperator( index + 1, 2, 0, Idiff, movingRight, movingRight, false, denBK, denBK );
            #ifdef CHEMPS2_MPI_COMPILATION
            } else {
               Ctensors[ index ][ cnt2 ][ cnt3 ] = NULL;
               Dtensors[ index ][ cnt2 ][ cnt3 ] = NULL;
            }
            #endif
         }
      }

      // Qtensors : certain processes own certain Qtensors
      Qtensors[ index ] = new TensorQ * [ index + 1 ];
      for ( int cnt2 = 0; cnt2 < index + 1; cnt2++ ){
         #ifdef CHEMPS2_MPI_COMPILATION
         if ( MPIchemps2::owner_q( L, index - cnt2 ) == MPIRANK ){
         #endif
            Qtensors[ index ][ cnt2 ] = new TensorQ( index + 1, denBK->gIrrep( index - cnt2 ), movingRight, denBK, Prob, index - cnt2 );
         #ifdef CHEMPS2_MPI_COMPILATION
         } else { Qtensors[ index ][ cnt2 ] = NULL; }
         #endif
      }

      // Xtensors : a single process owns the Xtensors
      #ifdef CHEMPS2_MPI_COMPILATION
      if ( MPIchemps2::owner_x() == MPIRANK ){
      #endif
         Xtensors[ index ] = new TensorX( index + 1, movingRight, denBK, Prob );
      #ifdef CHEMPS2_MPI_COMPILATION
      } else { Xtensors[ index ] = NULL; }
      #endif

      // Otensors : certain processes own certain excitation overlaps
      if ( Exc_activated ){
         for ( int state = 0; state < nStates - 1; state++ ){
            #ifdef CHEMPS2_MPI_COMPILATION
            if ( MPIchemps2::owner_specific_excitation( L, state ) == MPIRANK )
            #endif
            { Exc_Overlaps[ state ][ index ] = new TensorO( index + 1, movingRight, denBK, Exc_BKs[ state ] ); }
         }
      }

   }

   gettimeofday( &end, NULL );
   timings[ CHEMPS2_TIME_TENS_ALLOC ] += ( end.tv_sec - start.tv_sec ) + 1e-6 * ( end.tv_usec - start.tv_usec );

}
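
// MY_HDF5_READ_BATCH: read a batch of tensors back from one contiguous 1-D dataset
// ( group "tag", dataset "storage" ), selecting one hyperslab per tensor in the same
// order in which MY_HDF5_WRITE_BATCH wrote them.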
void CheMPS2::DMRG::MY_HDF5_READ_BATCH( const hid_t file_id, const int number, Tensor ** batch, const long long totalsize, const std::string tag ){

   const hid_t group_id     = H5Gopen( file_id, tag.c_str(), H5P_DEFAULT );
   const hsize_t dimarray   = totalsize;
   const hid_t dataspace_id = H5Screate_simple( 1, &dimarray, NULL );
   const hid_t dataset_id   = H5Dopen( group_id, "storage", H5P_DEFAULT );

   long long offset = 0;
   for ( int cnt = 0; cnt < number; cnt++ ){
      const int tensor_size = batch[ cnt ]->gKappa2index( batch[ cnt ]->gNKappa() );
      if ( tensor_size > 0 ){
         const hsize_t start = offset;
         const hsize_t count = tensor_size;
         H5Sselect_hyperslab( dataspace_id, H5S_SELECT_SET, &start, NULL, &count, NULL );
         const hid_t memspace_id = H5Screate_simple( 1, &count, NULL );
         H5Dread( dataset_id, H5T_NATIVE_DOUBLE, memspace_id, dataspace_id, H5P_DEFAULT, batch[ cnt ]->gStorage() );
         H5Sclose( memspace_id );
      }
      offset += tensor_size;
   }

   H5Dclose( dataset_id );
   H5Sclose( dataspace_id );
   H5Gclose( group_id );

   assert( totalsize == offset );
   num_double_read_disk += totalsize;

}
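
// MY_HDF5_WRITE_BATCH: exact write-side counterpart of MY_HDF5_READ_BATCH. Packing a
// whole batch of tensors into a single dataset presumably keeps the number of HDF5
// objects per file small; the per-tensor offsets are implicit, so reads must traverse
// the batch in exactly the same order as writes.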
void CheMPS2::DMRG::MY_HDF5_WRITE_BATCH( const hid_t file_id, const int number, Tensor ** batch, const long long totalsize, const std::string tag ){

   const hid_t group_id     = H5Gcreate( file_id, tag.c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT );
   const hsize_t dimarray   = totalsize;
   const hid_t dataspace_id = H5Screate_simple( 1, &dimarray, NULL );
   const hid_t dataset_id   = H5Dcreate( group_id, "storage", H5T_NATIVE_DOUBLE, dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT );

   long long offset = 0;
   for ( int cnt = 0; cnt < number; cnt++ ){
      const int tensor_size = batch[ cnt ]->gKappa2index( batch[ cnt ]->gNKappa() );
      if ( tensor_size > 0 ){
         const hsize_t start = offset;
         const hsize_t count = tensor_size;
         H5Sselect_hyperslab( dataspace_id, H5S_SELECT_SET, &start, NULL, &count, NULL );
         const hid_t memspace_id = H5Screate_simple( 1, &count, NULL );
         H5Dwrite( dataset_id, H5T_NATIVE_DOUBLE, memspace_id, dataspace_id, H5P_DEFAULT, batch[ cnt ]->gStorage() );
         H5Sclose( memspace_id );
      }
      offset += tensor_size;
   }

   H5Dclose( dataset_id );
   H5Sclose( dataspace_id );
   H5Gclose( group_id );

   assert( totalsize == offset );
   num_double_write_disk += totalsize;

}
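
// OperatorsOnDisk( index, movingRight, store ): write ( store == true ) or read back
// ( store == false ) all renormalized operators at boundary index + 1, one HDF5 file
// per boundary and per process. The batches below must follow the same ownership logic
// as allocateTensors, since only allocated (non-NULL) tensors may be touched.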
void CheMPS2::DMRG::OperatorsOnDisk( const int index, const bool movingRight, const bool store ){

   struct timeval start, end;
   gettimeofday( &start, NULL );

   const int Nbound = movingRight ? index + 1 : L - 1 - index;
   const int Cbound = movingRight ? L - 1 - index : index + 1;
   #ifdef CHEMPS2_MPI_COMPILATION
   const int MPIRANK = MPIchemps2::mpi_rank();
   #endif

   std::stringstream thefilename;
   // The PID is different for each MPI process
   thefilename << tempfolder << "/" << CheMPS2::DMRG_OPERATOR_storage_prefix << thePID << "_index_" << index << ".h5";

   // Create (store) or open read-only (load) the HDF5 file
   const hid_t file_id = ( store ) ? H5Fcreate( thefilename.str().c_str(), H5F_ACC_TRUNC,  H5P_DEFAULT, H5P_DEFAULT )
                                   : H5Fopen(   thefilename.str().c_str(), H5F_ACC_RDONLY, H5P_DEFAULT );

   // Ltensors
   long long totalsizeL = 0;
   Tensor ** batchL = new Tensor*[ Nbound ];
   for ( int cnt2 = 0; cnt2 < Nbound; cnt2++ ){
      totalsizeL += Ltensors[ index ][ cnt2 ]->gKappa2index( Ltensors[ index ][ cnt2 ]->gNKappa() );
      batchL[ cnt2 ] = Ltensors[ index ][ cnt2 ];
   }
   if ( totalsizeL > 0 ){
      const std::string tag = "Ltensors";
      if ( store ){ MY_HDF5_WRITE_BATCH( file_id, Nbound, batchL, totalsizeL, tag ); }
      else {        MY_HDF5_READ_BATCH(  file_id, Nbound, batchL, totalsizeL, tag ); }
   }
   delete [] batchL;

   // Two-operator tensors
   long long totalsizeF0 = 0; int numF0 = 0; Tensor ** batchF0 = new Tensor*[ ( Nbound * ( Nbound + 1 ) ) / 2 ];
   long long totalsizeF1 = 0; int numF1 = 0; Tensor ** batchF1 = new Tensor*[ ( Nbound * ( Nbound + 1 ) ) / 2 ];
   long long totalsizeS0 = 0; int numS0 = 0; Tensor ** batchS0 = new Tensor*[ ( Nbound * ( Nbound + 1 ) ) / 2 ];
   long long totalsizeS1 = 0; int numS1 = 0; Tensor ** batchS1 = new Tensor*[ ( Nbound * ( Nbound + 1 ) ) / 2 ];
   for ( int cnt2 = 0; cnt2 < Nbound; cnt2++ ){
      for ( int cnt3 = 0; cnt3 < Nbound - cnt2; cnt3++ ){
         #ifdef CHEMPS2_MPI_COMPILATION
         const int siteindex1 = movingRight ? index - cnt2 - cnt3 : index + 1 + cnt3;
         const int siteindex2 = movingRight ? index - cnt3       : index + 1 + cnt2 + cnt3;
         if (( cnt3 == 0 ) || ( MPIchemps2::owner_cdf( L, siteindex1, siteindex2 ) == MPIRANK ))
         #endif
         {
            batchF0[ numF0 ] = F0tensors[ index ][ cnt2 ][ cnt3 ]; totalsizeF0 += batchF0[ numF0 ]->gKappa2index( batchF0[ numF0 ]->gNKappa() ); numF0++;
            batchF1[ numF1 ] = F1tensors[ index ][ cnt2 ][ cnt3 ]; totalsizeF1 += batchF1[ numF1 ]->gKappa2index( batchF1[ numF1 ]->gNKappa() ); numF1++;
         }
         #ifdef CHEMPS2_MPI_COMPILATION
         if (( cnt3 == 0 ) || ( MPIchemps2::owner_absigma( siteindex1, siteindex2 ) == MPIRANK ))
         #endif
         {
            batchS0[ numS0 ] = S0tensors[ index ][ cnt2 ][ cnt3 ]; totalsizeS0 += batchS0[ numS0 ]->gKappa2index( batchS0[ numS0 ]->gNKappa() ); numS0++;
            if ( cnt2 > 0 ){ batchS1[ numS1 ] = S1tensors[ index ][ cnt2 ][ cnt3 ]; totalsizeS1 += batchS1[ numS1 ]->gKappa2index( batchS1[ numS1 ]->gNKappa() ); numS1++; }
         }
      }
   }
   if ( totalsizeF0 > 0 ){
      const std::string tag = "F0tensors";
      if ( store ){ MY_HDF5_WRITE_BATCH( file_id, numF0, batchF0, totalsizeF0, tag ); }
      else {        MY_HDF5_READ_BATCH(  file_id, numF0, batchF0, totalsizeF0, tag ); }
   }
   if ( totalsizeF1 > 0 ){
      const std::string tag = "F1tensors";
      if ( store ){ MY_HDF5_WRITE_BATCH( file_id, numF1, batchF1, totalsizeF1, tag ); }
      else {        MY_HDF5_READ_BATCH(  file_id, numF1, batchF1, totalsizeF1, tag ); }
   }
   if ( totalsizeS0 > 0 ){
      const std::string tag = "S0tensors";
      if ( store ){ MY_HDF5_WRITE_BATCH( file_id, numS0, batchS0, totalsizeS0, tag ); }
      else {        MY_HDF5_READ_BATCH(  file_id, numS0, batchS0, totalsizeS0, tag ); }
   }
   if ( totalsizeS1 > 0 ){
      const std::string tag = "S1tensors";
      if ( store ){ MY_HDF5_WRITE_BATCH( file_id, numS1, batchS1, totalsizeS1, tag ); }
      else {        MY_HDF5_READ_BATCH(  file_id, numS1, batchS1, totalsizeS1, tag ); }
   }
   delete [] batchF0;
   delete [] batchF1;
   delete [] batchS0;
   delete [] batchS1;

   // Complementary two-operator tensors
   long long totalsizeA = 0; int numA = 0; Tensor ** batchA = new Tensor*[ ( Cbound * ( Cbound + 1 ) ) / 2 ];
   long long totalsizeB = 0; int numB = 0; Tensor ** batchB = new Tensor*[ ( Cbound * ( Cbound + 1 ) ) / 2 ];
   long long totalsizeC = 0; int numC = 0; Tensor ** batchC = new Tensor*[ ( Cbound * ( Cbound + 1 ) ) / 2 ];
   long long totalsizeD = 0; int numD = 0; Tensor ** batchD = new Tensor*[ ( Cbound * ( Cbound + 1 ) ) / 2 ];
   for ( int cnt2 = 0; cnt2 < Cbound; cnt2++ ){
      for ( int cnt3 = 0; cnt3 < Cbound - cnt2; cnt3++ ){
         #ifdef CHEMPS2_MPI_COMPILATION
         const int siteindex1 = movingRight ? index + 1 + cnt3        : index - cnt2 - cnt3;
         const int siteindex2 = movingRight ? index + 1 + cnt2 + cnt3 : index - cnt3;
         if ( MPIchemps2::owner_absigma( siteindex1, siteindex2 ) == MPIRANK )
         #endif
         {
            batchA[ numA ] = Atensors[ index ][ cnt2 ][ cnt3 ]; totalsizeA += batchA[ numA ]->gKappa2index( batchA[ numA ]->gNKappa() ); numA++;
            if ( cnt2 > 0 ){ batchB[ numB ] = Btensors[ index ][ cnt2 ][ cnt3 ]; totalsizeB += batchB[ numB ]->gKappa2index( batchB[ numB ]->gNKappa() ); numB++; }
         }
         #ifdef CHEMPS2_MPI_COMPILATION
         if ( MPIchemps2::owner_cdf( L, siteindex1, siteindex2 ) == MPIRANK )
         #endif
         {
            batchC[ numC ] = Ctensors[ index ][ cnt2 ][ cnt3 ]; totalsizeC += batchC[ numC ]->gKappa2index( batchC[ numC ]->gNKappa() ); numC++;
            batchD[ numD ] = Dtensors[ index ][ cnt2 ][ cnt3 ]; totalsizeD += batchD[ numD ]->gKappa2index( batchD[ numD ]->gNKappa() ); numD++;
         }
      }
   }
   if ( totalsizeA > 0 ){
      const std::string tag = "Atensors";
      if ( store ){ MY_HDF5_WRITE_BATCH( file_id, numA, batchA, totalsizeA, tag ); }
      else {        MY_HDF5_READ_BATCH(  file_id, numA, batchA, totalsizeA, tag ); }
   }
   if ( totalsizeB > 0 ){
      const std::string tag = "Btensors";
      if ( store ){ MY_HDF5_WRITE_BATCH( file_id, numB, batchB, totalsizeB, tag ); }
      else {        MY_HDF5_READ_BATCH(  file_id, numB, batchB, totalsizeB, tag ); }
   }
   if ( totalsizeC > 0 ){
      const std::string tag = "Ctensors";
      if ( store ){ MY_HDF5_WRITE_BATCH( file_id, numC, batchC, totalsizeC, tag ); }
      else {        MY_HDF5_READ_BATCH(  file_id, numC, batchC, totalsizeC, tag ); }
   }
   if ( totalsizeD > 0 ){
      const std::string tag = "Dtensors";
      if ( store ){ MY_HDF5_WRITE_BATCH( file_id, numD, batchD, totalsizeD, tag ); }
      else {        MY_HDF5_READ_BATCH(  file_id, numD, batchD, totalsizeD, tag ); }
   }
   delete [] batchA;
   delete [] batchB;
   delete [] batchC;
   delete [] batchD;

   // Qtensors
   long long totalsizeQ = 0;
   int numQ = 0;
   Tensor ** batchQ = new Tensor*[ Cbound ];
   for ( int cnt2 = 0; cnt2 < Cbound; cnt2++ ){
      #ifdef CHEMPS2_MPI_COMPILATION
      const int siteindex = movingRight ? index + 1 + cnt2 : index - cnt2;
      if ( MPIchemps2::owner_q( L, siteindex ) == MPIRANK )
      #endif
      {
         batchQ[ numQ ] = Qtensors[ index ][ cnt2 ]; totalsizeQ += batchQ[ numQ ]->gKappa2index( batchQ[ numQ ]->gNKappa() ); numQ++;
      }
   }
   if ( totalsizeQ > 0 ){
      const std::string tag = "Qtensors";
      if ( store ){ MY_HDF5_WRITE_BATCH( file_id, numQ, batchQ, totalsizeQ, tag ); }
      else {        MY_HDF5_READ_BATCH(  file_id, numQ, batchQ, totalsizeQ, tag ); }
   }
   delete [] batchQ;

   // Xtensors
   #ifdef CHEMPS2_MPI_COMPILATION
   if ( MPIchemps2::owner_x() == MPIRANK )
   #endif
   {
      Tensor ** batchX = new Tensor*[ 1 ];
      const long long totalsizeX = Xtensors[ index ]->gKappa2index( Xtensors[ index ]->gNKappa() );
      batchX[ 0 ] = Xtensors[ index ];
      if ( totalsizeX > 0 ){
         const std::string tag = "Xtensors";
         if ( store ){ MY_HDF5_WRITE_BATCH( file_id, 1, batchX, totalsizeX, tag ); }
         else {        MY_HDF5_READ_BATCH(  file_id, 1, batchX, totalsizeX, tag ); }
      }
      delete [] batchX;
   }

   // Otensors
   if ( Exc_activated ){
      long long totalsizeO = 0;
      int numO = 0;
      Tensor ** batchO = new Tensor*[ nStates - 1 ];
      for ( int state = 0; state < nStates - 1; state++ ){
         #ifdef CHEMPS2_MPI_COMPILATION
         if ( MPIchemps2::owner_specific_excitation( L, state ) == MPIRANK )
         #endif
         {
            batchO[ numO ] = Exc_Overlaps[ state ][ index ]; totalsizeO += batchO[ numO ]->gKappa2index( batchO[ numO ]->gNKappa() ); numO++;
         }
      }
      if ( totalsizeO > 0 ){
         const std::string tag = "Otensors";
         if ( store ){ MY_HDF5_WRITE_BATCH( file_id, numO, batchO, totalsizeO, tag ); }
         else {        MY_HDF5_READ_BATCH(  file_id, numO, batchO, totalsizeO, tag ); }
      }
      delete [] batchO;
   }

   H5Fclose( file_id );

   gettimeofday( &end, NULL );
   if ( store ){ timings[ CHEMPS2_TIME_DISK_WRITE ] += ( end.tv_sec - start.tv_sec ) + 1e-6 * ( end.tv_usec - start.tv_usec ); }
   else {        timings[ CHEMPS2_TIME_DISK_READ  ] += ( end.tv_sec - start.tv_sec ) + 1e-6 * ( end.tv_usec - start.tv_usec ); }

}
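
// deleteTensors( index, movingRight ): free the renormalized operators at boundary
// index + 1; movingRight must match the value used in allocateTensors so that the
// same ownership pattern (and hence the same set of non-NULL pointers) is traversed.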
void CheMPS2::DMRG::deleteTensors( const int index, const bool movingRight ){

   struct timeval start, end;
   gettimeofday( &start, NULL );

   const int Nbound = movingRight ? index + 1 : L - 1 - index;
   const int Cbound = movingRight ? L - 1 - index : index + 1;
   #ifdef CHEMPS2_MPI_COMPILATION
   const int MPIRANK = MPIchemps2::mpi_rank();
   #endif

   // Ltensors : all processes own all Ltensors
   for ( int cnt2 = 0; cnt2 < Nbound; cnt2++ ){ delete Ltensors[ index ][ cnt2 ]; }
   delete [] Ltensors[ index ];

   // Two-operator tensors : certain processes own certain two-operator tensors
   for ( int cnt2 = 0; cnt2 < Nbound; cnt2++ ){
      for ( int cnt3 = 0; cnt3 < Nbound - cnt2; cnt3++ ){
         #ifdef CHEMPS2_MPI_COMPILATION
         const int siteindex1 = movingRight ? index - cnt2 - cnt3 : index + 1 + cnt3;
         const int siteindex2 = movingRight ? index - cnt3       : index + 1 + cnt2 + cnt3;
         if (( cnt3 == 0 ) || ( MPIchemps2::owner_cdf( L, siteindex1, siteindex2 ) == MPIRANK ))
         #endif
         {
            delete F0tensors[ index ][ cnt2 ][ cnt3 ];
            delete F1tensors[ index ][ cnt2 ][ cnt3 ];
         }
         #ifdef CHEMPS2_MPI_COMPILATION
         if (( cnt3 == 0 ) || ( MPIchemps2::owner_absigma( siteindex1, siteindex2 ) == MPIRANK ))
         #endif
         {
            delete S0tensors[ index ][ cnt2 ][ cnt3 ];
            if ( cnt2 > 0 ){ delete S1tensors[ index ][ cnt2 ][ cnt3 ]; }
         }
      }
      delete [] F0tensors[ index ][ cnt2 ];
      delete [] F1tensors[ index ][ cnt2 ];
      delete [] S0tensors[ index ][ cnt2 ];
      if ( cnt2 > 0 ){ delete [] S1tensors[ index ][ cnt2 ]; }
   }
   delete [] F0tensors[ index ];
   delete [] F1tensors[ index ];
   delete [] S0tensors[ index ];
   delete [] S1tensors[ index ];

   // Complementary two-operator tensors : certain processes own certain complementary tensors
   for ( int cnt2 = 0; cnt2 < Cbound; cnt2++ ){
      for ( int cnt3 = 0; cnt3 < Cbound - cnt2; cnt3++ ){
         #ifdef CHEMPS2_MPI_COMPILATION
         const int siteindex1 = movingRight ? index + 1 + cnt3        : index - cnt2 - cnt3;
         const int siteindex2 = movingRight ? index + 1 + cnt2 + cnt3 : index - cnt3;
         if ( MPIchemps2::owner_absigma( siteindex1, siteindex2 ) == MPIRANK )
         #endif
         {
            delete Atensors[ index ][ cnt2 ][ cnt3 ];
            if ( cnt2 > 0 ){ delete Btensors[ index ][ cnt2 ][ cnt3 ]; }
         }
         #ifdef CHEMPS2_MPI_COMPILATION
         if ( MPIchemps2::owner_cdf( L, siteindex1, siteindex2 ) == MPIRANK )
         #endif
         {
            delete Ctensors[ index ][ cnt2 ][ cnt3 ];
            delete Dtensors[ index ][ cnt2 ][ cnt3 ];
         }
      }
      delete [] Atensors[ index ][ cnt2 ];
      if ( cnt2 > 0 ){ delete [] Btensors[ index ][ cnt2 ]; }
      delete [] Ctensors[ index ][ cnt2 ];
      delete [] Dtensors[ index ][ cnt2 ];
   }
   delete [] Atensors[ index ];
   delete [] Btensors[ index ];
   delete [] Ctensors[ index ];
   delete [] Dtensors[ index ];

   // Qtensors : certain processes own certain Qtensors
   for ( int cnt2 = 0; cnt2 < Cbound; cnt2++ ){
      #ifdef CHEMPS2_MPI_COMPILATION
      const int siteindex = movingRight ? index + 1 + cnt2 : index - cnt2;
      if ( MPIchemps2::owner_q( L, siteindex ) == MPIRANK )
      #endif
      { delete Qtensors[ index ][ cnt2 ]; }
   }
   delete [] Qtensors[ index ];

   // Xtensors : a single process owns the Xtensors
   #ifdef CHEMPS2_MPI_COMPILATION
   if ( MPIchemps2::owner_x() == MPIRANK )
   #endif
   { delete Xtensors[ index ]; }

   // Otensors : certain processes own certain excitation overlaps
   if ( Exc_activated ){
      for ( int state = 0; state < nStates - 1; state++ ){
         #ifdef CHEMPS2_MPI_COMPILATION
         if ( MPIchemps2::owner_specific_excitation( L, state ) == MPIRANK )
         #endif
         { delete Exc_Overlaps[ state ][ index ]; }
      }
   }

   gettimeofday( &end, NULL );
   timings[ CHEMPS2_TIME_TENS_FREE ] += ( end.tv_sec - start.tv_sec ) + 1e-6 * ( end.tv_usec - start.tv_usec );

}
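
// deleteStoredOperators: remove the per-process HDF5 operator files from tempfolder
// with an "rm" system call.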
void CheMPS2::DMRG::deleteStoredOperators(){

   std::stringstream temp;
   temp << "rm " << tempfolder << "/" << CheMPS2::DMRG_OPERATOR_storage_prefix << thePID << "*.h5";
   int info = system( temp.str().c_str() );
   std::cout << "Info on DMRG::operators rm call to system: " << info << std::endl;

}