CheMPS2
CASSCFnewtonraphson.cpp
/*
   CheMPS2: a spin-adapted implementation of DMRG for ab initio quantum chemistry
   Copyright (C) 2013-2016 Sebastian Wouters

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/

#include <stdlib.h>
#include <iostream>
#include <fstream> // Needed for the std::ifstream using-declaration below
#include <string>
#include <math.h>
#include <algorithm>
#include <sys/stat.h>
#include <assert.h>

#include "CASSCF.h"
#include "Lapack.h"
#include "DMRGSCFrotations.h"
#include "EdmistonRuedenberg.h"
#include "Davidson.h"
#include "FCI.h"
#include "MPIchemps2.h"

using std::string;
using std::ifstream;
using std::cout;
using std::endl;
using std::max;

void CheMPS2::CASSCF::delete_file( const string filename ){

   struct stat file_info;
   const int thestat = stat( filename.c_str(), &file_info );
   if ( thestat == 0 ){
      const string temp = "rm " + filename;
      int info = system( temp.c_str() );
      cout << "Info on system( " << temp << " ) = " << info << endl;
   } else {
      cout << "No file " << filename << " found." << endl;
   }

}

double CheMPS2::CASSCF::solve( const int Nelectrons, const int TwoS, const int Irrep, ConvergenceScheme * OptScheme, const int rootNum, DMRGSCFoptions * scf_options ){

   #ifdef CHEMPS2_MPI_COMPILATION
   const bool am_i_master = ( MPIchemps2::mpi_rank() == MPI_CHEMPS2_MASTER );
   #else
   const bool am_i_master = true;
   #endif

   const int num_elec = Nelectrons - 2 * iHandler->getNOCCsum();
   assert( num_elec >= 0 );
   assert(( OptScheme != NULL ) || ( rootNum == 1 ));

   // Convergence variables
   double gradNorm = 1.0;
   double updateNorm = 1.0;
   double * gradient = NULL;
   if ( am_i_master ){
      gradient = new double[ unitary->getNumVariablesX() ];
      for ( int cnt = 0; cnt < unitary->getNumVariablesX(); cnt++ ){ gradient[ cnt ] = 0.0; }
   }
   double * diis_vec = NULL;
   double Energy = 1e8;

   // The CheMPS2::Problem for the inner DMRG calculation
   Hamiltonian * HamDMRG = new Hamiltonian( nOrbDMRG, SymmInfo.getGroupNumber(), iHandler->getIrrepOfEachDMRGorbital() );
   Problem * Prob = new Problem( HamDMRG, TwoS, num_elec, Irrep );
   Prob->SetupReorderD2h(); // Doesn't matter if the group isn't D2h; Prob checks it.

   // Determine the maximum NORB( irrep ) and the max_block_size for the ERI orbital rotation
   const int maxlinsize = iHandler->getNORBmax();
   const long long fullsize = ((long long) maxlinsize ) * ((long long) maxlinsize ) * ((long long) maxlinsize ) * ((long long) maxlinsize );
   const string tmp_filename = tmp_folder + "/" + CheMPS2::DMRGSCF_eri_storage_name;
   const int dmrgsize_power4 = nOrbDMRG * nOrbDMRG * nOrbDMRG * nOrbDMRG;
   // For ( ERI rotation, update unitary, block diagonalize, orbital localization )
   DMRGSCFintegrals * theRotatedTEI = new DMRGSCFintegrals( iHandler );
   DMRGSCFwtilde * wmattilde = new DMRGSCFwtilde( iHandler );
   const int temp_work_size = (( fullsize > CheMPS2::DMRGSCF_max_mem_eri_tfo ) ? CheMPS2::DMRGSCF_max_mem_eri_tfo : fullsize );
   const int work_mem_size = max( max( temp_work_size , maxlinsize * maxlinsize * 4 ) , dmrgsize_power4 );
   double * mem1 = new double[ work_mem_size ];
   double * mem2 = new double[ work_mem_size ];
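   /* Sizing note (derived from the three uses of mem1 and mem2 below): the
      work buffers must hold (1) an ERI rotation batch of at most
      DMRGSCF_max_mem_eri_tfo doubles, (2) four maxlinsize x maxlinsize
      blocks for the unitary update, and (3) the full active-space 2-RDM of
      nOrbDMRG^4 doubles, hence the nested max() above. */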

   // Only MASTER process has Edmiston-Ruedenberg active space localizer
   EdmistonRuedenberg * theLocalizer = NULL;
   if (( scf_options->getWhichActiveSpace() >= 2 ) && ( am_i_master )){ theLocalizer = new EdmistonRuedenberg( HamDMRG->getVmat(), iHandler->getGroupNumber() ); }

   // Load unitary from disk
   if ( scf_options->getStoreUnitary() ){
      if ( am_i_master ){
         struct stat file_info;
         int master_stat = stat( (scf_options->getUnitaryStorageName()).c_str(), &file_info );
         if ( master_stat == 0 ){ unitary->loadU( scf_options->getUnitaryStorageName() ); }
      }
      #ifdef CHEMPS2_MPI_COMPILATION
      unitary->broadcast( MPI_CHEMPS2_MASTER ); // Unitary was set to the identity at construction
      #endif
   }

   // Only master has DIIS; load DIIS from disk
   DIIS * diis = NULL;
   if (( scf_options->getDoDIIS() ) && ( scf_options->getStoreDIIS() ) && ( am_i_master )){
      struct stat file_info;
      int master_stat = stat( (scf_options->getDIISStorageName()).c_str(), &file_info );
      if ( master_stat == 0 ){
         const int diis_vec_size = iHandler->getROTparamsize();
         diis = new DIIS( diis_vec_size, unitary->getNumVariablesX(), scf_options->getNumDIISVecs() );
         diis->loadDIIS( scf_options->getDIISStorageName() );
         diis_vec = new double[ diis_vec_size ];
      }
   }

   int nIterations = 0;

   /*******************************
    ***  Actual DMRGSCF loops   ***
    *******************************/
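   /* Each macro-iteration below performs, in order:
      (1) apply the previous Newton-Raphson update to the unitary
          (DIIS-accelerated once the update norm is small enough),
      (2) rotate the integrals and fill HamDMRG for the active space,
      (3) while DIIS has not started yet, optionally localize
          (Edmiston-Ruedenberg) or reorder (Fiedler) the active orbitals,
      (4) solve the active-space problem with FCI or DMRG and form the 2-RDM,
      (5) optionally rotate the active space to natural orbitals,
      (6) build F and w-tilde, and compute the next augmented Hessian
          Newton-Raphson update together with the gradient and update norms. */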
   while (( gradNorm > scf_options->getGradientThreshold() ) && ( nIterations < scf_options->getMaxIterations() )){

      nIterations++;

      // Update the unitary transformation
      if (( unitary->getNumVariablesX() > 0 ) && ( am_i_master )){

         unitary->updateUnitary( mem1, mem2, gradient, true, true ); //multiply = compact = true

         if (( scf_options->getDoDIIS() ) && ( updateNorm <= scf_options->getDIISGradientBranch() )){
            if ( scf_options->getWhichActiveSpace() == 1 ){
               cout << "DMRGSCF::solve : DIIS has started. Active space not rotated to NOs anymore!" << endl;
            }
            if ( scf_options->getWhichActiveSpace() == 2 ){
               cout << "DMRGSCF::solve : DIIS has started. Active space not rotated to localized orbitals anymore!" << endl;
            }
            if ( scf_options->getWhichActiveSpace() == 3 ){
               cout << "DMRGSCF::solve : DIIS has started. Active space not ordered according to the Fiedler vector of the exchange matrix anymore!" << endl;
            }
            if ( diis == NULL ){
               const int diis_vec_size = iHandler->getROTparamsize();
               diis = new DIIS( diis_vec_size, unitary->getNumVariablesX(), scf_options->getNumDIISVecs() );
               diis_vec = new double[ diis_vec_size ];
               unitary->makeSureAllBlocksDetOne( mem1, mem2 );
            }
            unitary->getLog( diis_vec, mem1, mem2 );
            diis->appendNew( gradient, diis_vec );
            diis->calculateParam( diis_vec );
            unitary->updateUnitary( mem1, mem2, diis_vec, false, false ); //multiply = compact = false
         }
      }
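      /* The DIIS acceleration works on the logarithm of the unitary: the
         skew-symmetric log(U) serves as the parameter vector and the orbital
         gradient as the error vector. calculateParam() returns an
         extrapolated log(U), from which the unitary is rebuilt
         ( multiply = compact = false ). */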
      if (( am_i_master ) && ( scf_options->getStoreUnitary() ) && ( gradNorm != 1.0 )){ unitary->saveU( scf_options->getUnitaryStorageName() ); }
      if (( am_i_master ) && ( scf_options->getStoreDIIS() ) && ( updateNorm != 1.0 ) && ( diis != NULL )){ diis->saveDIIS( scf_options->getDIISStorageName() ); }
      int master_diis = (( diis != NULL ) ? 1 : 0 );
      #ifdef CHEMPS2_MPI_COMPILATION
      MPIchemps2::broadcast_array_int( &master_diis, 1, MPI_CHEMPS2_MASTER );
      unitary->broadcast( MPI_CHEMPS2_MASTER );
      #endif

      // Fill HamDMRG
      buildTmatrix();
      buildQmatOCC();
      fillConstAndTmatDMRG( HamDMRG );
      if ( am_i_master ){
         DMRGSCFrotations::rotate( VMAT_ORIG, HamDMRG->getVmat(), NULL, 'A', 'A', 'A', 'A', iHandler, unitary, mem1, mem2, work_mem_size, tmp_filename );
      }
      #ifdef CHEMPS2_MPI_COMPILATION
      HamDMRG->getVmat()->broadcast( MPI_CHEMPS2_MASTER );
      #endif

      // Localize the active space and reorder the orbitals within each irrep based on the exchange matrix
      if (( scf_options->getWhichActiveSpace() == 2 ) && ( master_diis == 0 )){ // When the DIIS has started: stop
         if ( am_i_master ){
            theLocalizer->Optimize( mem1, mem2, scf_options->getStartLocRandom() );
            theLocalizer->FiedlerExchange( maxlinsize, mem1, mem2 );
            fillLocalizedOrbitalRotations( theLocalizer->getUnitary(), iHandler, mem1 );
            unitary->rotateActiveSpaceVectors( mem1, mem2 );
         }
         #ifdef CHEMPS2_MPI_COMPILATION
         unitary->broadcast( MPI_CHEMPS2_MASTER );
         #endif
         buildTmatrix(); // With an updated unitary, the Qocc, Tmat, and HamDMRG objects need to be updated as well.
         buildQmatOCC();
         fillConstAndTmatDMRG( HamDMRG );
         if ( am_i_master ){
            DMRGSCFrotations::rotate( VMAT_ORIG, HamDMRG->getVmat(), NULL, 'A', 'A', 'A', 'A', iHandler, unitary, mem1, mem2, work_mem_size, tmp_filename );
            cout << "DMRGSCF::solve : Rotated the active space to localized orbitals, sorted according to the exchange matrix." << endl;
         }
         #ifdef CHEMPS2_MPI_COMPILATION
         HamDMRG->getVmat()->broadcast( MPI_CHEMPS2_MASTER );
         #endif
      }

      // Reorder the orbitals based on the Fiedler vector of the exchange matrix
      if (( scf_options->getWhichActiveSpace() == 3 ) && ( master_diis == 0 )){ // When the DIIS has started: stop
         int * dmrg2ham = new int[ nOrbDMRG ];
         if ( am_i_master ){
            theLocalizer->FiedlerGlobal( dmrg2ham );
         }
         #ifdef CHEMPS2_MPI_COMPILATION
         MPIchemps2::broadcast_array_int( dmrg2ham, nOrbDMRG, MPI_CHEMPS2_MASTER );
         #endif
         Prob->setup_reorder_custom( dmrg2ham );
         delete [] dmrg2ham;
      }
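      /* Both reorderings aim at compact matrix product states: permuting the
         orbitals such that the bandwidth of the exchange matrix is
         (approximately) minimized keeps strongly entangled orbitals close
         together on the one-dimensional DMRG lattice. */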

      if (( OptScheme == NULL ) && ( rootNum == 1 )){ // Do FCI, and calculate the 2DM

         if ( am_i_master ){
            const int nalpha = ( num_elec + TwoS ) / 2;
            const int nbeta = ( num_elec - TwoS ) / 2;
            const double workmem = 1000.0; // 1GB
            const int verbose = 2;
            CheMPS2::FCI * theFCI = new CheMPS2::FCI( HamDMRG, nalpha, nbeta, Irrep, workmem, verbose );
            double * inoutput = new double[ theFCI->getVecLength(0) ];
            theFCI->ClearVector( theFCI->getVecLength(0), inoutput );
            inoutput[ theFCI->LowestEnergyDeterminant() ] = 1.0;
            Energy = theFCI->GSDavidson( inoutput );
            theFCI->Fill2RDM( inoutput, DMRG2DM );
            delete theFCI;
            delete [] inoutput;
         }
         #ifdef CHEMPS2_MPI_COMPILATION
         MPIchemps2::broadcast_array_double( &Energy, 1, MPI_CHEMPS2_MASTER );
         MPIchemps2::broadcast_array_double( DMRG2DM, dmrgsize_power4, MPI_CHEMPS2_MASTER );
         #endif

      } else { // Do the DMRG sweeps, and calculate the 2DM

         assert( OptScheme != NULL );
         for ( int cnt = 0; cnt < dmrgsize_power4; cnt++ ){ DMRG2DM[ cnt ] = 0.0; } // Clear the 2-RDM ( to allow for state-averaged calculations )
         DMRG * theDMRG = new DMRG( Prob, OptScheme, CheMPS2::DMRG_storeMpsOnDisk, tmp_folder );
         for ( int state = 0; state < rootNum; state++ ){
            if ( state > 0 ){ theDMRG->newExcitation( fabs( Energy ) ); }
            Energy = theDMRG->Solve();
            if ( scf_options->getStateAveraging() ){ // When SA-DMRGSCF: 2DM += current 2DM
               theDMRG->calc2DMandCorrelations();
               copy2DMover( theDMRG->get2DM(), nOrbDMRG, DMRG2DM );
            }
            if (( state == 0 ) && ( rootNum > 1 )){ theDMRG->activateExcitations( rootNum - 1 ); }
         }
         if ( !( scf_options->getStateAveraging() )){ // When SS-DMRGSCF: 2DM += last 2DM
            theDMRG->calc2DMandCorrelations();
            copy2DMover( theDMRG->get2DM(), nOrbDMRG, DMRG2DM );
         }
         if (( scf_options->getDumpCorrelations() ) && ( am_i_master )){ theDMRG->getCorrelations()->Print(); }
         if ( CheMPS2::DMRG_storeMpsOnDisk ){ theDMRG->deleteStoredMPS(); }
         if ( CheMPS2::DMRG_storeRenormOptrOnDisk ){ theDMRG->deleteStoredOperators(); }
         delete theDMRG;
         if (( scf_options->getStateAveraging() ) && ( rootNum > 1 )){
            const double averagingfactor = 1.0 / rootNum;
            for ( int cnt = 0; cnt < dmrgsize_power4; cnt++ ){ DMRG2DM[ cnt ] *= averagingfactor; }
         }

      }
      setDMRG1DM( num_elec, nOrbDMRG, DMRG1DM, DMRG2DM );

      // Possibly rotate the active space to the natural orbitals
      if (( scf_options->getWhichActiveSpace() == 1 ) && ( master_diis == 0 )){ // When the DIIS has started: stop
         copy_active( DMRG1DM, theQmatWORK, iHandler, true );
         block_diagonalize( 'A', theQmatWORK, unitary, mem1, mem2, iHandler, true, DMRG2DM, NULL, NULL ); // Unitary is updated and DMRG2DM rotated
         setDMRG1DM( num_elec, nOrbDMRG, DMRG1DM, DMRG2DM );
         buildTmatrix(); // With an updated unitary, the Qocc and Tmat matrices need to be updated as well.
         buildQmatOCC();
         if ( am_i_master ){ cout << "DMRGSCF::solve : Rotated the active space to natural orbitals, sorted according to the NOON." << endl; }
      }

      if (( nIterations == scf_options->getMaxIterations() ) && ( am_i_master ) && ( scf_options->getStoreUnitary() )){
         unitary->saveU( scf_options->getUnitaryStorageName() );
      }

      // Calculate the matrix elements needed to calculate the gradient and hessian
      buildQmatACT();
      if ( am_i_master ){
         DMRGSCFrotations::rotate( VMAT_ORIG, NULL, theRotatedTEI, 'C', 'C', 'F', 'F', iHandler, unitary, mem1, mem2, work_mem_size, tmp_filename );
         DMRGSCFrotations::rotate( VMAT_ORIG, NULL, theRotatedTEI, 'C', 'V', 'C', 'V', iHandler, unitary, mem1, mem2, work_mem_size, tmp_filename );
         buildFmat( theFmatrix, theTmatrix, theQmatOCC, theQmatACT, iHandler, theRotatedTEI, DMRG2DM, DMRG1DM );
         buildWtilde( wmattilde, theTmatrix, theQmatOCC, theQmatACT, iHandler, theRotatedTEI, DMRG2DM, DMRG1DM );
         augmentedHessianNR( theFmatrix, wmattilde, iHandler, unitary, gradient, &updateNorm, &gradNorm ); // On return the gradient contains the update
      }
      #ifdef CHEMPS2_MPI_COMPILATION
      MPIchemps2::broadcast_array_double( &updateNorm, 1, MPI_CHEMPS2_MASTER );
      MPIchemps2::broadcast_array_double( &gradNorm, 1, MPI_CHEMPS2_MASTER );
      #endif

   }

   delete [] mem1;
   delete [] mem2;
   delete theRotatedTEI;
   delete wmattilde;
   if ( am_i_master ){ delete_file( tmp_filename ); }

   delete Prob;
   delete HamDMRG;
   if ( gradient != NULL ){ delete [] gradient; }
   if ( diis_vec != NULL ){ delete [] diis_vec; }
   if ( diis != NULL ){ delete diis; }
   if ( theLocalizer != NULL ){ delete theLocalizer; }

   successful_solve = true;
   return Energy;

}
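/* A minimal usage sketch (illustrative only; the construction of the CASSCF
   object and the filling of the sweep scheme are elided, and "machine",
   "scheme" and "options" are hypothetical names, not part of this file):

      // CheMPS2::CASSCF machine( ... );          // defines occ/act/virt spaces
      // CheMPS2::ConvergenceScheme scheme( 1 );  // DMRG sweep instructions
      // CheMPS2::DMRGSCFoptions options;         // defaults are fine to start
      // const double E = machine.solve( 10, 0, 0, &scheme, 1, &options );
      //                  // 10 electrons, singlet ( 2S = 0 ), irrep 0, 1 root

   Passing OptScheme == NULL together with rootNum == 1 selects the exact FCI
   branch above instead of the DMRG sweeps. */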

void CheMPS2::CASSCF::augmentedHessianNR( DMRGSCFmatrix * localFmat, DMRGSCFwtilde * localwtilde, const DMRGSCFindices * localIdx, const DMRGSCFunitary * localUmat, double * theupdate, double * updateNorm, double * gradNorm ){

   /* A good read to understand
      (1) how the augmented Hessian arises from a rational function optimization
      (2) where the parameter lambda in Eq. (22) of Yanai, IJQC 109, 2178-2190 (2009) comes from
      (3) why the smallest algebraic eigenvalue + corresponding eigenvector should be retained for minimizations
      is Banerjee, Adams, Simons, Shepard, "Search for stationary points on surfaces",
      J. Phys. Chem. 1985, volume 89, pages 52-57, doi:10.1021/j100247a015 */
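   /* Concretely, the Davidson solver below finds the lowest eigenpair of the
      ( x_linearlength + 1 )-dimensional augmented Hessian

            [  H    g ] [ x ]            [ x ]
            [ g^T   0 ] [ 1 ]  =  lambda [ 1 ]

      with H the orbital rotation Hessian and g the gradient. The eigenvector
      is rescaled so that its last component equals one, and its first
      x_linearlength components are returned as the update. */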

   //Calculate the gradient
   const int x_linearlength = localUmat->getNumVariablesX();
   gradNorm[ 0 ] = construct_gradient( localFmat, localIdx, theupdate );

   //Find the lowest eigenvalue and corresponding eigenvector of the augmented hessian
   {
      Davidson deBoskabouter( x_linearlength + 1, CheMPS2::DAVIDSON_NUM_VEC,
                                                  CheMPS2::DAVIDSON_NUM_VEC_KEEP,
                                                  CheMPS2::DAVIDSON_FCI_RTOL,
                                                  CheMPS2::DAVIDSON_PRECOND_CUTOFF, false ); // No debug printing
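      /* Davidson handshake protocol, as used below: the first
         FetchInstruction returns 'A' (provide the diagonal preconditioner in
         whichpointers[ 1 ] and the initial guess in whichpointers[ 0 ]);
         every 'B' requests one matrix-vector product ( whichpointers[ 0 ] in,
         whichpointers[ 1 ] out ); 'C' signals convergence, with the
         eigenvector in whichpointers[ 0 ] and the lowest eigenvalue in
         whichpointers[ 1 ][ 0 ]. */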
      double ** whichpointers = new double*[ 2 ];
      char instruction = deBoskabouter.FetchInstruction( whichpointers );
      assert( instruction == 'A' );
      diag_hessian( localFmat, localwtilde, localIdx, whichpointers[ 1 ] );
      whichpointers[ 1 ][ x_linearlength ] = 0.0;
      for ( int cnt = 0; cnt < x_linearlength; cnt++ ){ // Initial guess = [ -gradient / diag(hessian) , 1 ]
         const double denom = ( whichpointers[ 1 ][ cnt ] > CheMPS2::DAVIDSON_PRECOND_CUTOFF ) ? whichpointers[ 1 ][ cnt ] : CheMPS2::DAVIDSON_PRECOND_CUTOFF;
         whichpointers[ 0 ][ cnt ] = - theupdate[ cnt ] / denom;
      }
      whichpointers[ 0 ][ x_linearlength ] = 1.0;
      instruction = deBoskabouter.FetchInstruction( whichpointers );
      while ( instruction == 'B' ){
         augmented_hessian( localFmat, localwtilde, localIdx, whichpointers[ 0 ], whichpointers[ 1 ], theupdate, x_linearlength );
         instruction = deBoskabouter.FetchInstruction( whichpointers );
      }
      assert( instruction == 'C' );
      double scalar = 1.0 / whichpointers[ 0 ][ x_linearlength ];
      cout << "DMRGSCF::augmentedHessianNR : Augmented Hessian update found with " << deBoskabouter.GetNumMultiplications() << " Davidson iterations." << endl;
      if ( CheMPS2::DMRGSCF_debugPrint ){
         cout << "DMRGSCF::augmentedHessianNR : Lowest eigenvalue = " << whichpointers[ 1 ][ 0 ] << endl;
         cout << "DMRGSCF::augmentedHessianNR : Rescaling factor ( 1 / last component of the eigenvector ) = " << scalar << endl;
      }
      for ( int cnt = 0; cnt < x_linearlength; cnt++ ){ theupdate[ cnt ] = scalar * whichpointers[ 0 ][ cnt ]; }
      delete [] whichpointers;
   }

   //Calculate the update norm
   updateNorm[ 0 ] = 0.0;
   for ( int cnt = 0; cnt < x_linearlength; cnt++ ){ updateNorm[ 0 ] += theupdate[ cnt ] * theupdate[ cnt ]; }
   updateNorm[ 0 ] = sqrt( updateNorm[ 0 ] );
   cout << "DMRGSCF::augmentedHessianNR : Norm of the update = " << updateNorm[ 0 ] << endl;

}

double CheMPS2::CASSCF::construct_gradient( DMRGSCFmatrix * Fmatrix, const DMRGSCFindices * idx, double * gradient ){

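   /* The gradient with respect to the skew-symmetric orbital rotation
      parameters x_pq reads g_pq = 2 * ( F_pq - F_qp ). Only the nonredundant
      act-occ, vir-act, and vir-occ pairs are kept, packed per irrep in that
      order; jump tracks the offset into the packed vector. */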
   const int n_irreps = idx->getNirreps();

   int jump = 0;
   for ( int irrep = 0; irrep < n_irreps; irrep++ ){

      const int NORB = idx->getNORB( irrep );
      const int NOCC = idx->getNOCC( irrep );
      const int NACT = idx->getNDMRG( irrep );
      const int NVIR = idx->getNVIRT( irrep );
      const int N_OA = NOCC + NACT;
      double * FMAT = Fmatrix->getBlock( irrep );

      // Type 0: act-occ
      for ( int occ = 0; occ < NOCC; occ++ ){
         for ( int act = 0; act < NACT; act++ ){
            gradient[ jump + act + NACT * occ ] = 2 * ( FMAT[ NOCC + act + NORB * occ ] - FMAT[ occ + NORB * ( NOCC + act ) ] );
         }
      }
      jump += NOCC * NACT;

      // Type 1: vir-act
      for ( int act = 0; act < NACT; act++ ){
         for ( int vir = 0; vir < NVIR; vir++ ){
            gradient[ jump + vir + NVIR * act ] = 2 * ( FMAT[ N_OA + vir + NORB * ( NOCC + act ) ] - FMAT[ NOCC + act + NORB * ( N_OA + vir ) ] );
         }
      }
      jump += NACT * NVIR;

      // Type 2: vir-occ
      for ( int occ = 0; occ < NOCC; occ++ ){
         for ( int vir = 0; vir < NVIR; vir++ ){
            gradient[ jump + vir + NVIR * occ ] = 2 * ( FMAT[ N_OA + vir + NORB * occ ] - FMAT[ occ + NORB * ( N_OA + vir ) ] );
         }
      }
      jump += NOCC * NVIR;
   }

   double gradient_norm = 0.0;
   for ( int cnt = 0; cnt < jump; cnt++ ){ gradient_norm += gradient[ cnt ] * gradient[ cnt ]; }
   gradient_norm = sqrt( gradient_norm );
   cout << "DMRGSCF::construct_gradient : Norm of the gradient = " << gradient_norm << endl;
   return gradient_norm;

}

void CheMPS2::CASSCF::augmented_hessian( DMRGSCFmatrix * Fmatrix, DMRGSCFwtilde * Wtilde, const DMRGSCFindices * idx, double * origin, double * target, double * gradient, const int linsize ){

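   /* Matrix-vector product with the augmented Hessian:
      target[ 0 .. linsize - 1 ] = H * origin[ 0 .. linsize - 1 ]
                                 + gradient * origin[ linsize ]
      target[ linsize ]          = gradient^T * origin[ 0 .. linsize - 1 ] */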
   for ( int cnt = 0; cnt < linsize; cnt++ ){
      target[ cnt ] = gradient[ cnt ] * origin[ linsize ];
   }
   add_hessian( Fmatrix, Wtilde, idx, origin, target );
   target[ linsize ] = 0.0;
   for ( int cnt = 0; cnt < linsize; cnt++ ){
      target[ linsize ] += gradient[ cnt ] * origin[ cnt ];
   }

}

void CheMPS2::CASSCF::diag_hessian( DMRGSCFmatrix * Fmatrix, const DMRGSCFwtilde * Wtilde, const DMRGSCFindices * idx, double * diagonal ){

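   /* Diagonal of the orbital rotation Hessian, H_{pq,pq} = - 2 * ( F_pp +
      F_qq ) + ( w-tilde terms ), packed in the same act-occ / vir-act /
      vir-occ order as the gradient. It serves as the Davidson preconditioner
      and is used to build the initial guess. Pairs with a virtual index keep
      only one w-tilde contribution, since w-tilde blocks with a virtual P or
      R index are never filled in buildWtilde() and hence vanish. */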
   const int n_irreps = idx->getNirreps();

   int jump = 0;
   for ( int irrep = 0; irrep < n_irreps; irrep++ ){

      const int NORB = idx->getNORB( irrep );
      const int NOCC = idx->getNOCC( irrep );
      const int NACT = idx->getNDMRG( irrep );
      const int NVIR = idx->getNVIRT( irrep );
      const int N_OA = NOCC + NACT;
      double * FMAT = Fmatrix->getBlock( irrep );

      for ( int occ = 0; occ < NOCC; occ++ ){
         const double F_occ = FMAT[ occ * ( NORB + 1 ) ];
         for ( int act = 0; act < NACT; act++ ){
            const double F_act = FMAT[ ( NOCC + act ) * ( NORB + 1 ) ];
            diagonal[ jump + act + NACT * occ ] = - 2 * ( F_occ + F_act ) + ( Wtilde->get( irrep, irrep, NOCC + act, occ, NOCC + act, occ )
                                                                            - Wtilde->get( irrep, irrep, NOCC + act, occ, occ, NOCC + act )
                                                                            - Wtilde->get( irrep, irrep, occ, NOCC + act, NOCC + act, occ )
                                                                            + Wtilde->get( irrep, irrep, occ, NOCC + act, occ, NOCC + act ) );
         }
      }
      jump += NOCC * NACT;

      for ( int act = 0; act < NACT; act++ ){
         const double F_act = FMAT[ ( NOCC + act ) * ( NORB + 1 ) ];
         for ( int vir = 0; vir < NVIR; vir++ ){
            const double F_vir = FMAT[ ( N_OA + vir ) * ( NORB + 1 ) ];
            diagonal[ jump + vir + NVIR * act ] = - 2 * ( F_act + F_vir ) + Wtilde->get( irrep, irrep, NOCC + act, N_OA + vir, NOCC + act, N_OA + vir );
         }
      }
      jump += NACT * NVIR;

      for ( int occ = 0; occ < NOCC; occ++ ){
         const double F_occ = FMAT[ occ * ( NORB + 1 ) ];
         for ( int vir = 0; vir < NVIR; vir++ ){
            const double F_vir = FMAT[ ( N_OA + vir ) * ( NORB + 1 ) ];
            diagonal[ jump + vir + NVIR * occ ] = - 2 * ( F_occ + F_vir ) + Wtilde->get( irrep, irrep, occ, N_OA + vir, occ, N_OA + vir );
         }
      }
      jump += NOCC * NVIR;
   }

}

void CheMPS2::CASSCF::add_hessian( DMRGSCFmatrix * Fmatrix, DMRGSCFwtilde * Wtilde, const DMRGSCFindices * idx, double * origin, double * target ){

   const int n_irreps = idx->getNirreps();

   int jump = 0;
   for ( int irrep = 0; irrep < n_irreps; irrep++ ){

      const int NORB = idx->getNORB( irrep );
      const int NOCC = idx->getNOCC( irrep );
      const int NACT = idx->getNDMRG( irrep );
      const int NVIR = idx->getNVIRT( irrep );
      const int N_OA = NOCC + NACT;
      double * FMAT = Fmatrix->getBlock( irrep );
      const int ptr_AO = jump;
      const int ptr_VA = jump + NACT * NOCC;
      const int ptr_VO = jump + NACT * NOCC + NVIR * NACT;

      // OpenMP parallelism via dgemm_

      DGEMM_WRAP( +1.0, 'T', 'N', origin + ptr_VA, FMAT + N_OA, target + ptr_AO, NACT, NOCC, NVIR, NVIR, NORB, NACT );
      DGEMM_WRAP( +1.0, 'T', 'T', origin + ptr_VA, FMAT + NORB * N_OA, target + ptr_AO, NACT, NOCC, NVIR, NVIR, NORB, NACT );
      DGEMM_WRAP( -1.0, 'N', 'N', origin + ptr_AO, FMAT, target + ptr_AO, NACT, NOCC, NOCC, NACT, NORB, NACT );
      DGEMM_WRAP( -1.0, 'N', 'T', origin + ptr_AO, FMAT, target + ptr_AO, NACT, NOCC, NOCC, NACT, NORB, NACT );
      DGEMM_WRAP( -1.0, 'N', 'N', FMAT + NOCC + NORB * NOCC, origin + ptr_AO, target + ptr_AO, NACT, NOCC, NACT, NORB, NACT, NACT );
      DGEMM_WRAP( -1.0, 'T', 'N', FMAT + NOCC + NORB * NOCC, origin + ptr_AO, target + ptr_AO, NACT, NOCC, NACT, NORB, NACT, NACT );
      DGEMM_WRAP( -1.0, 'N', 'N', FMAT + NOCC + NORB * N_OA, origin + ptr_VO, target + ptr_AO, NACT, NOCC, NVIR, NORB, NVIR, NACT );
      DGEMM_WRAP( -1.0, 'T', 'N', FMAT + N_OA + NORB * NOCC, origin + ptr_VO, target + ptr_AO, NACT, NOCC, NVIR, NORB, NVIR, NACT );

      DGEMM_WRAP( +1.0, 'N', 'T', FMAT + N_OA, origin + ptr_AO, target + ptr_VA, NVIR, NACT, NOCC, NORB, NACT, NVIR );
      DGEMM_WRAP( +1.0, 'T', 'T', FMAT + NORB * N_OA, origin + ptr_AO, target + ptr_VA, NVIR, NACT, NOCC, NORB, NACT, NVIR );
      DGEMM_WRAP( -1.0, 'N', 'N', origin + ptr_VO, FMAT + NORB * NOCC, target + ptr_VA, NVIR, NACT, NOCC, NVIR, NORB, NVIR );
      DGEMM_WRAP( -1.0, 'N', 'T', origin + ptr_VO, FMAT + NOCC, target + ptr_VA, NVIR, NACT, NOCC, NVIR, NORB, NVIR );
      DGEMM_WRAP( -1.0, 'N', 'N', origin + ptr_VA, FMAT + NOCC + NORB * NOCC, target + ptr_VA, NVIR, NACT, NACT, NVIR, NORB, NVIR );
      DGEMM_WRAP( -1.0, 'N', 'T', origin + ptr_VA, FMAT + NOCC + NORB * NOCC, target + ptr_VA, NVIR, NACT, NACT, NVIR, NORB, NVIR );
      DGEMM_WRAP( -1.0, 'N', 'N', FMAT + N_OA + NORB * N_OA, origin + ptr_VA, target + ptr_VA, NVIR, NACT, NVIR, NORB, NVIR, NVIR );
      DGEMM_WRAP( -1.0, 'T', 'N', FMAT + N_OA + NORB * N_OA, origin + ptr_VA, target + ptr_VA, NVIR, NACT, NVIR, NORB, NVIR, NVIR );

      DGEMM_WRAP( -1.0, 'N', 'N', origin + ptr_VO, FMAT, target + ptr_VO, NVIR, NOCC, NOCC, NVIR, NORB, NVIR );
      DGEMM_WRAP( -1.0, 'N', 'T', origin + ptr_VO, FMAT, target + ptr_VO, NVIR, NOCC, NOCC, NVIR, NORB, NVIR );
      DGEMM_WRAP( -1.0, 'N', 'N', origin + ptr_VA, FMAT + NOCC, target + ptr_VO, NVIR, NOCC, NACT, NVIR, NORB, NVIR );
      DGEMM_WRAP( -1.0, 'N', 'T', origin + ptr_VA, FMAT + NORB * NOCC, target + ptr_VO, NVIR, NOCC, NACT, NVIR, NORB, NVIR );
      DGEMM_WRAP( -1.0, 'N', 'N', FMAT + N_OA + NORB * N_OA, origin + ptr_VO, target + ptr_VO, NVIR, NOCC, NVIR, NORB, NVIR, NVIR );
      DGEMM_WRAP( -1.0, 'T', 'N', FMAT + N_OA + NORB * N_OA, origin + ptr_VO, target + ptr_VO, NVIR, NOCC, NVIR, NORB, NVIR, NVIR );
      DGEMM_WRAP( -1.0, 'N', 'N', FMAT + N_OA + NORB * NOCC, origin + ptr_AO, target + ptr_VO, NVIR, NOCC, NACT, NORB, NACT, NVIR );
      DGEMM_WRAP( -1.0, 'T', 'N', FMAT + NOCC + NORB * N_OA, origin + ptr_AO, target + ptr_VO, NVIR, NOCC, NACT, NORB, NACT, NVIR );

      jump += NACT * NOCC + NVIR * NACT + NVIR * NOCC;
   }

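   /* The remaining two-index contractions couple different irrep blocks
      through w-tilde. Each ( irrep_row, irrep_col ) pair contributes dgemv_
      products of w-tilde subblocks with slices of the origin vector; the act
      and occ row loops are distributed over the OpenMP threads with a static
      schedule. */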
   #pragma omp parallel
   {

      int jump_row = 0;
      for ( int irrep_row = 0; irrep_row < n_irreps; irrep_row++ ){

         const int NORB_row = idx->getNORB( irrep_row );
         const int NOCC_row = idx->getNOCC( irrep_row );
         const int NACT_row = idx->getNDMRG( irrep_row );
         const int NVIR_row = idx->getNVIRT( irrep_row );
         const int N_OA_row = NOCC_row + NACT_row;
         double * resAO = target + jump_row;
         double * resVA = resAO + NACT_row * NOCC_row;
         double * resVO = resVA + NVIR_row * NACT_row;

         int jump_col = 0;
         for ( int irrep_col = 0; irrep_col < n_irreps; irrep_col++ ){

            const int NOCC_col = idx->getNOCC( irrep_col );
            const int NACT_col = idx->getNDMRG( irrep_col );
            const int NVIR_col = idx->getNVIRT( irrep_col );
            const int N_OA_col = NOCC_col + NACT_col;
            double * vecAO = origin + jump_col;
            double * vecVA = vecAO + NACT_col * NOCC_col;
            double * vecVO = vecVA + NVIR_col * NACT_col;

            #pragma omp for schedule(static)
            for ( int act_row = 0; act_row < NACT_row; act_row++ ){
               for ( int occ_col = 0; occ_col < NOCC_col; occ_col++ ){
                  double * mat = Wtilde->getBlock( irrep_row, irrep_col, NOCC_row + act_row, occ_col );
                  DGEMV_WRAP( -1.0, mat + NORB_row * NOCC_col, resAO + act_row, vecAO + NACT_col * occ_col, NOCC_row, NACT_col, NORB_row, NACT_row, 1 );
                  DGEMV_WRAP( -1.0, mat + NORB_row * N_OA_col, resAO + act_row, vecVO + NVIR_col * occ_col, NOCC_row, NVIR_col, NORB_row, NACT_row, 1 );
                  DGEMV_WRAP(  1.0, mat + N_OA_row + NORB_row * NOCC_col, resVA + NVIR_row * act_row, vecAO + NACT_col * occ_col, NVIR_row, NACT_col, NORB_row, 1, 1 );
                  DGEMV_WRAP(  1.0, mat + N_OA_row + NORB_row * N_OA_col, resVA + NVIR_row * act_row, vecVO + NVIR_col * occ_col, NVIR_row, NVIR_col, NORB_row, 1, 1 );
               }
               for ( int act_col = 0; act_col < NACT_col; act_col++ ){
                  double * mat = Wtilde->getBlock( irrep_row, irrep_col, NOCC_row + act_row, NOCC_col + act_col );
                  DGEMV_WRAP(  1.0, mat, resAO + act_row, vecAO + act_col, NOCC_row, NOCC_col, NORB_row, NACT_row, NACT_col );
                  DGEMV_WRAP( -1.0, mat + NORB_row * N_OA_col, resAO + act_row, vecVA + NVIR_col * act_col, NOCC_row, NVIR_col, NORB_row, NACT_row, 1 );
                  DGEMV_WRAP( -1.0, mat + N_OA_row, resVA + NVIR_row * act_row, vecAO + act_col, NVIR_row, NOCC_col, NORB_row, 1, NACT_col );
                  DGEMV_WRAP(  1.0, mat + N_OA_row + NORB_row * N_OA_col, resVA + NVIR_row * act_row, vecVA + NVIR_col * act_col, NVIR_row, NVIR_col, NORB_row, 1, 1 );
               }
            }

            #pragma omp for schedule(static)
            for ( int occ_row = 0; occ_row < NOCC_row; occ_row++ ){
               for ( int occ_col = 0; occ_col < NOCC_col; occ_col++ ){
                  double * mat = Wtilde->getBlock( irrep_row, irrep_col, occ_row, occ_col );
                  DGEMV_WRAP( 1.0, mat + NOCC_row + NORB_row * NOCC_col, resAO + NACT_row * occ_row, vecAO + NACT_col * occ_col, NACT_row, NACT_col, NORB_row, 1, 1 );
                  DGEMV_WRAP( 1.0, mat + NOCC_row + NORB_row * N_OA_col, resAO + NACT_row * occ_row, vecVO + NVIR_col * occ_col, NACT_row, NVIR_col, NORB_row, 1, 1 );
                  DGEMV_WRAP( 1.0, mat + N_OA_row + NORB_row * NOCC_col, resVO + NVIR_row * occ_row, vecAO + NACT_col * occ_col, NVIR_row, NACT_col, NORB_row, 1, 1 );
                  DGEMV_WRAP( 1.0, mat + N_OA_row + NORB_row * N_OA_col, resVO + NVIR_row * occ_row, vecVO + NVIR_col * occ_col, NVIR_row, NVIR_col, NORB_row, 1, 1 );
               }
               for ( int act_col = 0; act_col < NACT_col; act_col++ ){
                  double * mat = Wtilde->getBlock( irrep_row, irrep_col, occ_row, NOCC_col + act_col );
                  DGEMV_WRAP( -1.0, mat + NOCC_row, resAO + NACT_row * occ_row, vecAO + act_col, NACT_row, NOCC_col, NORB_row, 1, NACT_col );
                  DGEMV_WRAP(  1.0, mat + NOCC_row + NORB_row * N_OA_col, resAO + NACT_row * occ_row, vecVA + NVIR_col * act_col, NACT_row, NVIR_col, NORB_row, 1, 1 );
                  DGEMV_WRAP( -1.0, mat + N_OA_row, resVO + NVIR_row * occ_row, vecAO + act_col, NVIR_row, NOCC_col, NORB_row, 1, NACT_col );
                  DGEMV_WRAP(  1.0, mat + N_OA_row + NORB_row * N_OA_col, resVO + NVIR_row * occ_row, vecVA + NVIR_col * act_col, NVIR_row, NVIR_col, NORB_row, 1, 1 );
               }
            }
            jump_col += NACT_col * NOCC_col + NVIR_col * NACT_col + NVIR_col * NOCC_col;
         }
         jump_row += NACT_row * NOCC_row + NVIR_row * NACT_row + NVIR_row * NOCC_row;
      }
   }

}

void CheMPS2::CASSCF::DGEMM_WRAP( double prefactor, char transA, char transB, double * A, double * B, double * C, int m, int n, int k, int lda, int ldb, int ldc ){

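   // C += prefactor * op( A ) * op( B ) via BLAS dgemm_; beta is fixed to 1.0
   // so that successive calls accumulate into C. Early return when any
   // dimension vanishes, so callers may pass empty blocks.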
   if ( m * k * n == 0 ){ return; }
   double add = 1.0;
   dgemm_( &transA, &transB, &m, &n, &k, &prefactor, A, &lda, B, &ldb, &add, C, &ldc );

}

void CheMPS2::CASSCF::DGEMV_WRAP( double prefactor, double * matrix, double * result, double * vector, int rowdim, int coldim, int ldmat, int incres, int incvec ){

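   // result += prefactor * matrix * vector via BLAS dgemv_ (no transpose);
   // beta is fixed to 1.0 so that successive calls accumulate into result.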
   if ( rowdim * coldim == 0 ){ return; }
   char notrans = 'N';
   double add = 1.0;
   dgemv_( &notrans, &rowdim, &coldim, &prefactor, matrix, &ldmat, vector, &incvec, &add, result, &incres );

}

void CheMPS2::CASSCF::buildWtilde( DMRGSCFwtilde * localwtilde, const DMRGSCFmatrix * localTmat, const DMRGSCFmatrix * localJKocc, const DMRGSCFmatrix * localJKact, const DMRGSCFindices * localIdx, const DMRGSCFintegrals * theInts, double * local2DM, double * local1DM ){

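   /* Build the w-tilde matrix (Eq. (20b) in the Siegbahn paper): only the
      blocks with P and R occupied or active are nonzero, and within each
      block the Q,S loops skip the ranges that would correspond to redundant
      rotations (occ-occ and act-act pairs). */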
   localwtilde->clear();
   const int numIrreps = localIdx->getNirreps();
   const int totOrbDMRG = localIdx->getDMRGcumulative( numIrreps );
   for ( int irrep_pq = 0; irrep_pq < numIrreps; irrep_pq++ ){

      const int NumOCCpq = localIdx->getNOCC( irrep_pq );
      const int NumDMRGpq = localIdx->getNDMRG( irrep_pq );
      const int NumORBpq = localIdx->getNORB( irrep_pq );
      const int NumCOREpq = NumOCCpq + NumDMRGpq;

      // If irrep_pq == irrep_rs and P == R occupied --> QS only active or virtual
      #pragma omp parallel for schedule(static)
      for ( int relindexP = 0; relindexP < NumOCCpq; relindexP++ ){
         double * subblock = localwtilde->getBlock( irrep_pq, irrep_pq, relindexP, relindexP );
         for ( int relindexS = NumOCCpq; relindexS < NumORBpq; relindexS++ ){
            for ( int relindexQ = NumOCCpq; relindexQ < NumORBpq; relindexQ++ ){
               subblock[ relindexQ + NumORBpq * relindexS ] += 4 * ( localTmat->get( irrep_pq, relindexQ, relindexS )
                                                                  + localJKocc->get( irrep_pq, relindexQ, relindexS )
                                                                  + localJKact->get( irrep_pq, relindexQ, relindexS ) );
            }
         }
      }

      // If irrep_pq == irrep_rs and P,R active --> QS only occupied or virtual
      #pragma omp parallel for schedule(static)
      for ( int combined = 0; combined < NumDMRGpq * NumDMRGpq; combined++ ){

         const int relindexP = NumOCCpq + ( combined % NumDMRGpq );
         const int relindexR = NumOCCpq + ( combined / NumDMRGpq );
         double * subblock = localwtilde->getBlock( irrep_pq, irrep_pq, relindexP, relindexR );
         const int DMRGindexP = relindexP - NumOCCpq + localIdx->getDMRGcumulative( irrep_pq );
         const int DMRGindexR = relindexR - NumOCCpq + localIdx->getDMRGcumulative( irrep_pq );
         const double OneDMvalue = local1DM[ DMRGindexP + totOrbDMRG * DMRGindexR ];

         for ( int relindexS = 0; relindexS < NumOCCpq; relindexS++ ){
            for ( int relindexQ = 0; relindexQ < NumOCCpq; relindexQ++ ){
               subblock[ relindexQ + NumORBpq * relindexS ] += 2 * OneDMvalue * ( localTmat->get( irrep_pq, relindexQ, relindexS )
                                                                                + localJKocc->get( irrep_pq, relindexQ, relindexS ) );
            }
            for ( int relindexQ = NumCOREpq; relindexQ < NumORBpq; relindexQ++ ){
               subblock[ relindexQ + NumORBpq * relindexS ] += 2 * OneDMvalue * ( localTmat->get( irrep_pq, relindexQ, relindexS )
                                                                                + localJKocc->get( irrep_pq, relindexQ, relindexS ) );
            }
         }
         for ( int relindexS = NumCOREpq; relindexS < NumORBpq; relindexS++ ){
            for ( int relindexQ = 0; relindexQ < NumOCCpq; relindexQ++ ){
               subblock[ relindexQ + NumORBpq * relindexS ] += 2 * OneDMvalue * ( localTmat->get( irrep_pq, relindexQ, relindexS )
                                                                                + localJKocc->get( irrep_pq, relindexQ, relindexS ) );
            }
            for ( int relindexQ = NumCOREpq; relindexQ < NumORBpq; relindexQ++ ){
               subblock[ relindexQ + NumORBpq * relindexS ] += 2 * OneDMvalue * ( localTmat->get( irrep_pq, relindexQ, relindexS )
                                                                                + localJKocc->get( irrep_pq, relindexQ, relindexS ) );
            }
         }
      }

      for ( int irrep_rs = 0; irrep_rs < numIrreps; irrep_rs++ ){

         const int NumOCCrs = localIdx->getNOCC( irrep_rs );
         const int NumDMRGrs = localIdx->getNDMRG( irrep_rs );
         const int NumORBrs = localIdx->getNORB( irrep_rs );
         const int NumCORErs = NumOCCrs + NumDMRGrs;
         const int productirrep = Irreps::directProd( irrep_pq, irrep_rs );

         // P and R occupied --> QS only active or virtual
         #pragma omp parallel for schedule(static)
         for ( int combined = 0; combined < NumOCCpq * NumOCCrs; combined++ ){

            const int relindexP = combined % NumOCCpq;
            const int relindexR = combined / NumOCCpq;
            double * subblock = localwtilde->getBlock( irrep_pq, irrep_rs, relindexP, relindexR );

            for ( int relindexS = NumOCCrs; relindexS < NumORBrs; relindexS++ ){
               for ( int relindexQ = NumOCCpq; relindexQ < NumORBpq; relindexQ++ ){
                  subblock[ relindexQ + NumORBpq * relindexS ] +=
                      4 * ( 4 * theInts->FourIndexAPI( irrep_pq, irrep_rs, irrep_pq, irrep_rs, relindexQ, relindexS, relindexP, relindexR )
                              - theInts->FourIndexAPI( irrep_pq, irrep_pq, irrep_rs, irrep_rs, relindexQ, relindexP, relindexS, relindexR )
                              - theInts->FourIndexAPI( irrep_pq, irrep_rs, irrep_rs, irrep_pq, relindexQ, relindexS, relindexR, relindexP ) );
               }
            }
         } // End combined P and R occupied

         // P and R active --> QS only occupied or virtual
         #pragma omp parallel for schedule(static)
         for ( int combined = 0; combined < NumDMRGpq * NumDMRGrs; combined++ ){

            const int relindexP = NumOCCpq + ( combined % NumDMRGpq );
            const int relindexR = NumOCCrs + ( combined / NumDMRGpq );
            double * subblock = localwtilde->getBlock( irrep_pq, irrep_rs, relindexP, relindexR );
            const int DMRGindexP = relindexP - NumOCCpq + localIdx->getDMRGcumulative( irrep_pq );
            const int DMRGindexR = relindexR - NumOCCrs + localIdx->getDMRGcumulative( irrep_rs );

            for ( int irrep_alpha = 0; irrep_alpha < numIrreps; irrep_alpha++ ){

               const int irrep_beta = Irreps::directProd( irrep_alpha, productirrep );
               const int NumDMRGalpha = localIdx->getNDMRG( irrep_alpha );
               const int NumDMRGbeta = localIdx->getNDMRG( irrep_beta );

               for ( int alpha = 0; alpha < NumDMRGalpha; alpha++ ){

                  const int DMRGalpha = localIdx->getDMRGcumulative( irrep_alpha ) + alpha;
                  const int relalpha = localIdx->getNOCC( irrep_alpha ) + alpha;

                  for ( int beta = 0; beta < NumDMRGbeta; beta++ ){

                     const int DMRGbeta = localIdx->getDMRGcumulative( irrep_beta ) + beta;
                     const int relbeta = localIdx->getNOCC( irrep_beta ) + beta;

                     const double TwoDMvalue1  = local2DM[ DMRGindexR + totOrbDMRG * ( DMRGalpha + totOrbDMRG * ( DMRGindexP + totOrbDMRG * DMRGbeta ) ) ];
                     const double TwoDMvalue23 = local2DM[ DMRGindexR + totOrbDMRG * ( DMRGalpha + totOrbDMRG * ( DMRGbeta + totOrbDMRG * DMRGindexP ) ) ]
                                               + local2DM[ DMRGindexR + totOrbDMRG * ( DMRGindexP + totOrbDMRG * ( DMRGbeta + totOrbDMRG * DMRGalpha ) ) ];

                     for ( int relindexS = 0; relindexS < NumOCCrs; relindexS++ ){
                        for ( int relindexQ = 0; relindexQ < NumOCCpq; relindexQ++ ){
                           subblock[ relindexQ + NumORBpq * relindexS ] +=
                               2 * ( TwoDMvalue1  * theInts->FourIndexAPI( irrep_pq, irrep_alpha, irrep_rs, irrep_beta, relindexQ, relalpha, relindexS, relbeta )
                                   + TwoDMvalue23 * theInts->FourIndexAPI( irrep_pq, irrep_rs, irrep_alpha, irrep_beta, relindexQ, relindexS, relalpha, relbeta ) );
                        }
                        for ( int relindexQ = NumCOREpq; relindexQ < NumORBpq; relindexQ++ ){
                           subblock[ relindexQ + NumORBpq * relindexS ] +=
                               2 * ( TwoDMvalue1  * theInts->FourIndexAPI( irrep_pq, irrep_alpha, irrep_rs, irrep_beta, relindexQ, relalpha, relindexS, relbeta )
                                   + TwoDMvalue23 * theInts->FourIndexAPI( irrep_pq, irrep_rs, irrep_alpha, irrep_beta, relindexQ, relindexS, relalpha, relbeta ) );
                        }
                     }
                     for ( int relindexS = NumCORErs; relindexS < NumORBrs; relindexS++ ){
                        for ( int relindexQ = 0; relindexQ < NumOCCpq; relindexQ++ ){
                           subblock[ relindexQ + NumORBpq * relindexS ] +=
                               2 * ( TwoDMvalue1  * theInts->FourIndexAPI( irrep_pq, irrep_alpha, irrep_rs, irrep_beta, relindexQ, relalpha, relindexS, relbeta )
                                   + TwoDMvalue23 * theInts->FourIndexAPI( irrep_pq, irrep_rs, irrep_alpha, irrep_beta, relindexQ, relindexS, relalpha, relbeta ) );
                        }
                        for ( int relindexQ = NumCOREpq; relindexQ < NumORBpq; relindexQ++ ){
                           subblock[ relindexQ + NumORBpq * relindexS ] +=
                               2 * ( TwoDMvalue1  * theInts->FourIndexAPI( irrep_pq, irrep_alpha, irrep_rs, irrep_beta, relindexQ, relalpha, relindexS, relbeta )
                                   + TwoDMvalue23 * theInts->FourIndexAPI( irrep_pq, irrep_rs, irrep_alpha, irrep_beta, relindexQ, relindexS, relalpha, relbeta ) );
                        }
                     }
                  }
               }
            }
         } // End combined P and R active

         // P active and R occupied --> Q occupied or virtual // S active or virtual
         #pragma omp parallel for schedule(static)
         for ( int combined = 0; combined < NumDMRGpq * NumOCCrs; combined++ ){

            const int relindexP = NumOCCpq + ( combined % NumDMRGpq );
            const int relindexR = combined / NumDMRGpq;
            double * subblock = localwtilde->getBlock( irrep_pq, irrep_rs, relindexP, relindexR );
            const int DMRGindexP = relindexP - NumOCCpq + localIdx->getDMRGcumulative( irrep_pq );

            for ( int alpha = 0; alpha < NumDMRGpq; alpha++ ){

               const int DMRGalpha = localIdx->getDMRGcumulative( irrep_pq ) + alpha;
               const int relalpha = localIdx->getNOCC( irrep_pq ) + alpha;
               const double OneDMvalue = local1DM[ DMRGalpha + totOrbDMRG * DMRGindexP ];

               for ( int relindexS = NumOCCrs; relindexS < NumORBrs; relindexS++ ){
                  for ( int relindexQ = 0; relindexQ < NumOCCpq; relindexQ++ ){
                     subblock[ relindexQ + NumORBpq * relindexS ] += 2 * OneDMvalue *
                         ( 4 * theInts->FourIndexAPI( irrep_pq, irrep_rs, irrep_pq, irrep_rs, relindexQ, relindexS, relalpha, relindexR )
                             - theInts->FourIndexAPI( irrep_pq, irrep_pq, irrep_rs, irrep_rs, relindexQ, relalpha, relindexS, relindexR )
                             - theInts->FourIndexAPI( irrep_pq, irrep_rs, irrep_rs, irrep_pq, relindexQ, relindexS, relindexR, relalpha ) );
                  }
                  for ( int relindexQ = NumCOREpq; relindexQ < NumORBpq; relindexQ++ ){
                     subblock[ relindexQ + NumORBpq * relindexS ] += 2 * OneDMvalue *
                         ( 4 * theInts->FourIndexAPI( irrep_pq, irrep_rs, irrep_pq, irrep_rs, relindexQ, relindexS, relalpha, relindexR )
                             - theInts->FourIndexAPI( irrep_pq, irrep_pq, irrep_rs, irrep_rs, relindexQ, relalpha, relindexS, relindexR )
                             - theInts->FourIndexAPI( irrep_pq, irrep_rs, irrep_rs, irrep_pq, relindexQ, relindexS, relindexR, relalpha ) );
                  }
               }
            }
         } // End combined P active and R occupied

         // P occupied and R active --> Q active or virtual // S occupied or virtual
         #pragma omp parallel for schedule(static)
         for ( int combined = 0; combined < NumOCCpq * NumDMRGrs; combined++ ){

            const int relindexP = combined % NumOCCpq;
            const int relindexR = NumOCCrs + ( combined / NumOCCpq );
            double * subblock = localwtilde->getBlock( irrep_pq, irrep_rs, relindexP, relindexR );
            const int DMRGindexR = relindexR - NumOCCrs + localIdx->getDMRGcumulative( irrep_rs );

            for ( int beta = 0; beta < NumDMRGrs; beta++ ){

               const int DMRGbeta = localIdx->getDMRGcumulative( irrep_rs ) + beta;
               const int relbeta = localIdx->getNOCC( irrep_rs ) + beta;
               const double OneDMvalue = local1DM[ DMRGindexR + totOrbDMRG * DMRGbeta ];

               for ( int relindexQ = NumOCCpq; relindexQ < NumORBpq; relindexQ++ ){
                  for ( int relindexS = 0; relindexS < NumOCCrs; relindexS++ ){
                     subblock[ relindexQ + NumORBpq * relindexS ] += 2 * OneDMvalue *
                         ( 4 * theInts->FourIndexAPI( irrep_pq, irrep_rs, irrep_pq, irrep_rs, relindexQ, relindexS, relindexP, relbeta )
                             - theInts->FourIndexAPI( irrep_pq, irrep_pq, irrep_rs, irrep_rs, relindexQ, relindexP, relindexS, relbeta )
                             - theInts->FourIndexAPI( irrep_pq, irrep_rs, irrep_rs, irrep_pq, relindexQ, relindexS, relbeta, relindexP ) );
                  }
                  for ( int relindexS = NumCORErs; relindexS < NumORBrs; relindexS++ ){
                     subblock[ relindexQ + NumORBpq * relindexS ] += 2 * OneDMvalue *
                         ( 4 * theInts->FourIndexAPI( irrep_pq, irrep_rs, irrep_pq, irrep_rs, relindexQ, relindexS, relindexP, relbeta )
                             - theInts->FourIndexAPI( irrep_pq, irrep_pq, irrep_rs, irrep_rs, relindexQ, relindexP, relindexS, relbeta )
                             - theInts->FourIndexAPI( irrep_pq, irrep_rs, irrep_rs, irrep_pq, relindexQ, relindexS, relbeta, relindexP ) );
                  }
               }
            }
         } // End combined P occupied and R active

      }
   }

}

void CheMPS2::CASSCF::buildFmat( DMRGSCFmatrix * localFmat, const DMRGSCFmatrix * localTmat, const DMRGSCFmatrix * localJKocc, const DMRGSCFmatrix * localJKact, const DMRGSCFindices * localIdx, const DMRGSCFintegrals * theInts, double * local2DM, double * local1DM ){

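   /* Build the F-matrix (Eq. (11) in the Siegbahn paper): F_pq collects the
      one- and two-body terms weighted with the 1- and 2-RDM. Its
      antisymmetric part yields the orbital gradient in construct_gradient(). */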
   localFmat->clear();
   const int numIrreps = localIdx->getNirreps();
   const int totOrbDMRG = localIdx->getDMRGcumulative( numIrreps );
   for ( int irrep_pq = 0; irrep_pq < numIrreps; irrep_pq++ ){

      const int NumORB = localIdx->getNORB( irrep_pq );
      const int NumOCC = localIdx->getNOCC( irrep_pq );
      const int NumDMRG = localIdx->getNDMRG( irrep_pq );
      const int NumOCCDMRG = NumOCC + NumDMRG;

      #pragma omp parallel for schedule(static)
      for ( int p = 0; p < NumOCC; p++ ){
         for ( int q = 0; q < NumORB; q++ ){
            localFmat->set( irrep_pq, p, q, 2 * ( localTmat->get( irrep_pq, q, p )
                                                + localJKocc->get( irrep_pq, q, p )
                                                + localJKact->get( irrep_pq, q, p ) ) );
         }
      }

      #pragma omp parallel for schedule(static)
      for ( int p = NumOCC; p < NumOCCDMRG; p++ ){
         const int DMRGindex_p = p - NumOCC + localIdx->getDMRGcumulative( irrep_pq );

         // One-body terms --> matrix multiplication?
         for ( int r = NumOCC; r < NumOCCDMRG; r++ ){
            const double OneDMvalue = local1DM[ DMRGindex_p + totOrbDMRG * ( DMRGindex_p + r - p ) ];
            for ( int q = 0; q < NumORB; q++ ){
               localFmat->getBlock( irrep_pq )[ p + NumORB * q ] += OneDMvalue * ( localTmat->get( irrep_pq, q, r ) + localJKocc->get( irrep_pq, q, r ) );
            }
         }

         // Two-body terms --> matrix multiplication possible?
         for ( int irrep_r = 0; irrep_r < numIrreps; irrep_r++ ){
            const int irrep_product = Irreps::directProd( irrep_pq, irrep_r );
            for ( int irrep_s = 0; irrep_s < numIrreps; irrep_s++ ){
               const int irrep_t = Irreps::directProd( irrep_product, irrep_s );
               for ( int r = localIdx->getNOCC( irrep_r ); r < localIdx->getNOCC( irrep_r ) + localIdx->getNDMRG( irrep_r ); r++ ){
                  const int DMRGindex_r = r - localIdx->getNOCC( irrep_r ) + localIdx->getDMRGcumulative( irrep_r );
                  for ( int s = localIdx->getNOCC( irrep_s ); s < localIdx->getNOCC( irrep_s ) + localIdx->getNDMRG( irrep_s ); s++ ){
                     const int DMRGindex_s = s - localIdx->getNOCC( irrep_s ) + localIdx->getDMRGcumulative( irrep_s );
                     for ( int t = localIdx->getNOCC( irrep_t ); t < localIdx->getNOCC( irrep_t ) + localIdx->getNDMRG( irrep_t ); t++ ){
                        const int DMRGindex_t = t - localIdx->getNOCC( irrep_t ) + localIdx->getDMRGcumulative( irrep_t );
                        const double TwoDMvalue = local2DM[ DMRGindex_p + totOrbDMRG * ( DMRGindex_r + totOrbDMRG * ( DMRGindex_s + totOrbDMRG * DMRGindex_t ) ) ];
                        for ( int q = 0; q < NumORB; q++ ){
                           localFmat->getBlock( irrep_pq )[ p + NumORB * q ] += TwoDMvalue * theInts->FourIndexAPI( irrep_pq, irrep_r, irrep_s, irrep_t, q, r, s, t );
                        }
                     }
                  }
               }
            }
         }
      }
   }

}