// AddTermSimple, right-moving variant: add terms after update/clear without previous tensors.
void CheMPS2::TensorQ::AddTermSimpleRight(TensorT * denT){
   // ...
   for (int ikappa=0; ikappa<nKappa; ikappa++){
      // ...
      dgemm_(&trans,&notr,&dimRU,&dimRD,&dimL,&alpha,BlockTup,&dimL,BlockTdo,&dimL,&beta,storage+kappa2index[ikappa],&dimRU);
// AddTermSimple, left-moving variant.
void CheMPS2::TensorQ::AddTermSimpleLeft(TensorT * denT){
   // ...
   for (int ikappa=0; ikappa<nKappa; ikappa++){
      // ...
      dgemm_(&notr,&trans,&dimLU,&dimLD,&dimR,&alpha,BlockTup,&dimLU,BlockTdo,&dimLD,&beta,storage+kappa2index[ikappa],&dimLU);
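// --- Illustrative sketch (not part of TensorQ.cpp) ---------------------------
// The dgemm_ calls in this file use the standard column-major BLAS convention:
// C <- alpha * op(A) * op(B) + beta * C, where op(X) is X for 'N' and X^T for
// 'T' (the variable names suggest trans == 'T' and notr == 'N'). The plain-loop
// reference below spells this out; dgemm_reference and its signature are
// illustrative, not CheMPS2 API. Under that reading, the AddTermSimpleRight
// call above contracts BlockTup^T (dimRU x dimL) with BlockTdo (dimL x dimRD)
// over the shared left virtual dimension dimL and writes a dimRU x dimRD block
// into storage at kappa2index[ikappa].
static void dgemm_reference(char transA, char transB, int m, int n, int k,
                            double alpha, const double * A, int lda,
                            const double * B, int ldb,
                            double beta, double * C, int ldc){
   for (int col = 0; col < n; col++){
      for (int row = 0; row < m; row++){
         double sum = 0.0;
         for (int l = 0; l < k; l++){
            const double a_val = (transA == 'N') ? A[row + lda * l] : A[l + lda * row];
            const double b_val = (transB == 'N') ? B[l + ldb * col] : B[col + ldb * l];
            sum += a_val * b_val;
         }
         C[row + ldc * col] = alpha * sum + beta * C[row + ldc * col];
      }
   }
}
// -----------------------------------------------------------------------------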
// AddTermsL: add terms after update/clear with previous TensorL's.
   if (moving_right){ AddTermsLRight(Ltensors, denT, workmem, workmem2); }
   else{ AddTermsLLeft( Ltensors, denT, workmem, workmem2); }
void CheMPS2::TensorQ::AddTermsLRight(TensorL ** Ltensors, TensorT * denT, double * workmem, double * workmem2){
   bool OneToAdd = false;
   for (int loca=0; loca<index-1; loca++){
      // ...
   for (int ikappa=0; ikappa<nKappa; ikappa++){
      // ...
      if ((dimLU>0) && (dimLD>0)){
         int dimLUxLD = dimLU * dimLD;
         for (int cnt=0; cnt<dimLUxLD; cnt++){ workmem[cnt] = 0.0; }
         for (int loca=0; loca<index-1; loca++){
            double alpha = Prob->gMxElement(loca,site,index-1,index-1);
            daxpy_(&dimLUxLD, &alpha, BlockL, &inc, workmem, &inc);
         // ...
         dgemm_(&totrans,&totrans,&dimRU,&dimLD,&dimLU,&alpha,BlockTup,&dimLU,workmem,&dimLD,&beta,workmem2,&dimRU);
         // ...
         dgemm_(&totrans,&totrans,&dimRU,&dimRD,&dimLD,&alpha,workmem2,&dimRU,BlockTdo,&dimLD,&beta,storage+kappa2index[ikappa],&dimRU);
      // ...
      if ((dimLU>0) && (dimLD>0)){
         int dimLUxLD = dimLU * dimLD;
         for (int cnt=0; cnt<dimLUxLD; cnt++){ workmem[cnt] = 0.0; }
         for (int loca=0; loca<index-1; loca++){
            double alpha = 2*Prob->gMxElement(loca,index-1,site,index-1) - Prob->gMxElement(loca,index-1,index-1,site);
            daxpy_(&dimLUxLD, &alpha, BlockL, &inc, workmem, &inc);
         // ...
         dgemm_(&trans,&notr,&dimRU,&dimLD,&dimLU,&alpha,BlockTup,&dimLU,workmem,&dimLU,&beta,workmem2,&dimRU);
         // ...
         dgemm_(&notr,&notr,&dimRU,&dimRD,&dimLD,&alpha,workmem2,&dimRU,BlockTdo,&dimLD,&beta,storage+kappa2index[ikappa],&dimRU);
      // ...
      if ((TwoSLD>=0) && (TwoSLU>=0) && (abs(TwoSLD-TwoSLU)<2)){
         if ((dimLU>0) && (dimLD>0)){
            double factor = fase * sqrt((TwoSLD+1)*(sector_spin_up[ikappa]+1.0))
            // ...
            int dimLUxLD = dimLU * dimLD;
            for (int cnt=0; cnt<dimLUxLD; cnt++){ workmem[cnt] = 0.0; }
            for (int loca=0; loca<index-1; loca++){
               double alpha = factor * Prob->gMxElement(loca,index-1,site,index-1);
               daxpy_(&dimLUxLD, &alpha, BlockL, &inc, workmem, &inc);
            // ...
            dgemm_(&trans,&notr,&dimRU,&dimLD,&dimLU,&alpha,BlockTup,&dimLU,workmem,&dimLU,&beta,workmem2,&dimRU);
            // ...
            dgemm_(&notr,&notr,&dimRU,&dimRD,&dimLD,&alpha,workmem2,&dimRU,BlockTdo,&dimLD,&beta,storage+kappa2index[ikappa],&dimRU);
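// --- Illustrative sketch (not part of TensorQ.cpp) ---------------------------
// The per-block pattern in AddTermsLRight above, written with plain loops in
// place of daxpy_/dgemm_: (1) build a weighted sum of left L-tensor blocks in a
// work buffer, (2) sandwich it between the up and down MPS site tensors, and
// (3) add the result to the Q-tensor block on the right boundary. All names
// (coeff, Lblocks, Tup, Tdo, Qblock) are illustrative, and the beta = 1
// accumulation into the stored block is an assumption inferred from the
// "AddTerms" semantics.
#include <vector>

static void weighted_L_sandwich(int dimLU, int dimLD, int dimRU, int dimRD,
                                const std::vector<double> & coeff,            // one weight per L-tensor
                                const std::vector<const double *> & Lblocks,  // each block is dimLU x dimLD
                                const double * Tup,                           // dimLU x dimRU (column-major)
                                const double * Tdo,                           // dimLD x dimRD (column-major)
                                double * Qblock){                             // dimRU x dimRD, updated in place
   // Step 1: work = sum_l coeff[l] * L_l   (cf. the daxpy_ loop over loca)
   const int num_l = (int) Lblocks.size();
   std::vector<double> work(dimLU * dimLD, 0.0);
   for (int l = 0; l < num_l; l++){
      for (int cnt = 0; cnt < dimLU * dimLD; cnt++){ work[cnt] += coeff[l] * Lblocks[l][cnt]; }
   }
   // Step 2: work2 = Tup^T * work   (cf. dgemm_(&trans,&notr,...))
   std::vector<double> work2(dimRU * dimLD, 0.0);
   for (int j = 0; j < dimLD; j++){
      for (int i = 0; i < dimRU; i++){
         for (int k = 0; k < dimLU; k++){ work2[i + dimRU * j] += Tup[k + dimLU * i] * work[k + dimLU * j]; }
      }
   }
   // Step 3: Qblock += work2 * Tdo   (cf. dgemm_(&notr,&notr,...))
   for (int j = 0; j < dimRD; j++){
      for (int i = 0; i < dimRU; i++){
         for (int k = 0; k < dimLD; k++){ Qblock[i + dimRU * j] += work2[i + dimRU * k] * Tdo[k + dimLD * j]; }
      }
   }
}
// -----------------------------------------------------------------------------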
void CheMPS2::TensorQ::AddTermsLLeft(TensorL ** Ltensors, TensorT * denT, double * workmem, double * workmem2){
   bool OneToAdd = false;
   for (int loca=index+1; loca<Prob->gL(); loca++){
      // ...
   for (int ikappa=0; ikappa<nKappa; ikappa++){
      // ...
      if ((dimRU>0) && (dimRD>0)){
         int dimRUxRD = dimRU * dimRD;
         for (int cnt=0; cnt<dimRUxRD; cnt++){ workmem[cnt] = 0.0; }
         for (int loca=index+1; loca<Prob->gL(); loca++){
            // ...
            daxpy_(&dimRUxRD, &alpha, BlockL, &inc, workmem, &inc);
         // ...
         dgemm_(&notr,&trans,&dimLU,&dimRD,&dimRU,&alpha,BlockTup,&dimLU,workmem,&dimRD,&beta,workmem2,&dimLU);
         // ...
         dgemm_(&notr,&trans,&dimLU,&dimLD,&dimRD,&alpha,workmem2,&dimLU,BlockTdo,&dimLD,&beta,storage+kappa2index[ikappa],&dimLU);
      // ...
      if ((dimRU>0) && (dimRD>0)){
         int dimRUxRD = dimRU * dimRD;
         for (int cnt=0; cnt<dimRUxRD; cnt++){ workmem[cnt] = 0.0; }
         for (int loca=index+1; loca<Prob->gL(); loca++){
            // ...
            daxpy_(&dimRUxRD, &alpha, BlockL, &inc, workmem, &inc);
         // ...
         dgemm_(&notr,&notr,&dimLU,&dimRD,&dimRU,&alpha,BlockTup,&dimLU,workmem,&dimRU,&beta,workmem2,&dimLU);
         // ...
         dgemm_(&notr,&trans,&dimLU,&dimLD,&dimRD,&alpha,workmem2,&dimLU,BlockTdo,&dimLD,&beta,storage+kappa2index[ikappa],&dimLU);
      // ...
      if ((TwoSRD>=0) && (TwoSRU>=0) && (abs(TwoSRD-TwoSRU)<2)){
         if ((dimRU>0) && (dimRD>0)){
            double factor1 = fase * sqrt((TwoSRU+1.0)/(sector_spin_down[ikappa]+1.0)) * (TwoSRD+1)
            // ...
            int dimRUxRD = dimRU * dimRD;
            for (int cnt=0; cnt<dimRUxRD; cnt++){ workmem[cnt] = 0.0; }
            for (int loca=index+1; loca<Prob->gL(); loca++){
               // ...
               daxpy_(&dimRUxRD, &alpha, BlockL, &inc, workmem, &inc);
            // ...
            dgemm_(&notr,&notr,&dimLU,&dimRD,&dimRU,&alpha,BlockTup,&dimLU,workmem,&dimRU,&beta,workmem2,&dimLU);
            // ...
            dgemm_(&notr,&trans,&dimLU,&dimLD,&dimRD,&alpha,workmem2,&dimLU,BlockTdo,&dimLD,&beta,storage+kappa2index[ikappa],&dimLU);
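// --- Illustrative sketch (not part of TensorQ.cpp) ---------------------------
// AddTermsLLeft mirrors AddTermsLRight: the weighted sum of L-tensor blocks now
// lives on the right virtual bond (dimRU x dimRD) and the result is a Q-tensor
// block on the left boundary (dimLU x dimLD). Written with plain loops, the
// second branch above (the &notr,&notr / &notr,&trans dgemm_ pair) performs the
// following; names are illustrative and beta = 1 (accumulation) is assumed.
static void mirrored_left_update(int dimLU, int dimLD, int dimRU, int dimRD,
                                 const double * work,  // dimRU x dimRD: weighted sum of right L-blocks
                                 const double * Tup,   // dimLU x dimRU (column-major)
                                 const double * Tdo,   // dimLD x dimRD (column-major)
                                 double * work2,       // scratch, at least dimLU x dimRD
                                 double * Qblock){     // dimLU x dimLD, updated in place
   // work2 = Tup * work   (cf. dgemm_(&notr,&notr,&dimLU,&dimRD,&dimRU,...))
   for (int j = 0; j < dimRD; j++){
      for (int i = 0; i < dimLU; i++){
         double sum = 0.0;
         for (int k = 0; k < dimRU; k++){ sum += Tup[i + dimLU * k] * work[k + dimRU * j]; }
         work2[i + dimLU * j] = sum;
      }
   }
   // Qblock += work2 * Tdo^T   (cf. dgemm_(&notr,&trans,&dimLU,&dimLD,&dimRD,...))
   for (int j = 0; j < dimLD; j++){
      for (int i = 0; i < dimLU; i++){
         for (int k = 0; k < dimRD; k++){ Qblock[i + dimLU * j] += work2[i + dimLU * k] * Tdo[j + dimLD * k]; }
      }
   }
}
// -----------------------------------------------------------------------------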
// AddTermsAB: add terms after update/clear with previous A-tensors and B-tensors.
   if (moving_right){ AddTermsABRight(denA, denB, denT, workmem, workmem2); }
   else{ AddTermsABLeft( denA, denB, denT, workmem, workmem2); }
// AddTermsABRight (fragment):
for (int ikappa=0; ikappa<nKappa; ikappa++){
   // ...
   if ((dimLU>0) && (dimLD>0)){
      // ...
      for (int cnt=0; cnt<dimLU*dimLD; cnt++){ mem[cnt] = factorA * BlockA[cnt] + factorB * BlockB[cnt]; }
      // ...
      dgemm_(&trans,&notr,&dimRU,&dimLD,&dimLU,&alpha,BlockTup,&dimLU,mem,&dimLU,&beta,workmem2,&dimRU);
      // ...
      dgemm_(&notr,&notr,&dimRU,&dimRD,&dimLD,&alpha,workmem2,&dimRU,BlockTdo,&dimLD,&beta,storage+kappa2index[ikappa],&dimRU);
   // ...
   if ((dimLU>0) && (dimLD>0)){
      // ...
      double factorA = - sqrt(0.5);
      // ...
      for (int cnt=0; cnt<dimLU*dimLD; cnt++){ mem[cnt] = factorA * BlockA[cnt] + factorB * BlockB[cnt]; }
      // ...
      dgemm_(&trans,&notr,&dimRU,&dimLD,&dimLU,&alpha,BlockTup,&dimLU,mem,&dimLU,&beta,workmem2,&dimRU);
      // ...
      dgemm_(&notr,&notr,&dimRU,&dimRD,&dimLD,&alpha,workmem2,&dimRU,BlockTdo,&dimLD,&beta,storage+kappa2index[ikappa],&dimRU);
// AddTermsABLeft (fragment):
for (int ikappa=0; ikappa<nKappa; ikappa++){
   // ...
   if ((dimRU>0) && (dimRD>0)){
      // ...
      int fase = ((((TwoSRD + sector_spin_up[ikappa] + 2)/2)%2)!=0)?-1:1;
      const double factorB = fase * sqrt(3.0/(sector_spin_down[ikappa]+1.0)) * (TwoSRD+1)
      // ...
      for (int cnt=0; cnt<dimRU*dimRD; cnt++){ mem[cnt] = factorA * BlockA[cnt] + factorB * BlockB[cnt]; }
      // ...
      dgemm_(&notr,&notr,&dimLU,&dimRD,&dimRU,&alpha,BlockTup,&dimLU,mem,&dimRU,&beta,workmem2,&dimLU);
      // ...
      dgemm_(&notr,&trans,&dimLU,&dimLD,&dimRD,&alpha,workmem2,&dimLU,BlockTdo,&dimLD,&beta,storage+kappa2index[ikappa],&dimLU);
   // ...
   if ((dimRU>0) && (dimRD>0)){
      // ...
      double factorA = - sqrt(0.5);
      // ...
      for (int cnt=0; cnt<dimRU*dimRD; cnt++){ mem[cnt] = factorA * BlockA[cnt] + factorB * BlockB[cnt]; }
      // ...
      dgemm_(&notr,&notr,&dimLU,&dimRD,&dimRU,&alpha,BlockTup,&dimLU,mem,&dimRU,&beta,workmem2,&dimLU);
      // ...
      dgemm_(&notr,&trans,&dimLU,&dimLD,&dimRD,&alpha,workmem2,&dimLU,BlockTdo,&dimLD,&beta,storage+kappa2index[ikappa],&dimLU);
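// --- Illustrative sketch (not part of TensorQ.cpp) ---------------------------
// The "fase" ternaries in the spin-coupled branches are sign factors written as
// integer parity tests on twice-the-spin quantum numbers, e.g. the expression
// ((((TwoSRD + sector_spin_up[ikappa] + 2)/2)%2)!=0)?-1:1 in AddTermsABLeft
// above. A hypothetical helper reproducing exactly that integer-division-and-
// parity test; when its argument is even it equals (-1)^(argument/2).
static int phase_from_two_j_sum(const int two_j_sum){
   return (((two_j_sum / 2) % 2) != 0) ? -1 : 1;
}
// e.g. fase = phase_from_two_j_sum(TwoSRD + sector_spin_up[ikappa] + 2);
// -----------------------------------------------------------------------------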
// AddTermsCD: add terms after update/clear with previous C-tensors and D-tensors.
   if (moving_right){ AddTermsCDRight(denC, denD, denT, workmem, workmem2); }
   else{ AddTermsCDLeft(denC, denD, denT, workmem, workmem2); }
// AddTermsCDRight (fragment):
for (int ikappa=0; ikappa<nKappa; ikappa++){
   // ...
   if ((dimLU>0) && (dimLD>0)){
      int dimLUxLD = dimLU * dimLD;
      // ...
      for (int cnt=0; cnt<dimLUxLD; cnt++){ workmem[cnt] = factor * block[cnt]; }
      // ...
      daxpy_(&dimLUxLD, &factor, block, &inc, workmem, &inc);
      // ...
      dgemm_(&trans,&notr,&dimRU,&dimLD,&dimLU,&alpha,BlockTup,&dimLU,workmem,&dimLU,&beta,workmem2,&dimRU);
      dgemm_(&notr,&notr,&dimRU,&dimRD,&dimLD,&alpha,workmem2,&dimRU,BlockTdo,&dimLD,&beta,storage+kappa2index[ikappa],&dimRU);
   // ...
   if ((dimLU>0) && (dimLD>0)){
      int dimLUxLD = dimLU * dimLD;
      // ...
      for (int cnt=0; cnt<dimLUxLD; cnt++){ workmem[cnt] = factor * block[cnt]; }
      // ...
      daxpy_(&dimLUxLD, &factor, block, &inc, workmem, &inc);
      // ...
      dgemm_(&trans,&notr,&dimRU,&dimLD,&dimLU,&alpha,BlockTup,&dimLU,workmem,&dimLU,&beta,workmem2,&dimRU);
      dgemm_(&notr,&notr,&dimRU,&dimRD,&dimLD,&alpha,workmem2,&dimRU,BlockTdo,&dimLD,&beta,storage+kappa2index[ikappa],&dimRU);
// AddTermsCDLeft (fragment):
for (int ikappa=0; ikappa<nKappa; ikappa++){
   // ...
   if ((dimRU>0) && (dimRD>0)){
      int dimRUxRD = dimRU * dimRD;
      // ...
      double factor = fase * sqrt(3.0/(sector_spin_down[ikappa]+1.0)) * ( TwoSRU + 1 )
      // ...
      for (int cnt=0; cnt<dimRUxRD; cnt++){ workmem[cnt] = factor * block[cnt]; }
      // ...
      daxpy_(&dimRUxRD, &factor, block, &inc, workmem, &inc);
      // ...
      dgemm_(&notr,&notr,&dimLU,&dimRD,&dimRU,&alpha,BlockTup,&dimLU,workmem,&dimRU,&beta,workmem2,&dimLU);
      // ...
      dgemm_(&notr,&trans,&dimLU,&dimLD,&dimRD,&alpha,workmem2,&dimLU,BlockTdo,&dimLD,&beta,storage+kappa2index[ikappa],&dimLU);
   // ...
   if ((dimRU>0) && (dimRD>0)){
      int dimRUxRD = dimRU * dimRD;
      // ...
      int fase = (((TwoSRD+1)%2)!=0)?-1:1;
      // ...
      for (int cnt=0; cnt<dimRUxRD; cnt++){ workmem[cnt] = factor * block[cnt]; }
      // ...
      daxpy_(&dimRUxRD, &factor, block, &inc, workmem, &inc);
      // ...
      dgemm_(&notr,&notr,&dimLU,&dimRD,&dimRU,&alpha,BlockTup,&dimLU,workmem,&dimRU,&beta,workmem2,&dimLU);
      // ...
      dgemm_(&notr,&trans,&dimLU,&dimLD,&dimRD,&alpha,workmem2,&dimLU,BlockTdo,&dimLD,&beta,storage+kappa2index[ikappa],&dimLU);
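// --- Illustrative sketch (not part of TensorQ.cpp) ---------------------------
// How the flat storage is addressed in every call above: the variables of
// tensor block kappa start at storage + kappa2index[kappa] and end just before
// storage + kappa2index[kappa + 1]; TensorQ exposes the raw pointer via
// gStorage(). Two hypothetical helpers making the addressing explicit:
static double * block_start(double * storage, const int * kappa2index, const int kappa){
   return storage + kappa2index[kappa];
}
static int block_size(const int * kappa2index, const int kappa){
   return kappa2index[kappa + 1] - kappa2index[kappa];
}
// -----------------------------------------------------------------------------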