CheMPS2
MPIchemps2.h
/*
   CheMPS2: a spin-adapted implementation of DMRG for ab initio quantum chemistry
   Copyright (C) 2013-2016 Sebastian Wouters

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/

#ifndef MPICHEMPS2_CHEMPS2_H
#define MPICHEMPS2_CHEMPS2_H

//#define CHEMPS2_MPI_COMPILATION

#ifdef CHEMPS2_MPI_COMPILATION

   #include <mpi.h>
   #include <assert.h>
   #include "Tensor.h"

   #define MPI_CHEMPS2_MASTER 0

   // Diagrams which are not owned by a specific process are assigned round-robin

   #define MPI_CHEMPS2_4D1AB 1
   #define MPI_CHEMPS2_4D2AB 2
   #define MPI_CHEMPS2_4I1AB 3
   #define MPI_CHEMPS2_4I2AB 4
   #define MPI_CHEMPS2_4F1AB 5
   #define MPI_CHEMPS2_4F2AB 6
   #define MPI_CHEMPS2_4G1AB 7
   #define MPI_CHEMPS2_4G2AB 8

   #define MPI_CHEMPS2_4D3ABCD 9
   #define MPI_CHEMPS2_4D4ABCD 10
   #define MPI_CHEMPS2_4I3ABCD 11
   #define MPI_CHEMPS2_4I4ABCD 12
   #define MPI_CHEMPS2_4F3ABCD 13
   #define MPI_CHEMPS2_4F4ABCD 14
   #define MPI_CHEMPS2_4G3ABCD 15
   #define MPI_CHEMPS2_4G4ABCD 16

   #define MPI_CHEMPS2_4E1 17
   #define MPI_CHEMPS2_4E2 18
   #define MPI_CHEMPS2_4H1 19
   #define MPI_CHEMPS2_4H2 20

   #define MPI_CHEMPS2_4E3A 21
   #define MPI_CHEMPS2_4E3B 22
   #define MPI_CHEMPS2_4E4A 23
   #define MPI_CHEMPS2_4E4B 24
   #define MPI_CHEMPS2_4H3A 25
   #define MPI_CHEMPS2_4H3B 26
   #define MPI_CHEMPS2_4H4A 27
   #define MPI_CHEMPS2_4H4B 28

   #define MPI_CHEMPS2_5A1 29
   #define MPI_CHEMPS2_5A2 30
   #define MPI_CHEMPS2_5A3 31
   #define MPI_CHEMPS2_5A4 32

   #define MPI_CHEMPS2_5B1 33
   #define MPI_CHEMPS2_5B2 34
   #define MPI_CHEMPS2_5B3 35
   #define MPI_CHEMPS2_5B4 36

   #define MPI_CHEMPS2_5C1 37
   #define MPI_CHEMPS2_5C2 38
   #define MPI_CHEMPS2_5C3 39
   #define MPI_CHEMPS2_5C4 40

   #define MPI_CHEMPS2_5D1 41
   #define MPI_CHEMPS2_5D2 42
   #define MPI_CHEMPS2_5D3 43
   #define MPI_CHEMPS2_5D4 44

   #define MPI_CHEMPS2_5E1 45
   #define MPI_CHEMPS2_5E2 46
   #define MPI_CHEMPS2_5E3 47
   #define MPI_CHEMPS2_5E4 48

   #define MPI_CHEMPS2_5F1 49
   #define MPI_CHEMPS2_5F2 50
   #define MPI_CHEMPS2_5F3 51
   #define MPI_CHEMPS2_5F4 52

   #define MPI_CHEMPS2_OFFSET 53

#endif

namespace CheMPS2{

   class MPIchemps2{

      public:

         //! Constructor
         MPIchemps2(){}

         //! Destructor
         virtual ~MPIchemps2(){}

         //! Get the number of MPI processes
         static int mpi_size(){
            #ifdef CHEMPS2_MPI_COMPILATION
            int size;
            MPI_Comm_size( MPI_COMM_WORLD, &size );
            return size;
            #else
            return 1;
            #endif
         }

         //! Get the rank of this MPI process
         static int mpi_rank(){
            #ifdef CHEMPS2_MPI_COMPILATION
            int rank;
            MPI_Comm_rank( MPI_COMM_WORLD, &rank );
            return rank;
            #else
            return 0;
            #endif
         }

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Initialize MPI
         static void mpi_init(){
            int zero = 0;
            MPI_Init( &zero, NULL );
         }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Finalize MPI
         static void mpi_finalize(){
            MPI_Finalize();
         }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! The x-parts are always owned by the master process
         static int owner_x(){ return MPI_CHEMPS2_MASTER; }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Get the owner of the absigma tensors with orbital indices ( index1, index2 )
         static int owner_absigma(const int index1, const int index2){ // 1 <= proc < 1 + L*(L+1)/2
            assert( index1 <= index2 );
            return ( 1 + index1 + (index2*(index2+1))/2 ) % mpi_size();
         }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Get the owner of the cdf tensors with orbital indices ( index1, index2 )
         static int owner_cdf(const int L, const int index1, const int index2){ // 1 + L*(L+1)/2 <= proc < 1 + L*(L+1)
            assert( index1 <= index2 );
            return ( 1 + (L*(L+1))/2 + index1 + (index2*(index2+1))/2 ) % mpi_size();
         }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Get the owner of the q tensor with orbital index ( index )
         static int owner_q(const int L, const int index){ // 1 + L*(L+1) <= proc < 1 + L*(L+2)
            return ( 1 + L*(L+1) + index ) % mpi_size();
         }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Get the owner of the 3-RDM diagram with orbital indices ( index1, index2, index3 )
         static int owner_3rdm_diagram(const int L, const int index1, const int index2, const int index3){ // 1 + L*(L+1) <= proc < 1 + L*(L+1) + L*(L+1)*(L+2)/6
            assert( index1 <= index2 );
            assert( index2 <= index3 );
            return ( 1 + L*(L+1) + index1 + (index2*(index2+1))/2 + (index3*(index3+1)*(index3+2))/6 ) % mpi_size();
         }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! The 1cd2d3eh-parts are always owned by the master process
         static int owner_1cd2d3eh(){ return MPI_CHEMPS2_MASTER; }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Get the owner of a specific diagram ( macro is one of the MPI_CHEMPS2_* macros defined above )
         static int owner_specific_diagram(const int L, const int macro){
            return (macro + L*(L+2)) % mpi_size();
         }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Get the owner of a specific excitation
         static int owner_specific_excitation(const int L, const int excitation){
            return (MPI_CHEMPS2_OFFSET + L*(L+2) + excitation) % mpi_size();
         }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Broadcast a tensor from the process ROOT to all other processes
         static void broadcast_tensor(Tensor * object, int ROOT){
            int arraysize = object->gKappa2index(object->gNKappa());
            MPI_Bcast(object->gStorage(), arraysize, MPI_DOUBLE, ROOT, MPI_COMM_WORLD);
         }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Broadcast an array of doubles from the process ROOT to all other processes
         static void broadcast_array_double(double * array, int length, int ROOT){
            MPI_Bcast(array, length, MPI_DOUBLE, ROOT, MPI_COMM_WORLD);
         }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Broadcast an array of integers from the process ROOT to all other processes
         static void broadcast_array_int(int * array, int length, int ROOT){
            MPI_Bcast(array, length, MPI_INT, ROOT, MPI_COMM_WORLD);
         }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Test whether mybool has the same value on all processes
         static bool all_booleans_equal(const bool mybool){
            int my_value = ( mybool ) ? 1 : 0 ;
            int tot_value;
            MPI_Allreduce(&my_value, &tot_value, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
            return ( my_value * MPIchemps2::mpi_size() == tot_value ); // Only true if mybool is the same for all processes
         }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Send a tensor from the process SENDER to the process RECEIVER
         static void sendreceive_tensor(Tensor * object, int SENDER, int RECEIVER, int tag){
            if ( SENDER != RECEIVER ){
               const int MPIRANK = mpi_rank();
               if ( SENDER == MPIRANK ){
                  int arraysize = object->gKappa2index(object->gNKappa());
                  MPI_Send(object->gStorage(), arraysize, MPI_DOUBLE, RECEIVER, tag, MPI_COMM_WORLD);
               }
               if ( RECEIVER == MPIRANK ){
                  int arraysize = object->gKappa2index(object->gNKappa());
                  MPI_Recv(object->gStorage(), arraysize, MPI_DOUBLE, SENDER, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
               }
            }
         }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Sum-reduce an array of doubles to the process ROOT
         static void reduce_array_double(double * vec_in, double * vec_out, int size, int ROOT){
            MPI_Reduce(vec_in, vec_out, size, MPI_DOUBLE, MPI_SUM, ROOT, MPI_COMM_WORLD);
         }
         #endif

         #ifdef CHEMPS2_MPI_COMPILATION
         //! Sum-reduce an array of doubles over all processes
         static void allreduce_array_double(double * vec_in, double * vec_out, int size){
            MPI_Allreduce(vec_in, vec_out, size, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
         }
         #endif

   };
}

#endif
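The class is a thin static wrapper, so usage is direct. Below is a minimal usage sketch (a hypothetical driver, not part of the library): initialize MPI, let only the master rank print, and shut down cleanly. It compiles in both the serial and the MPI build, since mpi_rank() and mpi_size() fall back to 0 and 1 when CHEMPS2_MPI_COMPILATION is not defined.

#include <iostream>
#include "MPIchemps2.h"

int main(){
   #ifdef CHEMPS2_MPI_COMPILATION
   CheMPS2::MPIchemps2::mpi_init();   // Only exists in the MPI build
   #endif
   if ( CheMPS2::MPIchemps2::mpi_rank() == 0 ){ // Rank 0 is MPI_CHEMPS2_MASTER in the MPI build
      std::cout << "Running on " << CheMPS2::MPIchemps2::mpi_size() << " MPI process(es)." << std::endl;
   }
   #ifdef CHEMPS2_MPI_COMPILATION
   CheMPS2::MPIchemps2::mpi_finalize();
   #endif
   return 0;
}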
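The owner_* helpers partition work deterministically across ranks: reading off the inline range comments, the absigma tensor pairs occupy slots 1 through L*(L+1)/2, the cdf pairs the next L*(L+1)/2, the q tensors the next L, and the MPI_CHEMPS2_* diagram macros start after slot L*(L+2), everything wrapped modulo mpi_size(). For example, owner_absigma(1, 3) evaluates to (1 + 1 + 3*4/2) mod nprocs = 8 mod nprocs, which on four processes is rank 0. The sketch below (a hypothetical driver; L = 6 is an assumed orbital count) enumerates the assignment; it requires an MPI build, since the owner_* helpers only exist there.

#include <cstdio>
#include "MPIchemps2.h"

int main(){
   #ifdef CHEMPS2_MPI_COMPILATION
   CheMPS2::MPIchemps2::mpi_init();
   const int L = 6; // Assumed number of orbitals for illustration
   if ( CheMPS2::MPIchemps2::mpi_rank() == MPI_CHEMPS2_MASTER ){
      for ( int i2 = 0; i2 < L; i2++ ){
         for ( int i1 = 0; i1 <= i2; i1++ ){ // owner_absigma asserts index1 <= index2
            printf( "absigma(%d,%d) is owned by rank %d\n", i1, i2,
                    CheMPS2::MPIchemps2::owner_absigma( i1, i2 ) );
         }
      }
   }
   CheMPS2::MPIchemps2::mpi_finalize();
   #endif
   return 0;
}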
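The collective wrappers compose in the obvious way. A final sketch (again a hypothetical driver, assuming an MPI build): the master fills an array, broadcast_array_double copies it to every rank, and all_booleans_equal then confirms that a locally evaluated condition agrees across all processes.

#include <cstdio>
#include "MPIchemps2.h"

int main(){
   #ifdef CHEMPS2_MPI_COMPILATION
   CheMPS2::MPIchemps2::mpi_init();

   // The master fills the array; after the broadcast all ranks hold its values.
   double data[ 4 ] = { 0.0, 0.0, 0.0, 0.0 };
   if ( CheMPS2::MPIchemps2::mpi_rank() == MPI_CHEMPS2_MASTER ){
      for ( int cnt = 0; cnt < 4; cnt++ ){ data[ cnt ] = 0.5 * cnt; }
   }
   CheMPS2::MPIchemps2::broadcast_array_double( data, 4, MPI_CHEMPS2_MASTER );

   // Every rank evaluates the same condition; all_booleans_equal returns
   // true only if the outcome is identical on all processes.
   const bool large_enough = ( data[ 3 ] > 1.0 );
   if ( CheMPS2::MPIchemps2::all_booleans_equal( large_enough ) &&
        ( CheMPS2::MPIchemps2::mpi_rank() == MPI_CHEMPS2_MASTER ) ){
      printf( "All ranks agree: data[3] = %f\n", data[ 3 ] );
   }

   CheMPS2::MPIchemps2::mpi_finalize();
   #endif
   return 0;
}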