Grid 0.7.0
OpenQcdIOChromaReference.h
Go to the documentation of this file.
1/*************************************************************************************
2
3Grid physics library, www.github.com/paboyle/Grid
4
5Source file: ./lib/parallelIO/OpenQcdIOChromaReference.h
6
7Copyright (C) 2015 - 2020
8
9Author: Daniel Richtmann <daniel.richtmann@ur.de>
10
11This program is free software; you can redistribute it and/or modify
12it under the terms of the GNU General Public License as published by
13the Free Software Foundation; either version 2 of the License, or
14(at your option) any later version.
15
16This program is distributed in the hope that it will be useful,
17but WITHOUT ANY WARRANTY; without even the implied warranty of
18MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19GNU General Public License for more details.
20
21You should have received a copy of the GNU General Public License along
22with this program; if not, write to the Free Software Foundation, Inc.,
2351 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24
25See the full license in the file "LICENSE" in the top level distribution
26directory
27*************************************************************************************/
28/* END LEGAL */
29#pragma once
30
31#include <ios>
32#include <iostream>
33#include <limits>
34#include <iomanip>
35#include <mpi.h>
36#include <ostream>
37#include <string>
38
39#define CHECK {std::cerr << __FILE__ << " @l " << __LINE__ << ": CHECK" << grid->ThisRank() << std::endl;}
40#define CHECK_VAR(a) { std::cerr << __FILE__ << "@l" << __LINE__ << " on "<< grid->ThisRank() << ": " << __func__ << " " << #a << "=" << (a) << std::endl; }
41// #undef CHECK
42// #define CHECK
43
45
// Parallel MPI-IO reader for openQCD gauge configuration files.
// Owns an open MPI file handle plus the derived datatypes describing this
// rank's portion of the (odd-site) gauge data on disk.
class ParRdr {
private:
  bool const swap; // byte-swap flag; always initialized false here — presumably reserved for endian conversion (TODO confirm)

  MPI_Status status; // status of the most recent MPI I/O call
  MPI_File fp;       // handle of the currently open configuration file

  int err; // return code of the most recent MPI call

  MPI_Datatype oddSiteType;  // contiguous type: all link data attached to one odd site
  MPI_Datatype fileViewType; // subarray type selecting this rank's block of the global file

  // NOTE(review): the declaration of the `grid` pointer member used throughout
  // this class is elided in this view of the file.

public:
  // Open `filename` for collective read-only access on communicator `comm`.
  // `gridPtr` describes the lattice decomposition the data will be read onto.
  // Aborts (assert) if the open fails.
  ParRdr(MPI_Comm comm, std::string const& filename, GridBase* gridPtr)
    : swap(false)
    , grid(gridPtr) {
    // const_cast: pre-MPI-3 MPI_File_open signatures take a non-const char*.
    err = MPI_File_open(comm, const_cast<char*>(filename.c_str()), MPI_MODE_RDONLY, MPI_INFO_NULL, &fp);
    assert(err == MPI_SUCCESS);
  }
67
  // Close the file handle; the return code is deliberately ignored in a destructor.
  virtual ~ParRdr() { MPI_File_close(&fp); }
69
70 inline void errInfo(int const err, std::string const& func) {
71 static char estring[MPI_MAX_ERROR_STRING];
72 int eclass = -1, len = 0;
73 MPI_Error_class(err, &eclass);
74 MPI_Error_string(err, estring, &len);
75 std::cerr << func << " - Error " << eclass << ": " << estring << std::endl;
76 }
77
    // Only 4-d SU(3) configurations are supported.
    assert((grid->_ndimension == Nd) && (Nd == 4));
    assert(Nc == 3);

    // NOTE(review): the declaration of `header` (an OpenQcdHeader) sits on a
    // line elided in this view of the file.

    // Read the fixed-size binary header from the start of the file.
    readBlock(reinterpret_cast<char*>(&header), 0, sizeof(OpenQcdHeader), MPI_CHAR);

    // Normalise the stored plaquette — presumably dividing out Nc = 3.
    header.plaq /= 3.; // TODO change this into normalizationfactor

    // sanity check (should trigger on endian issues) TODO remove?
    assert(0 < header.Nt && header.Nt <= 1024);
    assert(0 < header.Nx && header.Nx <= 1024);
    assert(0 < header.Ny && header.Ny <= 1024);
    assert(0 < header.Nz && header.Nz <= 1024);

    // Map the header extents onto Grid's (x,y,z,t) dimension ordering.
    field.dimension[0] = header.Nx;
    field.dimension[1] = header.Ny;
    field.dimension[2] = header.Nz;
    field.dimension[3] = header.Nt;

    // The configuration on disk must match the target grid exactly.
    for(int d = 0; d < Nd; d++)
      assert(grid->FullDimensions()[d] == field.dimension[d]);

    field.plaquette = header.plaq;

    // Gauge data begins immediately after the fixed-size header.
    field.data_start = sizeof(OpenQcdHeader);

    return field.data_start;
  }
108
109 void readBlock(void* const dest, uint64_t const pos, uint64_t const nbytes, MPI_Datatype const datatype) {
110 err = MPI_File_read_at_all(fp, pos, dest, nbytes, datatype, &status);
111 errInfo(err, "MPI_File_read_at_all");
112 // CHECK_VAR(err)
113
114 int read = -1;
115 MPI_Get_count(&status, datatype, &read);
116 // CHECK_VAR(read)
117 assert(nbytes == (uint64_t)read);
118 assert(err == MPI_SUCCESS);
119 }
120
  // Build the two MPI datatypes used for reading:
  //  - oddSiteType:  one odd site's link data as a contiguous run of bytes
  //                  (Nd directions x fwd/bwd x complex 3x3 doubles);
  //  - fileViewType: a subarray selecting this rank's block out of the global
  //                  odd-site layout of the file.
  void createTypes() {
    constexpr int elem_size = Nd * 2 * 2 * Nc * Nc * sizeof(double); // 2_complex 2_fwdbwd

    err = MPI_Type_contiguous(elem_size, MPI_BYTE, &oddSiteType); assert(err == MPI_SUCCESS);
    err = MPI_Type_commit(&oddSiteType); assert(err == MPI_SUCCESS);

    Coordinate const L = grid->GlobalDimensions();
    Coordinate const l = grid->LocalDimensions();
    Coordinate const i = grid->ThisProcessorCoor();

    // Dimension order (z/2, y, x, t) with MPI_ORDER_FORTRAN (first index
    // fastest) — consistent with the t/x/y/z loop nest in
    // copyToLatticeObject. The /2 accounts for only odd sites being stored.
    Coordinate sizes({L[2] / 2, L[1], L[0], L[3]});
    Coordinate subsizes({l[2] / 2, l[1], l[0], l[3]});
    Coordinate starts({i[2] * l[2] / 2, i[1] * l[1], i[0] * l[0], i[3] * l[3]});

    err = MPI_Type_create_subarray(grid->_ndimension, &sizes[0], &subsizes[0], &starts[0], MPI_ORDER_FORTRAN, oddSiteType, &fileViewType); assert(err == MPI_SUCCESS);
    err = MPI_Type_commit(&fileViewType); assert(err == MPI_SUCCESS);
  }
138
139 void freeTypes() {
140 err = MPI_Type_free(&fileViewType); assert(err == MPI_SUCCESS);
141 err = MPI_Type_free(&oddSiteType); assert(err == MPI_SUCCESS);
142 }
143
  // Read this rank's share of the gauge field into `domain_buff`, one
  // ColourMatrixD per link, in openQCD odd-site storage (2*Nd matrices per
  // odd site). Header metadata is returned through `meta`.
  // Always returns true; failures abort via assert.
  bool readGauge(std::vector<ColourMatrixD>& domain_buff, FieldMetaData& meta) {
    auto hdr_offset = readHeader(meta);
    CHECK
    createTypes();
    // View the file as this rank's subarray of odd sites, starting just past the header.
    err = MPI_File_set_view(fp, hdr_offset, oddSiteType, fileViewType, "native", MPI_INFO_NULL); errInfo(err, "MPI_File_set_view0"); assert(err == MPI_SUCCESS);
    CHECK
    int const domainSites = grid->lSites();
    domain_buff.resize(Nd * domainSites); // 2_fwdbwd * 4_Nd * domainSites / 2_onlyodd

    // the actual READ
    constexpr uint64_t cm_size = 2 * Nc * Nc * sizeof(double); // 2_complex
    constexpr uint64_t os_size = Nd * 2 * cm_size; // 2_fwdbwd
    constexpr uint64_t max_elems = std::numeric_limits<int>::max(); // int addressable elems: floor is fine
    uint64_t const n_os = domainSites / 2; // number of odd sites on this rank

    // Chunk the read so each MPI call's element count fits in an int.
    for(uint64_t os_idx = 0; os_idx < n_os;) {
      uint64_t const read_os = os_idx + max_elems <= n_os ? max_elems : n_os - os_idx;
      uint64_t const cm = os_idx * Nd * 2; // destination index in ColourMatrixD units
      readBlock(&(domain_buff[cm]), os_idx, read_os, oddSiteType);
      os_idx += read_os;
    }

    CHECK
    // Restore the default byte-stream view before freeing the derived types.
    err = MPI_File_set_view(fp, 0, MPI_BYTE, MPI_BYTE, "native", MPI_INFO_NULL);
    errInfo(err, "MPI_File_set_view1");
    assert(err == MPI_SUCCESS);
    freeTypes();

    std::cout << GridLogMessage << "read sum: " << n_os * os_size << " bytes" << std::endl;
    return true;
  }
175};
176
public:
  // Read an openQCD configuration from `file` into Umu, then recompute the
  // plaquette and cross-check it against the header value.
  // NOTE(review): the opening line of this signature is elided in this view;
  // per the surrounding code it takes the target gauge field `Umu`, the
  // metadata `header`, and the file name.
  template<class vsimd>
                                       Grid::FieldMetaData& header,
                                       std::string file) {
    typedef Lattice<iDoubleStoredColourMatrix<vsimd>> DoubledGaugeField;

    // Only 4-d SU(3) with 4 spinor components is supported.
    assert(Ns == 4 and Nd == 4 and Nc == 3);

    auto grid = Umu.Grid();

    // On-disk element type: scalar double-precision colour matrix.
    typedef ColourMatrixD fobj;

    std::vector<fobj> iodata(
      Nd * grid->lSites()); // actual size = 2*Nd*lsites but have only lsites/2 sites in file

    {
      // Scoped so the MPI file is closed as soon as the read completes.
      ParRdr rdr(MPI_COMM_WORLD, file, grid);
      rdr.readGauge(iodata, header);
    } // equivalent to using binaryio

    // Unpack file ordering into lexicographic site order (scalar layout).
    std::vector<iDoubleStoredColourMatrix<typename vsimd::scalar_type>> Umu_ds_scalar(grid->lSites());

    copyToLatticeObject(Umu_ds_scalar, iodata, grid); // equivalent to munging

    DoubledGaugeField Umu_ds(grid);

    // Scalar lexicographic array -> SIMD-vectorized lattice object.
    vectorizeFromLexOrdArray(Umu_ds_scalar, Umu_ds);

    redistribute(Umu, Umu_ds); // equivalent to undoDoublestore

    // Recompute the plaquette on the freshly read field.
    FieldMetaData clone(header);

    PeriodicGaugeStatistics Stats; Stats(Umu, clone);

    RealD plaq_diff = fabs(clone.plaquette - header.plaquette);

    // clang-format off
    std::cout << GridLogMessage << "OpenQcd Configuration " << file
              << " plaquette " << clone.plaquette
              << " header " << header.plaquette
              << " difference " << plaq_diff
              << std::endl;
    // clang-format on

    // Precision-dependent base tolerance, scaled with sqrt(#ranks).
    RealD precTol = (getPrecision<vsimd>::value == 1) ? 2e-7 : 2e-15;
    RealD tol = precTol * std::sqrt(grid->_Nprocessors); // taken from RQCD chroma code

    if(plaq_diff >= tol)
      std::cout << " Plaquette mismatch (diff = " << plaq_diff << ", tol = " << tol << ")" << std::endl;
    assert(plaq_diff < tol);

    std::cout << GridLogMessage << "OpenQcd Configuration " << file << " and plaquette agree" << std::endl;
  }
232
private:
  // Convert the double-stored field (2*Nd link components per site) back into
  // the conventional Nd-direction gauge field Umu.
  // NOTE(review): the signature and opening brace are elided in this view.
  template<class vsimd>
    // Direction mu of Umu is assembled from the doublestored component pair
    // (2k, 2k+1) with k = (mu+1) mod 4; the odd component is Cshift-ed one
    // site forward along mu before the two are summed (undoing the
    // doublestore layout — confirm against the openQCD convention).
    Grid::conformable(Umu.Grid(), Umu_ds.Grid());
    Lattice<iColourMatrix<vsimd>> U(Umu.Grid());

    U = PeekIndex<LorentzIndex>(Umu_ds, 2) + Cshift(PeekIndex<LorentzIndex>(Umu_ds, 3), 0, +1); PokeIndex<LorentzIndex>(Umu, U, 0);
    U = PeekIndex<LorentzIndex>(Umu_ds, 4) + Cshift(PeekIndex<LorentzIndex>(Umu_ds, 5), 1, +1); PokeIndex<LorentzIndex>(Umu, U, 1);
    U = PeekIndex<LorentzIndex>(Umu_ds, 6) + Cshift(PeekIndex<LorentzIndex>(Umu_ds, 7), 2, +1); PokeIndex<LorentzIndex>(Umu, U, 2);
    U = PeekIndex<LorentzIndex>(Umu_ds, 0) + Cshift(PeekIndex<LorentzIndex>(Umu_ds, 1), 3, +1); PokeIndex<LorentzIndex>(Umu, U, 3);
  }
245
  // Scatter the flat, file-ordered buffer of colour matrices into a
  // lexicographically indexed vector of double-stored site matrices.
  // Only odd-parity sites carry data (the file stores odd sites only); each
  // odd site contributes 2*Nd consecutive matrices in `node_buff`.
  static inline void copyToLatticeObject(std::vector<DoubleStoredColourMatrix>& u_fb,
                                         std::vector<ColourMatrixD> const& node_buff,
                                         GridBase* grid) {
    // Total links = 2*Nd per odd site * (lSites/2) odd sites = Nd * lSites.
    assert(node_buff.size() == Nd * grid->lSites());

    Coordinate const& l = grid->LocalDimensions();

    // x,y,z,t alias directly into `coord`, so the loop nest below fills the
    // coordinate vector in place as it iterates.
    Coordinate coord(Nd);
    int& x = coord[0];
    int& y = coord[1];
    int& z = coord[2];
    int& t = coord[3];

    int buff_idx = 0;
    for(t = 0; t < l[3]; ++t) // IMPORTANT: openQCD file ordering
      for(x = 0; x < l[0]; ++x)
        for(y = 0; y < l[1]; ++y)
          for(z = 0; z < l[2]; ++z) {
            if((t + z + y + x) % 2 == 0) continue; // even-parity sites are absent from the file

            int local_idx;
            Lexicographic::IndexFromCoor(coord, local_idx, grid->LocalDimensions());
            // Copy all 2*Nd link matrices belonging to this odd site.
            for(int mu = 0; mu < 2 * Nd; ++mu)
              for(int c1 = 0; c1 < Nc; ++c1) {
                for(int c2 = 0; c2 < Nc; ++c2) {
                  u_fb[local_idx](mu)()(c1,c2) = node_buff[mu+buff_idx]()()(c1,c2);
                }
              }
            buff_idx += 2 * Nd;
          }

    // Every matrix in the buffer must have been consumed exactly once.
    assert(node_buff.size() == buff_idx);
  }
279};
280
AcceleratorVector< int, MaxDims > Coordinate
Definition Coordinate.h:95
auto Cshift(const Expression &expr, int dim, int shift) -> decltype(closure(expr))
Definition Cshift.h:55
void PokeIndex(Lattice< vobj > &lhs, const Lattice< decltype(peekIndex< Index >(vobj(), 0))> &rhs, int i)
auto PeekIndex(const Lattice< vobj > &lhs, int i) -> Lattice< decltype(peekIndex< Index >(vobj(), i))>
std::enable_if< isSIMDvectorized< vobj >::value &&!isSIMDvectorized< sobj >::value, void >::type vectorizeFromLexOrdArray(std::vector< sobj > &in, Lattice< vobj > &out)
GridLogger GridLogMessage(1, "Message", GridLogColours, "NORMAL")
GaugeStatistics< PeriodicGimplD > PeriodicGaugeStatistics
Definition MetaData.h:190
#define NAMESPACE_BEGIN(A)
Definition Namespace.h:35
#define NAMESPACE_END(A)
Definition Namespace.h:36
#define CHECK
iVector< iScalar< iMatrix< vtype, Nc > >, Nds > iDoubleStoredColourMatrix
Definition QCD.h:108
static constexpr int Ns
Definition QCD.h:51
static constexpr int Nd
Definition QCD.h:52
static constexpr int Nc
Definition QCD.h:50
iVector< iScalar< iMatrix< vtype, Nc > >, Nd > iLorentzColourMatrix
Definition QCD.h:106
iColourMatrix< ComplexD > ColourMatrixD
Definition QCD.h:135
#define header
double RealD
Definition Simd.h:61
static INTERNAL_PRECISION U
Definition Zolotarev.cc:230
int lSites(void) const
const Coordinate & LocalDimensions(void)
static void copyToLatticeObject(std::vector< DoubleStoredColourMatrix > &u_fb, std::vector< ColourMatrixD > const &node_buff, GridBase *grid)
static void redistribute(Lattice< iLorentzColourMatrix< vsimd > > &Umu, Lattice< iDoubleStoredColourMatrix< vsimd > > const &Umu_ds)
static void readConfiguration(Lattice< iLorentzColourMatrix< vsimd > > &Umu, Grid::FieldMetaData &header, std::string file)
MPI_Datatype oddSiteType
MPI_Datatype fileViewType
int readHeader(FieldMetaData &field)
bool readGauge(std::vector< ColourMatrixD > &domain_buff, FieldMetaData &meta)
void errInfo(int const err, std::string const &func)
void readBlock(void *const dest, uint64_t const pos, uint64_t const nbytes, MPI_Datatype const datatype)
ParRdr(MPI_Comm comm, std::string const &filename, GridBase *gridPtr)