Grid 0.7.0
WilsonLoops.h
Go to the documentation of this file.
1/*************************************************************************************
2
3 Grid physics library, www.github.com/paboyle/Grid
4
5 Source file: ./lib/qcd/utils/WilsonLoops.h
6
7 Copyright (C) 2015
8
9 Author: Azusa Yamaguchi <ayamaguc@staffmail.ed.ac.uk>
10 Author: Peter Boyle <paboyle@ph.ed.ac.uk>
11 Author: neo <cossu@post.kek.jp>
12 Author: paboyle <paboyle@ph.ed.ac.uk>
13 Author: James Harrison <J.Harrison@soton.ac.uk>
14 Author: Antonin Portelli <antonin.portelli@me.com>
15
16 This program is free software; you can redistribute it and/or modify
17 it under the terms of the GNU General Public License as published by
18 the Free Software Foundation; either version 2 of the License, or
19 (at your option) any later version.
20
21 This program is distributed in the hope that it will be useful,
22 but WITHOUT ANY WARRANTY; without even the implied warranty of
23 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 GNU General Public License for more details.
25
26 You should have received a copy of the GNU General Public License along
27 with this program; if not, write to the Free Software Foundation, Inc.,
28 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
29
See the full license in the file "LICENSE" in the top level distribution directory
32*************************************************************************************/
33/* END LEGAL */
34#ifndef QCD_UTILS_WILSON_LOOPS_H
35#define QCD_UTILS_WILSON_LOOPS_H
36
38
39// Common wilson loop observables
40template <class Gimpl> class WilsonLoops : public Gimpl {
41public:
43
44 typedef typename Gimpl::GaugeLinkField GaugeMat;
45 typedef typename Gimpl::GaugeField GaugeLorentz;
46
48 // directed plaquette oriented in mu,nu plane
50 static void dirPlaquette(GaugeMat &plaq, const std::vector<GaugeMat> &U,
51 const int mu, const int nu) {
52 // Annoyingly, must use either scope resolution to find dependent base
53 // class,
54 // or this-> ; there is no "this" in a static method. This forces explicit
55 // Gimpl scope
56 // resolution throughout the usage in this file, and rather defeats the
57 // purpose of deriving
58 // from Gimpl.
59 /*
60 plaq = Gimpl::CovShiftBackward(
61 U[mu], mu, Gimpl::CovShiftBackward(
62 U[nu], nu, Gimpl::CovShiftForward(U[mu], mu, U[nu])));
63 */
64 // _
65 //|< _|
66 plaq = Gimpl::CovShiftForward(U[mu],mu,
67 Gimpl::CovShiftForward(U[nu],nu,
68 Gimpl::CovShiftBackward(U[mu],mu,
69 Gimpl::CovShiftIdentityBackward(U[nu], nu))));
70
71
72
73
74 }
75
76 // trace of directed plaquette oriented in mu,nu plane
79 const std::vector<GaugeMat> &U, const int mu,
80 const int nu) {
81 GaugeMat sp(U[0].Grid());
82 dirPlaquette(sp, U, mu, nu);
83 plaq = trace(sp);
84 }
85
86 // sum over all planes of plaquette
88 static void sitePlaquette(ComplexField &Plaq,
89 const std::vector<GaugeMat> &U) {
90 ComplexField sitePlaq(U[0].Grid());
91 Plaq = Zero();
92 for (int mu = 1; mu < Nd; mu++) {
93 for (int nu = 0; nu < mu; nu++) {
94 traceDirPlaquette(sitePlaq, U, mu, nu);
95 Plaq = Plaq + sitePlaq;
96 }
97 }
98 }
99
100 // sum over all x,y,z,t and over all planes of plaquette
102 static RealD sumPlaquette(const GaugeLorentz &Umu) {
103 std::vector<GaugeMat> U(Nd, Umu.Grid());
104 // inefficient here
105 for (int mu = 0; mu < Nd; mu++) {
106 U[mu] = PeekIndex<LorentzIndex>(Umu, mu);
107 }
108
109 ComplexField Plaq(Umu.Grid());
110
111 sitePlaquette(Plaq, U);
112 auto Tp = sum(Plaq);
113 auto p = TensorRemove(Tp);
114 return p.real();
115 }
116
117
119 // average over all x,y,z,t and over all planes of plaquette
121 static RealD avgPlaquette(const GaugeLorentz &Umu) {
122 RealD sumplaq = sumPlaquette(Umu);
123 double vol = Umu.Grid()->gSites();
124 double faces = (1.0 * Nd * (Nd - 1)) / 2.0;
125 return sumplaq / vol / faces / Nc; // Nd , Nc dependent... FIXME
126 }
127
129 // sum over all spatial planes of plaquette
132 const std::vector<GaugeMat> &U) {
133 ComplexField sitePlaq(U[0].Grid());
134 Plaq = Zero();
135 for (int mu = 1; mu < Nd-1; mu++) {
136 for (int nu = 0; nu < mu; nu++) {
137 traceDirPlaquette(sitePlaq, U, mu, nu);
138 Plaq = Plaq + sitePlaq;
139 }
140 }
141 }
142
144 // sum over all x,y,z and over all spatial planes of plaquette
146 static std::vector<RealD> timesliceSumSpatialPlaquette(const GaugeLorentz &Umu) {
147 std::vector<GaugeMat> U(Nd, Umu.Grid());
148 // inefficient here
149 for (int mu = 0; mu < Nd; mu++) {
150 U[mu] = PeekIndex<LorentzIndex>(Umu, mu);
151 }
152
153 ComplexField Plaq(Umu.Grid());
154
155 siteSpatialPlaquette(Plaq, U);
156 typedef typename ComplexField::scalar_object sobj;
157 std::vector<sobj> Tq;
158 sliceSum(Plaq, Tq, Nd-1);
159
160 std::vector<Real> out(Tq.size());
161 for(int t=0;t<Tq.size();t++) out[t] = TensorRemove(Tq[t]).real();
162 return out;
163 }
164
166 // average over all x,y,z and over all spatial planes of plaquette
168 static std::vector<RealD> timesliceAvgSpatialPlaquette(const GaugeLorentz &Umu) {
169 std::vector<RealD> sumplaq = timesliceSumSpatialPlaquette(Umu);
170 int Lt = Umu.Grid()->FullDimensions()[Nd-1];
171 assert(sumplaq.size() == Lt);
172 double vol = Umu.Grid()->gSites() / Lt;
173 double faces = (1.0 * (Nd - 1)* (Nd - 2)) / 2.0;
174 for(int t=0;t<Lt;t++)
175 sumplaq[t] = sumplaq[t] / vol / faces / Nc; // Nd , Nc dependent... FIXME
176 return sumplaq;
177 }
178
180 // average Polyakov loop in mu direction over all directions != mu
182 static ComplexD avgPolyakovLoop(const GaugeField &Umu, const int mu) { //assume Nd=4
183
184 // Protect against bad value of mu [0, 3]
185 if ((mu < 0 ) || (mu > 3)) {
186 std::cout << GridLogError << "Index is not an integer inclusively between 0 and 3." << std::endl;
187 exit(1);
188 }
189
190 // U_loop is U_{mu}
191 GaugeMat U_loop(Umu.Grid()), P(Umu.Grid());
192 ComplexD out;
193 int T = Umu.Grid()->GlobalDimensions()[3];
194 int X = Umu.Grid()->GlobalDimensions()[0];
195 int Y = Umu.Grid()->GlobalDimensions()[1];
196 int Z = Umu.Grid()->GlobalDimensions()[2];
197
198 // Number of sites in mu direction
199 int N_mu = Umu.Grid()->GlobalDimensions()[mu];
200
201 U_loop = peekLorentz(Umu, mu); //Select direction
202 P = U_loop;
203 for (int t=1;t<N_mu;t++){
204 P = Gimpl::CovShiftForward(U_loop,mu,P);
205 }
206 RealD norm = 1.0/(Nc*X*Y*Z*T);
207 out = sum(trace(P))*norm;
208 return out;
209 }
210
212 // overload for temporal Polyakov loop
214 static ComplexD avgPolyakovLoop(const GaugeField &Umu) {
215 return avgPolyakovLoop(Umu, 3);
216 }
217
219 // average over traced single links
221 static RealD linkTrace(const GaugeLorentz &Umu) {
222 std::vector<GaugeMat> U(Nd, Umu.Grid());
223
224 ComplexField Tr(Umu.Grid());
225 Tr = Zero();
226 for (int mu = 0; mu < Nd; mu++) {
227 U[mu] = PeekIndex<LorentzIndex>(Umu, mu);
228 Tr = Tr + trace(U[mu]);
229 }
230
231 auto Tp = sum(Tr);
232 auto p = TensorRemove(Tp);
233
234 double vol = Umu.Grid()->gSites();
235
236 return p.real() / vol / (4.0 * Nc ) ;
237 };
238
240 // the sum over all staples on each site in direction mu,nu
242 static void Staple(GaugeMat &staple, const GaugeLorentz &Umu, int mu,
243 int nu) {
244
245 GridBase *grid = Umu.Grid();
246
247 std::vector<GaugeMat> U(Nd, grid);
248 for (int d = 0; d < Nd; d++) {
249 U[d] = PeekIndex<LorentzIndex>(Umu, d);
250 }
251 staple = Zero();
252
253 if (nu != mu) {
254
255 // mu
256 // ^
257 // |__> nu
258
259 // __
260 // |
261 // __|
262 //
263
264 staple += Gimpl::ShiftStaple(
265 Gimpl::CovShiftForward(
266 U[nu], nu,
267 Gimpl::CovShiftBackward(
268 U[mu], mu, Gimpl::CovShiftIdentityBackward(U[nu], nu))),
269 mu);
270
271 // __
272 // |
273 // |__
274 //
275 //
276 staple += Gimpl::ShiftStaple(
277 Gimpl::CovShiftBackward(U[nu], nu,
278 Gimpl::CovShiftBackward(U[mu], mu, U[nu])),
279 mu);
280 }
281 }
282
283
284 // For the force term
285/*
286 static void StapleMult(GaugeMat &staple, const GaugeLorentz &Umu, int mu) {
287 GridBase *grid = Umu.Grid();
288 std::vector<GaugeMat> U(Nd, grid);
289 for (int d = 0; d < Nd; d++) {
290 // this operation is taking too much time
291 U[d] = PeekIndex<LorentzIndex>(Umu, d);
292 }
293 staple = Zero();
294 GaugeMat tmp1(grid);
295 GaugeMat tmp2(grid);
296
297 for (int nu = 0; nu < Nd; nu++) {
298 if (nu != mu) {
299 // this is ~10% faster than the Staple -- PAB: so what it gives the WRONG answers for other BC's!
300 tmp1 = Cshift(U[nu], mu, 1);
301 tmp2 = Cshift(U[mu], nu, 1);
302 staple += tmp1* adj(U[nu]*tmp2);
303 tmp2 = adj(U[mu]*tmp1)*U[nu];
304 staple += Cshift(tmp2, nu, -1);
305 }
306 }
307 staple = U[mu]*staple;
308 }
309*/
311 // the sum over all nu-oriented staples for nu != mu on each site
313 static void Staple(GaugeMat &staple, const GaugeLorentz &U, int mu) {
314
315 std::vector<GaugeMat> Umu(Nd, U.Grid());
316 for (int d = 0; d < Nd; d++) {
317 Umu[d] = PeekIndex<LorentzIndex>(U, d);
318 }
319 Staple(staple, Umu, mu);
320 }
321
  // Sum of staples attached to the mu-links, accumulated over every nu != mu.
  // Umu: one link field per direction (Nd entries); staple is overwritten.
  static void Staple(GaugeMat &staple, const std::vector<GaugeMat> &Umu, int mu) {

    // zero the output in place through a device view
    autoView(staple_v, staple, AcceleratorWrite);
    accelerator_for(i, staple.Grid()->oSites(), Simd::Nsimd(), {
      staple_v[i] = Zero();
    });

    for (int nu = 0; nu < Nd; nu++) {

      if (nu != mu) {

        // mu
        // ^
        // |__> nu

        // upper staple
        //  __
        //    |
        //  __|
        //

        staple += Gimpl::ShiftStaple(
            Gimpl::CovShiftForward(
                Umu[nu], nu,
                Gimpl::CovShiftBackward(
                    Umu[mu], mu, Gimpl::CovShiftIdentityBackward(Umu[nu], nu))),
            mu);

        // lower staple
        //  __
        // |
        // |__
        //
        //

        staple += Gimpl::ShiftStaple(
            Gimpl::CovShiftBackward(Umu[nu], nu,
                                    Gimpl::CovShiftBackward(Umu[mu], mu, Umu[nu])), mu);
      }
    }
  }
361
363 //Staples for each direction mu, summed over nu != mu
364 //staple: output staples for each mu (Nd)
365 //U: link array (Nd)
367 static void StapleAll(std::vector<GaugeMat> &staple, const std::vector<GaugeMat> &U) {
368 assert(staple.size() == Nd); assert(U.size() == Nd);
369 for(int mu=0;mu<Nd;mu++) Staple(staple[mu], U, mu);
370 }
371
372
  //A workspace class allowing reuse of the stencil
  // NOTE(review): the class declaration line itself was dropped by the
  // extraction that produced this file; the members below belong to it.
  std::unique_ptr<GeneralLocalStencil> stencil; // cached stencil; rebuilt when the padded grid changes
  size_t nshift; // number of shift vectors used to build the stencil
377
378 void generateStencil(GridBase* padded_grid){
379 double t0 = usecond();
380
381 //Generate shift arrays
382 std::vector<Coordinate> shifts = this->getShifts();
383 nshift = shifts.size();
384
385 double t1 = usecond();
386 //Generate local stencil
387 stencil.reset(new GeneralLocalStencil(padded_grid,shifts));
388 double t2 = usecond();
389 std::cout << GridLogPerformance << " WilsonLoopPaddedWorkspace timings: coord:" << (t1-t0)/1000 << "ms, stencil:" << (t2-t1)/1000 << "ms" << std::endl;
390 }
391 public:
392 //Get the stencil. If not already generated, or if generated using a different Grid than in PaddedCell, it will be created on-the-fly
394 assert(pcell.depth >= this->paddingDepth());
395 if(!stencil || stencil->Grid() != (GridBase*)pcell.grids.back() ) generateStencil((GridBase*)pcell.grids.back());
396 return *stencil;
397 }
  // Number of shift vectors used to build the most recent stencil
  size_t Nshift() const{ return nshift; }

  // Shift pattern of the stencil; defined by the concrete workspace
  virtual std::vector<Coordinate> getShifts() const = 0;
  virtual int paddingDepth() const = 0; //padding depth required
402
404 };
405
  //This workspace allows the sharing of a common PaddedCell object between multiple stencil workspaces
  // NOTE(review): the class declaration line itself was dropped by the
  // extraction that produced this file.
  std::vector<WilsonLoopPaddedStencilWorkspace*> stencil_wk; // owned; deleted in the destructor
  std::unique_ptr<PaddedCell> pcell; // shared padded cell, built lazily by generatePcell
410
411 void generatePcell(GridBase* unpadded_grid){
412 assert(stencil_wk.size());
413 int max_depth = 0;
414 for(auto const &s : stencil_wk) max_depth=std::max(max_depth, s->paddingDepth());
415
416 pcell.reset(new PaddedCell(max_depth, dynamic_cast<GridCartesian*>(unpadded_grid)));
417 }
418
419 public:
420 //Add a stencil definition. This should be done before the first call to retrieve a stencil object.
421 //Takes ownership of the pointer
423 assert(!pcell);
424 stencil_wk.push_back(stencil);
425 }
426
427 const GeneralLocalStencil & getStencil(const size_t stencil_idx, GridBase* unpadded_grid){
428 if(!pcell || pcell->unpadded_grid != unpadded_grid) generatePcell(unpadded_grid);
429 return stencil_wk[stencil_idx]->getStencil(*pcell);
430 }
431 const PaddedCell & getPaddedCell(GridBase* unpadded_grid){
432 if(!pcell || pcell->unpadded_grid != unpadded_grid) generatePcell(unpadded_grid);
433 return *pcell;
434 }
435
437 for(auto &s : stencil_wk) delete s;
438 }
439 };
440
  //A workspace class allowing reuse of the stencil
  // NOTE(review): the class declaration line itself was dropped by the
  // extraction that produced this file; this is the staple specialisation.
  public:
  // Emits six stencil entries per ordered (mu,nu) pair, matching the read
  // order used in StaplePaddedAll: three for the upper staple, three for the
  // lower staple.
  std::vector<Coordinate> getShifts() const override{
    std::vector<Coordinate> shifts;
    for(int mu=0;mu<Nd;mu++){
      for(int nu=0;nu<Nd;nu++){
        if(nu != mu){
          Coordinate shift_0(Nd,0);
          Coordinate shift_mu(Nd,0); shift_mu[mu]=1;
          Coordinate shift_nu(Nd,0); shift_nu[nu]=1;
          Coordinate shift_mnu(Nd,0); shift_mnu[nu]=-1;
          Coordinate shift_mnu_pmu(Nd,0); shift_mnu_pmu[nu]=-1; shift_mnu_pmu[mu]=1;

          //U_nu(x+mu)U^dag_mu(x+nu) U^dag_nu(x)
          shifts.push_back(shift_0);
          shifts.push_back(shift_nu);
          shifts.push_back(shift_mu);

          //U_nu^dag(x-nu+mu) U_mu^dag(x-nu) U_nu(x-nu)
          shifts.push_back(shift_mnu);
          shifts.push_back(shift_mnu);
          shifts.push_back(shift_mnu_pmu);
        }
      }
    }
    return shifts;
  }

  // every shift above moves at most one site in any direction
  int paddingDepth() const override{ return 1; }
471 };
472
  //Padded cell implementation of the staple method for all mu, summed over nu != mu
  //staple: output staple for each mu, summed over nu != mu (Nd)
  //U_padded: the gauge link fields padded out using the PaddedCell class
  //Cell: the padded cell class
  // Convenience overload: builds a throwaway workspace whose stencil is
  // generated on first use, then delegates to the stencil-taking overload.
  static void StaplePaddedAll(std::vector<GaugeMat> &staple, const std::vector<GaugeMat> &U_padded, const PaddedCell &Cell) {
    StaplePaddedAllWorkspace wk;
    StaplePaddedAll(staple,U_padded,Cell,wk.getStencil(Cell));
  }
481
  //Padded cell implementation of the staple method for all mu, summed over nu != mu
  //staple: output staple for each mu, summed over nu != mu (Nd)
  //U_padded: the gauge link fields padded out using the PaddedCell class
  //Cell: the padded cell class
  //gStencil: the precomputed generalized local stencil for the staple
  static void StaplePaddedAll(std::vector<GaugeMat> &staple, const std::vector<GaugeMat> &U_padded, const PaddedCell &Cell, const GeneralLocalStencil &gStencil)
  {
    double t0 = usecond();
    assert(U_padded.size() == Nd); assert(staple.size() == Nd);
    assert(U_padded[0].Grid() == (GridBase*)Cell.grids.back());
    assert(Cell.depth >= 1);
    GridBase *ggrid = U_padded[0].Grid(); //padded cell grid

    // stencil entries are grouped by mu; stride between consecutive groups
    int shift_mu_off = gStencil._npoints/Nd;

    //Open views to padded gauge links and keep open over mu loop
    // NOTE(review): the typedef of GaugeViewType was dropped from this
    // extract; presumably declared just above — confirm against upstream.
    size_t vsize = Nd*sizeof(GaugeViewType);
    GaugeViewType* Ug_dirs_v_host = (GaugeViewType*)malloc(vsize);
    for(int i=0;i<Nd;i++) Ug_dirs_v_host[i] = U_padded[i].View(AcceleratorRead);
    GaugeViewType* Ug_dirs_v = (GaugeViewType*)acceleratorAllocDevice(vsize);
    acceleratorCopyToDevice(Ug_dirs_v_host,Ug_dirs_v,vsize);

    GaugeMat gStaple(ggrid);

    int outer_off = 0;
    for(int mu=0;mu<Nd;mu++){
      { //view scope
        autoView( gStaple_v , gStaple, AcceleratorWrite);
        auto gStencil_v = gStencil.View(AcceleratorRead);

        accelerator_for(ss, ggrid->oSites(), (size_t)ggrid->Nsimd(), {
          decltype(coalescedRead(Ug_dirs_v[0][0])) stencil_ss;
          stencil_ss = Zero();
          int off = outer_off;

          for(int nu=0;nu<Nd;nu++){
            if(nu != mu){
              // upper staple: three links, the first two daggered
              GeneralStencilEntry const* e = gStencil_v.GetEntry(off++,ss);
              auto U0 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
              e = gStencil_v.GetEntry(off++,ss);
              auto U1 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
              e = gStencil_v.GetEntry(off++,ss);
              auto U2 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);

              stencil_ss = stencil_ss + U2 * U1 * U0;

              // lower staple: same three-entry pattern, conjugation reversed
              e = gStencil_v.GetEntry(off++,ss);
              U0 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
              e = gStencil_v.GetEntry(off++,ss);
              U1 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
              e = gStencil_v.GetEntry(off++,ss);
              U2 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));

              stencil_ss = stencil_ss + U2 * U1 * U0;
            }
          }

          coalescedWrite(gStaple_v[ss],stencil_ss);
        }
        );
      } //ensure views are all closed!

      // trim the padded result back down to the unpadded grid
      staple[mu] = Cell.Extract(gStaple);
      outer_off += shift_mu_off;
    }//mu loop

    for(int i=0;i<Nd;i++) Ug_dirs_v_host[i].ViewClose();
    free(Ug_dirs_v_host);
    acceleratorFreeDevice(Ug_dirs_v);

    double t1=usecond();

    std::cout << GridLogPerformance << "StaplePaddedAll timing:" << (t1-t0)/1000 << "ms" << std::endl;
  }
557
558
560 // the sum over all staples on each site in direction mu,nu, upper part
562 static void StapleUpper(GaugeMat &staple, const GaugeLorentz &Umu, int mu,
563 int nu) {
564 if (nu != mu) {
565 GridBase *grid = Umu.Grid();
566
567 std::vector<GaugeMat> U(Nd, grid);
568 for (int d = 0; d < Nd; d++) {
569 U[d] = PeekIndex<LorentzIndex>(Umu, d);// some redundant copies
570 }
571
572 // mu
573 // ^
574 // |__> nu
575
576 // __
577 // |
578 // __|
579 //
580
581 staple = Gimpl::ShiftStaple(
582 Gimpl::CovShiftForward(
583 U[nu], nu,
584 Gimpl::CovShiftBackward(
585 U[mu], mu, Gimpl::CovShiftIdentityBackward(U[nu], nu))),
586 mu);
587 }
588 }
589
591 // the sum over all staples on each site in direction mu,nu, lower part
593 static void StapleLower(GaugeMat &staple, const GaugeLorentz &Umu, int mu,
594 int nu) {
595 if (nu != mu) {
596 GridBase *grid = Umu.Grid();
597
598 std::vector<GaugeMat> U(Nd, grid);
599 for (int d = 0; d < Nd; d++) {
600 U[d] = PeekIndex<LorentzIndex>(Umu, d);// some redundant copies
601 }
602
603 // mu
604 // ^
605 // |__> nu
606
607 // __
608 // |
609 // |__
610 //
611 //
612 staple = Gimpl::ShiftStaple(
613 Gimpl::CovShiftBackward(U[nu], nu,
614 Gimpl::CovShiftBackward(U[mu], mu, U[nu])),
615 mu);
616
617 }
618 }
619
  // Field Strength: clover-leaf F_munu from upper minus lower staples,
  // explicitly anti-hermitised.
  static void FieldStrength(GaugeMat &FS, const GaugeLorentz &Umu, int mu, int nu){
      // Fmn +--<--+  Ut +--<--+
      //     |     |     |     |
      //  (x)+-->--+     +-->--+(x)  - h.c.
      //     |     |     |     |
      //     +--<--+     +--<--+

      GaugeMat Vup(Umu.Grid()), Vdn(Umu.Grid());
      StapleUpper(Vup, Umu, mu, nu);
      StapleLower(Vdn, Umu, mu, nu);
      GaugeMat v = Vup - Vdn;
      GaugeMat u = PeekIndex<LorentzIndex>(Umu, mu);  // some redundant copies
      GaugeMat vu = v*u;
      //FS = 0.25*Ta(u*v + Cshift(vu, mu, -1));
      // 0.125 presumably averages the four leaves times the two terms of the
      // anti-hermitian combination — confirm against upstream conventions
      FS = (u*v + Gimpl::CshiftLink(vu, mu, -1));
      FS = 0.125*(FS - adj(FS));
  }
640
642 // 4d topological charge
643 assert(Nd==4);
644 // Bx = -iF(y,z), By = -iF(z,y), Bz = -iF(x,y)
645 GaugeMat Bx(U.Grid()), By(U.Grid()), Bz(U.Grid());
646 FieldStrength(Bx, U, Ydir, Zdir);
647 FieldStrength(By, U, Zdir, Xdir);
648 FieldStrength(Bz, U, Xdir, Ydir);
649
650 // Ex = -iF(t,x), Ey = -iF(t,y), Ez = -iF(t,z)
651 GaugeMat Ex(U.Grid()), Ey(U.Grid()), Ez(U.Grid());
652 FieldStrength(Ex, U, Tdir, Xdir);
653 FieldStrength(Ey, U, Tdir, Ydir);
654 FieldStrength(Ez, U, Tdir, Zdir);
655
656 double coeff = 8.0/(32.0*M_PI*M_PI);
657
658 ComplexField qfield = coeff*trace(Bx*Ex + By*Ey + Bz*Ez);
659 auto Tq = sum(qfield);
660 return TensorRemove(Tq).real();
661 }
662
663
  //Clover-leaf Wilson loop combination for arbitrary mu-extent M and nu extent N, mu >= nu
  //cf https://arxiv.org/pdf/hep-lat/9701012.pdf Eq 7 for 1x2 Wilson loop
  //Clockwise ordering
  // FS receives the sum of the four MxN loops that share site x as a corner.
  static void CloverleafMxN(GaugeMat &FS, const GaugeMat &Umu, const GaugeMat &Unu, int mu, int nu, int M, int N){
    // Shorthand covariant shifts: F*/B* move forward/backward along mu or nu;
    // the *I forms are the single-link seeds that start each path.
#define Fmu(A) Gimpl::CovShiftForward(Umu, mu, A)
#define Bmu(A) Gimpl::CovShiftBackward(Umu, mu, A)
#define Fnu(A) Gimpl::CovShiftForward(Unu, nu, A)
#define Bnu(A) Gimpl::CovShiftBackward(Unu, nu, A)
#define FmuI Gimpl::CovShiftIdentityForward(Umu, mu)
#define BmuI Gimpl::CovShiftIdentityBackward(Umu, mu)
#define FnuI Gimpl::CovShiftIdentityForward(Unu, nu)
#define BnuI Gimpl::CovShiftIdentityBackward(Unu, nu)

    //Upper right loop
    GaugeMat tmp = BmuI;
    for(int i=1;i<M;i++)
      tmp = Bmu(tmp);
    for(int j=0;j<N;j++)
      tmp = Bnu(tmp);
    for(int i=0;i<M;i++)
      tmp = Fmu(tmp);
    for(int j=0;j<N;j++)
      tmp = Fnu(tmp);

    FS = tmp;

    //Upper left loop
    tmp = BnuI;
    for(int j=1;j<N;j++)
      tmp = Bnu(tmp);
    for(int i=0;i<M;i++)
      tmp = Fmu(tmp);
    for(int j=0;j<N;j++)
      tmp = Fnu(tmp);
    for(int i=0;i<M;i++)
      tmp = Bmu(tmp);

    FS = FS + tmp;

    //Lower right loop
    tmp = FnuI;
    for(int j=1;j<N;j++)
      tmp = Fnu(tmp);
    for(int i=0;i<M;i++)
      tmp = Bmu(tmp);
    for(int j=0;j<N;j++)
      tmp = Bnu(tmp);
    for(int i=0;i<M;i++)
      tmp = Fmu(tmp);

    FS = FS + tmp;

    //Lower left loop
    tmp = FmuI;
    for(int i=1;i<M;i++)
      tmp = Fmu(tmp);
    for(int j=0;j<N;j++)
      tmp = Fnu(tmp);
    for(int i=0;i<M;i++)
      tmp = Bmu(tmp);
    for(int j=0;j<N;j++)
      tmp = Bnu(tmp);

    FS = FS + tmp;

    // scrub the helper macros so they do not leak into the rest of the file
#undef Fmu
#undef Bmu
#undef Fnu
#undef Bnu
#undef FmuI
#undef BmuI
#undef FnuI
#undef BnuI
  }
738
739 //Field strength from MxN Wilson loop
740 //Note F_numu = - F_munu
741 static void FieldStrengthMxN(GaugeMat &FS, const GaugeLorentz &U, int mu, int nu, int M, int N){
744 if(M == N){
745 GaugeMat F(Umu.Grid());
746 CloverleafMxN(F, Umu, Unu, mu, nu, M, N);
747 FS = 0.125 * ( F - adj(F) );
748 }else{
749 //Average over both orientations
750 GaugeMat horizontal(Umu.Grid()), vertical(Umu.Grid());
751 CloverleafMxN(horizontal, Umu, Unu, mu, nu, M, N);
752 CloverleafMxN(vertical, Umu, Unu, mu, nu, N, M);
753 FS = 0.0625 * ( horizontal - adj(horizontal) + vertical - adj(vertical) );
754 }
755 }
756
  //Topological charge contribution from MxN Wilson loops
  //cf https://arxiv.org/pdf/hep-lat/9701012.pdf Eq 6
  //output is the charge by timeslice: sum over timeslices to obtain the total
  static std::vector<Real> TimesliceTopologicalChargeMxN(const GaugeLorentz &U, int M, int N){
    assert(Nd == 4);
    // F[mu][nu] holds the MxN field strength per plane, allocated on demand
    // for nu > mu only and freed before returning
    std::vector<std::vector<GaugeMat*> > F(Nd,std::vector<GaugeMat*>(Nd,nullptr));
    //Note F_numu = - F_munu
    //hence we only need to loop over mu,nu,rho,sigma that aren't related by permuting mu,nu or rho,sigma
    //Use nu > mu
    for(int mu=0;mu<Nd-1;mu++){
      for(int nu=mu+1; nu<Nd; nu++){
        F[mu][nu] = new GaugeMat(U.Grid());
        FieldStrengthMxN(*F[mu][nu], U, mu, nu, M, N);
      }
    }
    Real coeff = -1./(32 * M_PI*M_PI * M*M * N*N); //overall sign to match CPS and Grid conventions, possibly related to time direction = 3 vs 0

    // the three independent pairings of {0,1,2,3} and their epsilon signs
    static const int combs[3][4] = { {0,1,2,3}, {0,2,1,3}, {0,3,1,2} };
    static const int signs[3] = { 1, -1, 1 }; //epsilon_{mu nu rho sigma}

    ComplexField fsum(U.Grid());
    fsum = Zero();
    for(int c=0;c<3;c++){
      int mu = combs[c][0], nu = combs[c][1], rho = combs[c][2], sigma = combs[c][3];
      int eps = signs[c];
      // the factor 8 presumably absorbs the permutation multiplicity of each
      // pairing — see Eq 6 of the reference
      fsum = fsum + (8. * coeff * eps) * trace( (*F[mu][nu]) * (*F[rho][sigma]) );
    }

    for(int mu=0;mu<Nd-1;mu++)
      for(int nu=mu+1; nu<Nd; nu++)
        delete F[mu][nu];

    // reduce to one value per timeslice (slice direction = time)
    typedef typename ComplexField::scalar_object sobj;
    std::vector<sobj> Tq;
    sliceSum(fsum, Tq, Nd-1);

    std::vector<Real> out(Tq.size());
    for(int t=0;t<Tq.size();t++) out[t] = TensorRemove(Tq[t]).real();
    return out;
  }
797 static Real TopologicalChargeMxN(const GaugeLorentz &U, int M, int N){
798 std::vector<Real> Tq = TimesliceTopologicalChargeMxN(U,M,N);
799 Real out(0);
800 for(int t=0;t<Tq.size();t++) out += Tq[t];
801 return out;
802 }
803
804 //Generate the contributions to the 5Li topological charge from Wilson loops of the following sizes
805 //Use coefficients from hep-lat/9701012
806 //1x1 : c1=(19.-55.*c5)/9.
807 //2x2 : c2=(1-64.*c5)/9.
808 //1x2 : c3=(-64.+640.*c5)/45.
809 //1x3 : c4=1./5.-2.*c5
810 //3x3 : c5=1./20.
811 //Output array outer index contains the loops in the above order
812 //Inner index is the time coordinate
813 static std::vector<std::vector<Real> > TimesliceTopologicalCharge5LiContributions(const GaugeLorentz &U){
814 static const int exts[5][2] = { {1,1}, {2,2}, {1,2}, {1,3}, {3,3} };
815 std::vector<std::vector<Real> > out(5);
816 for(int i=0;i<5;i++){
817 out[i] = TimesliceTopologicalChargeMxN(U,exts[i][0],exts[i][1]);
818 }
819 return out;
820 }
821
822 static std::vector<Real> TopologicalCharge5LiContributions(const GaugeLorentz &U){
823 static const int exts[5][2] = { {1,1}, {2,2}, {1,2}, {1,3}, {3,3} };
824 std::vector<Real> out(5);
825 std::cout << GridLogMessage << "Computing topological charge" << std::endl;
826 for(int i=0;i<5;i++){
827 out[i] = TopologicalChargeMxN(U,exts[i][0],exts[i][1]);
828 std::cout << GridLogMessage << exts[i][0] << "x" << exts[i][1] << " Wilson loop contribution " << out[i] << std::endl;
829 }
830 return out;
831 }
832
833 //Compute the 5Li topological charge
834 static std::vector<Real> TimesliceTopologicalCharge5Li(const GaugeLorentz &U){
835 std::vector<std::vector<Real> > loops = TimesliceTopologicalCharge5LiContributions(U);
836
837 double c5=1./20.;
838 double c4=1./5.-2.*c5;
839 double c3=(-64.+640.*c5)/45.;
840 double c2=(1-64.*c5)/9.;
841 double c1=(19.-55.*c5)/9.;
842
843 int Lt = loops[0].size();
844 std::vector<Real> out(Lt,0.);
845 for(int t=0;t<Lt;t++)
846 out[t] += c1*loops[0][t] + c2*loops[1][t] + c3*loops[2][t] + c4*loops[3][t] + c5*loops[4][t];
847 return out;
848 }
849
851 std::vector<Real> Qt = TimesliceTopologicalCharge5Li(U);
852 Real Q = 0.;
853 for(int t=0;t<Qt.size();t++) Q += Qt[t];
854 std::cout << GridLogMessage << "5Li Topological charge: " << Q << std::endl;
855 return Q;
856 }
857
858
859
860
  // Similar to above for rectangle is required
  // Sum of the two directed 2x1 rectangles (mu-long and nu-long) in the
  // mu,nu plane; each is a forward path times the adjoint of the return path.
  static void dirRectangle(GaugeMat &rect, const std::vector<GaugeMat> &U,
                           const int mu, const int nu) {
    rect = Gimpl::CovShiftForward(
               U[mu], mu, Gimpl::CovShiftForward(U[mu], mu, U[nu])) * // ->->|
           adj(Gimpl::CovShiftForward(
               U[nu], nu, Gimpl::CovShiftForward(U[mu], mu, U[mu])));
    rect = rect +
           Gimpl::CovShiftForward(
               U[mu], mu, Gimpl::CovShiftForward(U[nu], nu, U[nu])) * // ->||
           adj(Gimpl::CovShiftForward(
               U[nu], nu, Gimpl::CovShiftForward(U[nu], nu, U[mu])));
  }
877 const std::vector<GaugeMat> &U, const int mu,
878 const int nu) {
879 GaugeMat sp(U[0].Grid());
880 dirRectangle(sp, U, mu, nu);
881 rect = trace(sp);
882 }
883 static void siteRectangle(ComplexField &Rect,
884 const std::vector<GaugeMat> &U) {
885 ComplexField siteRect(U[0].Grid());
886 Rect = Zero();
887 for (int mu = 1; mu < Nd; mu++) {
888 for (int nu = 0; nu < mu; nu++) {
889 traceDirRectangle(siteRect, U, mu, nu);
890 Rect = Rect + siteRect;
891 }
892 }
893 }
894
896 // sum over all x,y,z,t and over all planes of plaquette
898 static RealD sumRectangle(const GaugeLorentz &Umu) {
899 std::vector<GaugeMat> U(Nd, Umu.Grid());
900
901 for (int mu = 0; mu < Nd; mu++) {
902 U[mu] = PeekIndex<LorentzIndex>(Umu, mu);
903 }
904
905 ComplexField Rect(Umu.Grid());
906
907 siteRectangle(Rect, U);
908
909 auto Tp = sum(Rect);
910 auto p = TensorRemove(Tp);
911 return p.real();
912 }
913
914 // average over all x,y,z,t and over all planes of plaquette
916 static RealD avgRectangle(const GaugeLorentz &Umu) {
917
918 RealD sumrect = sumRectangle(Umu);
919
920 double vol = Umu.Grid()->gSites();
921
922 double faces = (1.0 * Nd * (Nd - 1)); // 2 distinct orientations summed
923
924 return sumrect / vol / faces / Nc; // Nd , Nc dependent... FIXME
925 }
926
  // 'Double link' U2(x) = U(x) U(x+mu): two hops along mu, precomputed once
  // and reused by the optimised rectangular staple below.
  static void RectStapleDouble(GaugeMat &U2, const GaugeMat &U, int mu) {
    U2 = U * Gimpl::CshiftLink(U, mu, 1);
  }
933
  // Hop by two optimisation strategy. Use RectStapleDouble to obtain 'U2'
  // (the precomputed two-hop links along each direction).
  static void RectStapleOptimised(GaugeMat &Stap, const std::vector<GaugeMat> &U2,
                                  const std::vector<GaugeMat> &U, int mu) {

    Stap = Zero();

    GridBase *grid = U[0].Grid();

    GaugeMat Staple2x1(grid);
    GaugeMat tmp(grid);

    for (int nu = 0; nu < Nd; nu++) {
      if (nu != mu) {

        // Up staple    ___ ___
        //              |         |
        tmp = Gimpl::CshiftLink(adj(U[nu]), nu, -1);
        tmp = adj(U2[mu]) * tmp;
        tmp = Gimpl::CshiftLink(tmp, mu, -2);

        Staple2x1 = Gimpl::CovShiftForward(U[nu], nu, tmp);

        // Down staple
        //              |___ ___|
        //
        tmp = adj(U2[mu]) * U[nu];
        Staple2x1 += Gimpl::CovShiftBackward(U[nu], nu, Gimpl::CshiftLink(tmp, mu, -2));

        //              ___ ___
        //              |    ___|
        //              |___ ___|
        //

        Stap += Gimpl::CshiftLink(Gimpl::CovShiftForward(U[mu], mu, Staple2x1), mu, 1);

        //              ___ ___
        //              |___    |
        //              |___ ___|
        //

        // tmp= Staple2x1* Cshift(U[mu],mu,-2);
        // Stap+= Cshift(tmp,mu,1) ;
        Stap += Gimpl::CshiftLink(Staple2x1, mu, 1) * Gimpl::CshiftLink(U[mu], mu, -1);
        ;

        //       --
        //      |  |
        //
        //      |  |

        tmp = Gimpl::CshiftLink(adj(U2[nu]), nu, -2);
        tmp = Gimpl::CovShiftBackward(U[mu], mu, tmp);
        tmp = U2[nu] * Gimpl::CshiftLink(tmp, nu, 2);
        Stap += Gimpl::CshiftLink(tmp, mu, 1);

        //      |  |
        //
        //      |  |
        //       --

        tmp = Gimpl::CovShiftBackward(U[mu], mu, U2[nu]);
        tmp = adj(U2[nu]) * tmp;
        tmp = Gimpl::CshiftLink(tmp, nu, -2);
        Stap += Gimpl::CshiftLink(tmp, mu, 1);
      }
    }
  }
1003
  // Straightforward rectangular staple: the six 2x1/1x2 staples around the
  // mu-link, summed over every nu != mu. Recomputes the per-direction links
  // on each call (cf. RectStapleOptimised for the cached-U2 variant).
  static void RectStapleUnoptimised(GaugeMat &Stap, const GaugeLorentz &Umu,
                                    int mu) {
    GridBase *grid = Umu.Grid();

    std::vector<GaugeMat> U(Nd, grid);
    for (int d = 0; d < Nd; d++) {
      U[d] = PeekIndex<LorentzIndex>(Umu, d);
    }

    Stap = Zero();

    for (int nu = 0; nu < Nd; nu++) {
      if (nu != mu) {
        //           __ ___
        //          |    __ |
        //
        Stap += Gimpl::ShiftStaple(
            Gimpl::CovShiftForward(
                U[mu], mu,
                Gimpl::CovShiftForward(
                    U[nu], nu,
                    Gimpl::CovShiftBackward(
                        U[mu], mu,
                        Gimpl::CovShiftBackward(
                            U[mu], mu,
                            Gimpl::CovShiftIdentityBackward(U[nu], nu))))),
            mu);

        //              __
        //          |__ __ |

        Stap += Gimpl::ShiftStaple(
            Gimpl::CovShiftForward(
                U[mu], mu,
                Gimpl::CovShiftBackward(
                    U[nu], nu,
                    Gimpl::CovShiftBackward(
                        U[mu], mu, Gimpl::CovShiftBackward(U[mu], mu, U[nu])))),
            mu);

        //          __
        //          |__ __ |

        Stap += Gimpl::ShiftStaple(
            Gimpl::CovShiftBackward(
                U[nu], nu,
                Gimpl::CovShiftBackward(
                    U[mu], mu,
                    Gimpl::CovShiftBackward(
                        U[mu], mu, Gimpl::CovShiftForward(U[nu], nu, U[mu])))),
            mu);

        //           __ ___
        //          |__    |

        Stap += Gimpl::ShiftStaple(
            Gimpl::CovShiftForward(
                U[nu], nu,
                Gimpl::CovShiftBackward(
                    U[mu], mu,
                    Gimpl::CovShiftBackward(
                        U[mu], mu, Gimpl::CovShiftBackward(U[nu], nu, U[mu])))),
            mu);

        //       --
        //      |  |
        //
        //      |  |

        Stap += Gimpl::ShiftStaple(
            Gimpl::CovShiftForward(
                U[nu], nu,
                Gimpl::CovShiftForward(
                    U[nu], nu,
                    Gimpl::CovShiftBackward(
                        U[mu], mu,
                        Gimpl::CovShiftBackward(
                            U[nu], nu,
                            Gimpl::CovShiftIdentityBackward(U[nu], nu))))),
            mu);

        //      |  |
        //
        //      |  |
        //       --

        Stap += Gimpl::ShiftStaple(
            Gimpl::CovShiftBackward(
                U[nu], nu,
                Gimpl::CovShiftBackward(
                    U[nu], nu,
                    Gimpl::CovShiftBackward(
                        U[mu], mu, Gimpl::CovShiftForward(U[nu], nu, U[nu])))),
            mu);
      }
    }
  }
1101
  // Rectangular staple, simple entry point (recomputes links every call).
  static void RectStaple(GaugeMat &Stap, const GaugeLorentz &Umu, int mu) {
    RectStapleUnoptimised(Stap, Umu, mu);
  }
  // Rectangular staple using precomputed double links U2 (see
  // RectStapleDouble). NOTE(review): the Umu argument is unused here.
  static void RectStaple(const GaugeLorentz &Umu, GaugeMat &Stap,
                         std::vector<GaugeMat> &U2, std::vector<GaugeMat> &U,
                         int mu) {
    RectStapleOptimised(Stap, U2, U, mu);
  }
1110
1111 //Compute the rectangular staples for all orientations
1112 //Stap : Array of staples (Nd)
1113 //U: Gauge links in each direction (Nd)
1115 static void RectStapleAll(std::vector<GaugeMat> &Stap, const std::vector<GaugeMat> &U){
1116 assert(Stap.size() == Nd); assert(U.size() == Nd);
1117 std::vector<GaugeMat> U2(Nd,U[0].Grid());
1118 for(int mu=0;mu<Nd;mu++) RectStapleDouble(U2[mu], U[mu], mu);
1119 for(int mu=0;mu<Nd;mu++) RectStapleOptimised(Stap[mu], U2, U, mu);
1120 }
1121
1122 //A workspace class allowing reuse of the stencil
1124 public:
    // Shift table consumed by RectStaplePaddedAll.
    //
    // For every ordered pair (mu,nu) with nu != mu, six groups of five
    // coordinate shifts are appended — one group per rectangular-staple
    // orientation; the comment above each group spells out the link product
    // the kernel builds from those five entries.
    //
    // NOTE(contract): RectStaplePaddedAll reads the stencil entries strictly
    // sequentially (five GetEntry calls per orientation, six orientations per
    // nu), so the grouping and ordering of push_back calls here must match
    // that kernel exactly; reordering silently corrupts the staple.
    std::vector<Coordinate> getShifts() const override{
      std::vector<Coordinate> shifts;
      for (int mu = 0; mu < Nd; mu++){
        for (int nu = 0; nu < Nd; nu++) {
          if (nu != mu) {
            // Displacement of mushift sites along mu and nushift along nu
            auto genShift = [&](int mushift,int nushift){
              Coordinate out(Nd,0); out[mu]=mushift; out[nu]=nushift; return out;
            };

            //tmp6 = tmp5(x+mu) = U_mu(x+mu)U_nu(x+2mu)U_mu^dag(x+nu+mu) U_mu^dag(x+nu) U_nu^dag(x)
            shifts.push_back(genShift(0,0));
            shifts.push_back(genShift(0,+1));
            shifts.push_back(genShift(+1,+1));
            shifts.push_back(genShift(+2,0));
            shifts.push_back(genShift(+1,0));

            //tmp5 = tmp4(x+mu) = U_mu(x+mu)U^dag_nu(x-nu+2mu)U^dag_mu(x-nu+mu)U^dag_mu(x-nu)U_nu(x-nu)
            shifts.push_back(genShift(0,-1));
            shifts.push_back(genShift(0,-1));
            shifts.push_back(genShift(+1,-1));
            shifts.push_back(genShift(+2,-1));
            shifts.push_back(genShift(+1,0));

            //tmp5 = tmp4(x+mu) = U^dag_nu(x-nu+mu)U^dag_mu(x-nu)U^dag_mu(x-mu-nu)U_nu(x-mu-nu)U_mu(x-mu)
            shifts.push_back(genShift(-1,0));
            shifts.push_back(genShift(-1,-1));
            shifts.push_back(genShift(-1,-1));
            shifts.push_back(genShift(0,-1));
            shifts.push_back(genShift(+1,-1));

            //tmp5 = tmp4(x+mu) = U_nu(x+mu)U_mu^dag(x+nu)U_mu^dag(x-mu+nu)U_nu^dag(x-mu)U_mu(x-mu)
            shifts.push_back(genShift(-1,0));
            shifts.push_back(genShift(-1,0));
            shifts.push_back(genShift(-1,+1));
            shifts.push_back(genShift(0,+1));
            shifts.push_back(genShift(+1,0));

            //tmp6 = tmp5(x+mu) = U_nu(x+mu)U_nu(x+mu+nu)U_mu^dag(x+2nu)U_nu^dag(x+nu)U_nu^dag(x)
            shifts.push_back(genShift(0,0));
            shifts.push_back(genShift(0,+1));
            shifts.push_back(genShift(0,+2));
            shifts.push_back(genShift(+1,+1));
            shifts.push_back(genShift(+1,0));

            //tmp5 = tmp4(x+mu) = U_nu^dag(x+mu-nu)U_nu^dag(x+mu-2nu)U_mu^dag(x-2nu)U_nu(x-2nu)U_nu(x-nu)
            shifts.push_back(genShift(0,-1));
            shifts.push_back(genShift(0,-2));
            shifts.push_back(genShift(0,-2));
            shifts.push_back(genShift(+1,-2));
            shifts.push_back(genShift(+1,-1));
          }
        }
      }
      return shifts;
    }
1180
1181 int paddingDepth() const override{ return 2; }
1182 };
1183
  //Padded cell implementation of the rectangular staple method for all mu, summed over nu != mu
  //staple: output staple for each mu, summed over nu != mu (Nd)
  //U_padded: the gauge link fields padded out using the PaddedCell class
  //Cell: the padded cell class
  //
  // Convenience overload: builds the stencil in a throw-away workspace. For
  // repeated invocations prefer the overload taking a GeneralLocalStencil so
  // the stencil construction cost is paid once.
  static void RectStaplePaddedAll(std::vector<GaugeMat> &staple, const std::vector<GaugeMat> &U_padded, const PaddedCell &Cell) {
    RectStaplePaddedAllWorkspace wk;
    RectStaplePaddedAll(staple,U_padded,Cell,wk.getStencil(Cell));
  }
1192
1193 //Padded cell implementation of the rectangular staple method for all mu, summed over nu != mu
1194 //staple: output staple for each mu, summed over nu != mu (Nd)
1195 //U_padded: the gauge link fields padded out using the PaddedCell class
1196 //Cell: the padded cell class
1197 //gStencil: the stencil
1198 static void RectStaplePaddedAll(std::vector<GaugeMat> &staple, const std::vector<GaugeMat> &U_padded, const PaddedCell &Cell, const GeneralLocalStencil &gStencil) {
1199 double t0 = usecond();
1200 assert(U_padded.size() == Nd); assert(staple.size() == Nd);
1201 assert(U_padded[0].Grid() == (GridBase*)Cell.grids.back());
1202 assert(Cell.depth >= 2);
1203 GridBase *ggrid = U_padded[0].Grid(); //padded cell grid
1204
1205 size_t nshift = gStencil._npoints;
1206 int mu_off_delta = nshift / Nd;
1207
1208 //Open views to padded gauge links and keep open over mu loop
1210 size_t vsize = Nd*sizeof(GaugeViewType);
1211 GaugeViewType* Ug_dirs_v_host = (GaugeViewType*)malloc(vsize);
1212 for(int i=0;i<Nd;i++) Ug_dirs_v_host[i] = U_padded[i].View(AcceleratorRead);
1213 GaugeViewType* Ug_dirs_v = (GaugeViewType*)acceleratorAllocDevice(vsize);
1214 acceleratorCopyToDevice(Ug_dirs_v_host,Ug_dirs_v,vsize);
1215
1216 GaugeMat gStaple(ggrid); //temp staple object on padded grid
1217
1218 int offset = 0;
1219 for(int mu=0; mu<Nd; mu++){
1220
1221 { //view scope
1222 autoView( gStaple_v , gStaple, AcceleratorWrite);
1223 auto gStencil_v = gStencil.View(AcceleratorRead);
1224
1225 accelerator_for(ss, ggrid->oSites(), (size_t)ggrid->Nsimd(), {
1226 decltype(coalescedRead(Ug_dirs_v[0][0])) stencil_ss;
1227 stencil_ss = Zero();
1228 int s=offset;
1229 for(int nu=0;nu<Nd;nu++){
1230 if(nu != mu){
1231 //tmp6 = tmp5(x+mu) = U_mu(x+mu)U_nu(x+2mu)U_mu^dag(x+nu+mu) U_mu^dag(x+nu) U_nu^dag(x)
1232 GeneralStencilEntry const* e = gStencil_v.GetEntry(s++,ss);
1233 auto U0 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
1234 e = gStencil_v.GetEntry(s++,ss);
1235 auto U1 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
1236 e = gStencil_v.GetEntry(s++,ss);
1237 auto U2 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
1238 e = gStencil_v.GetEntry(s++,ss);
1239 auto U3 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
1240 e = gStencil_v.GetEntry(s++,ss);
1241 auto U4 = coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd);
1242
1243 stencil_ss = stencil_ss + U4*U3*U2*U1*U0;
1244
1245 //tmp5 = tmp4(x+mu) = U_mu(x+mu)U^dag_nu(x-nu+2mu)U^dag_mu(x-nu+mu)U^dag_mu(x-nu)U_nu(x-nu)
1246 e = gStencil_v.GetEntry(s++,ss);
1247 U0 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
1248 e = gStencil_v.GetEntry(s++,ss);
1249 U1 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
1250 e = gStencil_v.GetEntry(s++,ss);
1251 U2 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
1252 e = gStencil_v.GetEntry(s++,ss);
1253 U3 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
1254 e = gStencil_v.GetEntry(s++,ss);
1255 U4 = coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd);
1256
1257 stencil_ss = stencil_ss + U4*U3*U2*U1*U0;
1258
1259 //tmp5 = tmp4(x+mu) = U^dag_nu(x-nu+mu)U^dag_mu(x-nu)U^dag_mu(x-mu-nu)U_nu(x-mu-nu)U_mu(x-mu)
1260 e = gStencil_v.GetEntry(s++,ss);
1261 U0 = coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd);
1262 e = gStencil_v.GetEntry(s++,ss);
1263 U1 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
1264 e = gStencil_v.GetEntry(s++,ss);
1265 U2 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
1266 e = gStencil_v.GetEntry(s++,ss);
1267 U3 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
1268 e = gStencil_v.GetEntry(s++,ss);
1269 U4 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
1270
1271 stencil_ss = stencil_ss + U4*U3*U2*U1*U0;
1272
1273 //tmp5 = tmp4(x+mu) = U_nu(x+mu)U_mu^dag(x+nu)U_mu^dag(x-mu+nu)U_nu^dag(x-mu)U_mu(x-mu)
1274 e = gStencil_v.GetEntry(s++,ss);
1275 U0 = coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd);
1276 e = gStencil_v.GetEntry(s++,ss);
1277 U1 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
1278 e = gStencil_v.GetEntry(s++,ss);
1279 U2 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
1280 e = gStencil_v.GetEntry(s++,ss);
1281 U3 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
1282 e = gStencil_v.GetEntry(s++,ss);
1283 U4 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
1284
1285 stencil_ss = stencil_ss + U4*U3*U2*U1*U0;
1286
1287 //tmp6 = tmp5(x+mu) = U_nu(x+mu)U_nu(x+mu+nu)U_mu^dag(x+2nu)U_nu^dag(x+nu)U_nu^dag(x)
1288 e = gStencil_v.GetEntry(s++,ss);
1289 U0 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
1290 e = gStencil_v.GetEntry(s++,ss);
1291 U1 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
1292 e = gStencil_v.GetEntry(s++,ss);
1293 U2 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
1294 e = gStencil_v.GetEntry(s++,ss);
1295 U3 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
1296 e = gStencil_v.GetEntry(s++,ss);
1297 U4 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
1298
1299 stencil_ss = stencil_ss + U4*U3*U2*U1*U0;
1300
1301 //tmp5 = tmp4(x+mu) = U_nu^dag(x+mu-nu)U_nu^dag(x+mu-2nu)U_mu^dag(x-2nu)U_nu(x-2nu)U_nu(x-nu)
1302 e = gStencil_v.GetEntry(s++,ss);
1303 U0 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
1304 e = gStencil_v.GetEntry(s++,ss);
1305 U1 = coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd);
1306 e = gStencil_v.GetEntry(s++,ss);
1307 U2 = adj(coalescedReadGeneralPermute(Ug_dirs_v[mu][e->_offset], e->_permute, Nd));
1308 e = gStencil_v.GetEntry(s++,ss);
1309 U3 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
1310 e = gStencil_v.GetEntry(s++,ss);
1311 U4 = adj(coalescedReadGeneralPermute(Ug_dirs_v[nu][e->_offset], e->_permute, Nd));
1312
1313 stencil_ss = stencil_ss + U4*U3*U2*U1*U0;
1314
1315 }
1316 }
1317 coalescedWrite(gStaple_v[ss],stencil_ss);
1318 }
1319 );
1320 offset += mu_off_delta;
1321 }//kernel/view scope
1322
1323 staple[mu] = Cell.Extract(gStaple);
1324 }//mu loop
1325
1326 for(int i=0;i<Nd;i++) Ug_dirs_v_host[i].ViewClose();
1327 free(Ug_dirs_v_host);
1328 acceleratorFreeDevice(Ug_dirs_v);
1329
1330 double t1 = usecond();
1331
1332 std::cout << GridLogPerformance << "RectStaplePaddedAll timings:" << (t1-t0)/1000 << "ms" << std::endl;
1333 }
1334
1335 //A workspace for reusing the PaddedCell and GeneralLocalStencil objects
1343
  //Compute the 1x1 and 1x2 staples for all orientations
  //Stap : Array of staples (Nd)
  //RectStap: Array of rectangular staples (Nd)
  //U: Gauge links in each direction (Nd)
  //
  // Convenience overload: builds a throw-away workspace. Prefer the overload
  // taking a StapleAndRectStapleAllWorkspace when calling repeatedly so the
  // PaddedCell and stencils are reused across calls.
  static void StapleAndRectStapleAll(std::vector<GaugeMat> &Stap, std::vector<GaugeMat> &RectStap, const std::vector<GaugeMat> &U){
    StapleAndRectStapleAllWorkspace wk;
    StapleAndRectStapleAll(Stap,RectStap,U,wk);
  }
1354
1356 //Compute the 1x1 and 1x2 staples for all orientations
1357 //Stap : Array of staples (Nd)
1358 //RectStap: Array of rectangular staples (Nd)
1359 //U: Gauge links in each direction (Nd)
1360 //wk: a workspace containing stored PaddedCell and GeneralLocalStencil objects to maximize reuse
1362 static void StapleAndRectStapleAll(std::vector<GaugeMat> &Stap, std::vector<GaugeMat> &RectStap, const std::vector<GaugeMat> &U, StapleAndRectStapleAllWorkspace &wk){
1363#if 0
1364 StapleAll(Stap, U);
1365 RectStapleAll(RectStap, U);
1366#else
1367 double t0 = usecond();
1368
1369 GridCartesian* unpadded_grid = dynamic_cast<GridCartesian*>(U[0].Grid());
1370 const PaddedCell &Ghost = wk.getPaddedCell(unpadded_grid);
1371
1372 CshiftImplGauge<Gimpl> cshift_impl;
1373 std::vector<GaugeMat> U_pad(Nd, Ghost.grids.back());
1374 for(int mu=0;mu<Nd;mu++) U_pad[mu] = Ghost.Exchange(U[mu], cshift_impl);
1375 double t1 = usecond();
1376 StaplePaddedAll(Stap, U_pad, Ghost, wk.getStencil(0,unpadded_grid) );
1377 double t2 = usecond();
1378 RectStaplePaddedAll(RectStap, U_pad, Ghost, wk.getStencil(1,unpadded_grid));
1379 double t3 = usecond();
1380 std::cout << GridLogPerformance << "StapleAndRectStapleAll timings: pad:" << (t1-t0)/1000 << "ms, staple:" << (t2-t1)/1000 << "ms, rect-staple:" << (t3-t2)/1000 << "ms" << std::endl;
1381#endif
1382 }
1383
1385 // Wilson loop of size (R1, R2), oriented in mu,nu plane
1387 static void wilsonLoop(GaugeMat &wl, const std::vector<GaugeMat> &U,
1388 const int Rmu, const int Rnu,
1389 const int mu, const int nu) {
1390 wl = U[nu];
1391
1392 for(int i = 0; i < Rnu-1; i++){
1393 wl = Gimpl::CovShiftForward(U[nu], nu, wl);
1394 }
1395
1396 for(int i = 0; i < Rmu; i++){
1397 wl = Gimpl::CovShiftForward(U[mu], mu, wl);
1398 }
1399
1400 for(int i = 0; i < Rnu; i++){
1401 wl = Gimpl::CovShiftBackward(U[nu], nu, wl);
1402 }
1403
1404 for(int i = 0; i < Rmu; i++){
1405 wl = Gimpl::CovShiftBackward(U[mu], mu, wl);
1406 }
1407 }
1408
1409 // trace of Wilson Loop oriented in mu,nu plane
1412 const std::vector<GaugeMat> &U,
1413 const int Rmu, const int Rnu,
1414 const int mu, const int nu) {
1415 GaugeMat sp(U[0].Grid());
1416 wilsonLoop(sp, U, Rmu, Rnu, mu, nu);
1417 wl = trace(sp);
1418 }
1419
1420 // sum over all planes of Wilson loop
1423 const std::vector<GaugeMat> &U,
1424 const int R1, const int R2) {
1425 LatticeComplex siteWl(U[0].Grid());
1426 Wl = Zero();
1427 for (int mu = 1; mu < U[0].Grid()->_ndimension; mu++) {
1428 for (int nu = 0; nu < mu; nu++) {
1429 traceWilsonLoop(siteWl, U, R1, R2, mu, nu);
1430 Wl = Wl + siteWl;
1431 traceWilsonLoop(siteWl, U, R2, R1, mu, nu);
1432 Wl = Wl + siteWl;
1433 }
1434 }
1435 }
1436
1437 // sum over planes of Wilson loop with length R1
1438 // in the time direction
1441 const std::vector<GaugeMat> &U,
1442 const int R1, const int R2) {
1443 LatticeComplex siteWl(U[0].Grid());
1444
1445 int ndim = U[0].Grid()->_ndimension;
1446
1447 Wl = Zero();
1448 for (int nu = 0; nu < ndim - 1; nu++) {
1449 traceWilsonLoop(siteWl, U, R1, R2, ndim-1, nu);
1450 Wl = Wl + siteWl;
1451 }
1452 }
1453
1454 // sum Wilson loop over all planes orthogonal to the time direction
1457 const std::vector<GaugeMat> &U,
1458 const int R1, const int R2) {
1459 LatticeComplex siteWl(U[0].Grid());
1460
1461 Wl = Zero();
1462 for (int mu = 1; mu < U[0].Grid()->_ndimension - 1; mu++) {
1463 for (int nu = 0; nu < mu; nu++) {
1464 traceWilsonLoop(siteWl, U, R1, R2, mu, nu);
1465 Wl = Wl + siteWl;
1466 traceWilsonLoop(siteWl, U, R2, R1, mu, nu);
1467 Wl = Wl + siteWl;
1468 }
1469 }
1470 }
1471
1472 // sum over all x,y,z,t and over all planes of Wilson loop
1475 const int R1, const int R2) {
1476 std::vector<GaugeMat> U(4, Umu.Grid());
1477
1478 for (int mu = 0; mu < Umu.Grid()->_ndimension; mu++) {
1479 U[mu] = PeekIndex<LorentzIndex>(Umu, mu);
1480 }
1481
1482 LatticeComplex Wl(Umu.Grid());
1483
1484 siteWilsonLoop(Wl, U, R1, R2);
1485
1486 TComplex Tp = sum(Wl);
1487 Complex p = TensorRemove(Tp);
1488 return p.real();
1489 }
1490
1491 // sum over all x,y,z,t and over all planes of timelike Wilson loop
1494 const int R1, const int R2) {
1495 std::vector<GaugeMat> U(4, Umu.Grid());
1496
1497 for (int mu = 0; mu < Umu.Grid()->_ndimension; mu++) {
1498 U[mu] = PeekIndex<LorentzIndex>(Umu, mu);
1499 }
1500
1501 LatticeComplex Wl(Umu.Grid());
1502
1503 siteTimelikeWilsonLoop(Wl, U, R1, R2);
1504
1505 TComplex Tp = sum(Wl);
1506 Complex p = TensorRemove(Tp);
1507 return p.real();
1508 }
1509
1510 // sum over all x,y,z,t and over all planes of spatial Wilson loop
1513 const int R1, const int R2) {
1514 std::vector<GaugeMat> U(4, Umu.Grid());
1515
1516 for (int mu = 0; mu < Umu.Grid()->_ndimension; mu++) {
1517 U[mu] = PeekIndex<LorentzIndex>(Umu, mu);
1518 }
1519
1520 LatticeComplex Wl(Umu.Grid());
1521
1522 siteSpatialWilsonLoop(Wl, U, R1, R2);
1523
1524 TComplex Tp = sum(Wl);
1525 Complex p = TensorRemove(Tp);
1526 return p.real();
1527 }
1528
1529 // average over all x,y,z,t and over all planes of Wilson loop
1532 const int R1, const int R2) {
1533 int ndim = Umu.Grid()->_ndimension;
1534 Real sumWl = sumWilsonLoop(Umu, R1, R2);
1535 Real vol = Umu.Grid()->gSites();
1536 Real faces = 1.0 * ndim * (ndim - 1);
1537 return sumWl / vol / faces / Nc; // Nc dependent... FIXME
1538 }
1539
1540 // average over all x,y,z,t and over all planes of timelike Wilson loop
1543 const int R1, const int R2) {
1544 int ndim = Umu.Grid()->_ndimension;
1545 Real sumWl = sumTimelikeWilsonLoop(Umu, R1, R2);
1546 Real vol = Umu.Grid()->gSites();
1547 Real faces = 1.0 * (ndim - 1);
1548 return sumWl / vol / faces / Nc; // Nc dependent... FIXME
1549 }
1550
1551 // average over all x,y,z,t and over all planes of spatial Wilson loop
1554 const int R1, const int R2) {
1555 int ndim = Umu.Grid()->_ndimension;
1556 Real sumWl = sumSpatialWilsonLoop(Umu, R1, R2);
1557 Real vol = Umu.Grid()->gSites();
1558 Real faces = 1.0 * (ndim - 1) * (ndim - 2);
1559 return sumWl / vol / faces / Nc; // Nc dependent... FIXME
1560 }
1561};
1562
1567
1569
1570#endif
void * acceleratorAllocDevice(size_t bytes)
void acceleratorCopyToDevice(void *from, void *to, size_t bytes)
#define accelerator_for(iterator, num, nsimd,...)
void acceleratorFreeDevice(void *ptr)
int signs[4]
Definition BGQQPX.h:536
#define U2
Definition BGQQPX.h:107
AcceleratorVector< int, MaxDims > Coordinate
Definition Coordinate.h:95
accelerator_inline Grid_simd2< S, V > trace(const Grid_simd2< S, V > &arg)
auto PeekIndex(const Lattice< vobj > &lhs, int i) -> Lattice< decltype(peekIndex< Index >(vobj(), i))>
Lattice< vobj > adj(const Lattice< vobj > &lhs)
vobj::scalar_object sum(const vobj *arg, Integer osites)
void sliceSum(const Lattice< vobj > &Data, std::vector< typename vobj::scalar_object > &result, int orthogdim)
#define autoView(l_v, l, mode)
GridLogger GridLogError(1, "Error", GridLogColours, "RED")
GridLogger GridLogPerformance(1, "Performance", GridLogColours, "GREEN")
GridLogger GridLogMessage(1, "Message", GridLogColours, "NORMAL")
@ AcceleratorRead
@ AcceleratorWrite
#define NAMESPACE_BEGIN(A)
Definition Namespace.h:35
#define NAMESPACE_END(A)
Definition Namespace.h:36
static constexpr int Nd
Definition QCD.h:52
static constexpr int Tp
Definition QCD.h:44
static constexpr int Nc
Definition QCD.h:50
static constexpr int Xdir
Definition QCD.h:36
static constexpr int Tdir
Definition QCD.h:39
static constexpr int Zdir
Definition QCD.h:38
Lattice< vTComplex > LatticeComplex
Definition QCD.h:359
static constexpr int Ydir
Definition QCD.h:37
iSinglet< Complex > TComplex
Definition QCD.h:273
auto peekLorentz(const vobj &rhs, int i) -> decltype(PeekIndex< LorentzIndex >(rhs, 0))
Definition QCD.h:446
RealF Real
Definition Simd.h:65
std::complex< RealD > ComplexD
Definition Simd.h:79
std::complex< Real > Complex
Definition Simd.h:80
double RealD
Definition Simd.h:61
accelerator_inline void coalescedWrite(vobj &__restrict__ vec, const vobj &__restrict__ extracted, int lane=0)
Definition Tensor_SIMT.h:87
accelerator_inline std::enable_if<!isGridTensor< T >::value, T >::type TensorRemove(T arg)
double usecond(void)
Definition Timer.h:50
#define FnuI
WilsonLoops< PeriodicGimplR > SU2WilsonLoops
WilsonLoops< PeriodicGimplR > ColourWilsonLoops
#define Fnu(A)
#define FmuI
WilsonLoops< PeriodicGimplR > SU3WilsonLoops
#define BmuI
#define Bmu(A)
#define Fmu(A)
#define BnuI
#define Bnu(A)
WilsonLoops< PeriodicGimplR > U1WilsonLoops
static INTERNAL_PRECISION U
Definition Zolotarev.cc:230
static INTERNAL_PRECISION F
Definition Zolotarev.cc:230
#define M_PI
Definition Zolotarev.cc:41
accelerator_inline void push_back(const value &val)
Definition Coordinate.h:69
Lattice< SiteComplex > ComplexField
View_type View(int mode) const
int oSites(void) const
int Nsimd(void) const
static accelerator_inline constexpr int Nsimd(void)
SiteComplex::scalar_object scalar_object
Lattice< vobj > Extract(const Lattice< vobj > &in) const
Definition PaddedCell.h:287
std::vector< GridCartesian * > grids
Definition PaddedCell.h:235
Lattice< vobj > Exchange(const Lattice< vobj > &in, const CshiftImplBase< vobj > &cshift=CshiftImplDefault< vobj >()) const
Definition PaddedCell.h:304
std::vector< Coordinate > getShifts() const override
std::vector< Coordinate > getShifts() const override
const GeneralLocalStencil & getStencil(const PaddedCell &pcell)
virtual std::vector< Coordinate > getShifts() const =0
std::unique_ptr< GeneralLocalStencil > stencil
void generateStencil(GridBase *padded_grid)
const GeneralLocalStencil & getStencil(const size_t stencil_idx, GridBase *unpadded_grid)
void generatePcell(GridBase *unpadded_grid)
std::unique_ptr< PaddedCell > pcell
std::vector< WilsonLoopPaddedStencilWorkspace * > stencil_wk
void addStencil(WilsonLoopPaddedStencilWorkspace *stencil)
const PaddedCell & getPaddedCell(GridBase *unpadded_grid)
static Real avgTimelikeWilsonLoop(const GaugeLorentz &Umu, const int R1, const int R2)
static void RectStaple(const GaugeLorentz &Umu, GaugeMat &Stap, std::vector< GaugeMat > &U2, std::vector< GaugeMat > &U, int mu)
static void StaplePaddedAll(std::vector< GaugeMat > &staple, const std::vector< GaugeMat > &U_padded, const PaddedCell &Cell, const GeneralLocalStencil &gStencil)
static RealD avgRectangle(const GaugeLorentz &Umu)
static RealD sumRectangle(const GaugeLorentz &Umu)
static void traceDirRectangle(ComplexField &rect, const std::vector< GaugeMat > &U, const int mu, const int nu)
Gimpl::GaugeLinkField GaugeMat
Definition WilsonLoops.h:44
static RealD sumPlaquette(const GaugeLorentz &Umu)
static std::vector< RealD > timesliceAvgSpatialPlaquette(const GaugeLorentz &Umu)
static void RectStaplePaddedAll(std::vector< GaugeMat > &staple, const std::vector< GaugeMat > &U_padded, const PaddedCell &Cell, const GeneralLocalStencil &gStencil)
static void StapleLower(GaugeMat &staple, const GaugeLorentz &Umu, int mu, int nu)
static void RectStapleUnoptimised(GaugeMat &Stap, const GaugeLorentz &Umu, int mu)
static void siteSpatialWilsonLoop(LatticeComplex &Wl, const std::vector< GaugeMat > &U, const int R1, const int R2)
static void RectStaplePaddedAll(std::vector< GaugeMat > &staple, const std::vector< GaugeMat > &U_padded, const PaddedCell &Cell)
static void traceDirPlaquette(ComplexField &plaq, const std::vector< GaugeMat > &U, const int mu, const int nu)
Definition WilsonLoops.h:78
static void Staple(GaugeMat &staple, const std::vector< GaugeMat > &Umu, int mu)
static void FieldStrength(GaugeMat &FS, const GaugeLorentz &Umu, int mu, int nu)
static void siteSpatialPlaquette(ComplexField &Plaq, const std::vector< GaugeMat > &U)
static void CloverleafMxN(GaugeMat &FS, const GaugeMat &Umu, const GaugeMat &Unu, int mu, int nu, int M, int N)
static Real sumWilsonLoop(const GaugeLorentz &Umu, const int R1, const int R2)
static void RectStapleDouble(GaugeMat &U2, const GaugeMat &U, int mu)
static RealD avgPlaquette(const GaugeLorentz &Umu)
static void RectStapleOptimised(GaugeMat &Stap, const std::vector< GaugeMat > &U2, const std::vector< GaugeMat > &U, int mu)
static ComplexD avgPolyakovLoop(const GaugeField &Umu, const int mu)
static void siteTimelikeWilsonLoop(LatticeComplex &Wl, const std::vector< GaugeMat > &U, const int R1, const int R2)
static std::vector< RealD > timesliceSumSpatialPlaquette(const GaugeLorentz &Umu)
static Real TopologicalCharge(const GaugeLorentz &U)
static void Staple(GaugeMat &staple, const GaugeLorentz &U, int mu)
static void FieldStrengthMxN(GaugeMat &FS, const GaugeLorentz &U, int mu, int nu, int M, int N)
static std::vector< std::vector< Real > > TimesliceTopologicalCharge5LiContributions(const GaugeLorentz &U)
static std::vector< Real > TimesliceTopologicalChargeMxN(const GaugeLorentz &U, int M, int N)
static RealD linkTrace(const GaugeLorentz &Umu)
static void StapleUpper(GaugeMat &staple, const GaugeLorentz &Umu, int mu, int nu)
static Real sumSpatialWilsonLoop(const GaugeLorentz &Umu, const int R1, const int R2)
static void siteWilsonLoop(LatticeComplex &Wl, const std::vector< GaugeMat > &U, const int R1, const int R2)
static void dirPlaquette(GaugeMat &plaq, const std::vector< GaugeMat > &U, const int mu, const int nu)
Definition WilsonLoops.h:50
static void StaplePaddedAll(std::vector< GaugeMat > &staple, const std::vector< GaugeMat > &U_padded, const PaddedCell &Cell)
static void StapleAll(std::vector< GaugeMat > &staple, const std::vector< GaugeMat > &U)
static void StapleAndRectStapleAll(std::vector< GaugeMat > &Stap, std::vector< GaugeMat > &RectStap, const std::vector< GaugeMat > &U, StapleAndRectStapleAllWorkspace &wk)
static void wilsonLoop(GaugeMat &wl, const std::vector< GaugeMat > &U, const int Rmu, const int Rnu, const int mu, const int nu)
static Real TopologicalCharge5Li(const GaugeLorentz &U)
static Real sumTimelikeWilsonLoop(const GaugeLorentz &Umu, const int R1, const int R2)
static Real avgSpatialWilsonLoop(const GaugeLorentz &Umu, const int R1, const int R2)
static Real TopologicalChargeMxN(const GaugeLorentz &U, int M, int N)
INHERIT_GIMPL_TYPES(Gimpl)
static Real avgWilsonLoop(const GaugeLorentz &Umu, const int R1, const int R2)
Gimpl::GaugeField GaugeLorentz
Definition WilsonLoops.h:45
static std::vector< Real > TimesliceTopologicalCharge5Li(const GaugeLorentz &U)
static void sitePlaquette(ComplexField &Plaq, const std::vector< GaugeMat > &U)
Definition WilsonLoops.h:88
static void StapleAndRectStapleAll(std::vector< GaugeMat > &Stap, std::vector< GaugeMat > &RectStap, const std::vector< GaugeMat > &U)
static std::vector< Real > TopologicalCharge5LiContributions(const GaugeLorentz &U)
static void traceWilsonLoop(LatticeComplex &wl, const std::vector< GaugeMat > &U, const int Rmu, const int Rnu, const int mu, const int nu)
static void siteRectangle(ComplexField &Rect, const std::vector< GaugeMat > &U)
static void RectStaple(GaugeMat &Stap, const GaugeLorentz &Umu, int mu)
static void dirRectangle(GaugeMat &rect, const std::vector< GaugeMat > &U, const int mu, const int nu)
static void RectStapleAll(std::vector< GaugeMat > &Stap, const std::vector< GaugeMat > &U)
static void Staple(GaugeMat &staple, const GaugeLorentz &Umu, int mu, int nu)
static ComplexD avgPolyakovLoop(const GaugeField &Umu)
Definition Simd.h:194