Grid 0.7.0
Tensor_arith_sub.h
Go to the documentation of this file.
1/*************************************************************************************
2
3 Grid physics library, www.github.com/paboyle/Grid
4
5 Source file: ./lib/tensors/Tensor_arith_sub.h
6
7 Copyright (C) 2015
8
9Author: Peter Boyle <paboyle@ph.ed.ac.uk>
10Author: neo <cossu@post.kek.jp>
11
12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2 of the License, or
15 (at your option) any later version.
16
17 This program is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
21
22 You should have received a copy of the GNU General Public License along
23 with this program; if not, write to the Free Software Foundation, Inc.,
24 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
25
26 See the full license in the file "LICENSE" in the top level distribution directory
27*************************************************************************************/
28/* END LEGAL */
29#ifndef GRID_MATH_ARITH_SUB_H
30#define GRID_MATH_ARITH_SUB_H
31
33
37
38
39// SUB is simple for now; cannot mix types and straightforward template
40// Scalar +/- Scalar
41// Vector +/- Vector
42// Matrix +/- Matrix
 43// Matrix +/- Scalar
44template<class vtype,class ltype,class rtype> accelerator_inline void sub(iScalar<vtype> * __restrict__ ret,
45 const iScalar<ltype> * __restrict__ lhs,
46 const iScalar<rtype> * __restrict__ rhs)
47{
48 sub(&ret->_internal,&lhs->_internal,&rhs->_internal);
49}
50
51template<class vtype,class ltype,class rtype,int N> accelerator_inline void sub(iVector<vtype,N> * __restrict__ ret,
52 const iVector<ltype,N> * __restrict__ lhs,
53 const iVector<rtype,N> * __restrict__ rhs)
54{
55 for(int c=0;c<N;c++){
56 ret->_internal[c]=lhs->_internal[c]-rhs->_internal[c];
57 }
58 return;
59}
60template<class vtype,class ltype,class rtype, int N> accelerator_inline void sub(iMatrix<vtype,N> * __restrict__ ret,
61 const iMatrix<ltype,N> * __restrict__ lhs,
62 const iMatrix<rtype,N> * __restrict__ rhs){
63 for(int c2=0;c2<N;c2++){
64 for(int c1=0;c1<N;c1++){
65 sub(&ret->_internal[c1][c2],&lhs->_internal[c1][c2],&rhs->_internal[c1][c2]);
66 }}
67 return;
68}
69template<class vtype,class ltype,class rtype, int N> accelerator_inline void sub(iMatrix<vtype,N> * __restrict__ ret,
70 const iScalar<ltype> * __restrict__ lhs,
71 const iMatrix<rtype,N> * __restrict__ rhs){
72 for(int c2=0;c2<N;c2++){
73 for(int c1=0;c1<N;c1++){
74 if ( c1==c2) {
75 sub(&ret->_internal[c1][c2],&lhs->_internal,&rhs->_internal[c1][c2]);
76 } else {
77 // Fails -- need unary minus. Catalogue other unops?
78 ret->_internal[c1][c2]=Zero();
79 ret->_internal[c1][c2]=ret->_internal[c1][c2]-rhs->_internal[c1][c2];
80
81 }
82 }}
83 return;
84}
85template<class vtype,class ltype,class rtype, int N> accelerator_inline void sub(iMatrix<vtype,N> * __restrict__ ret,
86 const iMatrix<ltype,N> * __restrict__ lhs,
87 const iScalar<rtype> * __restrict__ rhs){
88 for(int c2=0;c2<N;c2++){
89 for(int c1=0;c1<N;c1++){
90 if ( c1==c2)
91 sub(&ret->_internal[c1][c2],&lhs->_internal[c1][c2],&rhs->_internal);
92 else
93 ret->_internal[c1][c2]=lhs->_internal[c1][c2];
94 }}
95 return;
96}
97
98// - operator for scalar, vector, matrix
99template<class ltype,class rtype> accelerator_inline auto
100operator - (const iScalar<ltype>& lhs, const iScalar<rtype>& rhs) -> iScalar<decltype(lhs._internal - rhs._internal)>
101{
102 typedef iScalar<decltype(lhs._internal-rhs._internal)> ret_t;
103 ret_t ret;
104 sub(&ret,&lhs,&rhs);
105 return ret;
106}
107template<class ltype,class rtype,int N>
108accelerator_inline auto operator - (const iVector<ltype,N>& lhs,const iVector<rtype,N>& rhs) ->iVector<decltype(lhs._internal[0]-rhs._internal[0]),N>
109{
110 typedef iVector<decltype(lhs._internal[0]-rhs._internal[0]),N> ret_t;
111 ret_t ret;
112 sub(&ret,&lhs,&rhs);
113 return ret;
114}
115template<class ltype,class rtype,int N>
116accelerator_inline auto operator - (const iMatrix<ltype,N>& lhs,const iMatrix<rtype,N>& rhs) ->iMatrix<decltype(lhs._internal[0][0]-rhs._internal[0][0]),N>
117{
118 typedef iMatrix<decltype(lhs._internal[0][0]-rhs._internal[0][0]),N> ret_t;
119 ret_t ret;
120 sub(&ret,&lhs,&rhs);
121 return ret;
122}
123template<class ltype,class rtype,int N>
124accelerator_inline auto operator - (const iScalar<ltype>& lhs,const iMatrix<rtype,N>& rhs)->iMatrix<decltype(lhs._internal-rhs._internal[0][0]),N>
125{
126 typedef iMatrix<decltype(lhs._internal-rhs._internal[0][0]),N> ret_t;
127 ret_t ret;
128 sub(&ret,&lhs,&rhs);
129 return ret;
130}
131template<class ltype,class rtype,int N>
132accelerator_inline auto operator - (const iMatrix<ltype,N>& lhs,const iScalar<rtype>& rhs)->iMatrix<decltype(lhs._internal[0][0]-rhs._internal),N>
133{
134 typedef iMatrix<decltype(lhs._internal[0][0]-rhs._internal),N> ret_t;
135 ret_t ret;
136 sub(&ret,&lhs,&rhs);
137 return ret;
138}
139
141
142#endif
#define accelerator_inline
#define NAMESPACE_BEGIN(A)
Definition Namespace.h:35
#define NAMESPACE_END(A)
Definition Namespace.h:36
accelerator_inline auto operator-(const iScalar< ltype > &lhs, const iScalar< rtype > &rhs) -> iScalar< decltype(lhs._internal - rhs._internal)>
accelerator_inline void sub(iScalar< vtype > *__restrict__ ret, const iScalar< ltype > *__restrict__ lhs, const iScalar< rtype > *__restrict__ rhs)
Definition Simd.h:194
vtype _internal[N][N]
vtype _internal
vtype _internal[N]