// Fermat — cugar/linalg/tensor.h
1 /*
2  * cugar
3  * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  * * Redistributions of source code must retain the above copyright
8  * notice, this list of conditions and the following disclaimer.
9  * * Redistributions in binary form must reproduce the above copyright
10  * notice, this list of conditions and the following disclaimer in the
11  * documentation and/or other materials provided with the distribution.
12  * * Neither the name of the NVIDIA CORPORATION nor the
13  * names of its contributors may be used to endorse or promote products
14  * derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
20  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */

#pragma once

#include <cmath>
#include <limits>

#include <cugar/basic/types.h>
#include <cugar/basic/numbers.h>
#include <cugar/linalg/vector.h>
#include <cugar/linalg/matrix.h>

namespace cugar {

42 template <typename T, uint32 ORDER, uint32 N>
43 struct Tensor {};
44 
48 template <uint32 ORDER>
49 struct TensorIndex {};
50 
54 template <>
55 struct TensorIndex<0>
56 {
57  uint32 dummy;
58 };
59 
63 template <>
64 struct TensorIndex<1>
65 {
66  TensorIndex<1>() {};
67  TensorIndex<1>(const uint32 _i) : x(_i) {};
68 
69  operator uint32() const { return x; }
70 
71  uint32 x;
72 };
73 
77 template <>
78 struct TensorIndex<2> : public uint2
79 {
80  TensorIndex<2>() {};
81  TensorIndex<2>(const uint2 _i) : uint2(_i) {};
82 };
83 
87 template <>
88 struct TensorIndex<3> : public uint3
89 {
90  TensorIndex<3>() {};
91  TensorIndex<3>(const uint3 _i) : uint3(_i) {};
92 };
93 
97 template <typename T, uint32 N>
98 struct Tensor<T,0,N>
99 {
100  typedef T value_type;
101  typedef T component_type;
102 
103  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
104  Tensor() {}
105 
106  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
107  explicit Tensor(const value_type _d) : data(_d) {}
108 
109  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
110  operator value_type() const { return data; }
111 
112  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
113  value_type operator() () const { return data; }
114 
115  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
116  const value_type& operator() (TensorIndex<0> i) const { return data; }
117 
118  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
119  value_type& operator() (TensorIndex<0> i) { return data; }
120 
121  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
122  uint32 order() const { return 0; }
123 
124  value_type data;
125 };
126 
130 template <typename T, uint32 N>
131 struct Tensor<T,1,N> : public Vector<T,N>
132 {
133  typedef T value_type;
134  typedef Vector<T,N> base_type;
135  typedef T component_type;
136 
137  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
138  Tensor() {}
139 
140  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
141  explicit Tensor(const value_type _v) : base_type(_v) {}
142 
143  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
144  Tensor(const Vector<T,N>& _v) : base_type(_v) {}
145 
146  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
147  Tensor& operator=(const Vector<T,N>& _v) { *static_cast<base_type*>(this) = _v; return *this; }
148 
149  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
150  Tensor& operator=(const Tensor<T,1,N>& _v) { *static_cast<base_type*>(this) = static_cast<base_type>(_v); return *this; }
151 
152  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
153  const value_type& operator() (TensorIndex<1> i) const { return base_type::operator[](i); }
154 
155  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
156  value_type& operator() (TensorIndex<1> i) { return base_type::operator[](i); }
157 
158  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
159  uint32 order() const { return 1; }
160 
161  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
162  uint32 dimension() const { return N; }
163 };
164 
168 template <typename T, uint32 N>
169 struct Tensor<T,2,N> : public Matrix<T,N,N>
170 {
171  typedef T value_type;
172  typedef Matrix<T,N,N> base_type;
174 
175  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
176  Tensor() {}
177 
178  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
179  explicit Tensor(const value_type _v) : base_type(_v) {}
180 
181  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
182  Tensor(const base_type& _v) : base_type(_v) {}
183 
184  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
185  Tensor& operator=(const base_type& _v) { *static_cast<base_type*>(this) = _v; return *this; }
186 
187  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
188  Tensor& operator=(const Tensor<T,2,N>& _v) { *static_cast<base_type*>(this) = static_cast<base_type>(_v); return *this; }
189 
190  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
191  const value_type& operator() (const TensorIndex<2> i) const { return base_type::operator()(i.x, i.y); }
192 
193  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
194  value_type& operator() (const TensorIndex<2> i) { return base_type::operator()(i.x, i.y); }
195 
196  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
197  const value_type& operator() (const uint32 i, const uint32 j) const { return base_type::operator()(i,j); }
198 
199  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
200  value_type& operator() (const uint32 i, const uint32 j) { return base_type::operator()(i,j); }
201 
202  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
203  const component_type& operator[] (const uint32 i) const { return reinterpret_cast<const component_type&>(base_type::operator[](i)); }
204 
205  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
206  component_type& operator[] (const uint32 i) { return reinterpret_cast<component_type&>(base_type::operator[](i)); }
207 
208  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
209  uint32 order() const { return 2; }
210 
211  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
212  uint32 dimension() const { return N; }
213 };
214 
218 template <typename T, uint32 N>
219 struct Tensor<T,3,N>
220 {
221  typedef T value_type;
223 
224  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
225  Tensor() {}
226 
227  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
228  explicit Tensor(const value_type _v)
229  {
230  #pragma unroll
231  for (uint32 i = 0; i < N; ++i)
232  data[i] = component_type(_v);
233  }
234 
235  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
236  Tensor& operator=(const Tensor<T,3,N>& _v)
237  {
238  #pragma unroll
239  for (uint32 i = 0; i < N; ++i)
240  data[i] = _v.data[i];
241  return *this;
242  }
243 
244  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
245  const component_type& operator[] (const uint32 i) const { return data[i]; }
246 
247  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
248  component_type& operator[] (const uint32 i) { return data[i]; }
249 
250  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
251  const value_type& operator() (const uint32 i, const uint32 j, const uint32 k) const { return data[i](j,k); }
252 
253  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
254  value_type& operator() (const uint32 i, const uint32 j, const uint32 k) { return data[i](j,k); }
255 
256  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
257  const value_type& operator() (const TensorIndex<3> i) const { return data[i.x](i.y, i.z); }
258 
259  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
260  value_type& operator() (const TensorIndex<3> i) { return data[i.x](i.y, i.z); }
261 
262  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
263  uint32 order() const { return 3; }
264 
265  CUGAR_FORCEINLINE CUGAR_HOST_DEVICE
266  uint32 dimension() const { return N; }
267 
268  component_type data[N];
269 };
270 
273 CUGAR_API_CS CUGAR_HOST_DEVICE CUGAR_FORCEINLINE
274 float outer_product(const float op1, const float op2) { return op1 * op2; }
275 
278 template <typename T, uint32 N> CUGAR_API_CS CUGAR_HOST_DEVICE
280 {
281  return Tensor<T,0,N>( op1 * op2 );
282 }
283 
286 template <typename T, uint32 N> CUGAR_API_CS CUGAR_HOST_DEVICE
288 {
289  Tensor<T,1,N> r;
290  for (uint32 i = 0; i < N; ++i)
291  r[i] = op1[i] * op2;
292  return r;
293 }
294 
297 template <typename T, uint32 N> CUGAR_API_CS CUGAR_HOST_DEVICE
299 {
300  Tensor<T,1,N> r;
301  for (uint32 i = 0; i < N; ++i)
302  r[i] = op1 * op2[i];
303  return r;
304 }
305 
308 template <typename T, uint32 D1, uint32 N> CUGAR_API_CS CUGAR_HOST_DEVICE
310 {
311  Tensor<T,D1,N> r;
312  for (uint32 i = 0; i < N; ++i)
313  r[i] = op1[i] * op2;
314  return r;
315 }
318 template <typename T, uint32 D2, uint32 N> CUGAR_API_CS CUGAR_HOST_DEVICE
320 {
321  Tensor<T,D2,N> r;
322  for (uint32 i = 0; i < N; ++i)
323  r[i] = op1 * op2[i];
324  return r;
325 }
326 
329 template <typename T, uint32 N> CUGAR_API_CS CUGAR_HOST_DEVICE
331 {
332  Tensor<T,2,N> r;
333  for (uint32 i = 0; i < N; ++i)
334  for (uint32 j = 0; j < N; ++j)
335  r(i,j) = op1[i] * op2[j];
336  return r;
337 }
340 template <typename T, uint32 D2, uint32 N> CUGAR_API_CS CUGAR_HOST_DEVICE
342 {
344  for (uint32 i = 0; i < N; ++i)
345  r[i] = outer_product( op1[i], op2 );
346  return r;
347 }
350 template <typename T, uint32 N, uint32 D1> CUGAR_API_CS CUGAR_HOST_DEVICE
352 {
354  for (uint32 i = 0; i < N; ++i)
355  r[i] = outer_product( op1[i], op2 );
356  return r;
357 }
358 
361 template <typename T, uint32 N> CUGAR_API_CS CUGAR_HOST_DEVICE
362 Tensor<T,1,N> operator* (const T op1, const Tensor<T, 1, N> op2)
363 {
364  Tensor<T,1,N> r;
365  for (uint32 i = 0; i < N; ++i)
366  r[i] = op1 * op2[i];
367  return r;
368 }
371 template <typename T, uint32 N> CUGAR_API_CS CUGAR_HOST_DEVICE
372 Tensor<T,1,N> operator* (const Tensor<T, 1, N> op1, const T op2)
373 {
374  Tensor<T,1,N> r;
375  for (uint32 i = 0; i < N; ++i)
376  r[i] = op1[i] * op2;
377  return r;
378 }
379 
382 template <typename T, uint32 D, uint32 N> CUGAR_API_CS CUGAR_HOST_DEVICE
383 Tensor<T,D,N> operator* (const Tensor<T, D, N> op1, const T op2)
384 {
385  Tensor<T,D,N> r;
386  for (uint32 i = 0; i < N; ++i)
387  r[i] = op1[i] * op2;
388  return r;
389 }
392 template <typename T, uint32 D, uint32 N> CUGAR_API_CS CUGAR_HOST_DEVICE
393 Tensor<T,D,N> operator* (const T op1, const Tensor<T, D, N> op2)
394 {
395  Tensor<T,D,N> r;
396  for (uint32 i = 0; i < N; ++i)
397  r[i] = op1 * op2[i];
398  return r;
399 }
400 
403 template <typename T, uint32 D, uint32 N> CUGAR_API_CS CUGAR_HOST_DEVICE
404 Tensor<T,D,N> operator* (const Tensor<T, D, N> op1, const Tensor<T, 0, N> op2)
405 {
406  return op1 * T(op2);
407 }
410 template <typename T, uint32 D, uint32 N> CUGAR_API_CS CUGAR_HOST_DEVICE
411 Tensor<T,D,N> operator* (const Tensor<T, 0, N> op1, const Tensor<T, D, N> op2)
412 {
413  return T(op1) * op2;
414 }
415 
418 template <typename T, uint32 D, uint32 N> CUGAR_API_CS CUGAR_HOST_DEVICE
419 Tensor<T,D,N> operator+ (const Tensor<T, D, N> op1, const Tensor<T, D, N> op2)
420 {
421  Tensor<T,D,N> r;
422  for (uint32 i = 0; i < N; ++i)
423  r[i] = op1[i] + op2[i];
424  return r;
425 }
428 template <typename T, uint32 D, uint32 N> CUGAR_API_CS CUGAR_HOST_DEVICE
429 Tensor<T,D,N> operator- (const Tensor<T, D, N> op1, const Tensor<T, D, N> op2)
430 {
431  Tensor<T,D,N> r;
432  for (uint32 i = 0; i < N; ++i)
433  r[i] = op1[i] - op2[i];
434  return r;
435 }
436 
439 template <typename T, uint32 D, uint32 N> CUGAR_API_CS CUGAR_HOST_DEVICE
440 Tensor<T,D,N> operator/ (const Tensor<T, D, N> op1, const T op2)
441 {
442  Tensor<T,D,N> r;
443  for (uint32 i = 0; i < N; ++i)
444  r[i] = op1[i] / op2;
445  return r;
446 }
447 
} // namespace cugar
// (Doxygen cross-reference footer from the HTML export removed — extraction
//  artifact, not part of the source file.)