Maestro 0.1.0
Unified interface for quantum circuit simulation
Tensor.h
#pragma once

#ifndef _TENSOR_H_
#define _TENSOR_H_

#include <cassert>
#include <complex>
#include <initializer_list>
#include <numeric>
#include <type_traits>
#include <unordered_set>
#include <utility>
#include <valarray>
#include <vector>

#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <boost/serialization/valarray.hpp>
#include <boost/serialization/vector.hpp>

#include "QubitRegisterCalculator.h"

namespace Utils {

// WARNING: Not all functions are tested and some are tested only
// superficially, so use with caution! Some were tested before the switch to
// Fortran layout, so they might not work properly anymore. Value access and
// contractions do work, since they are used in circuit cutting and in the
// tensor-network simulator.
template <class T = std::complex<double>, class Storage = std::valarray<T>>
class Tensor {
protected:
  Storage values;
  std::vector<size_t> dims;
  mutable size_t sz;

  constexpr static size_t OmpLimit = 1024;
  constexpr static int divSchedule = 2;

  static int GetNumberOfThreads() {
    return QC::QubitRegisterCalculator<>::GetNumberOfThreads();
  }

public:
  friend class boost::serialization::access;

  Tensor() : sz(0) {}

  // A dummy tensor should be used only for incrementing indices, not for
  // storing values!
  Tensor(const std::vector<size_t> &dims, bool dummy = false)
      : dims(dims), sz(0) {
    if (!dummy) {
      if constexpr (std::is_convertible<int, T>::value)
        values.resize(GetSize(), static_cast<T>(0));
      else
        values.resize(GetSize());
    }
  }

  Tensor(const std::vector<int> &dims, bool dummy = false)
      : dims(dims.cbegin(), dims.cend()), sz(0) {
    if (!dummy) {
      if constexpr (std::is_convertible<int, T>::value)
        values.resize(GetSize(), static_cast<T>(0));
      else
        values.resize(GetSize());
    }
  }

  template <class indT = size_t>
  Tensor(std::initializer_list<indT> dims, bool dummy = false)
      : dims(dims.begin(), dims.end()), sz(0) {
    if (!dummy) {
      if constexpr (std::is_convertible<int, T>::value)
        values.resize(GetSize(), static_cast<T>(0));
      else
        values.resize(GetSize());
    }
  }
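
  // Usage sketch (illustrative, not part of the original header): a dummy
  // tensor allocates no storage and only carries its shape, so it can be used
  // to drive index iteration (see IncrementIndex below).
  //
  //   Tensor<> t({2, 3});            // 6 zero-initialized values
  //   Tensor<> shape({2, 3}, true);  // dummy: shape.IsDummy() == true
  //   assert(shape.GetSize() == 6);  // the size is computed from dims alone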

  Tensor(const Tensor<T, Storage> &other)
      : values(other.values), dims(other.dims), sz(other.sz) {}

  Tensor(Tensor<T, Storage> &&other) noexcept {
    dims.swap(other.dims);
    values.swap(other.values);
    std::swap(sz, other.sz);
  }

  virtual ~Tensor() = default;

  template <class Archive>
  void serialize(Archive &ar, const unsigned int /*version*/) {
    if (typename Archive::is_loading())
      values.resize(0); // std::valarray has no clear(); resize(0) works for
                        // both storage types

    ar &dims &sz;
  }
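
  // Usage sketch (illustrative): round-tripping through a Boost text archive.
  // Note that serialize() stores only dims and sz, not the values themselves.
  //
  //   std::stringstream ss;
  //   {
  //     boost::archive::text_oarchive oa(ss);
  //     oa << t;
  //   }
  //   Tensor<> loaded;
  //   {
  //     boost::archive::text_iarchive ia(ss);
  //     ia >> loaded;  // loaded gets t's shape, but no values
  //   }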

  Tensor<T, Storage> &operator=(const Tensor<T, Storage> &other) {
    Tensor temp(other);
    *this = std::move(temp);

    return *this;
  }

  Tensor<T, Storage> &operator=(Tensor<T, Storage> &&other) noexcept {
    if (this != &other) {
      dims.swap(other.dims);
      values.swap(other.values);
      std::swap(sz, other.sz);
    }

    return *this;
  }

  void Swap(Tensor<T, Storage> &other) {
    dims.swap(other.dims);
    values.swap(other.values);
    std::swap(sz, other.sz);
  }

  size_t GetSize() const {
    if (!sz)
      sz = std::accumulate(dims.begin(), dims.end(), size_t{1},
                           std::multiplies<size_t>()); // size_t init avoids
                                                       // truncating to int

    return sz;
  }

  size_t GetDim(size_t index) const {
    assert(index < dims.size());

    return dims[index];
  }

  const std::vector<size_t> &GetDims() const { return dims; }

  size_t GetRank() const { return dims.size(); }

  inline bool IsDummy() const { return values.size() == 0; }

  // Odometer-style increment over the index ranges given by dims, with the
  // last index varying fastest; returns false once all indices have wrapped.
  inline bool IncrementIndex(std::vector<size_t> &indices) const {
    long long pos = dims.size() - 1;

    while (pos >= 0) {
      if (indices[pos] < dims[pos] - 1) {
        ++indices[pos];
        break;
      } else {
        indices[pos] = 0;
        --pos;
      }
    }

    return pos >= 0;
  }

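  // Usage sketch (illustrative): visiting every index tuple of a {2, 3}
  // tensor; the loop below runs 6 times, in the order (0,0), (0,1), ...,
  // (1,2). someValue is a placeholder.
  //
  //   Tensor<> t({2, 3});
  //   std::vector<size_t> idx(t.GetRank(), 0);
  //   do {
  //     t[idx] = someValue(idx);
  //   } while (t.IncrementIndex(idx));
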
  inline bool IncrementIndexSkip(std::vector<size_t> &indices, int skip) const {
    long long pos = dims.size() - 1;

    while (pos >= 0) {
      const bool notOnPos = pos != skip;
      if (notOnPos && indices[pos] < dims[pos] - 1) {
        ++indices[pos];
        break;
      } else {
        if (notOnPos)
          indices[pos] = 0;
        --pos;
      }
    }

    return pos >= 0;
  }

  // Same as above, but skipping several positions; skipv appears to be
  // expected in ascending order, since it is consumed from the back.
  inline bool IncrementIndexSkip(std::vector<size_t> &indices,
                                 const std::vector<int> &skipv) const {
    long long int pos = dims.size() - 1;

    int skip = -1;
    int skipPos = -1;
    if (!skipv.empty()) {
      skip = skipv.back();
      skipPos = static_cast<int>(skipv.size()) - 1;
    }

    while (pos >= 0) {
      const bool notOnPos = pos != skip;
      if (notOnPos && indices[pos] < dims[pos] - 1) {
        ++indices[pos];
        break;
      } else {
        if (notOnPos)
          indices[pos] = 0;
        --pos;

        if (pos < skip) {
          --skipPos;
          if (skipPos >= 0)
            skip = skipv[skipPos];
          else
            skip = -1;
        }
      }
    }

    return pos >= 0;
  }

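  // Usage sketch (illustrative): iterate over all index tuples of a {2, 3, 4}
  // tensor while keeping axis 1 pinned; the loop visits the 2 * 4 = 8 tuples
  // of the form (i0, 2, i2).
  //
  //   Tensor<> t({2, 3, 4});
  //   std::vector<size_t> idx(3, 0);
  //   idx[1] = 2;  // pinned by the skip argument
  //   do {
  //     // ... use idx ...
  //   } while (t.IncrementIndexSkip(idx, 1));
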
  void Clear() {
    dims.clear();
    ClearValues();
  }

  void ClearValues() {
    sz = 0;
    values.resize(0);
  }

  bool Empty() const { return dims.empty(); }

  void Resize(const std::vector<size_t> &newdims) {
    Clear();

    dims = newdims;
    sz = 0;
    values.resize(GetSize());
  }

  template <class indT = size_t>
  void Resize(std::initializer_list<indT> newdims) {
    Clear();

    dims.assign(newdims.begin(), newdims.end()); // assign() compiles for any
                                                 // integral indT
    sz = 0;
    values.resize(GetSize());
  }

  T at(const std::vector<size_t> &indices) const {
    assert(indices.size() == dims.size());

    return values[GetOffset(indices)];
  }

  inline T &operator[](const std::vector<size_t> &indices) {
    assert(indices.size() == dims.size());

    return values[GetOffset(indices)];
  }

  inline const T &operator[](const std::vector<size_t> &indices) const {
    return values[GetOffset(indices)];
  }

  T atOffset(size_t offset) const {
    assert(values.size() > offset);

    return values[offset];
  }

  inline T &operator[](size_t offset) {
    assert(values.size() > offset);

    return values[offset];
  }

  inline const T &operator[](size_t offset) const { return values[offset]; }

  template <class indT = size_t>
  T &operator[](std::initializer_list<indT> args) {
    assert(args.size() == dims.size());

    const std::vector<size_t> indices(args.begin(), args.end());

    return values[GetOffset(indices)];
  }

  template <class indT = size_t>
  const T &operator[](std::initializer_list<indT> args) const {
    assert(args.size() == dims.size());

    const std::vector<size_t> indices(args.begin(), args.end());

    return values[GetOffset(indices)];
  }

  inline T &operator()(const std::vector<size_t> &indices) {
    assert(indices.size() == dims.size());

    return values[GetOffset(indices)];
  }

  inline const T &operator()(const std::vector<size_t> &indices) const {
    return values[GetOffset(indices)];
  }

  template <class indT = size_t>
  T &operator()(std::initializer_list<indT> args) {
    assert(args.size() == dims.size());

    const std::vector<size_t> indices(args.begin(), args.end());

    return values[GetOffset(indices)];
  }

  template <class indT = size_t>
  const T &operator()(std::initializer_list<indT> args) const {
    assert(args.size() == dims.size());

    const std::vector<size_t> indices(args.begin(), args.end());

    return values[GetOffset(indices)];
  }

  inline const Storage &GetValues() const { return values; }

  inline Storage &GetValues() { return values; }

  // Elementwise arithmetic; both tensors must have identical dims.
  Tensor<T, Storage> operator+(const Tensor<T, Storage> &other) const {
    assert(dims == other.dims);

    Tensor<T, Storage> result(dims);

    for (size_t i = 0; i < GetSize(); ++i)
      result.values[i] = values[i] + other.values[i];

    return result;
  }

  Tensor<T, Storage> operator-(const Tensor<T, Storage> &other) const {
    assert(dims == other.dims);

    Tensor<T, Storage> result(dims);

    for (size_t i = 0; i < GetSize(); ++i)
      result.values[i] = values[i] - other.values[i];

    return result;
  }

  Tensor<T, Storage> operator*(const Tensor<T, Storage> &other) const {
    assert(dims == other.dims);

    Tensor<T, Storage> result(dims);

    for (size_t i = 0; i < GetSize(); ++i)
      result.values[i] = values[i] * other.values[i];

    return result;
  }

  Tensor<T, Storage> operator/(const Tensor<T, Storage> &other) const {
    assert(dims == other.dims);

    Tensor<T, Storage> result(dims);

    for (size_t i = 0; i < GetSize(); ++i)
      result.values[i] = values[i] / other.values[i];

    return result;
  }

  Tensor<T, Storage> operator+(const T &scalar) const {
    Tensor<T, Storage> result(dims);

    for (size_t i = 0; i < GetSize(); ++i)
      result.values[i] = values[i] + scalar;

    return result;
  }

  Tensor<T, Storage> operator-(const T &scalar) const {
    Tensor<T, Storage> result(dims);

    for (size_t i = 0; i < GetSize(); ++i)
      result.values[i] = values[i] - scalar;

    return result;
  }

  Tensor<T, Storage> operator*(const T &scalar) const {
    Tensor<T, Storage> result(dims);

    for (size_t i = 0; i < GetSize(); ++i)
      result.values[i] = values[i] * scalar;

    return result;
  }

  Tensor<T, Storage> operator/(const T &scalar) const {
    Tensor<T, Storage> result(dims);

    for (size_t i = 0; i < GetSize(); ++i)
      result.values[i] = values[i] / scalar;

    return result;
  }

  Tensor<T, Storage> &operator+=(const Tensor<T, Storage> &other) {
    assert(dims == other.dims);

    for (size_t i = 0; i < GetSize(); ++i)
      values[i] += other.values[i];

    return *this;
  }

  Tensor<T, Storage> &operator-=(const Tensor<T, Storage> &other) {
    assert(dims == other.dims);

    for (size_t i = 0; i < GetSize(); ++i)
      values[i] -= other.values[i];

    return *this;
  }

  Tensor<T, Storage> &operator*=(const Tensor<T, Storage> &other) {
    assert(dims == other.dims);

    for (size_t i = 0; i < GetSize(); ++i)
      values[i] *= other.values[i];

    return *this;
  }

  Tensor<T, Storage> &operator/=(const Tensor<T, Storage> &other) {
    assert(dims == other.dims);

    for (size_t i = 0; i < GetSize(); ++i)
      values[i] /= other.values[i];

    return *this;
  }

  Tensor<T, Storage> &operator+=(const T &scalar) {
    for (size_t i = 0; i < GetSize(); ++i)
      values[i] += scalar;

    return *this;
  }

  Tensor<T, Storage> &operator-=(const T &scalar) {
    for (size_t i = 0; i < GetSize(); ++i)
      values[i] -= scalar;

    return *this;
  }

  Tensor<T, Storage> &operator*=(const T &scalar) {
    for (size_t i = 0; i < GetSize(); ++i)
      values[i] *= scalar;

    return *this;
  }

  Tensor<T, Storage> &operator/=(const T &scalar) {
    for (size_t i = 0; i < GetSize(); ++i)
      values[i] /= scalar;

    return *this;
  }

  // The following four are for the special case when each value is a map of
  // results (for circuit cutting).
  Tensor<T, Storage> &operator+=(const double scalar) {
    for (size_t i = 0; i < GetSize(); ++i)
      values[i] = values[i] + scalar;

    return *this;
  }

  Tensor<T, Storage> &operator-=(const double scalar) {
    for (size_t i = 0; i < GetSize(); ++i)
      values[i] = values[i] - scalar;

    return *this;
  }

  Tensor<T, Storage> &operator*=(const double scalar) {
    for (size_t i = 0; i < GetSize(); ++i)
      values[i] = scalar * values[i];

    return *this;
  }

  Tensor<T, Storage> &operator/=(const double scalar) {
    for (size_t i = 0; i < GetSize(); ++i)
      values[i] = values[i] / scalar;

    return *this;
  }

  Tensor<T, Storage> operator-() const {
    Tensor<T, Storage> result(dims);

    for (size_t i = 0; i < GetSize(); ++i)
      result.values[i] = -values[i];

    return result;
  }

  void Conj() {
    for (size_t i = 0; i < GetSize(); ++i)
      values[i] = std::conj(values[i]);
  }

  Tensor<T, Storage> TensorProduct(const Tensor<T, Storage> &other) const {
    std::vector<size_t> newdims(dims.size() + other.dims.size());

    for (size_t i = 0; i < dims.size(); ++i)
      newdims[i] = dims[i];

    for (size_t i = 0; i < other.dims.size(); ++i)
      newdims[dims.size() + i] = other.dims[i];

    Tensor<T, Storage> result(newdims, values.size() == 0);

    if (!IsDummy())
      for (size_t i = 0; i < GetSize(); ++i)
        for (size_t j = 0; j < other.GetSize(); ++j)
          // column-major layout: this tensor's flat offset varies fastest in
          // the combined tensor (the original used the row-major formula
          // i * other.GetSize() + j, which mismatches GetOffset)
          result.values[j * GetSize() + i] = values[i] * other.values[j];

    return result;
  }

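  // Usage sketch (illustrative): the tensor (outer) product of a length-2 and
  // a length-3 vector is a {2, 3} tensor with p[{i, j}] == u[{i}] * v[{j}].
  //
  //   Tensor<> u({2}), v({3});
  //   // ... fill u and v ...
  //   Tensor<> p = u.TensorProduct(v);
  //   assert(p.GetDims() == (std::vector<size_t>{2, 3}));
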
  Tensor<T, Storage> Contract(const Tensor<T, Storage> &other, size_t ind1,
                              size_t ind2,
                              bool allowMultithreading = true) const {
    assert(dims[ind1] == other.dims[ind2]);

    std::vector<size_t> newdims;

    const size_t newsize = dims.size() + other.dims.size() - 2;
    if (newsize == 0)
      newdims.resize(1, 1);
    else {
      newdims.reserve(newsize);

      for (size_t i = 0; i < dims.size(); ++i)
        if (i != ind1)
          newdims.push_back(dims[i]);

      for (size_t i = 0; i < other.dims.size(); ++i)
        if (i != ind2)
          newdims.push_back(other.dims[i]);
    }

    Tensor<T, Storage> result(newdims, IsDummy());

    if (!IsDummy()) {
      const size_t sz = result.GetSize();

      if (!allowMultithreading || sz < OmpLimit) {
        std::vector<size_t> indices1(dims.size());
        std::vector<size_t> indices2(other.dims.size());

        for (size_t offset = 0; offset < sz; ++offset) {
          const std::vector<size_t> indicesres = result.IndexFromOffset(offset);

          size_t pos = 0;
          for (size_t i = 0; i < dims.size(); ++i)
            if (i != ind1) {
              indices1[i] = indicesres[pos];
              ++pos;
            }

          for (size_t i = 0; i < other.dims.size(); ++i)
            if (i != ind2) {
              indices2[i] = indicesres[pos];
              ++pos;
            }

          // contracting more than one index would require creating a dummy
          // tensor to iterate over all the indices that are contracted
          for (size_t i = 0; i < dims[ind1]; ++i) {
            indices1[ind1] = indices2[ind2] = i;

            result[offset] =
                result[offset] + values[GetOffset(indices1)] * other[indices2];
          }
        }
      } else {
        const auto processor_count = GetNumberOfThreads();

#pragma omp parallel for num_threads(processor_count) \
    schedule(static, OmpLimit / divSchedule)
        for (long long int offset = 0; offset < static_cast<long long int>(sz);
             ++offset) {
          const std::vector<size_t> indicesres = result.IndexFromOffset(offset);
          std::vector<size_t> indices1(dims.size());
          std::vector<size_t> indices2(other.dims.size());

          size_t pos = 0;
          for (size_t i = 0; i < dims.size(); ++i)
            if (i != ind1) {
              indices1[i] = indicesres[pos];
              ++pos;
            }

          for (size_t i = 0; i < other.dims.size(); ++i)
            if (i != ind2) {
              indices2[i] = indicesres[pos];
              ++pos;
            }

          // contracting more than one index would require creating a dummy
          // tensor to iterate over all the indices that are contracted
          for (size_t i = 0; i < dims[ind1]; ++i) {
            indices1[ind1] = indices2[ind2] = i;

            result[offset] =
                result[offset] + values[GetOffset(indices1)] * other[indices2];
          }
        }
      }
    }

    return result;
  }

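  // Usage sketch (illustrative): contracting index 1 of a {2, 3} tensor with
  // index 0 of a {3, 4} tensor is a matrix product; the result has dims
  // {2, 4} with C(i, j) = sum_k A(i, k) * B(k, j).
  //
  //   Tensor<> A({2, 3}), B({3, 4});
  //   // ... fill A and B ...
  //   Tensor<> C = A.Contract(B, 1, 0);
  //   assert(C.GetDims() == (std::vector<size_t>{2, 4}));
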
  Tensor<T, Storage> Contract(
      const Tensor<T, Storage> &other,
      const std::vector<std::pair<size_t, size_t>> &indices,
      bool allowMultithreading = true) const {
    std::vector<size_t> newdims;
    std::vector<size_t> contractDims;

    std::unordered_set<size_t> indicesSet1;
    std::unordered_set<size_t> indicesSet2;

    for (const auto &index : indices) {
      assert(dims[index.first] == other.dims[index.second]);

      indicesSet1.insert(index.first);
      indicesSet2.insert(index.second);
      contractDims.push_back(dims[index.first]);
    }

    const size_t newsize = dims.size() + other.dims.size() - 2 * indices.size();
    if (newsize == 0)
      newdims.resize(1, 1);
    else {
      newdims.reserve(newsize);

      for (size_t i = 0; i < dims.size(); ++i)
        if (indicesSet1.find(i) == indicesSet1.end())
          newdims.push_back(dims[i]);

      for (size_t i = 0; i < other.dims.size(); ++i)
        if (indicesSet2.find(i) == indicesSet2.end())
          newdims.push_back(other.dims[i]);
    }

    Tensor<T, Storage> result(newdims, IsDummy());

    if (!IsDummy()) {
      const Tensor<T, Storage> dummy(contractDims,
                                     true); // used for incrementing the index
      const size_t sz = result.GetSize();

      if (!allowMultithreading || sz < OmpLimit) {
        std::vector<size_t> dummyIndices(
            contractDims.size(), 0); // the dummy index to be incremented - use
                                     // the values to complete the real indices
        std::vector<size_t> indices1(dims.size());
        std::vector<size_t> indices2(other.dims.size());

        for (size_t offset = 0; offset < sz; ++offset) {
          const std::vector<size_t> indicesres = result.IndexFromOffset(offset);

          size_t pos = 0;
          for (size_t i = 0; i < dims.size(); ++i)
            if (indicesSet1.find(i) == indicesSet1.end()) {
              indices1[i] = indicesres[pos];
              ++pos;
            }

          for (size_t i = 0; i < other.dims.size(); ++i)
            if (indicesSet2.find(i) == indicesSet2.end()) {
              indices2[i] = indicesres[pos];
              ++pos;
            }

          // contracting more than one index requires creating a dummy tensor
          // to iterate over all the indices that are contracted
          do {
            for (size_t i = 0; i < dummyIndices.size(); ++i)
              indices1[indices[i].first] = indices2[indices[i].second] =
                  dummyIndices[i];

            // workaround for a Linux compile bug
            const T &val1 = values[GetOffset(indices1)];
            const T &val2 = other[indices2];
            auto mulRes = val1 * val2;
            result[offset] = result[offset] + std::move(mulRes);
          } while (dummy.IncrementIndex(dummyIndices));
        }
      } else {
        const auto processor_count = GetNumberOfThreads();

#pragma omp parallel for num_threads(processor_count) \
    schedule(static, OmpLimit / divSchedule)
        for (long long int offset = 0; offset < static_cast<long long int>(sz);
             ++offset) {
          const std::vector<size_t> indicesres = result.IndexFromOffset(offset);
          std::vector<size_t> indices1(dims.size());
          std::vector<size_t> indices2(other.dims.size());

          size_t pos = 0;
          for (size_t i = 0; i < dims.size(); ++i)
            if (indicesSet1.find(i) == indicesSet1.end()) {
              indices1[i] = indicesres[pos];
              ++pos;
            }

          for (size_t i = 0; i < other.dims.size(); ++i)
            if (indicesSet2.find(i) == indicesSet2.end()) {
              indices2[i] = indicesres[pos];
              ++pos;
            }

          // contracting more than one index requires creating a dummy tensor
          // to iterate over all the indices that are contracted
          std::vector<size_t> dummyIndices(
              contractDims.size(),
              0); // the dummy index to be incremented - use the values to
                  // complete the real indices

          do {
            for (size_t i = 0; i < dummyIndices.size(); ++i)
              indices1[indices[i].first] = indices2[indices[i].second] =
                  dummyIndices[i];

            // workaround for a Linux compile bug
            const T &val1 = values[GetOffset(indices1)];
            const T &val2 = other[indices2];

            auto mulRes = val1 * val2;
            result[offset] = result[offset] + std::move(mulRes);
          } while (dummy.IncrementIndex(dummyIndices));
        }
      }
    }

    return result;
  }

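  // Usage sketch (illustrative): contracting two index pairs at once; pairing
  // A's axes 1 and 2 with B's axes 0 and 1 leaves dims {2, 5}, with
  // R(i, j) = sum_{k,l} A(i, k, l) * B(k, l, j).
  //
  //   Tensor<> A({2, 3, 4}), B({3, 4, 5});
  //   Tensor<> R = A.Contract(B, {{1, 0}, {2, 1}});
  //   assert(R.GetDims() == (std::vector<size_t>{2, 5}));
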
  T Trace() const {
    assert(dims.size() > 0);

    T result = 0;

    if (values.size() == 0)
      return result;

    size_t dimMin = dims[0];
    for (size_t i = 1; i < dims.size(); ++i)
      if (dims[i] < dimMin)
        dimMin = dims[i];

    for (size_t i = 0; i < dimMin; ++i) {
      const std::vector<size_t> indices(dims.size(), i);

      result += values[GetOffset(indices)];
    }

    return result;
  }

  Tensor<T, Storage> Trace(size_t ind1, size_t ind2) const {
    assert(ind1 < dims.size() && ind2 < dims.size());

    std::vector<size_t> newdims;

    size_t newsize = dims.size() - 2;

    if (newsize == 0) {
      newsize = 1;
      newdims.push_back(1);
    } else {
      newdims.reserve(newsize);

      for (size_t i = 0; i < dims.size(); ++i)
        if (i != ind1 && i != ind2)
          newdims.push_back(dims[i]);
    }

    size_t dimMin = dims[ind1];
    if (dims[ind2] < dimMin)
      dimMin = dims[ind2];

    Tensor<T, Storage> result(newdims, values.size() == 0);

    if (values.size() != 0) {
      for (size_t offset = 0; offset < result.GetSize(); ++offset) {
        std::vector<size_t> indices1(dims.size());
        const std::vector<size_t> indices = result.IndexFromOffset(offset);

        size_t skip = 0;
        for (size_t i = 0; i < dims.size(); ++i)
          if (i != ind1 && i != ind2)
            indices1[i] = indices[i - skip];
          else
            ++skip;

        for (size_t i = 0; i < dimMin; ++i) {
          indices1[ind1] = indices1[ind2] = i;

          result[offset] += values[GetOffset(indices1)];
        }
      }
    }

    return result;
  }

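  // Usage sketch (illustrative): tracing out axes 0 and 2 of a {2, 3, 2}
  // tensor leaves a {3} tensor with r(j) = sum_i t(i, j, i).
  //
  //   Tensor<> t({2, 3, 2});
  //   Tensor<> r = t.Trace(0, 2);
  //   assert(r.GetDims() == (std::vector<size_t>{3}));
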
  Tensor<T, Storage> Trace(const std::vector<size_t> &tindices) const {
    std::vector<size_t> newdims;

    size_t newsize = dims.size() - tindices.size();

    if (newsize == 0) {
      newsize = 1;
      newdims.push_back(1);
    } else {
      newdims.reserve(newsize);

      for (size_t i = 0; i < dims.size(); ++i) {
        bool skip = false;
        for (size_t j = 0; j < tindices.size(); ++j)
          if (i == tindices[j]) {
            skip = true;
            break;
          }

        if (!skip)
          newdims.push_back(dims[i]);
      }
    }

    size_t dimMin = dims[tindices[0]];
    for (size_t i = 1; i < tindices.size(); ++i)
      if (dims[tindices[i]] < dimMin)
        dimMin = dims[tindices[i]];

    Tensor<T, Storage> result(newdims, values.size() == 0);

    if (values.size() != 0) {
      for (size_t offset = 0; offset < result.GetSize(); ++offset) {
        std::vector<size_t> indices1(dims.size());
        const std::vector<size_t> indices = result.IndexFromOffset(offset);

        size_t skipv = 0;
        for (size_t i = 0; i < dims.size(); ++i) {
          bool skip = false;
          for (size_t j = 0; j < tindices.size(); ++j)
            if (i == tindices[j]) {
              skip = true;
              break;
            }

          if (!skip)
            indices1[i] = indices[i - skipv];
          else
            ++skipv;
        }

        for (size_t i = 0; i < dimMin; ++i) {
          for (size_t j = 0; j < tindices.size(); ++j)
            indices1[tindices[j]] = i;

          result[offset] += values[GetOffset(indices1)];
        }
      }
    }

    return result;
  }

  template <class indT = size_t>
  Tensor<T, Storage> Trace(std::initializer_list<indT> args) const {
    const std::vector<size_t> indices(args.begin(), args.end());

    return Trace(indices);
  }

  Tensor<T, Storage> Shuffle(const std::vector<size_t> &indices) const {
    assert(indices.size() == dims.size());

    std::vector<size_t> newdims(dims.size());
    for (size_t i = 0; i < dims.size(); ++i)
      newdims[i] = dims[indices[i]];

    Tensor<T, Storage> result(newdims, values.size() == 0);

    if (values.size() != 0) {
      std::vector<size_t> indices2(dims.size());
      for (size_t offset = 0; offset < GetSize(); ++offset) {
        const std::vector<size_t> indices1 = IndexFromOffset(offset);

        // result axis i takes source axis indices[i], matching newdims above
        // (the original wrote indices2[indices[i]] = indices1[i], the inverse
        // permutation, which agrees only for involutions such as transposes)
        for (size_t i = 0; i < dims.size(); ++i)
          indices2[i] = indices1[indices[i]];

        result[indices2] = values[offset];
      }
    }

    return result;
  }

  template <class indT = size_t>
  Tensor<T, Storage> Shuffle(std::initializer_list<indT> args) const {
    const std::vector<size_t> indices(args.begin(), args.end());

    return Shuffle(indices);
  }

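  // Usage sketch (illustrative): Shuffle({1, 0}) on a {2, 3} tensor is a
  // transpose; the result has dims {3, 2} with s[{j, i}] == t[{i, j}].
  //
  //   Tensor<> t({2, 3});
  //   Tensor<> s = t.Shuffle({1, 0});
  //   assert(s.GetDims() == (std::vector<size_t>{3, 2}));
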
  Tensor<T, Storage> Reshape(const std::vector<size_t> &newdims) const {
    Tensor<T, Storage> result(newdims, values.size() == 0);

    assert(result.GetSize() == GetSize());

    if (values.size() != 0)
      for (size_t offset = 0; offset < GetSize(); ++offset)
        result[offset] = values[offset];

    return result;
  }

  template <class indT = size_t>
  Tensor<T, Storage> Reshape(std::initializer_list<indT> args) const {
    const std::vector<size_t> newdims(args.begin(), args.end());

    return Reshape(newdims);
  }

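  // Usage sketch (illustrative): Reshape keeps the flat (column-major) order
  // of the values and only reinterprets the shape; the total sizes must match.
  //
  //   Tensor<> t({2, 6});
  //   Tensor<> m = t.Reshape({3, 4});  // the same 12 values, new dims
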
  // Changing the layout format is as easy as changing what is called in the
  // following two functions.

  inline size_t GetOffset(const std::vector<size_t> &indices) const {
    return GetFortranOffset(indices);
  }

  inline std::vector<size_t> IndexFromOffset(size_t offset) const {
    return IndexFromFortranOffset(offset);
  }

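  // Worked example (illustrative): in Fortran (column-major) layout the first
  // index varies fastest, so for dims {2, 3, 4} the flat offset of
  // (i0, i1, i2) is i0 + 2 * (i1 + 3 * i2); e.g. (1, 2, 3) maps to
  // 1 + 2 * (2 + 3 * 3) = 23, and IndexFromOffset(23) recovers (1, 2, 3).
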
private:
  // C/C++ layout (row-major order)

  inline size_t GetCPPOffset(const std::vector<size_t> &indices) const {
    assert(indices.size() == dims.size());

    size_t result = 0;

    for (size_t i = 0; i < dims.size(); ++i) {
      assert(indices[i] < dims[i]);

      result = result * dims[i] + indices[i];
    }

    return result;
  }

  inline std::vector<size_t> IndexFromCPPOffset(size_t offset) const {
    std::vector<size_t> indices(dims.size(), 0);

    for (int i = static_cast<int>(dims.size()) - 1; i >= 0; --i) {
      indices[i] = offset % dims[i];
      offset /= dims[i];
      if (0 == offset)
        break;
    }

    return indices;
  }

988
989 // the following two are for fortran layout (column-major order)
990 // for passing to cuda we need probably to convert anyway (as in from
991 // complex<double> to complex<float>) the tensors will be small so converting
992 // them should not be computationally expensive
993
994 inline size_t GetFortranOffset(const std::vector<size_t> &indices) const {
995 assert(indices.size() == dims.size());
996
997 size_t result = 0;
998
999 for (long long int i = dims.size() - 1; i >= 0; --i) {
1000 assert(indices[i] < dims[i]);
1001
1002 result = result * dims[i] + indices[i];
1003 }
1004
1005 return result;
1006 }
1007
1008 inline std::vector<size_t> IndexFromFortranOffset(size_t offset) const {
1009 std::vector<size_t> indices(dims.size(), 0);
1010
1011 for (size_t i = 0; i < dims.size(); ++i) {
1012 indices[i] = offset % dims[i];
1013 offset /= dims[i];
1014 if (0 == offset)
1015 break;
1016 }
1017
1018 return indices;
1019 }
};

} // namespace Utils

#endif // _TENSOR_H_