Maestro 0.1.0
Unified interface for quantum circuit simulation
Loading...
Searching...
No Matches
Tensor.h
Go to the documentation of this file.
1
14#pragma once
15
16#ifndef _TENSOR_H_
17
#include <cassert>
#include <complex>
#include <initializer_list>
#include <numeric>
#include <type_traits>
#include <unordered_set>
#include <valarray>
#include <vector>

#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <boost/serialization/valarray.hpp>
#include <boost/serialization/vector.hpp>

#include "QubitRegisterCalculator.h"
31
32namespace Utils {
33
34// WARNING: Not all functions are tested, some of them are tested only
35// superficially, use with caution! Some were tested before switching to fortran
36// layout, so they might not work properly anymore! Accessing values and
37// contractions are working since they are used in circuit cutting and in the
38// tensor networks simulator.
39template <class T = std::complex<double>, class Storage = std::valarray<T>>
40class Tensor {
41 protected:
42 Storage values;
43 std::vector<size_t> dims;
44 mutable size_t sz;
45
46 constexpr static size_t OmpLimit = 1024;
47 constexpr static int divSchedule = 2;
48
49 static int GetNumberOfThreads() {
50 return QC::QubitRegisterCalculator<>::GetNumberOfThreads();
51 }
52
53 public:
55
56 Tensor() : sz(0) {}
57
58 // a dummy tensor should be used only for incrementing indices, not for
59 // storing values!
60 Tensor(const std::vector<size_t> &dims, bool dummy = false)
61 : dims(dims), sz(0) {
62 if (!dummy) {
63 if constexpr (std::is_convertible<int, T>::value)
64 values.resize(GetSize(), static_cast<T>(0));
65 else
66 values.resize(GetSize());
67 }
68 }
69
70 Tensor(const std::vector<int> &dims, bool dummy = false)
71 : dims(dims.cbegin(), dims.cend()), sz(0) {
72 if (!dummy) {
73 if constexpr (std::is_convertible<int, T>::value)
74 values.resize(GetSize(), static_cast<T>(0));
75 else
76 values.resize(GetSize());
77 }
78 }
79
80 template <class indT = size_t>
81 Tensor(std::initializer_list<indT> dims, bool dummy = false)
82 : dims(dims.begin(), dims.end()), sz(0) {
83 if (!dummy) {
84 if constexpr (std::is_convertible<int, T>::value)
85 values.resize(GetSize(), static_cast<T>(0));
86 else
87 values.resize(GetSize());
88 }
89 }
90
92 : values(other.values), dims(other.dims), sz(other.sz) {}
93
94 Tensor(Tensor<T, Storage> &&other) noexcept {
95 dims.swap(other.dims);
96 values.swap(other.values);
97 std::swap(sz, other.sz);
98 }
99
100 virtual ~Tensor() = default;
101
102 template <class Archive>
103 void serialize(Archive &ar, const unsigned int /*version*/) {
104 if (typename Archive::is_loading()) values.clear();
105
106 ar &dims &sz;
107 }
108
110 Tensor temp(other);
111 *this = std::move(temp);
112
113 return *this;
114 }
115
117 if (this != &other) {
118 dims.swap(other.dims);
119 values.swap(other.values);
120 std::swap(sz, other.sz);
121 }
122
123 return *this;
124 }
125
127 dims.swap(other.dims);
128 values.swap(other.values);
129 std::swap(sz, other.sz);
130 }
131
132 size_t GetSize() const {
133 if (!sz)
134 sz = std::accumulate(dims.begin(), dims.end(), 1,
135 std::multiplies<size_t>());
136
137 return sz;
138 }
139
140 size_t GetDim(size_t index) const {
141 assert(index < dims.size());
142
143 return dims[index];
144 }
145
146 const std::vector<size_t> &GetDims() const { return dims; }
147
148 const size_t GetRank() const { return dims.size(); }
149
150 inline bool IsDummy() const { return values.size() == 0; }
151
152 inline bool IncrementIndex(std::vector<size_t> &indices) const {
153 long long pos = dims.size() - 1;
154
155 while (pos >= 0) {
156 if (indices[pos] < dims[pos] - 1) {
157 ++indices[pos];
158 break;
159 } else {
160 indices[pos] = 0;
161 --pos;
162 }
163 }
164
165 return pos >= 0;
166 }
167
168 inline bool IncrementIndexSkip(std::vector<size_t> &indices, int skip) const {
169 long long pos = dims.size() - 1;
170
171 while (pos >= 0) {
172 const bool notOnPos = pos != skip;
173 if (notOnPos && indices[pos] < dims[pos] - 1) {
174 ++indices[pos];
175 break;
176 } else {
177 if (notOnPos) indices[pos] = 0;
178 --pos;
179 }
180 }
181
182 return pos >= 0;
183 }
184
185 inline bool IncrementIndexSkip(std::vector<size_t> &indices,
186 const std::vector<int> &skipv) const {
187 long long int pos = dims.size() - 1;
188
189 int skip = -1;
190 int skipPos = -1;
191 if (!skipv.empty()) {
192 skip = skipv.back();
193 skipPos = static_cast<int>(skipv.size()) - 1;
194 }
195
196 while (pos >= 0) {
197 const bool notOnPos = pos != skip;
198 if (notOnPos && indices[pos] < dims[pos] - 1) {
199 ++indices[pos];
200 break;
201 } else {
202 if (notOnPos) indices[pos] = 0;
203 --pos;
204
205 if (pos < skip) {
206 --skipPos;
207 if (skipPos >= 0)
208 skip = skipv[skipPos];
209 else
210 skip = -1;
211 }
212 }
213 }
214
215 return pos >= 0;
216 }
217
218 void Clear() {
219 dims.clear();
220 ClearValues();
221 }
222
223 void ClearValues() {
224 sz = 0;
225 values.resize(0);
226 }
227
228 bool Empty() const { return dims.empty(); }
229
230 void Resize(const std::vector<size_t> &newdims) {
231 Clear();
232
233 dims = newdims;
234 sz = 0;
235 values.resize(GetSize());
236 }
237
238 template <class indT = size_t>
239 void Resize(std::initializer_list<indT> newdims) {
240 Clear();
241
242 dims = newdims;
243 sz = 0;
244 values.resize(GetSize());
245 }
246
247 const T at(const std::vector<size_t> &indices) const {
248 assert(indices.size() == dims.size());
249
250 return values[GetOffset(indices)];
251 }
252
253 inline T &operator[](const std::vector<size_t> &indices) {
254 assert(indices.size() == dims.size());
255
256 return values[GetOffset(indices)];
257 }
258
259 inline const T &operator[](const std::vector<size_t> &indices) const {
260 return values[GetOffset(indices)];
261 }
262
263 T atOffset(size_t offset) {
264 assert(values.size() > offset);
265
266 return values[offset];
267 }
268
269 inline T &operator[](size_t offset) {
270 assert(values.size() > offset);
271
272 return values[offset];
273 }
274
275 inline const T &operator[](size_t offset) const { return values[offset]; }
276
277 template <class indT = size_t>
278 T &operator[](std::initializer_list<indT> args) {
279 assert(args.size() == dims.size());
280
281 const std::vector<size_t> indices(args.begin(), args.end());
282
283 return values[GetOffset(indices)];
284 }
285
286 template <class indT = size_t>
287 const T &operator[](std::initializer_list<indT> args) const {
288 assert(args.size() == dims.size());
289
290 const std::vector<size_t> indices(args.begin(), args.end());
291
292 return values[GetOffset(indices)];
293 }
294
295 inline T &operator()(const std::vector<size_t> &indices) {
296 assert(indices.size() == dims.size());
297
298 return values[GetOffset(indices)];
299 }
300
301 inline const T &operator()(const std::vector<size_t> &indices) const {
302 return values[GetOffset(indices)];
303 }
304
305 template <class indT = size_t>
306 T &operator()(std::initializer_list<indT> args) {
307 assert(args.size() == dims.size());
308
309 const std::vector<size_t> indices(args.begin(), args.end());
310
311 return values[GetOffset(indices)];
312 }
313
314 template <class indT = size_t>
315 const T &operator()(std::initializer_list<indT> args) const {
316 assert(args.size() == dims.size());
317
318 const std::vector<size_t> indices(args.begin(), args.end());
319
320 return values[GetOffset(indices)];
321 }
322
323 inline const Storage &GetValues() const { return values; }
324
325 inline Storage &GetValues() { return values; }
326
328 assert(dims == other.dims);
329
330 Tensor<T, Storage> result(dims);
331
332 for (size_t i = 0; i < GetSize(); ++i)
333 result.values[i] = values[i] + other.values[i];
334
335 return result;
336 }
337
339 assert(dims == other.dims);
340
341 Tensor<T, Storage> result(dims);
342
343 for (size_t i = 0; i < GetSize(); ++i)
344 result.values[i] = values[i] - other.values[i];
345
346 return result;
347 }
348
350 assert(dims == other.dims);
351
352 Tensor<T, Storage> result(dims);
353
354 for (size_t i = 0; i < GetSize(); ++i)
355 result.values[i] = values[i] * other.values[i];
356
357 return result;
358 }
359
361 assert(dims == other.dims);
362
363 Tensor<T, Storage> result(dims);
364
365 for (size_t i = 0; i < GetSize(); ++i)
366 result.values[i] = values[i] / other.values[i];
367
368 return result;
369 }
370
371 Tensor<T, Storage> operator+(const T &scalar) const {
372 Tensor<T, Storage> result(dims);
373
374 for (size_t i = 0; i < GetSize(); ++i)
375 result.values[i] = values[i] + scalar;
376
377 return result;
378 }
379
380 Tensor<T, Storage> operator-(const T &scalar) const {
381 Tensor<T, Storage> result(dims);
382
383 for (size_t i = 0; i < GetSize(); ++i)
384 result.values[i] = values[i] - scalar;
385
386 return result;
387 }
388
389 Tensor<T, Storage> operator*(const T &scalar) const {
390 Tensor<T, Storage> result(dims);
391
392 for (size_t i = 0; i < GetSize(); ++i)
393 result.values[i] = values[i] * scalar;
394
395 return result;
396 }
397
398 Tensor<T, Storage> operator/(const T &scalar) const {
399 Tensor<T, Storage> result(dims);
400
401 for (size_t i = 0; i < GetSize(); ++i)
402 result.values[i] = values[i] / scalar;
403
404 return result;
405 }
406
408 assert(dims == other.dims);
409
410 for (size_t i = 0; i < GetSize(); ++i) values[i] += other.values[i];
411
412 return *this;
413 }
414
416 assert(dims == other.dims);
417
418 for (size_t i = 0; i < GetSize(); ++i) values[i] -= other.values[i];
419
420 return *this;
421 }
422
424 assert(dims == other.dims);
425
426 for (size_t i = 0; i < GetSize(); ++i) values[i] *= other.values[i];
427
428 return *this;
429 }
430
432 assert(dims == other.dims);
433
434 for (size_t i = 0; i < GetSize(); ++i) values[i] /= other.values[i];
435
436 return *this;
437 }
438
439 Tensor<T, Storage> &operator+=(const T &scalar) {
440 for (size_t i = 0; i < GetSize(); ++i) values[i] += scalar;
441
442 return *this;
443 }
444
445 Tensor<T, Storage> &operator-=(const T &scalar) {
446 for (size_t i = 0; i < GetSize(); ++i) values[i] -= scalar;
447
448 return *this;
449 }
450
451 Tensor<T, Storage> &operator*=(const T &scalar) {
452 for (size_t i = 0; i < GetSize(); ++i) values[i] *= scalar;
453
454 return *this;
455 }
456
457 Tensor<T, Storage> &operator/=(const T &scalar) {
458 for (size_t i = 0; i < GetSize(); ++i) values[i] /= scalar;
459
460 return *this;
461 }
462
463 // the following four are for the special case then each value is a map of
464 // results (for cutting)
465 Tensor<T, Storage> &operator+=(const double scalar) {
466 for (size_t i = 0; i < GetSize(); ++i) values[i] = values[i] + scalar;
467
468 return *this;
469 }
470
471 Tensor<T, Storage> &operator-=(const double scalar) {
472 for (size_t i = 0; i < GetSize(); ++i) values[i] = values[i] - scalar;
473
474 return *this;
475 }
476
477 Tensor<T, Storage> &operator*=(const double scalar) {
478 for (size_t i = 0; i < GetSize(); ++i) values[i] = scalar * values[i];
479
480 return *this;
481 }
482
483 Tensor<T, Storage> &operator/=(const double scalar) {
484 for (size_t i = 0; i < GetSize(); ++i) values[i] = values[i] / scalar;
485
486 return *this;
487 }
488
490 Tensor<T, Storage> result(dims);
491
492 for (size_t i = 0; i < GetSize(); ++i) result.values[i] = -values[i];
493
494 return result;
495 }
496
497 void Conj() {
498 for (size_t i = 0; i < GetSize(); ++i) values[i] = std::conj(values[i]);
499 }
500
502 std::vector<size_t> newdims(dims.size() + other.dims.size());
503
504 for (size_t i = 0; i < dims.size(); ++i) newdims[i] = dims[i];
505
506 for (size_t i = 0; i < other.dims.size(); ++i)
507 newdims[dims.size() + i] = other.dims[i];
508
509 Tensor<T, Storage> result(newdims, values.size() == 0);
510
511 if (!IsDummy())
512 for (size_t i = 0; i < GetSize(); ++i)
513 for (size_t j = 0; j < other.GetSize(); ++j)
514 result.values[i * other.GetSize() + j] = values[i] * other.values[j];
515
516 return result;
517 }
518
520 size_t ind2,
521 bool allowMultithreading = true) const {
522 assert(dims[ind1] == other.dims[ind2]);
523
524 std::vector<size_t> newdims;
525
526 const size_t newsize = dims.size() + other.dims.size() - 2;
527 if (newsize == 0)
528 newdims.resize(1, 1);
529 else {
530 newdims.reserve(newsize);
531
532 for (size_t i = 0; i < dims.size(); ++i)
533 if (i != ind1) newdims.push_back(dims[i]);
534
535 for (size_t i = 0; i < other.dims.size(); ++i)
536 if (i != ind2) newdims.push_back(other.dims[i]);
537 }
538
539 Tensor<T, Storage> result(newdims, IsDummy());
540
541 if (!IsDummy()) {
542 const size_t sz = result.GetSize();
543
544 if (!allowMultithreading || sz < OmpLimit) {
545 std::vector<size_t> indices1(dims.size());
546 std::vector<size_t> indices2(other.dims.size());
547
548 for (size_t offset = 0; offset < sz; ++offset) {
549 const std::vector<size_t> indicesres = result.IndexFromOffset(offset);
550
551 size_t pos = 0;
552 for (size_t i = 0; i < dims.size(); ++i)
553 if (i != ind1) {
554 indices1[i] = indicesres[pos];
555 ++pos;
556 }
557
558 for (size_t i = 0; i < other.dims.size(); ++i)
559 if (i != ind2) {
560 indices2[i] = indicesres[pos];
561 ++pos;
562 }
563
564 // contracting more than one index would require creating a dummy
565 // tensor to iterate over all the indices that are contracted
566 for (size_t i = 0; i < dims[ind1]; ++i) {
567 indices1[ind1] = indices2[ind2] = i;
568
569 result[offset] =
570 result[offset] + values[GetOffset(indices1)] * other[indices2];
571 }
572 }
573 } else {
574 const auto processor_count = GetNumberOfThreads();
575
576#pragma omp parallel for num_threads(processor_count) \
577 schedule(static, OmpLimit / divSchedule)
578 for (long long int offset = 0; offset < static_cast<long long int>(sz);
579 ++offset) {
580 const std::vector<size_t> indicesres = result.IndexFromOffset(offset);
581 std::vector<size_t> indices1(dims.size());
582 std::vector<size_t> indices2(other.dims.size());
583
584 size_t pos = 0;
585 for (size_t i = 0; i < dims.size(); ++i)
586 if (i != ind1) {
587 indices1[i] = indicesres[pos];
588 ++pos;
589 }
590
591 for (size_t i = 0; i < other.dims.size(); ++i)
592 if (i != ind2) {
593 indices2[i] = indicesres[pos];
594 ++pos;
595 }
596
597 // contracting more than one index would require creating a dummy
598 // tensor to iterate over all the indices that are contracted
599 for (size_t i = 0; i < dims[ind1]; ++i) {
600 indices1[ind1] = indices2[ind2] = i;
601
602 result[offset] =
603 result[offset] + values[GetOffset(indices1)] * other[indices2];
604 }
605 }
606 }
607 }
608
609 return result;
610 }
611
613 const Tensor<T, Storage> &other,
614 const std::vector<std::pair<size_t, size_t>> &indices,
615 bool allowMultithreading = true) const {
616 std::vector<size_t> newdims;
617 std::vector<size_t> contractDims;
618
619 std::unordered_set<size_t> indicesSet1;
620 std::unordered_set<size_t> indicesSet2;
621
622 for (const auto &index : indices) {
623 assert(dims[index.first] == other.dims[index.second]);
624
625 indicesSet1.insert(index.first);
626 indicesSet2.insert(index.second);
627 contractDims.push_back(dims[index.first]);
628 }
629
630 const size_t newsize = dims.size() + other.dims.size() - 2 * indices.size();
631 if (newsize == 0)
632 newdims.resize(1, 1);
633 else {
634 newdims.reserve(newsize);
635
636 for (size_t i = 0; i < dims.size(); ++i)
637 if (indicesSet1.find(i) == indicesSet1.end())
638 newdims.push_back(dims[i]);
639
640 for (size_t i = 0; i < other.dims.size(); ++i)
641 if (indicesSet2.find(i) == indicesSet2.end())
642 newdims.push_back(other.dims[i]);
643 }
644
645 Tensor<T, Storage> result(newdims, IsDummy());
646
647 if (!IsDummy()) {
648 const Tensor<T, Storage> dummy(contractDims,
649 true); // used for incrementing the index
650 const size_t sz = result.GetSize();
651
652 if (!allowMultithreading || sz < OmpLimit) {
653 std::vector<size_t> dummyIndices(
654 contractDims.size(), 0); // the dummy index to be incremented - use
655 // the values to complete the real indices
656 std::vector<size_t> indices1(dims.size());
657 std::vector<size_t> indices2(other.dims.size());
658
659 for (size_t offset = 0; offset < sz; ++offset) {
660 const std::vector<size_t> indicesres = result.IndexFromOffset(offset);
661
662 size_t pos = 0;
663 for (size_t i = 0; i < dims.size(); ++i)
664 if (indicesSet1.find(i) == indicesSet1.end()) {
665 indices1[i] = indicesres[pos];
666 ++pos;
667 }
668
669 for (size_t i = 0; i < other.dims.size(); ++i)
670 if (indicesSet2.find(i) == indicesSet2.end()) {
671 indices2[i] = indicesres[pos];
672 ++pos;
673 }
674
675 // contracting more than one index requires creating a dummy tensor to
676 // iterate over all the indices that are contracted
677 do {
678 for (size_t i = 0; i < dummyIndices.size(); ++i)
679 indices1[indices[i].first] = indices2[indices[i].second] =
680 dummyIndices[i];
681
682 // trying to fix a linux compile bug
683 const T &val1 = values[GetOffset(indices1)];
684 const T &val2 = other[indices2];
685 auto mulRes = val1 * val2;
686 result[offset] = result[offset] + std::move(mulRes);
687 } while (dummy.IncrementIndex(dummyIndices));
688 }
689 } else {
690 const auto processor_count = GetNumberOfThreads();
691
692#pragma omp parallel for num_threads(processor_count) \
693 schedule(static, OmpLimit / divSchedule)
694 for (long long int offset = 0; offset < static_cast<long long int>(sz);
695 ++offset) {
696 const std::vector<size_t> indicesres = result.IndexFromOffset(offset);
697 std::vector<size_t> indices1(dims.size());
698 std::vector<size_t> indices2(other.dims.size());
699
700 size_t pos = 0;
701 for (size_t i = 0; i < dims.size(); ++i)
702 if (indicesSet1.find(i) == indicesSet1.end()) {
703 indices1[i] = indicesres[pos];
704 ++pos;
705 }
706
707 for (size_t i = 0; i < other.dims.size(); ++i)
708 if (indicesSet2.find(i) == indicesSet2.end()) {
709 indices2[i] = indicesres[pos];
710 ++pos;
711 }
712
713 // contracting more than one index requires creating a dummy tensor to
714 // iterate over all the indices that are contracted
715 std::vector<size_t> dummyIndices(
716 contractDims.size(),
717 0); // the dummy index to be incremented - use the values to
718 // complete the real indices
719
720 do {
721 for (size_t i = 0; i < dummyIndices.size(); ++i)
722 indices1[indices[i].first] = indices2[indices[i].second] =
723 dummyIndices[i];
724
725 // trying to fix a linux compile bug
726 const T &val1 = values[GetOffset(indices1)];
727 const T &val2 = other[indices2];
728
729 auto mulRes = val1 * val2;
730 result[offset] = result[offset] + std::move(mulRes);
731 } while (dummy.IncrementIndex(dummyIndices));
732 }
733 }
734 }
735
736 return result;
737 }
738
739 T Trace() const {
740 assert(dims.size() > 0);
741
742 T result = 0;
743
744 if (values.size() == 0) return result;
745
746 size_t dimMin = dims[0];
747 for (size_t i = 1; i < dims.size(); ++i)
748 if (dims[i] < dimMin) dimMin = dims[i];
749
750 for (size_t i = 0; i < dimMin; ++i) {
751 const std::vector<size_t> indices(dims.size(), i);
752
753 result += values[GetOffset(indices)];
754 }
755
756 return result;
757 }
758
759 Tensor<T, Storage> Trace(size_t ind1, size_t ind2) const {
760 assert(ind1 < dims.size() && ind2 < dims.size());
761
762 std::vector<size_t> newdims;
763
764 size_t newsize = dims.size() - 2;
765
766 if (newsize == 0) {
767 newsize = 1;
768 newdims.push_back(1);
769 } else {
770 newdims.reserve(newsize);
771
772 for (size_t i = 0; i < dims.size(); ++i)
773 if (i != ind1 && i != ind2) newdims.push_back(dims[i]);
774 }
775
776 size_t dimMin = dims[ind1];
777 if (dims[ind2] < dimMin) dimMin = dims[ind2];
778
779 Tensor<T, Storage> result(newdims, values.size() == 0);
780
781 if (values.size() != 0) {
782 for (size_t offset = 0; offset < result.GetSize(); ++offset) {
783 std::vector<size_t> indices1(dims.size());
784 const std::vector<size_t> indices = result.IndexFromOffset(offset);
785
786 size_t skip = 0;
787 for (size_t i = 0; i < dims.size(); ++i)
788 if (i != ind1 && i != ind2)
789 indices1[i] = indices[i - skip];
790 else
791 ++skip;
792
793 for (size_t i = 0; i < dimMin; ++i) {
794 indices1[ind1] = indices1[ind2] = i;
795
796 result[offset] += values[GetOffset(indices1)];
797 }
798 }
799 }
800
801 return result;
802 }
803
804 Tensor<T, Storage> Trace(const std::vector<size_t> &tindices) const {
805 std::vector<size_t> newdims;
806
807 size_t newsize = dims.size() - tindices.size();
808
809 if (newsize == 0) {
810 newsize = 1;
811 newdims.push_back(1);
812 } else {
813 newdims.reserve(newsize);
814
815 for (size_t i = 0; i < dims.size(); ++i) {
816 bool skip = false;
817 for (size_t j = 0; j < tindices.size(); ++j)
818 if (i == tindices[j]) {
819 skip = true;
820 break;
821 }
822
823 if (!skip) newdims.push_back(dims[i]);
824 }
825 }
826
827 size_t dimMin = dims[tindices[0]];
828 for (size_t i = 1; i < tindices.size(); ++i)
829 if (dims[tindices[i]] < dimMin) dimMin = dims[tindices[i]];
830
831 Tensor<T, Storage> result(newdims, values.size() == 0);
832
833 if (values.size() != 0) {
834 for (size_t offset = 0; offset < result.GetSize(); ++offset) {
835 std::vector<size_t> indices1(dims.size());
836 const std::vector<size_t> indices = result.IndexFromOffset(offset);
837
838 size_t skipv = 0;
839 for (size_t i = 0; i < dims.size(); ++i) {
840 bool skip = false;
841 for (size_t j = 0; j < tindices.size(); ++j)
842 if (i == tindices[j]) {
843 skip = true;
844 break;
845 }
846
847 if (!skip)
848 indices1[i] = indices[i - skipv];
849 else
850 ++skipv;
851 }
852
853 for (size_t i = 0; i < dimMin; ++i) {
854 for (size_t j = 0; j < tindices.size(); ++j)
855 indices1[tindices[j]] = i;
856
857 result[offset] += values[GetOffset(indices1)];
858 }
859 }
860 }
861
862 return result;
863 }
864
865 template <class indT = size_t>
866 Tensor<T, Storage> Trace(std::initializer_list<indT> args) const {
867 const std::vector<size_t> indices(args.begin(), args.end());
868
869 return Trace(indices);
870 }
871
872 Tensor<T, Storage> Shuffle(const std::vector<size_t> &indices) const {
873 assert(indices.size() == dims.size());
874
875 std::vector<size_t> newdims(dims.size());
876 for (size_t i = 0; i < dims.size(); ++i) newdims[i] = dims[indices[i]];
877
878 Tensor<T, Storage> result(newdims, values.size() == 0);
879
880 if (values.size() != 0) {
881 std::vector<size_t> indices2(dims.size());
882 for (size_t offset = 0; offset < GetSize(); ++offset) {
883 const std::vector<size_t> indices1 = IndexFromOffset(offset);
884
885 for (size_t i = 0; i < dims.size(); ++i)
886 indices2[indices[i]] = indices1[i];
887
888 result[indices2] = values[offset];
889 }
890 }
891
892 return result;
893 }
894
895 template <class indT = size_t>
896 Tensor<T, Storage> Shuffle(std::initializer_list<indT> args) const {
897 const std::vector<size_t> indices(args.begin(), args.end());
898
899 return Shuffle(indices);
900 }
901
902 Tensor<T, Storage> Reshape(const std::vector<size_t> &newdims) const {
903 Tensor<T, Storage> result(newdims, values.size() == 0);
904
905 assert(result.GetSize() == GetSize());
906
907 if (values.size() != 0)
908 for (size_t offset = 0; offset < GetSize(); ++offset)
909 result[offset] = values[offset];
910
911 return result;
912 }
913
914 template <class indT = size_t>
915 Tensor<T, Storage> Reshape(std::initializer_list<indT> args) const {
916 const std::vector<size_t> newdims(args.begin(), args.end());
917
918 return Reshape(newdims);
919 }
920
921 // changing the layout format is as easy as changing what's called in the
922 // following two functions
923
924 inline size_t GetOffset(const std::vector<size_t> &indices) const {
925 return GetFortranOffset(indices);
926 }
927
928 inline std::vector<size_t> IndexFromOffset(size_t offset) const {
929 return IndexFromFortranOffset(offset);
930 }
931
932 private:
933 // C/C++ layout (row-major order)
934
935 inline size_t GetCPPOffset(const std::vector<size_t> &indices) const {
936 assert(indices.size() == dims.size());
937
938 size_t result = 0;
939
940 for (size_t i = 0; i < dims.size(); ++i) {
941 assert(indices[i] < dims[i]);
942
943 result = result * dims[i] + indices[i];
944 }
945
946 return result;
947 }
948
949 inline std::vector<size_t> IndexFromCPPOffset(size_t offset) const {
950 std::vector<size_t> indices(dims.size(), 0);
951
952 for (int i = static_cast<int>(dims.size()) - 1; i >= 0; --i) {
953 indices[i] = offset % dims[i];
954 offset /= dims[i];
955 if (0 == offset) break;
956 }
957
958 return indices;
959 }
960
961 // the following two are for fortran layout (column-major order)
962 // for passing to cuda we need probably to convert anyway (as in from
963 // complex<double> to complex<float>) the tensors will be small so converting
964 // them should not be computationally expensive
965
966 inline size_t GetFortranOffset(const std::vector<size_t> &indices) const {
967 assert(indices.size() == dims.size());
968
969 size_t result = 0;
970
971 for (long long int i = dims.size() - 1; i >= 0; --i) {
972 assert(indices[i] < dims[i]);
973
974 result = result * dims[i] + indices[i];
975 }
976
977 return result;
978 }
979
980 inline std::vector<size_t> IndexFromFortranOffset(size_t offset) const {
981 std::vector<size_t> indices(dims.size(), 0);
982
983 for (size_t i = 0; i < dims.size(); ++i) {
984 indices[i] = offset % dims[i];
985 offset /= dims[i];
986 if (0 == offset) break;
987 }
988
989 return indices;
990 }
991};
992
993} // namespace Utils
994
995#endif
void ClearValues()
Definition Tensor.h:223
Tensor< T, Storage > & operator+=(const Tensor< T, Storage > &other)
Definition Tensor.h:407
Tensor< T, Storage > operator+(const Tensor< T, Storage > &other) const
Definition Tensor.h:327
T & operator[](size_t offset)
Definition Tensor.h:269
Tensor(const std::vector< size_t > &dims, bool dummy=false)
Definition Tensor.h:60
T & operator[](std::initializer_list< indT > args)
Definition Tensor.h:278
T atOffset(size_t offset)
Definition Tensor.h:263
Tensor< T, Storage > & operator=(const Tensor< T, Storage > &other)
Definition Tensor.h:109
std::vector< size_t > dims
Definition Tensor.h:43
Tensor< T, Storage > & operator*=(const Tensor< T, Storage > &other)
Definition Tensor.h:423
T Trace() const
Definition Tensor.h:739
bool IncrementIndexSkip(std::vector< size_t > &indices, const std::vector< int > &skipv) const
Definition Tensor.h:185
Tensor< T, Storage > operator/(const Tensor< T, Storage > &other) const
Definition Tensor.h:360
Tensor< T, Storage > operator/(const T &scalar) const
Definition Tensor.h:398
static int GetNumberOfThreads()
Definition Tensor.h:49
bool IncrementIndexSkip(std::vector< size_t > &indices, int skip) const
Definition Tensor.h:168
void Resize(const std::vector< size_t > &newdims)
Definition Tensor.h:230
size_t GetSize() const
Definition Tensor.h:132
Tensor< T, Storage > & operator-=(const Tensor< T, Storage > &other)
Definition Tensor.h:415
const Storage & GetValues() const
Definition Tensor.h:323
const T & operator[](const std::vector< size_t > &indices) const
Definition Tensor.h:259
bool IncrementIndex(std::vector< size_t > &indices) const
Definition Tensor.h:152
void serialize(Archive &ar, const unsigned int)
Definition Tensor.h:103
size_t GetDim(size_t index) const
Definition Tensor.h:140
static constexpr int divSchedule
Definition Tensor.h:47
Tensor(std::initializer_list< indT > dims, bool dummy=false)
Definition Tensor.h:81
Storage values
Definition Tensor.h:42
Storage & GetValues()
Definition Tensor.h:325
Tensor< T, Storage > & operator/=(const double scalar)
Definition Tensor.h:483
Tensor< T, Storage > Trace(const std::vector< size_t > &tindices) const
Definition Tensor.h:804
static constexpr size_t OmpLimit
Definition Tensor.h:46
void Swap(Tensor< T, Storage > &other)
Definition Tensor.h:126
const T & operator[](std::initializer_list< indT > args) const
Definition Tensor.h:287
Tensor< T, Storage > & operator/=(const Tensor< T, Storage > &other)
Definition Tensor.h:431
Tensor(const std::vector< int > &dims, bool dummy=false)
Definition Tensor.h:70
Tensor< T, Storage > & operator-=(const double scalar)
Definition Tensor.h:471
Tensor< T, Storage > & operator+=(const T &scalar)
Definition Tensor.h:439
const T at(const std::vector< size_t > &indices) const
Definition Tensor.h:247
T & operator[](const std::vector< size_t > &indices)
Definition Tensor.h:253
const size_t GetRank() const
Definition Tensor.h:148
void Conj()
Definition Tensor.h:497
Tensor< T, Storage > & operator+=(const double scalar)
Definition Tensor.h:465
bool Empty() const
Definition Tensor.h:228
Tensor(const Tensor< T, Storage > &other)
Definition Tensor.h:91
Tensor< T, Storage > operator*(const Tensor< T, Storage > &other) const
Definition Tensor.h:349
virtual ~Tensor()=default
Tensor(Tensor< T, Storage > &&other) noexcept
Definition Tensor.h:94
const T & operator()(std::initializer_list< indT > args) const
Definition Tensor.h:315
const std::vector< size_t > & GetDims() const
Definition Tensor.h:146
T & operator()(std::initializer_list< indT > args)
Definition Tensor.h:306
Tensor< T, Storage > & operator*=(const T &scalar)
Definition Tensor.h:451
size_t GetOffset(const std::vector< size_t > &indices) const
Definition Tensor.h:924
Tensor< T, Storage > Reshape(const std::vector< size_t > &newdims) const
Definition Tensor.h:902
Tensor< T, Storage > & operator-=(const T &scalar)
Definition Tensor.h:445
Tensor< T, Storage > operator+(const T &scalar) const
Definition Tensor.h:371
bool IsDummy() const
Definition Tensor.h:150
const T & operator()(const std::vector< size_t > &indices) const
Definition Tensor.h:301
Tensor< T, Storage > & operator=(Tensor< T, Storage > &&other) noexcept
Definition Tensor.h:116
Tensor< T, Storage > operator*(const T &scalar) const
Definition Tensor.h:389
const T & operator[](size_t offset) const
Definition Tensor.h:275
Tensor< T, Storage > operator-(const T &scalar) const
Definition Tensor.h:380
void Clear()
Definition Tensor.h:218
Tensor< T, Storage > TensorProduct(const Tensor< T, Storage > &other) const
Definition Tensor.h:501
Tensor< T, Storage > operator-() const
Definition Tensor.h:489
Tensor< T, Storage > Reshape(std::initializer_list< indT > args) const
Definition Tensor.h:915
std::vector< size_t > IndexFromOffset(size_t offset) const
Definition Tensor.h:928
friend class boost::serialization::access
Definition Tensor.h:54
size_t sz
Definition Tensor.h:44
Tensor< T, Storage > & operator/=(const T &scalar)
Definition Tensor.h:457
Tensor< T, Storage > Shuffle(const std::vector< size_t > &indices) const
Definition Tensor.h:872
void Resize(std::initializer_list< indT > newdims)
Definition Tensor.h:239
Tensor< T, Storage > Trace(size_t ind1, size_t ind2) const
Definition Tensor.h:759
Tensor< T, Storage > Contract(const Tensor< T, Storage > &other, const std::vector< std::pair< size_t, size_t > > &indices, bool allowMultithreading=true) const
Definition Tensor.h:612
Tensor< T, Storage > Trace(std::initializer_list< indT > args) const
Definition Tensor.h:866
Tensor< T, Storage > Contract(const Tensor< T, Storage > &other, size_t ind1, size_t ind2, bool allowMultithreading=true) const
Definition Tensor.h:519
Tensor< T, Storage > & operator*=(const double scalar)
Definition Tensor.h:477
Tensor< T, Storage > Shuffle(std::initializer_list< indT > args) const
Definition Tensor.h:896
Tensor< T, Storage > operator-(const Tensor< T, Storage > &other) const
Definition Tensor.h:338
T & operator()(const std::vector< size_t > &indices)
Definition Tensor.h:295
Definition Alias.h:20