Tensor.hpp
// This file is part of GQCG-GQCP.
//
// Copyright (C) 2017-2020 the GQCG developers
//
// GQCG-GQCP is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// GQCG-GQCP is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with GQCG-GQCP. If not, see <http://www.gnu.org/licenses/>.

#pragma once


#include "Mathematical/Representation/Matrix.hpp"
#include "Utilities/type_traits.hpp"

#include <boost/algorithm/string.hpp>

#include <unsupported/Eigen/CXX11/Tensor>

#include <algorithm>
#include <cmath>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>


namespace GQCP {


/**
 *  An extension of Eigen's Tensor class, with extra named constructors, conversions and tensor operations.
 *
 *  @tparam _Scalar         The scalar type of the tensor's elements.
 *  @tparam _Rank           The rank of the tensor, i.e. its number of axes.
 */
template <typename _Scalar, int _Rank>
class Tensor:
    public Eigen::Tensor<_Scalar, _Rank> {

public:
    // The scalar type of the tensor's elements.
    using Scalar = _Scalar;

    // The rank of the tensor, i.e. the number of axes.
    static constexpr auto Rank = _Rank;

    // The type of 'this'.
    using Self = Tensor<Scalar, Rank>;

    // The Eigen base type.
    using Base = Eigen::Tensor<Scalar, Rank>;


public:
    /*
     *  MARK: Constructors
     */

    // Inherit `Eigen::Tensor`'s constructors.
    using Eigen::Tensor<Scalar, _Rank>::Tensor;


    /*
     *  MARK: Named constructors
     */

    /**
     *  Create a tensor from a block of another tensor.
     *
     *  @param T                The original tensor.
     *  @param i, j, k, l       The indices (one per axis) at which the block starts.
     *  @param cutoff           The number of indices that is cut off at the end of every axis.
     *
     *  @return The block T(i:, j:, k:, l:), with `cutoff` trailing indices left out of every axis.
     */
    template <int Z = Rank>
    static enable_if_t<Z == 4, Self> FromBlock(const Self& T, const size_t i, const size_t j, const size_t k, const size_t l, const size_t cutoff = 0) {

        Self T_result {static_cast<long>(T.dimension(0) - i - cutoff),
                       static_cast<long>(T.dimension(1) - j - cutoff),
                       static_cast<long>(T.dimension(2) - k - cutoff),
                       static_cast<long>(T.dimension(3) - l - cutoff)};
        T_result.setZero();

        for (size_t p = 0; p < T_result.dimension(0); p++) {
            for (size_t q = 0; q < T_result.dimension(1); q++) {
                for (size_t r = 0; r < T_result.dimension(2); r++) {
                    for (size_t s = 0; s < T_result.dimension(3); s++) {
                        T_result(p, q, r, s) = T(i + p, j + q, k + r, l + s);
                    }
                }
            }
        }

        return T_result;
    }
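
    // A usage sketch (illustrative only; the tensor `g` below is hypothetical):
    //
    //      GQCP::Tensor<double, 4> g {4, 4, 4, 4};
    //      g.setZero();
    //
    //      // Copy the block that starts at index 1 on the first two axes and runs to the end of every axis,
    //      // i.e. a (3, 3, 4, 4)-dimensional tensor with block(p, q, r, s) == g(1 + p, 1 + q, r, s).
    //      const auto block = GQCP::Tensor<double, 4>::FromBlock(g, 1, 1, 0, 0);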


    /*
     *  MARK: Conversions
     */

    /**
     *  @return This tensor as a read-only reference to its Eigen base type.
     */
    const Base& Eigen() const { return static_cast<const Base&>(*this); }

    /**
     *  @return This tensor as a writable reference to its Eigen base type.
     */
    Base& Eigen() { return static_cast<Base&>(*this); }


    /*
     *  MARK: Tensor operations
     */

    /**
     *  Add a matrix to this rank-4 tensor, as a block that spans two of its axes.
     *
     *  @tparam r, s            The tensor axes over which the matrix' rows and columns run, respectively.
     *
     *  @param M                The matrix that should be added.
     *  @param i, j, k, l       The indices (one per axis) at which the block starts.
     *
     *  @return A reference to this tensor, with the matrix added as a block.
     */
    template <size_t r, size_t s, int Z = Rank>
    enable_if_t<Z == 4, Self&> addBlock(const MatrixX<Scalar>& M, const size_t i, const size_t j, const size_t k, const size_t l) {

        // Initialize 0/1 indicator arrays, so that only the tensor axes selected by the template arguments `r` and `s` follow the matrix' row and column indices.
        size_t ia[4] = {1, 0, 0, 0};
        size_t ja[4] = {0, 1, 0, 0};
        size_t ka[4] = {0, 0, 1, 0};
        size_t la[4] = {0, 0, 0, 1};

        for (size_t x = 0; x < M.rows(); x++) {
            for (size_t y = 0; y < M.cols(); y++) {

                size_t i_effective = i + x * ia[r] + y * ia[s];
                size_t j_effective = j + x * ja[r] + y * ja[s];
                size_t k_effective = k + x * ka[r] + y * ka[s];
                size_t l_effective = l + x * la[r] + y * la[s];

                this->operator()(i_effective, j_effective, k_effective, l_effective) += M(x, y);
            }
        }

        return (*this);
    }
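
    // A usage sketch (illustrative only; `g` and `h` below are hypothetical):
    //
    //      GQCP::Tensor<double, 4> g {3, 3, 3, 3};
    //      g.setZero();
    //
    //      GQCP::MatrixX<double> h {2, 2};
    //      h.setIdentity();
    //
    //      // Let the matrix' rows run over the first tensor axis (r = 0) and its columns over the second
    //      // tensor axis (s = 1), starting at the block offset (1, 1, 0, 0):
    //      // this adds h(x, y) to g(1 + x, 1 + y, 0, 0).
    //      g.addBlock<0, 1>(h, 1, 1, 0, 0);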


    /**
     *  Add a rank-4 tensor to this rank-4 tensor, as a block.
     *
     *  @param T                The tensor that should be added.
     *  @param i, j, k, l       The indices (one per axis) at which the block starts.
     *
     *  @return A reference to this tensor, with the given tensor added as a block.
     */
    template <int Z = Rank>
    enable_if_t<Z == 4, Self&> addBlock(const Self& T, const size_t i, const size_t j, const size_t k, const size_t l) {

        for (size_t p = 0; p < T.dimension(0); p++) {
            for (size_t q = 0; q < T.dimension(1); q++) {
                for (size_t r = 0; r < T.dimension(2); r++) {
                    for (size_t s = 0; s < T.dimension(3); s++) {
                        this->operator()(i + p, j + q, k + r, l + s) += T(p, q, r, s);
                    }
                }
            }
        }

        return (*this);
    }


    /**
     *  Convert this rank-4 tensor into a matrix by pairing its axes: the first two axes are mapped onto a compound row index and the last two onto a compound column index, both in column-major order.
     *
     *  @param start_i, start_j, start_k, start_l       The indices (one per axis) at which the reduction starts.
     *
     *  @return A matrix that contains this tensor's elements, addressed by compound indices.
     */
    template <int Z = Rank>
    enable_if_t<Z == 4, Matrix<Scalar>> pairWiseReduced(const size_t start_i = 0, const size_t start_j = 0, const size_t start_k = 0, const size_t start_l = 0) const {

        // Initialize the resulting matrix.
        const auto dims = this->dimensions();
        Matrix<Scalar> M {(dims[0] - start_i) * (dims[1] - start_j),
                          (dims[2] - start_k) * (dims[3] - start_l)};

        // Calculate the compound indices and bring the elements from the tensor over into the matrix.
        size_t row_index = 0;
        for (size_t j = start_j; j < dims[1]; j++) {      // "Column-major" ordering for row_index <- (i, j), so we loop over j first, then i.
            for (size_t i = start_i; i < dims[0]; i++) {  // In column-major ordering, the first of the two indices changes most rapidly.

                size_t column_index = 0;
                for (size_t l = start_l; l < dims[3]; l++) {      // "Column-major" ordering for column_index <- (k, l), so we loop over l first, then k.
                    for (size_t k = start_k; k < dims[2]; k++) {  // In column-major ordering, the first of the two indices changes most rapidly.

                        M(row_index, column_index) = this->operator()(i, j, k, l);

                        column_index++;
                    }
                }

                row_index++;
            }
        }

        return M;
    }
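
    // For a tensor with dimensions (d0, d1, d2, d3) and zero starting indices, element (i, j, k, l) therefore
    // ends up at row (i + d0 * j) and column (k + d2 * l) of a (d0 * d1) x (d2 * d3) matrix.
    // A usage sketch (illustrative only; `g` below is hypothetical):
    //
    //      GQCP::Tensor<double, 4> g {2, 2, 2, 2};
    //      g.setRandom();
    //
    //      const auto M = g.pairWiseReduced();  // a 4 x 4 matrix with M(i + 2 * j, k + 2 * l) == g(i, j, k, l)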


    /**
     *  Contract this tensor with another tensor, specifying the axis labels explicitly (einsum-style).
     *
     *  @tparam N                   The number of axes that are contracted over.
     *
     *  @param rhs                  The right-hand side tensor of the contraction.
     *  @param lhs_labels           The labels of this tensor's axes, e.g. "ijkl".
     *  @param rhs_labels           The labels of the right-hand side tensor's axes, e.g. "jl".
     *  @param output_labels        The labels of the resulting tensor's axes, e.g. "ik"; empty when all axes are contracted over.
     *
     *  @return The contraction of this tensor with the given right-hand side tensor.
     */
    template <int N, int LHSRank = Rank, int RHSRank>
    Tensor<Scalar, LHSRank + RHSRank - 2 * N> einsum(const Tensor<Scalar, RHSRank>& rhs, const std::string& lhs_labels, const std::string& rhs_labels, const std::string& output_labels) const {

        // Check the length of the indices.
        constexpr auto ResultRank = LHSRank + RHSRank - 2 * N;

        if (lhs_labels.size() != LHSRank) {
            throw std::invalid_argument("Tensor.einsum(const Tensor<Scalar, RHSRank>&, const std::string&, const std::string&, const std::string&): The number of indices for the left-hand side of the contraction does not match the rank of the left-hand side tensor.");
        }

        if (rhs_labels.size() != RHSRank) {
            throw std::invalid_argument("Tensor.einsum(const Tensor<Scalar, RHSRank>&, const std::string&, const std::string&, const std::string&): The number of indices for the right-hand side of the contraction does not match the rank of the right-hand side tensor.");
        }

        if (output_labels.size() != ResultRank) {
            throw std::invalid_argument("Tensor.einsum(const Tensor<Scalar, RHSRank>&, const std::string&, const std::string&, const std::string&): The number of output indices does not match the rank of the resulting tensor.");
        }


        // Find the indices that should be contracted over; these are the positions of the axis labels that appear in both the lhs and the rhs labels.
        Eigen::array<Eigen::IndexPair<int>, N> contraction_pairs {};
        size_t array_position = 0;  // The index at which an `Eigen::IndexPair` should be placed.
        for (size_t i = 0; i < LHSRank; i++) {  // 'i' loops over the lhs indices.
            const auto current_label = lhs_labels[i];
            const auto match = rhs_labels.find(current_label);
            if (match != std::string::npos) {  // An actual match was found.
                contraction_pairs[array_position] = Eigen::IndexPair<int>(i, match);
                array_position++;
            }
        }

        // Perform the contraction. It is an intermediate result, because we still have to align the tensor axes that Eigen's contraction module produces with the axes the user requested.
        Tensor<Scalar, ResultRank> T_intermediate = this->contract(rhs.Eigen(), contraction_pairs);

        // Finally, we should find the shuffle indices that map the obtained intermediate axes to the requested output axes.
        // The intermediate axes are the lhs and rhs labels concatenated from left to right, with the contracted (duplicate) labels removed.
        auto intermediate_indices = lhs_labels + rhs_labels;

        for (size_t i = 1; i < intermediate_indices.size(); i++) {  // The first iteration can be skipped, since there cannot be any duplicates yet.

            // Check if the current index label appears in the substring before it.
            const auto current_label = intermediate_indices[i];
            const auto match = intermediate_indices.substr(0, i).find(current_label);

            if (match != std::string::npos) {  // An actual match was found.
                intermediate_indices.erase(match, 1);
                intermediate_indices.erase(i - 1, 1);  // i-1 is used because the length of `intermediate_indices` has changed after the first erasure.

                // We have to set back the current index to account for the removed characters.
                i -= 2;
            }
        }

        // Eigen3 does not document its tensor contraction clearly, so see the accepted answer on Stack Overflow (https://stackoverflow.com/a/47558349/7930415):
        // Eigen3 does not offer a way to specify the output axes; instead, it retains the left-to-right order of the axes that survive the contraction.
        // This means that, in order to get the right ordering of the axes, we will have to shuffle them ourselves.

        // Find the position of the intermediate axes' labels in the requested output labels in order to set up the required shuffle indices.
        // This is only necessary when not contracting over all axes, i.e. when the string of output labels is not empty.
        if (output_labels != "") {
            Eigen::array<int, ResultRank> shuffle_indices {};

            for (size_t i = 0; i < ResultRank; i++) {
                const auto current_label = intermediate_indices[i];
                shuffle_indices[i] = output_labels.find(current_label);
            }

            return T_intermediate.shuffle(shuffle_indices);
        }

        return T_intermediate;
    }
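
    // A usage sketch (illustrative only; `g` and `d` below are hypothetical rank-4 and rank-2 tensors):
    //
    //      GQCP::Tensor<double, 4> g {2, 2, 2, 2};
    //      GQCP::Tensor<double, 2> d {2, 2};
    //      g.setRandom();
    //      d.setRandom();
    //
    //      // Contract the second and fourth axes of `g` with the two axes of `d` (N = 2 contracted axes),
    //      // and order the surviving axes as 'i', then 'k'.
    //      const GQCP::Tensor<double, 2> result = g.einsum<2>(d, "ijkl", "jl", "ik");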


    /**
     *  Contract this tensor with another tensor, using an einsum-style contraction string, e.g. "ijkl,jl->ik".
     *
     *  @tparam N                       The number of axes that are contracted over.
     *
     *  @param contraction_string       The contraction string: the lhs and rhs axis labels separated by a comma, followed by "->" and the output axis labels.
     *  @param rhs                      The right-hand side tensor of the contraction.
     *
     *  @return The contraction of this tensor with the given right-hand side tensor.
     */
    template <int N, int LHSRank = Rank, int RHSRank>
    Tensor<Scalar, LHSRank + RHSRank - 2 * N> einsum(std::string contraction_string, const Tensor<Scalar, RHSRank>& rhs) const {
        // Remove unnecessary symbols from the contraction string.
        boost::erase_all(contraction_string, " ");  // Remove all spaces.
        boost::erase_all(contraction_string, ">");
        boost::replace_all(contraction_string, "-", " ");
        boost::replace_all(contraction_string, ",", " ");

        // Split the contraction string into its components: three in most cases (lhs labels, rhs labels and output labels).
        std::vector<std::string> segment_list;
        boost::split(segment_list, contraction_string, boost::is_any_of(" "));

        // If a contraction over all indices is done, only two sets of labels end up in the vector. The last set of labels, an empty string, is then added manually.
        if (segment_list.size() == 2) {
            segment_list.push_back("");
        }

        // The following code can be used to calculate the template parameter `N` at runtime. At the moment, however, it is not used.
        // Determine the number of axes to contract over:
        //
        // std::vector<char> segment_1(segment_list[0].begin(), segment_list[0].end());
        // std::vector<char> segment_2(segment_list[1].begin(), segment_list[1].end());
        // std::vector<char> intersection;
        //
        // std::set_intersection(segment_1.begin(), segment_1.end(),
        //                       segment_2.begin(), segment_2.end(), std::back_inserter(intersection));
        //
        // const int Z = intersection.size();

        return this->einsum<N>(rhs, segment_list[0], segment_list[1], segment_list[2]);
    }
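
    // A usage sketch (illustrative only; `g` and `d` are the hypothetical tensors from the sketch above).
    // This call is equivalent to the explicitly labeled one; note that `N` (the number of contracted axes,
    // here 2) must still be supplied as a template argument, since it determines the result's rank at compile time:
    //
    //      const GQCP::Tensor<double, 2> result = g.einsum<2>("ijkl,jl->ik", d);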


    /**
     *  Contract this tensor with a matrix, by interpreting the matrix as its equivalent rank-2 tensor.
     *
     *  @tparam N                       The number of axes that are contracted over.
     *
     *  @param contraction_string       The contraction string (e.g. "ijkl,jl->ik").
     *  @param rhs                      The right-hand side matrix of the contraction.
     *
     *  @return The contraction of this tensor with the given matrix.
     */
    template <int N, int LHSRank = Rank>
    Tensor<Scalar, LHSRank + 2 - 2 * N> einsum(std::string contraction_string, const Matrix<Scalar> rhs) const {
        // Convert the given `Matrix` to its equivalent rank-2 `Tensor`.
        const auto tensor_from_matrix = Tensor<Scalar, 2>(Eigen::TensorMap<Eigen::Tensor<const Scalar, 2>>(rhs.data(), rhs.rows(), rhs.cols()));
        return this->einsum<N>(contraction_string, tensor_from_matrix);
    }
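
    // A usage sketch (illustrative only; `g` is the hypothetical rank-4 tensor from above and `h` a hypothetical matrix):
    //
    //      GQCP::MatrixX<double> h {2, 2};
    //      h.setRandom();
    //
    //      const GQCP::Tensor<double, 2> result = g.einsum<2>("ijkl,jl->ik", h);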


    /**
     *  @return This rank-2 tensor as an equivalent matrix with the same dimensions.
     */
    template <int Z = Rank>
    const enable_if_t<Z == 2, GQCP::Matrix<Scalar>> asMatrix() const {

        const auto rows = this->dimension(0);
        const auto cols = this->dimension(1);

        return GQCP::Matrix<Scalar>(Eigen::Map<const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>>(this->data(), rows, cols));
    }

    /**
     *  Reshape this rank-4 tensor into a matrix with the given dimensions.
     *
     *  @param rows         The number of rows of the resulting matrix.
     *  @param cols         The number of columns of the resulting matrix.
     *
     *  @return A matrix that contains this tensor's elements, filled in row-major order (the last tensor index varies fastest).
     */
    GQCP::Matrix<Scalar> reshape(const size_t rows, const size_t cols) const {

        // The dimensions of the tensor have to match the dimensions of the target matrix.
        if (rows * cols != this->numberOfElements()) {
            throw std::invalid_argument("Tensor.reshape(const size_t rows, const size_t cols): The tensor cannot be reshaped into a matrix with the specified dimensions.");
        }

        // Initialize a matrix with the wanted dimensions.
        GQCP::Matrix<Scalar> M {rows, cols};

        // Create an intermediate vector that will contain all the values of the tensor.
        std::vector<Scalar> vectorized;

        // Reserve the number of elements that the tensor holds.
        vectorized.reserve(rows * cols);

        // Fill the vector with the elements of the tensor.
        for (int i = 0; i < this->dimension(0); i++) {
            for (int j = 0; j < this->dimension(1); j++) {
                for (int k = 0; k < this->dimension(2); k++) {
                    for (int l = 0; l < this->dimension(3); l++) {
                        vectorized.push_back(this->operator()(i, j, k, l));
                    }
                }
            }
        }

        // Fill the matrix with the elements of the intermediate vector.
        for (int r = 0; r < rows; r++) {
            for (int c = 0; c < cols; c++) {

                // The inner loop runs over the columns, so `c` gives the position within the current row.
                // Every new row skips the (r * cols) vector elements that were used for the previous rows.
                M(r, c) = vectorized[r * cols + c];
            }
        }

        return M;
    }
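
    // A usage sketch (illustrative only; `g` below is a hypothetical (2, 2, 2, 2)-dimensional tensor).
    // Reshaping it into a 4 x 4 matrix places the elements in row-major order, i.e. the last tensor index varies fastest:
    //
    //      const auto M = g.reshape(4, 4);  // M(0, 0) == g(0, 0, 0, 0), M(0, 1) == g(0, 0, 0, 1), M(0, 2) == g(0, 0, 1, 0), ...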


    /*
     *  MARK: General information
     */

    /**
     *  @param other        The other tensor.
     *
     *  @return Whether this tensor has the same dimensions as the other tensor.
     */
    bool hasEqualDimensionsAs(const Self& other) const {

        for (size_t i = 0; i < Rank; i++) {
            if (this->dimension(i) != other.dimension(i)) {
                return false;
            }
        }

        return true;
    }


    /**
     *  @param other            The other tensor.
     *  @param tolerance        The tolerance with which the elements are compared.
     *
     *  @return Whether this tensor is approximately equal to the other tensor, i.e. whether every pair of corresponding elements differs by at most the given tolerance.
     */
    template <int Z = Rank>
    enable_if_t<Z == 4, bool> isApprox(const Self& other, const double tolerance = 1.0e-12) const {

        if (!this->hasEqualDimensionsAs(other)) {
            throw std::invalid_argument("Tensor.isApprox(const Self&, const double): The tensors have different dimensions.");
        }

        // Check every pair of values.
        for (size_t i = 0; i < this->dimension(0); i++) {
            for (size_t j = 0; j < this->dimension(1); j++) {
                for (size_t k = 0; k < this->dimension(2); k++) {
                    for (size_t l = 0; l < this->dimension(3); l++) {
                        if (std::abs(this->operator()(i, j, k, l) - other(i, j, k, l)) > tolerance) {
                            return false;
                        }
                    }
                }
            }
        }  // rank-4 tensor traversing

        return true;
    }


    /**
     *  @return The total number of elements in this rank-4 tensor.
     */
    size_t numberOfElements() const {
        return this->dimension(0) * this->dimension(1) * this->dimension(2) * this->dimension(3);
    }


    /**
     *  Print the elements of this rank-4 tensor to the given output stream, one element per line, preceded by its indices.
     *
     *  @param output_stream        The stream to which the elements should be printed.
     */
    template <int Z = Rank>
    enable_if_t<Z == 4> print(std::ostream& output_stream = std::cout) const {

        for (size_t i = 0; i < this->dimension(0); i++) {
            for (size_t j = 0; j < this->dimension(1); j++) {
                for (size_t k = 0; k < this->dimension(2); k++) {
                    for (size_t l = 0; l < this->dimension(3); l++) {
                        output_stream << i << ' ' << j << ' ' << k << ' ' << l << " " << this->operator()(i, j, k, l) << std::endl;
                    }
                }
            }
        }
    }
};


}  // namespace GQCP