我有下面的例子:
#include <iostream>
#include <numeric>

#include <Eigen/Core>
#include <unsupported/Eigen/CXX11/Tensor>
Eigen::Tensor<double, 5> test_tensor(3,3,2,1,1);
test_tensor.setValues({
{{{{1.1}},{{1.1}}},{{{0}},{{0}}},{{{0}},{{0}}}},
{{{{0}}, {{0}}}, {{{1}},{{1}}},{{{0}},{{0}}}},
{{{{0}}, {{0}}}, {{{0}},{{0}}},{{{1}},{{1}}}}
});
//use chip and slice to compute the subtensor sum
template <typename TensorType>
auto tensor_sum(const TensorType& tensor) -> typename TensorType::Scalar {
using T = typename TensorType::Scalar;
T sum = 0; //provisory
for (int i = 0; i < tensor.size(); ++i) {
sum += tensor.data()[i];
}
return sum;
}
Eigen::Tensor<double, 3> field_slice;
for (int l = 0; l < 3; ++l) {
for (int m = 0; m < 3; ++m) {
auto field_slice_ = test_tensor.chip(1, m).chip(0, l);
field_slice = field_slice_.slice(
Eigen::array<Eigen::Index, 3>({0, 0, 0}),
Eigen::array<Eigen::Index, 3>({2, 1, 1}));
std::cout << "sum slice " << l << " " << m << " " << tensor_sum(field_slice) << std::endl;
}
}
// use nested loops to compute subtensor sum
double sum;
for (int l = 0; l < 3; ++l) {
for (int m = 0; m < 3; ++m) {
sum = 0;
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 1; ++j) {
for (int k = 0; k < 1; ++k) {
sum += test_tensor(l, m, i, j, k);
}
}
}
std::cout << "sum nested loops " << l << " " << m << " " << sum << std::endl;
}
}
打印出来
sum slice 0 0 0
sum slice 0 1 0
sum slice 0 2 1.1
sum slice 1 0 1
sum slice 1 1 1
sum slice 1 2 1.1
sum slice 2 0 1
sum slice 2 1 1
sum slice 2 2 1.1
sum nested loops 0 0 2.2
sum nested loops 0 1 0
sum nested loops 0 2 0
sum nested loops 1 0 0
sum nested loops 1 1 2
sum nested loops 1 2 0
sum nested loops 2 0 0
sum nested loops 2 1 0
sum nested loops 2 2 2
为什么结果会不一样?我怀疑 chip 或 slice 操作没有按预期方式工作。即使把两个步骤分开——先把 chip 的结果存入一个张量,再对它做 slice——结果也没有改变。如何在不使用嵌套循环的情况下计算子张量的和?
1条答案
回答 1(作者:jbose2ul):
你在这里用错误的方式索引:
请阅读文档:Eigen unsupported 模块中的 Eigen Tensor 文档:
chip(const Index offset,const Index dim)
chip 是一种特殊的 slice:它是在维度 dim 中给定偏移 offset 处的子张量。返回的张量比输入张量少一个维度:维度 dim 被移除。
例如,对一个矩阵取 chip,得到的就是该矩阵的某一行或某一列。
所以基本上你输入参数的顺序是错误的。
修正后的版本:
或者写成下面这种形式,结果相同: