// ReduceOpsUtils.h — 85 lines (74 loc), 2.31 KB
// Forked from pytorch/pytorch.
#pragma once
namespace at { namespace native {
// Clamp a dimension count to at least 1, so 0-dim (scalar) tensors can be
// treated as if they had a single dimension during reductions.
static inline int64_t ensure_nonempty_dim(int64_t dim) {
  return dim < 1 ? 1 : dim;
}
// Size of `t` along `dim`, treating a 0-dim (scalar) tensor as having size 1.
static inline int64_t ensure_nonempty_size(const Tensor& t, int64_t dim) {
  if (t.dim() == 0) {
    return 1;
  }
  return t.size(dim);
}
// Stride of `t` along `dim`, treating a 0-dim (scalar) tensor as stride 1.
static inline int64_t ensure_nonempty_stride(const Tensor& t, int64_t dim) {
  if (t.dim() == 0) {
    return 1;
  }
  return t.stride(dim);
}
using IdxVec = std::vector<int64_t>;
// Return `vec`, padded with a single 1 if it was empty — the vector analogue
// of the ensure_nonempty_* helpers above (a scalar's shape becomes {1}).
static inline IdxVec ensure_nonempty_vec(IdxVec vec) {
  if (vec.empty()) {
    vec.push_back(1);
  }
  return vec;
}
// View `src` with `replacement_shape`, forcing the stride along `dim` to 0 so
// the same data is broadcast across that dimension.
static inline Tensor restride_dim(
  const Tensor& src, int64_t dim,
  IntArrayRef replacement_shape
) {
  // Pad strides for 0-dim tensors so indexing by `dim` is valid.
  auto new_strides = ensure_nonempty_vec(src.strides().vec());
  new_strides[dim] = 0;
  return src.as_strided(replacement_shape, new_strides);
}
// Resize `result` to `self`'s shape with `dim` collapsed to size 1 (the
// keepdim shape of a reduction over `dim`). Returns `result` for chaining.
inline Tensor &_dimreduce_setup(Tensor &result, const Tensor &self,
                                int64_t dim) {
  auto reduced_sizes = self.sizes().vec();
  reduced_sizes[dim] = 1;
  result.resize_(reduced_sizes);
  return result;
}
// Handle the trivial cases of a dim-reduction that has an identity element.
// Returns true (with `result` filled in) when no real reduction is needed:
//  - 0-dim single-element input: result is a copy of the scalar;
//  - empty input: result is filled with the identity `ident`.
// Returns false when the caller must perform the actual reduction.
inline bool _dimreduce_return_trivial(Tensor &result, const Tensor &self,
                                      Scalar ident, int64_t dim, bool keepdim) {
  const bool is_scalar = self.numel() == 1 && self.ndimension() == 0;
  if (is_scalar) {
    result.resize_({});
    result.fill_(self);
    return true;
  }
  if (self.numel() == 0) {
    // Empty reduction: fill with the identity element.
    _dimreduce_setup(result, self, dim);
    result.fill_(ident);
    if (!keepdim) {
      result.squeeze_(dim);
    }
    return true;
  }
  return false;
}
// Trivial-case handling for reductions WITHOUT an identity element
// (e.g. min/max). A 0-dim single-element input short-circuits to a copy of
// the scalar; an empty input is an error since there is no identity to
// return. Returns false when the caller must perform the reduction.
inline bool _dimreduce_return_trivial_no_ident(Tensor &result, const Tensor &self,
                                               int64_t dim, bool keepdim, const char *fn_name) {
  const bool is_scalar = self.numel() == 1 && self.ndimension() == 0;
  if (is_scalar) {
    result.resize_({});
    result.fill_(self);
    return true;
  }
  if (self.numel() == 0) {
    AT_ERROR("cannot perform reduction function ", fn_name,
             " on tensor with no elements because the operation does not have an identity");
  }
  return false;
}
// Trivial-case handling for a full (all-dims) reduction with an identity:
// an empty tensor reduces to a scalar tensor holding `ident`. Returns
// nullopt when the caller must perform the actual reduction.
inline c10::optional<Tensor> _allreduce_return_trivial(
    const Tensor& self,
    Scalar ident) {
  if (self.numel() != 0) {
    return c10::nullopt;
  }
  return at::scalar_tensor(ident, self.options());
}
}} // at::native