// ReduceOps.h: dispatch stub declarations for ATen reduction kernels (from pytorch/pytorch).
#pragma once

#include <ATen/ATen.h>
#include <ATen/native/DispatchStub.h>
#include <c10/util/Optional.h>
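
// Forward declaration keeps this header light; the full TensorIterator
// definition is not needed to declare the stubs below.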
namespace at {
struct TensorIterator;
}

namespace at { namespace native {
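
// Common signature for the simple reductions below: each kernel reads its
// operands and writes its result through a pre-configured TensorIterator.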
using reduce_fn = void (*)(TensorIterator&);

DECLARE_DISPATCH(reduce_fn, sum_stub);
DECLARE_DISPATCH(reduce_fn, prod_stub);
DECLARE_DISPATCH(reduce_fn, mean_stub);
DECLARE_DISPATCH(reduce_fn, and_stub);
DECLARE_DISPATCH(reduce_fn, or_stub);
DECLARE_DISPATCH(reduce_fn, min_values_stub);
DECLARE_DISPATCH(reduce_fn, max_values_stub);
DECLARE_DISPATCH(reduce_fn, argmax_stub);
DECLARE_DISPATCH(reduce_fn, argmin_stub);
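
// std and var share one stub: `unbiased` selects Bessel's correction and
// `take_sqrt` distinguishes std (true) from var (false).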
using reduce_std_var_function =
    void (*)(TensorIterator&, bool unbiased, bool take_sqrt);
DECLARE_DISPATCH(reduce_std_var_function, std_var_stub);
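
// Out-of-place norm kernel: writes the p-norm of `self` into `result`,
// optionally reducing over a single dimension.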
using reduce_norm_fn =
    void (*)(Tensor& result, const Tensor& self, Scalar p, c10::optional<int64_t> dim);
DECLARE_DISPATCH(reduce_norm_fn, norm_kernel);
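
// TensorIterator-based norm reduction; the Scalar argument carries the norm
// order p.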
using reduce_fn_flag = void (*)(TensorIterator&, Scalar);
DECLARE_DISPATCH(reduce_fn_flag, norm_stub);
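
// Cumulative reductions (cumsum/cumprod) along dimension `dim`.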
using cum_fn = void (*)(Tensor& result, const Tensor& self, int64_t dim);
DECLARE_DISPATCH(cum_fn, cumsum_stub);
DECLARE_DISPATCH(cum_fn, cumprod_stub);
}} // namespace at::native
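
// Usage sketch (not part of this header; REGISTER_DISPATCH and the stub call
// operator come from DispatchStub.h): a backend registers its kernel in its
// own translation unit, and the operator implementation dispatches on the
// iterator's device type. The kernel name below is hypothetical.
//
//   static void sum_kernel_impl(at::TensorIterator& iter) { /* reduce */ }
//   REGISTER_DISPATCH(sum_stub, &sum_kernel_impl);
//
//   // Caller side, e.g. inside the sum() implementation:
//   sum_stub(iter.device_type(), iter);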