Home
last modified time | relevance | path

Searched defs:ReduceAdd (Results 1 – 6 of 6) sorted by relevance

/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/parallel/
H A D data_parallel.h:62 struct ReduceAdd : public autograd::Node { struct
63 explicit ReduceAdd(const at::Device& destination_device) in ReduceAdd() argument
65 ~ReduceAdd() override {} in ~ReduceAdd() argument
70 "ReduceAdd can only be used during the backward pass of data parallel."); in apply() argument
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
H A D WeightNorm.cu:44 struct ReduceAdd { struct
45 inline __device__ T operator()(const T a, const T b) const { in operator ()()
H A D ScatterGatherKernel.cu:31 class ReduceAdd { class
H A D Indexing.cu:304 class ReduceAdd { class
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cpu/
H A D ScatterGatherKernel.cpp:46 class ReduceAdd { class
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/service/
H A D hlo_evaluator_test.cc:2624 TEST_P(HloEvaluatorBf16Test, ReduceAdd) { in TEST_P() argument