#include <torch/csrc/lazy/ts_backend/tensor_aten_ops.h>

#include <ATen/InferSize.h>
#include <torch/csrc/autograd/variable.h>
#include <torch/csrc/lazy/core/helpers.h>
#include <torch/csrc/lazy/core/ir_builder.h>
#include <torch/csrc/lazy/core/ir_util.h>
#include <torch/csrc/lazy/core/lazy_graph_executor.h>
#include <torch/csrc/lazy/core/metrics.h>
#include <torch/csrc/lazy/core/ops/arithmetic_ir_ops.h>
#include <torch/csrc/lazy/core/ops/utils.h>
#include <torch/csrc/lazy/core/tensor.h>
#include <torch/csrc/lazy/core/util.h>
#include <torch/csrc/lazy/generated/LazyIr.h>
#include <algorithm>
#include <functional>
#include <optional>

namespace torch {
namespace lazy {
namespace {

// to enable operator+-*/ for Value
using namespace torch::lazy;

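// Broadcasts `input` to `target_shape` by emitting an Expand IR node; if the
// sizes already match, the value is returned unchanged.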
torch::lazy::Value MaybeExpand(
    const torch::lazy::Value& input,
    const torch::lazy::Shape& target_shape) {
  if (input.shape().sizes() == target_shape.sizes()) {
    return input;
  }
  return torch::lazy::MakeExpand(
      input,
      target_shape.sizes().vec(),
      /*is_scalar_expand=*/false);
}

} // namespace

//////////////////////////////////////////////////////////////////////////////
// ATEN operators follow here, listed in alphabetical order.
//////////////////////////////////////////////////////////////////////////////

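// In-place fill: replaces the tensor's IR value with `value` expanded to the
// tensor's shape, built as a constant on the tensor's device.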
void fill_(torch::lazy::LazyTensorPtr& input, const at::Scalar& value) {
  torch::lazy::Value constant =
      torch::lazy::LazyGraphExecutor::Get()->GetIrValueForExpandedScalar(
          value, input->shape(), input->GetDevice());
  input->SetInPlaceIrValue(std::move(constant));
}

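// In-place copy: when `src` is on the same device, the copy stays in the lazy
// IR (adding a Cast node if dtypes differ and an Expand if sizes differ);
// otherwise `src` is materialized via ToTensor and `input` is updated from the
// resulting at::Tensor, expanded first if the sizes differ.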
void copy_(torch::lazy::LazyTensorPtr& input, torch::lazy::LazyTensorPtr& src) {
  if (input->GetDevice() == src->GetDevice()) {
    torch::lazy::Value copy_value;
    if (input->dtype() == src->dtype()) {
      copy_value = src->GetIrValue();
    } else {
      copy_value = torch::lazy::MakeCast(
          src->GetIrValue(), input->dtype(), src->dtype());
    }
    input->SetIrValue(MaybeExpand(copy_value, input->shape()));
  } else {
    auto input_shape = input->shape();
    at::Tensor src_tensor = src->ToTensor(/*detached=*/true);
    if (src_tensor.sizes() != input_shape.Get().sizes()) {
      src_tensor = src_tensor.expand(input_shape.Get().sizes().vec());
    }
    input->UpdateFromTensor(std::move(src_tensor), /*sync=*/false);
  }
}

} // namespace lazy
} // namespace torch