1 /*
2 * Copyright (c) Meta Platforms, Inc. and affiliates.
3 * All rights reserved.
4 *
5 * This source code is licensed under the BSD-style license found in the
6 * LICENSE file in the root directory of this source tree.
7 */
8
9 #include <executorch/kernels/portable/cpu/scalar_utils.h>
10 #include <executorch/kernels/portable/cpu/util/kernel_ops_util.h>
11 #include <executorch/runtime/kernel/kernel_includes.h>
12 #include <executorch/runtime/platform/assert.h>
13
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstring>
17
18 namespace torch {
19 namespace executor {
20 namespace native {
21
arange_out(KernelRuntimeContext & ctx,const Scalar & end,Tensor & out)22 Tensor& arange_out(KernelRuntimeContext& ctx, const Scalar& end, Tensor& out) {
23 double end_val = 0;
24 ET_KERNEL_CHECK(
25 ctx, utils::extract_scalar(end, &end_val), InvalidArgument, out);
26
27 ET_KERNEL_CHECK(
28 ctx, check_arange_args(0.0, end_val, 1.0, out), InvalidArgument, out);
29
30 ET_KERNEL_CHECK(ctx, tensor_is_default_dim_order(out), InvalidArgument, out);
31
32 size_t size = static_cast<size_t>(std::ceil(end_val));
33
34 Tensor::SizesType out_length = static_cast<Tensor::SizesType>(size);
35
36 ET_KERNEL_CHECK(
37 ctx,
38 resize_tensor(out, {&out_length, 1}) == Error::Ok,
39 InvalidArgument,
40 out);
41
42 ET_SWITCH_REAL_TYPES(out.scalar_type(), ctx, "arange.out", CTYPE, [&]() {
43 auto out_data = out.mutable_data_ptr<CTYPE>();
44 for (size_t i = 0; i < size; i++) {
45 out_data[i] = static_cast<CTYPE>(i);
46 }
47 });
48
49 return out;
50 }
51
arange_start_out(KernelRuntimeContext & ctx,const Scalar & start,const Scalar & end,const Scalar & step,Tensor & out)52 Tensor& arange_start_out(
53 KernelRuntimeContext& ctx,
54 const Scalar& start,
55 const Scalar& end,
56 const Scalar& step,
57 Tensor& out) {
58 (void)ctx;
59
60 double d_start = 0;
61 ET_KERNEL_CHECK(
62 ctx, utils::extract_scalar(start, &d_start), InvalidArgument, out);
63
64 double d_end = 0;
65 ET_KERNEL_CHECK(
66 ctx, utils::extract_scalar(end, &d_end), InvalidArgument, out);
67
68 double d_step = 0;
69 ET_KERNEL_CHECK(
70 ctx, utils::extract_scalar(step, &d_step), InvalidArgument, out);
71
72 ET_KERNEL_CHECK(
73 ctx,
74 check_arange_args(d_start, d_end, d_step, out),
75 InvalidArgument,
76 out);
77
78 ET_KERNEL_CHECK(ctx, tensor_is_default_dim_order(out), InvalidArgument, out);
79
80 double size_d = (d_end - d_start) / d_step;
81 size_t size = static_cast<size_t>(std::ceil(size_d));
82
83 Tensor::SizesType out_length = static_cast<Tensor::SizesType>(size);
84
85 ET_KERNEL_CHECK(
86 ctx,
87 resize_tensor(out, {&out_length, 1}) == Error::Ok,
88 InvalidArgument,
89 out);
90
91 ET_SWITCH_REAL_TYPES(
92 out.scalar_type(), ctx, "arange.start_out", CTYPE, [&]() {
93 auto out_data = out.mutable_data_ptr<CTYPE>();
94 for (size_t i = 0; i < size; i++) {
95 out_data[i] = convert<CTYPE, double>(d_start + i * d_step);
96 }
97 });
98
99 return out;
100 }
101
102 } // namespace native
103 } // namespace executor
104 } // namespace torch
105