/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <cinttypes> // PRId8, used in the unsupported-dtype error message
#include <cmath>

#include <executorch/kernels/portable/cpu/util/functional_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>

namespace torch {
namespace executor {
namespace native {

using exec_aten::Tensor;

/**
 * Computes the bitwise NOT of the given input tensor. The input tensor must
 * have an integral or boolean dtype. For bool tensors, it computes the
 * logical NOT.
 */
Tensor&
bitwise_not_out(KernelRuntimeContext& ctx, const Tensor& in, Tensor& out) {
  (void)ctx;

  // Resize for dynamic shape
  ET_KERNEL_CHECK_MSG(
      ctx,
      resize_tensor(out, in.sizes()) == Error::Ok,
      InvalidArgument,
      out,
      "Failed to resize output tensor.");

  ET_KERNEL_CHECK(ctx, tensors_have_same_dtype(in, out), InvalidArgument, out);
  ET_KERNEL_CHECK(
      ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
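
  // Bool is special-cased: PyTorch defines bitwise_not on bool tensors as a
  // logical NOT, and applying `~` to a C++ bool would integer-promote and
  // give the wrong result (e.g. ~true == -2).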
  if (in.scalar_type() == exec_aten::ScalarType::Bool) {
    apply_unary_map_fn(
        [](const bool val_in) { return !val_in; },
        in.const_data_ptr<bool>(),
        out.mutable_data_ptr<bool>(),
        in.numel());
  } else if (isIntegralType(in.scalar_type(), /*includeBool=*/false)) {
    ET_SWITCH_INT_TYPES(in.scalar_type(), ctx, "bitwise_not.out", CTYPE, [&] {
      apply_unary_map_fn(
          [](const CTYPE val_in) { return ~val_in; },
          in.const_data_ptr<CTYPE>(),
          out.mutable_data_ptr<CTYPE>(),
          in.numel());
    });
  } else {
    ET_KERNEL_CHECK_MSG(
        ctx,
        false,
        InvalidArgument,
        out,
        "Unsupported input dtype %" PRId8,
        static_cast<int8_t>(in.scalar_type()));
  }

  return out;
}

} // namespace native
} // namespace executor
} // namespace torch
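
// -----------------------------------------------------------------------------
// Minimal usage sketch (illustrative only, kept as a comment so it is not
// compiled into the kernel). It assumes the ExecuTorch testing TensorFactory
// helper and a default-constructible KernelRuntimeContext, which is roughly
// how the portable op tests drive these kernels; treat the include path and
// helper names as assumptions rather than guarantees of this file's API.
//
//   #include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
//
//   using namespace torch::executor;
//
//   testing::TensorFactory<exec_aten::ScalarType::Int> tf;
//   Tensor in = tf.make({2, 2}, {0, 1, -2, 7});
//   Tensor out = tf.zeros({2, 2});
//   KernelRuntimeContext ctx{};
//   native::bitwise_not_out(ctx, in, out);
//   // out now holds {~0, ~1, ~(-2), ~7} == {-1, -2, 1, -8}.
// -----------------------------------------------------------------------------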