/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <executorch/kernels/optimized/blas/CPUBlas.h>
#include <executorch/kernels/portable/cpu/util/matmul_ops_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>

#include <array>

namespace torch {
namespace executor {
namespace native {

using Tensor = exec_aten::Tensor;

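// linear.out kernel: computes out = in * mat2^T, where `in` has shape
// [..., in_features] and `mat2` (the weight) has shape
// [out_features, in_features]. Bias is not yet supported.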
Tensor& opt_linear_out(
    RuntimeContext& ctx,
    const Tensor& in,
    const Tensor& mat2,
    const optional<Tensor>& bias,
    Tensor& out) {
  ET_KERNEL_CHECK_MSG(
      ctx,
      !bias.has_value(),
      InvalidArgument,
      out,
      "bias not supported yet in linear");
  ET_KERNEL_CHECK(ctx, check_linear_args(in, mat2, out), InvalidArgument, out);

  size_t output_ndim = 0;
  std::array<exec_aten::SizesType, kTensorDimensionLimit> output_sizes;
  get_linear_out_target_size(in, mat2, output_sizes.data(), &output_ndim);
  ET_KERNEL_CHECK(
      ctx,
      resize_tensor(out, {output_sizes.data(), output_ndim}) == Error::Ok,
      InvalidArgument,
      out);

  // gemm on some platforms doesn't tolerate empty input.
  if (out.numel() == 0) {
    return out;
  }

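  // Collapse all leading dimensions of `in` into a single batch dimension
  // so the input can be treated as a 2-D [batch, in_features] matrix.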
  int flattened_input_dim = 1;
  for (int ii = 0; ii < in.dim() - 1; ++ii) {
    flattened_input_dim *= in.sizes()[ii];
  }
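  // Dispatch over all real dtypes plus Half and BFloat16; CTYPE is bound to
  // the matching C++ scalar type inside the lambda.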
  ET_SWITCH_REAL_TYPES_AND2(
      Half, BFloat16, in.scalar_type(), ctx, "linear.out", CTYPE, [&]() {
        size_t n = flattened_input_dim;
        size_t k = in.sizes()[in.dim() - 1];
        size_t m = mat2.size(0);

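        // cpublas::gemm follows the column-major BLAS convention, so the
        // row-major buffers are reinterpreted: A = mat2 (read as k x m,
        // transposed to m x k) and B = in (read as in^T, k x n). The m x n
        // column-major product mat2 * in^T, written with ldc = m, is exactly
        // the row-major n x m result out = in * mat2^T.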
        executorch::cpublas::gemm(
            executorch::cpublas::TransposeType::Transpose,
            executorch::cpublas::TransposeType::NoTranspose,
            m,
            n,
            k,
            static_cast<CTYPE>(1),
            mat2.const_data_ptr<CTYPE>(),
            k,
            in.const_data_ptr<CTYPE>(),
            k,
            static_cast<CTYPE>(0),
            out.mutable_data_ptr<CTYPE>(),
            m);
      });

  return out;
}

} // namespace native
} // namespace executor
} // namespace torch