#pragma once
#include <ATen/core/Tensor.h>
#include <c10/util/irange.h>
#include <ATen/core/IListRef.h>

namespace at::native {
// This file contains non-symbolic signatures for ops whose signatures we have sym-intified.
// However, in certain cases (such as static runtime), we call the native versions of the ops directly.
// In those cases, we duplicate the signature here with non-symbolic ints and also duplicate the C++ implementation.
TORCH_API at::Tensor reshape(const at::Tensor& self, at::IntArrayRef proposed_shape);
TORCH_API at::Tensor narrow(const at::Tensor& self, int64_t dim, int64_t start, int64_t length);
TORCH_API at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, std::optional<at::ScalarType> dtype=std::nullopt, std::optional<at::Layout> layout=std::nullopt, std::optional<at::Device> device=std::nullopt, std::optional<bool> pin_memory=std::nullopt, std::optional<bool> is_coalesced=std::nullopt);
TORCH_API at::Tensor nll_loss(const at::Tensor & self, const at::Tensor & target, const std::optional<at::Tensor>& weight_opt, int64_t reduction, int64_t ignore_index);
TORCH_API at::Tensor nll_loss2d(const at::Tensor & self, const at::Tensor & target, const std::optional<at::Tensor>& weight_opt, int64_t reduction, int64_t ignore_index);
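//
// A minimal usage sketch (illustrative only, not part of this header): a
// caller that bypasses the dispatcher, as static runtime does, can invoke
// these non-symbolic overloads directly with plain int64_t shapes:
//
//   #include <ATen/ATen.h>
//   #include <ATen/native/NonSymbolicBC.h>
//
//   at::Tensor t = at::rand({2, 3, 4});
//   // Calls the int64_t-based native implementation directly.
//   at::Tensor r = at::native::reshape(t, {6, 4});
//   at::Tensor n = at::native::narrow(t, /*dim=*/1, /*start=*/0, /*length=*/2);
//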
// The ops below don't get a duplicated C++ implementation.
// They are backward ops, which makes them very unlikely to be called directly
// by external code (e.g., at::native::trace_backward).
// However, they still get their own declarations here for BC purposes.
TORCH_API at::Tensor _embedding_bag_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const std::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1);
TORCH_API at::Tensor _embedding_bag_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const std::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1);
TORCH_API at::Tensor value_selecting_reduction_backward(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, at::IntArrayRef sizes, bool keepdim);
TORCH_API at::Tensor trace_backward(const at::Tensor & grad, at::IntArrayRef sizes);
TORCH_API at::Tensor index_select_backward(const at::Tensor & grad, at::IntArrayRef self_sizes, int64_t dim, const at::Tensor & index);
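//
// Illustrative sketch (assumed semantics, not part of this header): these
// backward declarations can still be invoked directly. For example, the
// backward of index_select scatters the incoming gradient back into a
// zero tensor of the original shape:
//
//   at::Tensor self = at::rand({4, 3});
//   at::Tensor index = at::arange(2);  // int64 indices {0, 1}
//   at::Tensor out = at::index_select(self, /*dim=*/0, index);
//   at::Tensor grad_self = at::native::index_select_backward(
//       at::ones_like(out), self.sizes(), /*dim=*/0, index);
//   // grad_self has shape {4, 3}: ones at rows 0 and 1, zeros elsewhere.
//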
TORCH_API at::Tensor select(const at::Tensor& self, int64_t dim, int64_t index);
TORCH_API std::vector<at::Tensor> tensor_split(const at::Tensor& self, at::IntArrayRef indices, int64_t dim);
} // namespace at::native