/aosp_15_r20/external/pytorch/torch/_inductor/kernel/
  mm.py
    144  def bias_addmm(inp, mat1, mat2, *, out=None, alpha=1, beta=1):  [argument]
    159  def tuned_mm(mat1, mat2, *, layout=None):  [argument]
    279  def tuned_int_mm(mat1, mat2, *, layout=None):  [argument]
    323  def tuned_addmm(inp, mat1, mat2, *, alpha=1, beta=1, layout=None):  [argument]
    471  mat1, mat1_meta, mat2, *, out_dtype=None, layout=None  [argument]
    514  def fallback_mixed_mm(mat1, mat2, *, out):  [argument]
    531  def try_heuristic(m, n, k, choices, mat1, mat2, mat2_dtype, layout):  [argument]
    579  mat2,  [argument]
    596  def get_context(m, k, n, mat1, mat2, mat1_stride, mat2_stride):  [argument]
    639  def get_size_hints(mat1, mat2, m, n, k):  [argument]
    [all …]
  bmm.py
    107  def tuned_bmm(mat1, mat2, *, layout=None):  [argument]
    172  def tuned_baddbmm(inp, mat1, mat2, *, alpha=1, beta=1, layout=None):  [argument]
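Note: tuned_mm, tuned_addmm, tuned_bmm and tuned_baddbmm above are inductor's autotuned lowerings for the corresponding aten matmul ops. A minimal sketch of the Python-level calls they service, assuming a recent PyTorch with torch.compile available (the op-to-lowering mapping is inferred from the function names, not from this listing):

```python
import torch

mat1 = torch.randn(8, 16)
mat2 = torch.randn(16, 32)
inp = torch.randn(8, 32)

out_mm = torch.mm(mat1, mat2)                              # lowered via tuned_mm
out_addmm = torch.addmm(inp, mat1, mat2, beta=1, alpha=1)  # lowered via tuned_addmm

b1 = torch.randn(4, 8, 16)
b2 = torch.randn(4, 16, 32)
binp = torch.randn(4, 8, 32)
out_bmm = torch.bmm(b1, b2)                                # lowered via tuned_bmm
out_baddbmm = torch.baddbmm(binp, b1, b2)                  # lowered via tuned_baddbmm

# Only compiled graphs go through these lowerings; eager calls use the
# regular aten kernels.
compiled = torch.compile(lambda a, b: torch.mm(a, b))
_ = compiled(mat1, mat2)
```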
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mkldnn/
  Matmul.cpp
    14   const Tensor &mat2,  [in mkldnn_matmul()]
    23   const Tensor& mat2,  [in use_mkldnn_bf16_matmul()]
    30   const Tensor& mat2,  [in use_mkldnn_fp16_matmul()]
    69   const Tensor& mat2,  [in use_mkldnn_bf32_matmul()]
    76   const Tensor& mat2,  [in use_mkldnn_matmul()]
    83   const Tensor &mat2,  [in mkldnn_matmul_i8i8i32()]
    229  const Tensor &mat2,  [in mkldnn_matmul()]
    326  inline bool checksize(const Tensor& mat1, const Tensor& mat2){  [in checksize()]
    349  const Tensor& mat2,  [in use_mkldnn_bf16_matmul()]
    378  const Tensor& mat2,  [in use_mkldnn_fp16_matmul()]
    [all …]
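The use_mkldnn_bf16_matmul / use_mkldnn_fp16_matmul helpers are dispatch heuristics that decide when a CPU matmul is routed through oneDNN (mkldnn). A minimal sketch of a call such a heuristic would gate; whether it actually takes the mkldnn path depends on the CPU and the build:

```python
import torch

a = torch.randn(64, 128, dtype=torch.bfloat16)
b = torch.randn(128, 256, dtype=torch.bfloat16)

# On CPUs with native bf16 support this matmul may be handed to
# mkldnn_matmul(); otherwise it falls back to the reference kernel.
c = torch.matmul(a, b)
```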
/aosp_15_r20/external/executorch/kernels/portable/cpu/util/
  matmul_ops_util.cpp
    22   const Tensor& mat2,  [in check_addmm_args()]
    38   bool check_bmm_args(const Tensor& in, const Tensor& mat2, Tensor& out) {  [in check_bmm_args()]
    53   const Tensor& mat2,  [in get_bmm_out_target_size()]
    62   bool check_mm_args(const Tensor& in, const Tensor& mat2, Tensor& out) {  [in check_mm_args()]
    74   bool check_linear_args(const Tensor& in, const Tensor& mat2, Tensor& out) {  [in check_linear_args()]
    89   const Tensor& mat2,  [in get_mm_out_target_size()]
    99   const Tensor& mat2,  [in get_linear_out_target_size()]
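check_mm_args / check_bmm_args / get_mm_out_target_size validate operand shapes and compute the output size for the portable ExecuTorch kernels. A hypothetical restatement of those shape rules in Python (the helper names below are illustrative only, not the ExecuTorch API):

```python
def mm_out_size(in_shape, mat2_shape):
    # mm: (m, k) x (k, n) -> (m, n)
    m, k = in_shape
    k2, n = mat2_shape
    assert k == k2, "inner dimensions must match"
    return (m, n)

def bmm_out_size(in_shape, mat2_shape):
    # bmm: (b, m, k) x (b, k, n) -> (b, m, n)
    b, m, k = in_shape
    b2, k2, n = mat2_shape
    assert b == b2 and k == k2
    return (b, m, n)

print(mm_out_size((8, 16), (16, 32)))         # (8, 32)
print(bmm_out_size((4, 8, 16), (4, 16, 32)))  # (4, 8, 32)
```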
/aosp_15_r20/external/pytorch/torch/_inductor/fx_passes/
  decompose_mem_bound_mm.py
    42   def should_decompose_bmm(mat1, mat2) -> bool:  [argument]
    63   def should_decompose_mm(mat1, mat2) -> bool:  [argument]
    100  def repl(mat1, mat2):  [argument]
    123  def repl(mat1, mat2, mat3):  [argument]
    145  def repl(mat1, mat2):  [argument]
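should_decompose_mm / should_decompose_bmm decide when a memory-bound (typically skinny) matmul is rewritten as a broadcasted multiply plus a reduction over the contraction dimension. A sketch of the equivalence being exploited; the actual size thresholds used by the pass are not shown here:

```python
import torch

mat1 = torch.randn(2, 64)   # skinny shapes are the usual target
mat2 = torch.randn(64, 8)

# mm(mat1, mat2) == sum over k of mat1[:, k, None] * mat2[None, k, :]
decomposed = (mat1.unsqueeze(2) * mat2.unsqueeze(0)).sum(dim=1)
reference = torch.mm(mat1, mat2)
torch.testing.assert_close(decomposed, reference)
```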
  post_grad.py
    316   def mm_plus_mm(match: Match, mat1, mat2, mat3, mat4):  [argument]
    396   def uint4x2_mixed_mm(match: Match, mat1, mat2, mat2_mm_shape, mat2_dtype):  [argument]
    419   def mixed_mm(match: Match, mat1, mat2, mat2_dtype):  [argument]
    998   def unfuse_bias_add_to_pointwise(match: Match, mat1, mat2, *, inp):  [argument]
    1041  def addmm(match, mat1, mat2, *, inp):  [argument]
    1042  def repl(inp, mat1, mat2):  [argument]
    1084  def fused_int_mm_mul(match: Match, mat1, mat2, mat3, out_dtype=None):  [argument]
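mm_plus_mm, mixed_mm and the addmm entries are post-grad FX pattern replacements. A sketch of the kind of user code whose traced graph contains those patterns; which pass fires, if any, depends on shapes, dtypes and config, and the mapping below is inferred from the function names rather than taken from this listing:

```python
import torch

def f(mat1, mat2, mat3, mat4, inp):
    # two matmuls feeding one add: candidate for the mm_plus_mm pattern
    y = torch.mm(mat1, mat2) + torch.mm(mat3, mat4)
    # matmul plus bias: candidate for the addmm rewrite (or its unfusion)
    z = torch.mm(mat1, mat2) + inp
    return y, z

args = (torch.randn(8, 16), torch.randn(16, 32),
        torch.randn(8, 16), torch.randn(16, 32),
        torch.randn(8, 32))
_ = torch.compile(f)(*args)
```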
/aosp_15_r20/external/pytorch/aten/src/ATen/native/sparse/cuda/
  SparseBlas.cpp
    41   const Tensor& mat2,  [in sparse_sampled_addmm_out_sparse_csr_cuda()]
    70   const Tensor& mat2,  [in sparse_sampled_addmm_sparse_csr_cuda()]
    82   const Tensor& mat2,  [in addmm_out_sparse_compressed_cuda()]
    164  const Tensor& mat2,  [in baddbmm_out_sparse_csr_cuda()]
    200  const Tensor& mat2,  [in bmm_out_sparse_csr_cuda()]
  SparseSemiStructuredOps.cu
    524  const Tensor& mat1, const Tensor& mat1_meta, const Tensor& mat2,  [in sparse_semi_structured_mad_op()]
    789  const Tensor& mat1, const Tensor& mat1_meta, const Tensor& mat2,  [in _sparse_semi_structured_mm()]
    799  const Tensor& mat2, const Scalar& alpha, const Scalar& beta,  [in _sparse_semi_structured_addmm()]
  SparseBlasImpl.cpp
    74   const Tensor& mat2,  [in addmm_out_legacy()]
    465  const Tensor& mat2,  [in block_sparse_mm()]
    581  const Tensor& mat2,  [in spmm()]
    837  const Tensor& mat2,  [in addmm_out_sparse_csr()]
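The sparse_sampled_addmm_* entries implement torch.sparse.sampled_addmm for CSR tensors: the dense product mat1 @ mat2 is evaluated only at the nonzero positions of the sparse input. A minimal sketch; a CPU implementation also appears in the SparseBlas.cpp listing further down, while older releases were CUDA-only:

```python
import torch

inp = torch.eye(4).to_sparse_csr()   # sparsity pattern: the diagonal
mat1 = torch.randn(4, 8)
mat2 = torch.randn(8, 4)

# alpha * (mat1 @ mat2), masked by inp's pattern, plus beta * inp; CSR result
out = torch.sparse.sampled_addmm(inp, mat1, mat2, beta=1.0, alpha=1.0)
print(out.to_dense())
```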
/aosp_15_r20/external/eigen/unsupported/test/
  cxx11_tensor_contraction.cpp
    23   Tensor<float, 2, DataLayout> mat2(2, 3);  [in test_evals(), local]
    104  Tensor<float, 4, DataLayout> mat2(2, 2, 2, 2);  [in test_multidims(), local]
    255  Tensor<float, 2, DataLayout> mat2(3, 2);  [in test_expr(), local]
    274  Tensor<float, 3, DataLayout> mat2(2, 2, 2);  [in test_out_of_order_contraction(), local]
    321  Tensor<float, 5, DataLayout> mat2(3, 2, 1, 5, 4);  [in test_consistency(), local]
    470  Tensor<float, 2, DataLayout> mat2(4, 1);  [in test_tensor_product(), local]
    502  TensorMap<Tensor<const float, 2, DataLayout> > mat2(in2.data(), 3, 2);  [in test_const_inputs(), local]
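The Eigen tests above exercise Tensor::contract over explicit index pairs. As a cross-language analogue only (the tests themselves are C++ Eigen), the same contraction semantics expressed with torch.tensordot:

```python
import torch

mat1 = torch.randn(2, 3)
mat2 = torch.randn(3, 2)   # mirrors Tensor<float, 2, DataLayout> mat2(3, 2)

# contract dim 1 of mat1 with dim 0 of mat2, i.e. an ordinary matrix product
out = torch.tensordot(mat1, mat2, dims=([1], [0]))
torch.testing.assert_close(out, mat1 @ mat2)
```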
  cxx11_tensor_expr.cpp
    76   TensorMap<Tensor<float, 2, RowMajor>> mat2(data2, 2, 3);  [in test_2d(), local]
    115  Tensor<float, 3, RowMajor> mat2(2,3,7);  [in test_3d(), local]
    169  Tensor<float, 3> mat2(2,3,7);  [in test_constants(), local]
    227  Tensor<float, 3> mat2(2,3,7);  [in test_functors(), local]
    257  Tensor<float, 3> mat2(2,3,7);  [in test_type_casting(), local]
    285  Tensor<float, 3> mat2(2,3,7);  [in test_select(), local]
  cxx11_tensor_of_const_values.cpp
    22   const TensorMap<Tensor<float, 2>> mat2(data2, 2, 3);  [in test_assign(), local]
    58   TensorMap<Tensor<float, 2>> mat2(data2, 2, 3);  [in test_plus(), local]
    84   TensorMap<Tensor<float, 2>> mat2(data2, 2, 3);  [in test_plus_equal(), local]
  cxx11_tensor_map.cpp
    72   Tensor<int, 2, RowMajor> mat2(2,3);  [in test_2d(), local]
    119  Tensor<int, 3, RowMajor> mat2(2,3,7);  [in test_3d(), local]
    163  Tensor<int, 3, RowMajor> mat2(2,3,7);  [in test_from_tensor(), local]
  cxx11_tensor_comparisons.cpp
    20   Tensor<float, 3> mat2(2,3,7);  [in test_orderings(), local]
    50   Tensor<float, 3> mat2(2,3,7);  [in test_equality(), local]
/aosp_15_r20/external/executorch/kernels/optimized/cpu/
  op_bmm.cpp
    32   bool check_bmm_out_args(const Tensor& self, const Tensor& mat2, Tensor& out) {  [in check_bmm_out_args()]
    79   void bmm_kernel(const Tensor& self, const Tensor& mat2, Tensor& out) {  [in bmm_kernel()]
    113  Error resize_out_tensor(const Tensor& self, const Tensor& mat2, Tensor& out) {  [in resize_out_tensor()]
    142  const Tensor& mat2,  [in opt_bmm_out()]
/aosp_15_r20/external/executorch/backends/vulkan/runtime/graph/ops/impl/
  MatMul.cpp
    43   vTensorPtr mat2 = graph->get_tensor(args[1].refs[1]);  [in resize_matmul_node(), local]
    65   ValueRef mat2 = prepack_standard(  [in add_matmul_naive_buffer_node(), local]
    116  ValueRef mat2 = prepack_standard(  [in add_matmul_naive_texture3d_node(), local]
    161  ValueRef mat2 = prepack_standard(  [in add_matmul_optimized_node(), local]
  Linear.cpp
    59   vTensorPtr mat2 = graph->get_tensor(args[1].refs[1]);  [in resize_addmm_node(), local]
    100  ValueRef mat2 = prepack_standard(  [in add_addmm_naive_node(), local]
    150  ValueRef mat2 = prepack_standard(  [in add_addmm_optimized_node(), local]
    233  const ValueRef mat2,  [in add_addmm_node()]
/aosp_15_r20/external/ruy/ruy/
  prepacked_cache_test.cc
    66   PEMat mat2 = MakeDummyPEMat(Type::Create<std::uint8_t>(), 5, 3);  [in TEST(), local]
    92   PEMat mat2 = MakeDummyPEMat(Type::Create<float>(), 5, 3);  [in TEST(), local]
    117  PEMat mat2 = MakeDummyPEMat(Type::Create<std::uint8_t>(), 5, 3);  [in TEST(), local]
    152  PEMat mat2 = MakeDummyPEMat(Type::Create<std::uint8_t>(), 10, 20);  [in TEST(), local]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mkldnn/xpu/
  Blas.cpp
    12   const Tensor& mat2,  [in addmm_out()]
    109  const Tensor& mat2,  [in _addmm_activation_out()]
    123  Tensor& mm_out(const Tensor& self, const Tensor& mat2, Tensor& result) {  [in mm_out()]
    159  Tensor mm(const Tensor& self, const Tensor& mat2) {  [in mm()]
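mm_out() and addmm_out() here are the out= overloads of mm/addmm for the XPU (oneDNN) backend. The Python-level calls look the same on every backend; a minimal sketch:

```python
import torch

a = torch.randn(8, 16)
b = torch.randn(16, 32)
bias = torch.randn(8, 32)
result = torch.empty(8, 32)

torch.mm(a, b, out=result)           # routed to the backend's mm_out()
torch.addmm(bias, a, b, out=result)  # routed to the backend's addmm_out()
```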
/aosp_15_r20/external/pytorch/aten/src/ATen/native/sparse/
  SparseBlas.cpp
    120  const Tensor& mat2,  [in sparse_sampled_addmm_out_sparse_csr_cpu()]
    156  const Tensor& mat2,  [in sparse_sampled_addmm_sparse_csr_cpu()]
    169  const Tensor& mat2,  [in sparse_sampled_addmm_check_inputs()]
  SparseTensorMath.cpp
    1315  const Tensor& mat2,  [in addmm_out_sparse_dense_cpu()]
    1338  const Tensor& mat2,  [in addmm_sparse_dense_cpu()]
    1373  const Tensor& mat2  [in _sparse_mm()]
    1396  Tensor _sparse_mm(const Tensor& mat1, const Tensor& mat2, const c10::string_view reduce) {  [in _sparse_mm()]
    1604  … const Tensor& mat1, const Tensor& mat2, const Scalar& beta, const Scalar& alpha, Tensor& result) {  [in _sspaddmm_out_only_sparse()]
    1609  Tensor smm(const Tensor& self, const Tensor& mat2) {  [in smm()]
    1616  Tensor sspaddmm(const Tensor& self, const Tensor& mat1, const Tensor& mat2,  [in sspaddmm()]
    1898  Tensor bmm_sparse_cpu(const SparseTensor& self, const Tensor& mat2) {  [in bmm_sparse_cpu()]
    1947  Tensor& bmm_out_sparse_cpu(const SparseTensor& self, const Tensor& mat2, Tensor& result) {  [in bmm_out_sparse_cpu()]
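smm, sspaddmm and bmm_sparse_cpu back the sparse-COO matmul entry points (torch.sparse.mm, torch.sspaddmm, and torch.bmm on sparse tensors). A minimal sketch for the COO case:

```python
import torch

indices = torch.tensor([[0, 1, 1], [2, 0, 2]])
values = torch.tensor([3.0, 4.0, 5.0])
mat1 = torch.sparse_coo_tensor(indices, values, (2, 3)).coalesce()
mat2 = torch.randn(3, 4)

dense_out = torch.sparse.mm(mat1, mat2)   # sparse @ dense -> dense

# sspaddmm: sparse self + (sparse mat1 @ dense mat2), with a sparse result
self_sp = torch.sparse_coo_tensor(torch.tensor([[0], [0]]),
                                  torch.tensor([1.0]), (2, 4)).coalesce()
sparse_out = torch.sspaddmm(self_sp, mat1, mat2)
```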
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mkl/
  SparseBlasImpl.cpp
    295  const Tensor& mat2,  [in addmm_sparse_result()]
    360  const Tensor& mat2,  [in addmm_out_sparse_csr()]
    547  const Tensor& mat2,  [in add_out_sparse_csr()]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
  SparseMM.cu
    14   … const Tensor& mat1, const Tensor& mat2, const Scalar& beta, const Scalar& alpha, Tensor& result) {  [in _sspaddmm_out_only_sparse_cuda()]
    18   … const Tensor& mat1, const Tensor& mat2, const Scalar& beta, const Scalar& alpha, Tensor& result) {  [in _sspaddmm_out_cuda()]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/nested/
  NestedTensorMatmul.cpp
    20   Tensor bmm_nested(const Tensor& self, const Tensor& mat2) {  [in bmm_nested()]
    74   static Tensor matmul_with_bmm_nested(const Tensor& self, const Tensor& mat2) {  [in matmul_with_bmm_nested()]
    217  Tensor matmul_nested(const Tensor& self, const Tensor& mat2) {  [in matmul_nested()]
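bmm_nested / matmul_nested implement torch.bmm and torch.matmul for nested tensors, where each batch entry may have its own shape. A minimal sketch, assuming a build where nested-tensor matmul is enabled (nested tensors are a prototype feature, so op coverage varies by release):

```python
import torch

a = torch.nested.nested_tensor([torch.randn(2, 3), torch.randn(4, 3)])
b = torch.nested.nested_tensor([torch.randn(3, 5), torch.randn(3, 5)])

out = torch.bmm(a, b)                    # component-wise matmul
print([t.shape for t in out.unbind()])   # shapes (2, 5) and (4, 5)
```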
/aosp_15_r20/external/pytorch/torch/csrc/inductor/aoti_torch/
  shim_common.cpp
    685  AtenTensorHandle mat2,  [in aoti_torch__scaled_mm()]
    717  AtenTensorHandle mat2,  [in aoti_torch__scaled_mm_v2()]
    785  AtenTensorHandle mat2,  [in aoti_torch_addmm_out()]
    802  AtenTensorHandle mat2) {  [in aoti_torch_bmm_out()]
    825  AtenTensorHandle mat2) {  [in aoti_torch_mm_out()]
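aoti_torch_mm_out / aoti_torch_addmm_out / aoti_torch__scaled_mm are the C-ABI shim entry points that AOTInductor-generated code calls instead of the Python ops. A rough sketch of the user-level flow that ends up exercising them; the packaging entry point has changed across PyTorch releases, so treat the API below as an assumption for a recent (2.5-era) build:

```python
import torch

class MatMul(torch.nn.Module):
    def forward(self, mat1, mat2):
        return torch.mm(mat1, mat2)

example_inputs = (torch.randn(8, 16), torch.randn(16, 32))
ep = torch.export.export(MatMul(), example_inputs)

# The compiled artifact contains generated C++ that calls the shim
# (aoti_torch_mm_out, etc.) rather than dispatching through Python.
package_path = torch._inductor.aoti_compile_and_package(ep)
```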