
Searched full:matmul (Results 1 – 25 of 1199) sorted by relevance


/aosp_15_r20/external/pytorch/torch/_inductor/fx_passes/
micro_pipeline_tp.py
307 Replace the matmul with the new node.
319 # An ND-matmul is reshape -> mm -> reshape sequence. We first replace
436 matmul = _Matmul.from_match(match)
437 matmuls.append(matmul)
439 matmul = _ScaledMatmul.from_match(match)
440 matmuls.append(matmul)
460 matmul = _Matmul.from_match(match=[user])
461 matmuls.append(matmul)
463 matmul = _ScaledMatmul.from_match([user])
464 matmuls.append(matmul)
[all …]
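The comment on line 319 describes the shape this pass matches: an ND matmul lowered to a reshape -> mm -> reshape sequence. A minimal sketch of that equivalence in plain PyTorch (shapes are illustrative, not taken from the pass):

```python
import torch

# Illustrative shapes. An ND matmul A @ B with a 2-D RHS is equivalent to
# flattening the leading dims, a 2-D mm, and reshaping back -- the
# reshape -> mm -> reshape sequence the pass looks for.
A = torch.randn(4, 8, 16)
B = torch.randn(16, 32)

out_nd = torch.matmul(A, B)                                 # direct ND matmul
out_mm = torch.mm(A.reshape(-1, 16), B).reshape(4, 8, 32)   # reshape -> mm -> reshape
assert torch.allclose(out_nd, out_mm)
```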
/aosp_15_r20/external/tensorflow/tensorflow/compiler/mlir/lite/tests/end2end/
back2back_fake_quant.pbtxt
31 name: "sequential/quant_dense/MatMul/ReadVariableOp/resource"
58 name: "sequential/quant_dense/MatMul/ReadVariableOp"
60 input: "sequential/quant_dense/MatMul/ReadVariableOp/resource"
69 name: "sequential/quant_dense/MatMul/kquant/FakeQuantWithMinMaxVars/ReadVariableOp/resource"
90 name: "sequential/quant_dense/MatMul/kquant/FakeQuantWithMinMaxVars/ReadVariableOp"
92 input: "sequential/quant_dense/MatMul/kquant/FakeQuantWithMinMaxVars/ReadVariableOp/resource"
101 name: "sequential/quant_dense/MatMul/kquant/FakeQuantWithMinMaxVars/ReadVariableOp_1/resource"
122 name: "sequential/quant_dense/MatMul/kquant/FakeQuantWithMinMaxVars/ReadVariableOp_1"
124 input: "sequential/quant_dense/MatMul/kquant/FakeQuantWithMinMaxVars/ReadVariableOp_1/resource"
133 name: "sequential/quant_dense/MatMul/kquant/FakeQuantWithMinMaxVars"
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/python/ops/linalg/sparse/
sparse_csr_matrix_grad.py
228 def matmul(x, y, **kwargs): # pylint: disable=invalid-name function
244 grad_a = matmul(grad, b, transpose_b=not t_b)
246 grad_a = matmul(b, grad, transpose_a=t_b, transpose_b=True)
250 grad_a = matmul(grad, b, adjoint_b=not adj_b)
252 grad_a = matmul(b, grad, adjoint_a=adj_b, adjoint_b=True)
260 grad_a = matmul(b, grad, transpose_a=True, adjoint_b=True)
263 grad_a = matmul(b, grad, transpose_a=True, transpose_b=True)
272 grad_a = matmul(grad, b, transpose_a=True, transpose_b=not t_b)
274 grad_a = matmul(b, grad, transpose_a=t_b)
279 grad_a = matmul(grad, b, transpose_a=True, adjoint_b=not adj_b)
[all …]
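These branches enumerate the transpose/adjoint variants of the standard matmul gradient. A minimal NumPy sketch of the base case (t_a = t_b = False) with a finite-difference spot check; this illustrates the math, not the TF sparse implementation:

```python
import numpy as np

# Base case of the matmul gradient these branches specialize:
# for C = a @ b, grad_a = grad @ b.T and grad_b = a.T @ grad.
rng = np.random.default_rng(0)
a, b = rng.standard_normal((3, 4)), rng.standard_normal((4, 5))
grad = rng.standard_normal((3, 5))          # upstream gradient dL/dC

grad_a = grad @ b.T
grad_b = a.T @ grad

# finite-difference spot check of one entry of grad_a
eps = 1e-6
a2 = a.copy(); a2[0, 0] += eps
numeric = ((a2 @ b - a @ b) * grad).sum() / eps
assert np.isclose(numeric, grad_a[0, 0], atol=1e-4)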
/aosp_15_r20/external/tensorflow/tensorflow/compiler/aot/tests/
tfcompile_test.cc
266 foo::bar::MatMulComp matmul; in TEST() local
267 matmul.set_thread_pool(&device); in TEST()
268 EXPECT_EQ(matmul.arg0_data(), matmul.arg_data(0)); in TEST()
269 EXPECT_EQ(matmul.arg1_data(), matmul.arg_data(1)); in TEST()
273 matmul.arg0(0, 0) = 1; in TEST()
274 matmul.arg0(0, 1) = 2; in TEST()
275 matmul.arg0(0, 2) = 3; in TEST()
276 matmul.arg0(1, 0) = 4; in TEST()
277 matmul.arg0(1, 1) = 5; in TEST()
278 matmul.arg0(1, 2) = 6; in TEST()
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/cc/framework/
gradients_test.cc
34 using ops::MatMul;
64 // dy| dx| (MatMul Gradient Graph)
75 // | z| | (MatMul Forward Graph)
93 auto z = MatMul(scope, x, y); in TEST_F()
100 auto dx = MatMul(scope, dz, y, MatMul::TransposeB(true)); in TEST_F()
101 auto dy = MatMul(scope, x, dz, MatMul::TransposeA(true)); in TEST_F()
119 auto z = MatMul(scope, x, y); in TEST_F()
128 auto dx = MatMul(scope, dz, y, MatMul::TransposeB(true)); in TEST_F()
129 auto dy = MatMul(scope, x, dz, MatMul::TransposeA(true)); in TEST_F()
145 auto x = MatMul(scope, u, v); in TEST_F()
[all …]
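The test wires up the MatMul gradient graph by hand: for z = x @ y, dx = dz @ yᵀ and dy = xᵀ @ dz. A sketch of the same check in Python using tf.GradientTape (the tape-based form is this note's assumption, not part of the C++ test):

```python
import tensorflow as tf

# For z = x @ y: dx = dz @ y^T and dy = x^T @ dz.
x = tf.random.normal([2, 3])
y = tf.random.normal([3, 4])
with tf.GradientTape() as tape:
    tape.watch([x, y])
    z = tf.matmul(x, y)
dz = tf.ones_like(z)
dx, dy = tape.gradient(z, [x, y], output_gradients=dz)

tf.debugging.assert_near(dx, tf.matmul(dz, y, transpose_b=True))
tf.debugging.assert_near(dy, tf.matmul(x, dz, transpose_a=True))
```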
/aosp_15_r20/external/tensorflow/tensorflow/python/ops/
H A Dlinalg_grad.py51 return -math_ops.matmul( # pylint: disable=invalid-unary-operand-type
53 math_ops.matmul(grad, ainv, adjoint_a=op_adjoint,
475 middle = math_ops.matmul(l, grad, adjoint_a=True)
480 grad_a = math_ops.matmul(
481 math_ops.matmul(l_inverse, middle, adjoint_a=True), l_inverse)
508 """Equiv to matmul(x, adjoint(matrix_inverse(r))) if r is upper-tri."""
517 qdq = math_ops.matmul(q, dq, adjoint_a=True)
519 rdr = math_ops.matmul(r, dr, adjoint_b=True)
523 grad_a = math_ops.matmul(q, dr + _TriangularSolve(tril, r))
524 grad_b = _TriangularSolve(dq - math_ops.matmul(q, qdq), r)
[all …]
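The first excerpt (lines 51-53) is the matrix-inverse gradient: for C = inv(A), dL/dA = -Cᴴ (dL/dC) Cᴴ. A real-valued NumPy sketch with a finite-difference spot check (illustrative only, not the TF code path):

```python
import numpy as np

# For C = inv(A): dL/dA = -C.T @ (dL/dC) @ C.T (real case; adjoint in general).
rng = np.random.default_rng(1)
A = rng.standard_normal((4, 4)) + 4.0 * np.eye(4)   # keep A well-conditioned
grad_C = rng.standard_normal((4, 4))

C = np.linalg.inv(A)
grad_A = -C.T @ grad_C @ C.T

# finite-difference spot check of one entry
eps = 1e-6
A2 = A.copy(); A2[1, 2] += eps
numeric = ((np.linalg.inv(A2) - C) * grad_C).sum() / eps
assert np.isclose(numeric, grad_A[1, 2], atol=1e-4)
```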
/aosp_15_r20/external/tensorflow/tensorflow/python/kernel_tests/math_ops/
matmul_op_test.py
15 """Tests for tensorflow.ops.math_ops.matmul."""
33 # TODO(yangzihao): Currently matmul autotuning is disabled by default. Use
39 """Simple test for tf.matmul where Tout is different from T."""
42 # TODO(shivaniagrawal): uint8 is not supported for mixed matmul type in XLA.
51 # TODO(shivaniagrawal): uint8 is not supported for mixed matmul type in XLA.
62 """Simple test for matvec, which is sugar on top of matmul."""
75 np.matmul(full.T, empty), math_ops.matmul(full, empty, adjoint_a=True))
77 np.matmul(empty.T, full), math_ops.matmul(empty, full, adjoint_a=True))
103 @test_util.run_without_tensor_float_32("Tests matmul")
111 print("Built without fp16 matmul support for Cuda, running test on CPU.")
[all …]
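Line 62 calls matvec "sugar on top of matmul". A short sketch of the relation being tested, using the public tf.linalg.matvec API:

```python
import tensorflow as tf

# matvec(a, b) == matmul against b with a trailing unit dim added and removed.
a = tf.random.normal([3, 4])
b = tf.random.normal([4])

mv = tf.linalg.matvec(a, b)
mm = tf.squeeze(tf.matmul(a, tf.expand_dims(b, -1)), axis=-1)
tf.debugging.assert_near(mv, mm)
```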
/aosp_15_r20/external/tensorflow/tensorflow/compiler/mlir/tfrt/python_tests/
tf_matmul_test.py
15 """Tests for tf.MatMul JIT compilation."""
23 def matmul(): function
25 func.func @matmul(%arg0: tensor<?x?xf32>,
27 %0 = "tf.MatMul"(%arg0, %arg1) {
43 np.testing.assert_allclose(res, np.matmul(lhs, rhs), rtol=1e-05)
48 # Matmul: [1, k] x [k, 1]
50 compiled = jitrt.compile(matmul(), "matmul")
55 # Matmul: [1, k] x [k, n]
57 compiled = jitrt.compile(matmul(), "matmul")
63 # Matmul: [n, k] x [k, 1]
[all …]
/aosp_15_r20/external/pytorch/test/distributed/
test_compute_comm_reordering.py
111 b = torch.matmul(a, a)
112 return torch.matmul(ar, b)
120 # Verify that the wait_tensor is sinked below the 1st matmul but
121 # above the 2nd matmul.
149 b = torch.matmul(a, a)
151 d = torch.matmul(c, c)
153 return torch.matmul(d, e)
162 # Verify that the all_reduce_ has been raised above the 2nd matmul
163 # but below the 1st matmul. Note that the all_reduce_ directly
164 # writes to the output buffer of the 1st matmul, which is an input
[all …]
/aosp_15_r20/external/pytorch/test/inductor/
test_fused_attention.py
115 torch.matmul(query, key.transpose(-2, -1))
118 .matmul(value)
143 torch.matmul(query, key.transpose(-2, -1))
146 .matmul(value)
253 torch.matmul(query, key.transpose(-2, -1))
257 return attn_weights.matmul(value), attn_weights
274 torch.matmul(query, key.transpose(-2, -1))
277 .matmul(value)
289 torch.matmul(query, key.transpose(-2, -1)).div(3.0).softmax(dim=-1),
293 ).matmul(value)
[all …]
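These tests match the unfused attention chain matmul -> div -> softmax -> matmul so it can be rewritten into a fused SDPA kernel. A sketch of the pattern and the fused reference it should equal (head size and scale are illustrative):

```python
import torch

# The unfused chain the patterns match, and the fused kernel it becomes.
q, k, v = (torch.randn(2, 4, 8, 16) for _ in range(3))
scale = 16 ** 0.5                     # sqrt(head_dim), illustrative

attn_weights = torch.matmul(q, k.transpose(-2, -1)).div(scale).softmax(dim=-1)
out = attn_weights.matmul(v)

ref = torch.nn.functional.scaled_dot_product_attention(q, k, v)
assert torch.allclose(out, ref, atol=1e-5)
```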
/aosp_15_r20/external/tensorflow/tensorflow/compiler/mlir/tensorflow/tests/
unroll-batch-matmul.mlir
1 // RUN: tf-opt -split-input-file -verify-diagnostics -tf-unroll-batch-matmul %s | FileCheck %s
35 …// CHECK: %[[MATMUL_1:.*]] = "tf.MatMul"(%[[LHS_1]], %[[RHS_1]]) {transpose_a = false, transpose_b…
36 …// CHECK: %[[MATMUL_2:.*]] = "tf.MatMul"(%[[LHS_2]], %[[RHS_2]]) {transpose_a = false, transpose_b…
37 …// CHECK: %[[MATMUL_3:.*]] = "tf.MatMul"(%[[LHS_3]], %[[RHS_3]]) {transpose_a = false, transpose_b…
38 …// CHECK: %[[MATMUL_4:.*]] = "tf.MatMul"(%[[LHS_4]], %[[RHS_4]]) {transpose_a = false, transpose_b…
39 …// CHECK: %[[MATMUL_5:.*]] = "tf.MatMul"(%[[LHS_5]], %[[RHS_5]]) {transpose_a = false, transpose_b…
40 …// CHECK: %[[MATMUL_6:.*]] = "tf.MatMul"(%[[LHS_6]], %[[RHS_6]]) {transpose_a = false, transpose_b…
79 …// CHECK: %[[MATMUL_1:.*]] = "tf.MatMul"(%[[LHS_1]], %[[RHS_1]]) {transpose_a = true, transpose_b …
80 …// CHECK: %[[MATMUL_2:.*]] = "tf.MatMul"(%[[LHS_2]], %[[RHS_2]]) {transpose_a = true, transpose_b …
81 …// CHECK: %[[MATMUL_3:.*]] = "tf.MatMul"(%[[LHS_3]], %[[RHS_3]]) {transpose_a = true, transpose_b …
[all …]
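The pass unrolls one batch matmul into per-slice 2-D tf.MatMul ops, as the CHECK lines show. A NumPy sketch of the equivalence (batch size 6 to mirror the six CHECK lines):

```python
import numpy as np

# Batch matmul over 6 slices == six independent 2-D matmuls, one per slice.
lhs = np.random.randn(6, 4, 5)
rhs = np.random.randn(6, 5, 3)

batched = np.matmul(lhs, rhs)
unrolled = np.stack([lhs[i] @ rhs[i] for i in range(6)])
assert np.allclose(batched, unrolled)
```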
device_copy.mlir
5 // CHECK: tf.MatMul
6 …%outputs = "tf.MatMul"(%arg0, %arg1) {device = "/device:CPU:0", transpose_a = false, transpose_b =…
14 // CHECK: tf.MatMul
15 …%outputs = "tf.MatMul"(%arg0, %arg1) {device = "", transpose_a = false, transpose_b = false} : (te…
23 // CHECK: tf.MatMul
24 …%outputs = "tf.MatMul"(%arg0, %arg1) {device = "/device:GPU:0", transpose_a = false, transpose_b =…
32 // CHECK: tf.MatMul
33 …%outputs = "tf.MatMul"(%arg0, %arg1) {device = "/device:GPU:0", transpose_a = false, transpose_b =…
41 // CHECK: tf.MatMul
42 …%outputs = "tf.MatMul"(%arg0, %arg1) {device = "", transpose_a = false, transpose_b = false} : (te…
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/compiler/jit/tests/
opens2s_gnmt_mixed_precision.golden_summary
119 MatMul 1
130 MatMul 2
194 MatMul 10
227 MatMul 20
248 MatMul 1
264 MatMul 2
289 MatMul 1
305 MatMul 2
321 MatMul 1
335 MatMul 1
[all …]
/aosp_15_r20/external/pytorch/test/dynamo/
test_activation_checkpointing.py
205 return torch.sigmoid(torch.matmul(x, y))
225 return torch.sigmoid(torch.matmul(x, y))
244 return torch.sigmoid(torch.matmul(x, y))
289 return torch.sigmoid(torch.matmul(x, y))
449 a = torch.sigmoid(torch.matmul(x, y))
476 a = torch.matmul(x, y)
478 return torch.matmul(a, z)
510 return torch.matmul(x, torch.nn.functional.dropout(y, 0.5))
562 return torch.sigmoid(torch.matmul(x, x))
591 freq=3, # 1 matmul recompute and 2 bwd mm ops per fwd matmul, so 1 + 2 * 1 = 3)
[all …]
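The freq=3 comment on line 591 counts one recomputed forward matmul plus two backward mm ops per checkpointed matmul. A sketch of the recompute pattern with torch.utils.checkpoint (shapes illustrative):

```python
import torch
import torch.utils.checkpoint as cp

# The checkpointed region drops its activations after forward and recomputes
# them during backward, so its forward matmul runs again at backward time.
def fn(x, y):
    return torch.sigmoid(torch.matmul(x, y))

x = torch.randn(8, 8, requires_grad=True)
y = torch.randn(8, 8, requires_grad=True)

out = cp.checkpoint(fn, x, y, use_reentrant=False)
out.sum().backward()
```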
/aosp_15_r20/external/swiftshader/third_party/llvm-16.0/llvm/lib/Transforms/Scalar/
LowerMatrixIntrinsics.cpp
915 // If we have a TT matmul or a TT add, lift the transpose. We may be able in optimizeTransposes()
1406 CallInst *MatMul) { in getNonAliasingPointer() argument
1418 BasicBlock *Check0 = MatMul->getParent(); in getNonAliasingPointer()
1427 SplitBlock(MatMul->getParent(), MatMul, (DomTreeUpdater *)nullptr, LI, in getNonAliasingPointer()
1430 SplitBlock(MatMul->getParent(), MatMul, (DomTreeUpdater *)nullptr, LI, in getNonAliasingPointer()
1433 SplitBlock(MatMul->getParent(), MatMul, (DomTreeUpdater *)nullptr, LI, in getNonAliasingPointer()
1439 IRBuilder<> Builder(MatMul); in getNonAliasingPointer()
1491 bool isFusionProfitable(CallInst *MatMul) { in isFusionProfitable() argument
1495 ShapeInfo LShape(MatMul->getArgOperand(2), MatMul->getArgOperand(3)); in isFusionProfitable()
1496 ShapeInfo RShape(MatMul->getArgOperand(3), MatMul->getArgOperand(4)); in isFusionProfitable()
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/python/debug/cli/
analyzer_cli_test.py
47 # MatMul op is supported by MKL for some data types and its name is prefixed
54 return "MatMul"
64 # default dtype of matmul op created is float64
621 w = math_ops.matmul(u, v, name="simple_mul_add/matmul")
681 "simple_mul_add/v/read:0", "simple_mul_add/matmul:0",
699 "simple_mul_add/matmul:0", "simple_mul_add/add:0"
715 "simple_mul_add/matmul:0", "simple_mul_add/add:0"
730 "simple_mul_add/matmul:0", "simple_mul_add/add:0"
752 "simple_mul_add/matmul:0", "simple_mul_add/add:0"
769 "simple_mul_add/matmul:0", "simple_mul_add/add:0"
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/core/common_runtime/
quantize_training_test.cc
94 Node* m1 = test::graph::Matmul(g, relu, identity, false, false); in TEST_F()
145 Node* m1 = test::graph::Matmul(g, relu, relu6, false, false); in TEST_F()
179 // Construct a graph with an additional backward Matmul. in TEST_F()
185 // We will use node d as input to the backwards matmul to ensure that it in TEST_F()
194 Node* m1 = test::graph::Matmul(g, relu, identity, false, false); in TEST_F()
195 Node* m2 = test::graph::Matmul(g, identity, c, false, false); in TEST_F()
199 // Add a Matmul node with name starting with "gradients". We will check that in TEST_F()
202 TF_ASSERT_OK(NodeBuilder(g->NewName("gradients/n"), "MatMul") in TEST_F()
215 // Ensure that the backwards matmul input was not quantized. in TEST_F()
233 // Construct a graph with an additional backward Matmul. in TEST_F()
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/python/kernel_tests/linalg/
linear_operator_identity_test.py
100 y = operator.matmul(x)
164 operator.matmul(x)
175 self.evaluate(operator.matmul(x))
185 operator_matmul = operator.matmul(x)
199 operator_matmul = operator.matmul(x)
218 # Expected result of matmul and solve.
221 operator_matmul = operator.matmul(x)
242 # Expected result of matmul and solve.
245 operator_matmul = operator.matmul(x)
405 y = operator.matmul(x)
[all …]
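For the identity operator, matmul is a checked pass-through: y equals x. A short sketch of the behavior under test, using the public tf.linalg.LinearOperatorIdentity API:

```python
import tensorflow as tf

# Identity operator: matmul is a shape/dtype-checked pass-through.
operator = tf.linalg.LinearOperatorIdentity(num_rows=3, dtype=tf.float32)
x = tf.random.normal([3, 2])

y = operator.matmul(x)
tf.debugging.assert_near(y, x)
```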
/aosp_15_r20/external/pytorch/aten/src/ATen/test/
native_test.cpp
135 // Throw StartsWith("both arguments to matmul need to be at least 1D") in TestMatmul()
137 ASSERT_ANY_THROW(scalar.matmul(d2)); in TestMatmul()
138 // Throw StartsWith("both arguments to matmul need to be at least 1D") in TestMatmul()
140 ASSERT_ANY_THROW(d2.matmul(scalar)); in TestMatmul()
143 ASSERT_ALLCLOSE(d1.matmul(d1), d1.dot(d1)); in TestMatmul()
144 ASSERT_ALLCLOSE(d2.matmul(d1), d2.mv(d1)); in TestMatmul()
146 ASSERT_ALLCLOSE(d1o.matmul(d2), d1o.unsqueeze(0).mm(d2).squeeze(0)); in TestMatmul()
150 ASSERT_ALLCLOSE(d2.matmul(d2o), d2.mm(d2o)); in TestMatmul()
155 d3.matmul(d1), d3.bmm(d1.view({1, 3, 1}).expand({5, 3, 1})).view({5, 2})); in TestMatmul()
156 ASSERT_ALLCLOSE(d1o.matmul(d3), d1o.expand({5, 1, 2}).bmm(d3).view({5, 3})); in TestMatmul()
[all …]
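The test walks torch.matmul's rank-dispatch rules: 1-D × 1-D is dot, 2-D × 1-D is mv, and batched cases reduce to bmm, as lines 143-156 assert. The same checks in Python (a sketch mirroring the ATen test):

```python
import torch

# matmul dispatches on operand rank: dot, mv, or batched bmm.
d1 = torch.randn(3)
d2 = torch.randn(2, 3)
d3 = torch.randn(5, 2, 3)

assert torch.allclose(d1.matmul(d1), d1.dot(d1))    # 1-D x 1-D -> dot
assert torch.allclose(d2.matmul(d1), d2.mv(d1))     # 2-D x 1-D -> mv
assert torch.allclose(                              # 3-D x 1-D -> batched mv via bmm
    d3.matmul(d1),
    d3.bmm(d1.view(1, 3, 1).expand(5, 3, 1)).view(5, 2))
```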
/aosp_15_r20/external/tensorflow/tensorflow/python/ops/linalg/
linear_operator_block_lower_triangular.py
97 >>> operator.matmul(x)
104 The above `matmul` is equivalent to:
105 >>> tf.concat([operator_0.matmul(x0),
106 ... operator_1.matmul(x0) + operator_2.matmul(x1)], axis=0)
116 `x` is a batch matrix with compatible shape for `matmul` and `solve` if
165 * `operator.matmul` has complexity equal to the sum of the `matmul`
168 of the operators on the diagonal and the `matmul` complexities of the
399 def matmul(self, x, adjoint=False, adjoint_arg=False, name="matmul"): member in LinearOperatorBlockLowerTriangular
409 Y = operator.matmul(X)
442 return linear_operator_algebra.matmul(left_operator, right_operator)
[all …]
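The docstring's identity (lines 104-106): multiplying a 2×2 block-lower-triangular operator by a blocked x can be done blockwise. A NumPy sketch with illustrative 2×2 blocks:

```python
import numpy as np

# [[A0, 0], [A1, A2]] @ [x0; x1] == [A0 @ x0; A1 @ x0 + A2 @ x1]
A0, A1, A2 = (np.random.randn(2, 2) for _ in range(3))
x0, x1 = np.random.randn(2, 1), np.random.randn(2, 1)

full = np.block([[A0, np.zeros((2, 2))], [A1, A2]])
x = np.concatenate([x0, x1], axis=0)
blockwise = np.concatenate([A0 @ x0, A1 @ x0 + A2 @ x1], axis=0)
assert np.allclose(full @ x, blockwise)
```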
/aosp_15_r20/external/pytorch/torch/csrc/autograd/
FunctionsManual.cpp
233 x1 * ratio.sum(-1, true) - ratio.matmul(x2), in _euclidean_dist_backward()
234 x2 * ratio.sum(-2, false).unsqueeze(-1) - ratio.mT().matmul(x1)}; in _euclidean_dist_backward()
401 .matmul(grads[1]); in linear_double_backward()
409 .matmul(grads[0].dim() == 1 ? grads[0].unsqueeze(0) : grads[0]); in linear_double_backward()
422 .matmul(weight.mT()); in linear_double_backward()
426 (self.dim() == 1 ? self.unsqueeze(0) : self).matmul(grads[1].mT()); in linear_double_backward()
875 is_vector_case ? dA.matmul(X.unsqueeze(-1)).squeeze(-1) : dA.matmul(X); in generic_solve_jvp()
1576 To implement the backward algorithm for sparse matrix-matrix matmul (SPMM) we in sparse_sparse_matmul_backward()
1936 dL = L_.matmul(dL); in cholesky_jvp()
1960 auto gA = L_.mH().matmul(gL_).tril(); in cholesky_backward()
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/
matmul_op_test.cc
106 ops::MatMul matmul = ops::MatMul( in RunMatMulWithBias() local
107 root.WithOpName("matmul"), in RunMatMulWithBias()
110 ops::MatMul::Attrs().TransposeA(transpose_a).TransposeB(transpose_b)); in RunMatMulWithBias()
113 root.WithOpName("with_bias"), matmul, in RunMatMulWithBias()
125 ops::MatMul matmul = ops::MatMul( in RunMatMulWithBiasAndActivation() local
126 root.WithOpName("matmul"), in RunMatMulWithBiasAndActivation()
129 ops::MatMul::Attrs().TransposeA(transpose_a).TransposeB(transpose_b)); in RunMatMulWithBiasAndActivation()
132 root.WithOpName("with_bias"), matmul, in RunMatMulWithBiasAndActivation()
207 Tensor matmul; in VerifyBiasAddTensorsNear() local
210 run_default(lhs, rhs, bias, &matmul); in VerifyBiasAddTensorsNear()
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/examples/speech_commands/
models.py
163 This is a very simple model with just one matmul and bias layer. As you'd
171 [MatMul]<-(weights)
196 logits = tf.matmul(fingerprint_input, weights) + bias
230 [MatMul]<-(weights)
322 final_fc = tf.matmul(flattened_second_conv, final_fc_weights) + final_fc_bias
347 [MatMul]<-(weights)
351 [MatMul]<-(weights)
355 [MatMul]<-(weights)
423 first_fc = tf.matmul(flattened_first_conv, first_fc_weights) + first_fc_bias
437 second_fc = tf.matmul(second_fc_input, second_fc_weights) + second_fc_bias
[all …]
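The ASCII diagram describes a single MatMul-plus-bias layer producing the logits, matching line 196. A sketch of that layer in TF2 (tensor sizes here are illustrative, not the file's constants):

```python
import tensorflow as tf

# One MatMul plus a bias produces the logits; sizes are illustrative.
fingerprint_input = tf.random.normal([1, 3920])
weights = tf.Variable(tf.random.truncated_normal([3920, 12], stddev=0.001))
bias = tf.Variable(tf.zeros([12]))

logits = tf.matmul(fingerprint_input, weights) + bias
```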
/aosp_15_r20/external/tensorflow/tensorflow/python/profiler/internal/
run_metadata_test.py
64 y = math_ops.matmul(x, w)
89 # Grappler might fuse MatMul with BiasAdd in remapper optimizer.
129 self.assertEqual(tfprof_node.children[0].name, 'MatMul')
132 ret = _extract_node(run_meta, 'MatMul')
145 mm = _extract_node(run_meta, 'MatMul')['gpu:0'][0]
160 # random normal must allocated first since matmul depends on it.
162 # deallocates the memory after matmul started.
170 self.assertEqual(tfprof_node.children[0].name, 'MatMul')
173 ret = _extract_node(run_meta, 'MatMul')
176 ret = _extract_node(run_meta, 'MatMul:MatMul')
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mkldnn/xpu/detail/
Matmul.cpp
14 sycl::event matmul( in matmul() function
25 "oneDNN matmul only works with 2D or 3D, got ", in matmul()
30 TORCH_CHECK(result.defined(), "oneDNN matmul result should be defined"); in matmul()
65 "matmul supports [n] or [1] when bias dim is 1 ..."); in matmul()
79 "matmul supports [m, n] or [1, n] or [m, 1] or [1, 1] when bias dim is 2 ..."); in matmul()
85 "matmul bias must be expandable to:", in matmul()
92 b.numel() == 1, "matmul supports 1 numel when bias dim is [] ..."); in matmul()
99 TORCH_CHECK(0, "unsupported bias dim in matmul ..."); in matmul()
105 // xpu matmul support both ab/ba shape for m2 tensor, we don't check any more in matmul()
167 dnnl::matmul matmul_p; in matmul()
[all …]
