
Searched full:tensor_out (Results 1 – 25 of 64) sorted by relevance


/aosp_15_r20/external/executorch/kernels/aten/
functions.yaml
83 - op: bitwise_and.Tensor_out
87 - op: bitwise_or.Tensor_out
93 - op: bitwise_xor.Tensor_out
108 - op: clamp.Tensor_out
148 - op: eq.Tensor_out
162 - op: fill.Tensor_out
170 - op: fmod.Tensor_out
182 - op: ge.Tensor_out
192 - op: gt.Tensor_out
200 - op: index.Tensor_out
[all …]
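Aside on the name: the ".Tensor_out" suffix in these yaml entries marks the out-variant overload, which writes into a caller-provided tensor instead of allocating a new one. A minimal sketch of such a call, assuming stock PyTorch (going through the ATen dispatcher, not the ExecuTorch runtime these kernels target):

```python
import torch

a = torch.tensor([12, 10])   # 0b1100, 0b1010
b = torch.tensor([10, 6])    # 0b1010, 0b0110
out = torch.empty_like(a)

# The ".Tensor_out" overload takes a preallocated `out` tensor and writes
# the result into it rather than returning a fresh allocation.
torch.ops.aten.bitwise_and.Tensor_out(a, b, out=out)
print(out)  # tensor([8, 2])
```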
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/
pooling_ops_3d.cc
210 const Tensor& tensor_out, const Tensor& out_backprop, in launch()
270 // Slice from tensor_out. in launch()
273 tensor_out.tensor<T, 5>().slice(src_indices, src_sizes); in launch()
338 const Tensor& tensor_out = context->input(1); in Compute() local
342 OP_REQUIRES(context, tensor_out.dims() == 5, in Compute()
343 errors::InvalidArgument("tensor_out must be 5-dimensional")); in Compute()
371 context, tensor_out.shape() == out_shape, in Compute()
373 ", but got ", tensor_out.shape())); in Compute()
379 context, tensor_in, tensor_out, out_backprop, window, stride, out, in Compute()
563 const Tensor& tensor_in, const Tensor& tensor_out, in launch()
[all …]
pooling_ops_common.cc
224 Tensor* tensor_out = nullptr; in Compute() local
226 context->allocate_output(0, tensor_out_shape, &tensor_out)); in Compute()
267 transformed_output = *tensor_out; in Compute()
272 auto& transformed_output = *tensor_out; in Compute()
416 tensor_out->tensor<RT, 4>()); in Compute()
444 TensorFormat data_format, const Tensor* tensor_in, const Tensor* tensor_out, in Compute() argument
448 (tensor_in && tensor_out)) in Compute()
449 << "For MaxPoolGrad, both tensor_in and tensor_out needs to be " in Compute()
464 if (tensor_out) { in Compute()
465 OP_REQUIRES(context, tensor_out->shape() == params.forward_output_shape(), in Compute()
[all …]
fractional_max_pool_op.cc
240 const Tensor& tensor_out = context->input(1); in Compute() local
254 OP_REQUIRES(context, tensor_out.dims() == tensor_in_and_out_dims, in Compute()
257 tensor_out.DebugString())); in Compute()
258 OP_REQUIRES(context, tensor_out.NumElements() > 0, in Compute()
260 tensor_out.DebugString())); in Compute()
267 output_size[i] = tensor_out.dim_size(i); in Compute()
275 {1}, DataTypeToEnum<T>::v(), tensor_out.shape(), in Compute()
279 tensor_out.shape(), in Compute()
281 // Find arg_max for each tensor_out in Compute()
348 // Check tensor_out_dup is the same as tensor_out. in Compute()
[all …]
maxpooling_op.cc
268 const Tensor& tensor_out = context->input(1); in Compute() local
274 OP_REQUIRES(context, tensor_out.dims() == 4, in Compute()
275 errors::InvalidArgument("tensor_out must be 4-dimensional")); in Compute()
284 {1}, DataTypeToEnum<T>::v(), tensor_out.shape(), in Compute()
288 tensor_out.shape(), in Compute()
328 OP_REQUIRES(context, tensor_out.shape() == params.forward_output_shape(), in Compute()
331 ", but got ", tensor_out.shape())); in Compute()
394 const Tensor& tensor_out = context->input(1); in Compute() local
400 OP_REQUIRES(context, tensor_out.dims() == 4, in Compute()
401 errors::InvalidArgument("tensor_out must be 4-dimensional")); in Compute()
[all …]
cudnn_pooling_gpu.cc
134 const Tensor* tensor_in, const Tensor* tensor_out, Tensor* input_backprop) { in Compute() argument
136 (tensor_in && tensor_out)) in Compute()
137 << "For MaxPoolGrad, both tensor_in and tensor_out needs to be " in Compute()
156 if (data_format == FORMAT_NHWC || tensor_out == nullptr) { in Compute()
163 transformed_output = *tensor_out; in Compute()
189 if (tensor_out != nullptr) { in Compute()
191 tensor_out->tensor<T, 5>(), in Compute()
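In these TensorFlow pooling kernels, tensor_out is the saved forward output that the gradient op receives alongside tensor_in and out_backprop, and whose rank and shape are validated before use. A minimal sketch of driving the 2-D MaxPoolGrad raw op from Python, assuming a stock TensorFlow install (the 3-D and fractional variants in the hits follow the same convention):

```python
import tensorflow as tf

x = tf.random.normal([1, 4, 4, 1])                            # tensor_in
y = tf.nn.max_pool2d(x, ksize=2, strides=2, padding="VALID")  # tensor_out
dy = tf.ones_like(y)                                          # out_backprop

# The gradient kernel is handed the forward output ("tensor_out" above) so it
# can locate the maxima; the kernel checks its rank and shape before use.
dx = tf.raw_ops.MaxPoolGrad(
    orig_input=x,
    orig_output=y,
    grad=dy,
    ksize=[1, 2, 2, 1],
    strides=[1, 2, 2, 1],
    padding="VALID",
)
print(dx.shape)  # (1, 4, 4, 1)
```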
/aosp_15_r20/external/pytorch/test/expect/
HasDecompTest.test_aten_core_operators.expect
90 aten::bitwise_and.Tensor_out
98 aten::bitwise_left_shift.Tensor_out
109 aten::bitwise_or.Tensor_out
117 aten::bitwise_right_shift.Tensor_out
125 aten::bitwise_xor.Tensor_out
131 aten::bucketize.Tensor_out
142 aten::clamp.Tensor_out
205 aten::eq.Tensor_out
244 aten::fmod.Tensor_out
248 aten::frexp.Tensor_out
[all …]
HasDecompTest.test_has_decomposition.expect
4 aten::__lshift__.Tensor_out
8 aten::__rshift__.Tensor_out
71 aten::_ctc_loss.Tensor_out
142 aten::_foreach_add.Tensor_out
152 aten::_foreach_addcdiv.Tensor_out
161 aten::_foreach_addcmul.Tensor_out
208 aten::_foreach_div.Tensor_out
279 aten::_foreach_mul.Tensor_out
693 aten::bernoulli.Tensor_out
790 aten::fill.Tensor_out
[all …]
/aosp_15_r20/external/executorch/kernels/portable/
functions.yaml
180 - op: bitwise_and.Tensor_out
195 - op: bitwise_or.Tensor_out
205 - op: bitwise_xor.Tensor_out
231 - op: clamp.Tensor_out
327 - op: eq.Tensor_out
357 - op: fill.Tensor_out
377 - op: fmod.Tensor_out
410 - op: ge.Tensor_out
430 - op: gt.Tensor_out
440 - op: index.Tensor_out
[all …]
/aosp_15_r20/external/executorch/backends/arm/runtime/
ArmBackendEthosU.cpp
246 auto tensor_out = args[handles.inputs->count + i]->toTensor(); in execute() local
250 tensor_out, in execute()
254 if (tensor_out.scalar_type() == ScalarType::Char and in execute()
259 tensor_out.mutable_data_ptr<char>(), in execute()
260 tensor_out.size(1), in execute()
261 tensor_out.size(2), in execute()
262 tensor_out.size(3)); in execute()
264 for (int j = 0; j < tensor_out.numel(); j++) { in execute()
265 if (tensor_out.scalar_type() == ScalarType::Char) { in execute()
267 tensor_out.mutable_data_ptr<char>()[j] = output_address[j]; in execute()
[all …]
/aosp_15_r20/external/pytorch/torch/distributed/tensor/_ops/
_pointwise_ops.py
109 aten.bitwise_and.Tensor_out,
116 aten.bitwise_left_shift.Tensor_out,
126 aten.bitwise_or.Tensor_out,
133 aten.bitwise_right_shift.Tensor_out,
140 aten.bitwise_xor.Tensor_out,
180 aten.eq.Tensor_out,
215 aten.fmod.Tensor_out,
225 aten.gt.Tensor_out,
252 aten.lt.Tensor_out,
260 aten.lerp.Tensor_out,
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/mkl/
mkl_input_conversion_op.cc
110 Tensor* tensor_out; in Compute() local
122 AllocateOutputSetMklShape(context, kInputIndex_0, &tensor_out, in Compute()
137 input.CheckReorderToOpMem(input1_md, tensor_out, net, net_args, in Compute()
211 Tensor* tensor_out; in Compute() local
223 AllocateOutputSetMklShape(context, tf_tensor_index, &tensor_out, in Compute()
235 bool reordered = tf_input.CheckReorderToOpMem(output_mkl_md, tensor_out, in Compute()
244 tensor_out->CopyFrom(*tf_tensor, tensor_out->shape()), in Compute()
/aosp_15_r20/external/executorch/kernels/quantized/test/
test_out_variants.py
31 self.assertIsNotNone(ops.edge.quantized_decomposed.choose_qparams.Tensor_out)
35 out_variant.name(), "quantized_decomposed::choose_qparams.Tensor_out"
48 ops.edge.quantized_decomposed.dequantize_per_tensor.Tensor_out
53 out_variant.name(), "quantized_decomposed::dequantize_per_tensor.Tensor_out"
86 ops.edge.quantized_decomposed.quantize_per_tensor.Tensor_out
91 out_variant.name(), "quantized_decomposed::quantize_per_tensor.Tensor_out"
/aosp_15_r20/external/executorch/kernels/portable/cpu/pattern/
comparison_op.h
39 if (op == "eq.Tensor_out" || op == "eq.Scalar_out") { in get_comparison_fn()
42 if (op == "ne.Tensor_out" || op == "ne.Scalar_out") { in get_comparison_fn()
45 if (op == "ge.Tensor_out" || op == "ge.Scalar_out") { in get_comparison_fn()
48 if (op == "le.Tensor_out" || op == "le.Scalar_out") { in get_comparison_fn()
51 if (op == "gt.Tensor_out" || op == "gt.Scalar_out") { in get_comparison_fn()
54 if (op == "lt.Tensor_out" || op == "lt.Scalar_out") { in get_comparison_fn()
bitwise_op.h
36 if (op == "bitwise_and.Tensor_out" || op == "bitwise_and.Scalar_out") { in get_bitwise_fn()
39 if (op == "bitwise_or.Tensor_out" || op == "bitwise_or.Scalar_out") { in get_bitwise_fn()
42 if (op == "bitwise_xor.Tensor_out" || op == "bitwise_xor.Scalar_out") { in get_bitwise_fn()
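These pattern headers key off the overload name string ("eq.Tensor_out", "eq.Scalar_out", ...) to pick a shared scalar functor. A generic sketch of that dispatch shape in Python, with hypothetical names rather than ExecuTorch's actual helpers:

```python
import operator

# Illustrative only: map the base op name that reaches the kernel to the
# scalar predicate shared by its Tensor_out and Scalar_out variants.
_COMPARISONS = {
    "eq": operator.eq,
    "ne": operator.ne,
    "ge": operator.ge,
    "le": operator.le,
    "gt": operator.gt,
    "lt": operator.lt,
}

def get_comparison_fn(op: str):
    base = op.split(".", 1)[0]          # "ge.Tensor_out" -> "ge"
    if base not in _COMPARISONS:
        raise ValueError(f"unsupported comparison op: {op}")
    return _COMPARISONS[base]

assert get_comparison_fn("ge.Tensor_out")(3, 2)
assert not get_comparison_fn("lt.Scalar_out")(3, 2)
```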
/aosp_15_r20/external/executorch/kernels/quantized/
targets.bzl
18 "quantized_decomposed::choose_qparams.Tensor_out",
22 "quantized_decomposed::dequantize_per_tensor.Tensor_out",
28 "quantized_decomposed::quantize_per_tensor.Tensor_out",
69 "quantized_decomposed::dequantize_per_tensor.Tensor_out",
71 "quantized_decomposed::quantize_per_tensor.Tensor_out",
CMakeLists.txt
62 "quantized_decomposed::choose_qparams.Tensor_out"
65 "quantized_decomposed::dequantize_per_tensor.Tensor_out"
70 "quantized_decomposed::quantize_per_tensor.Tensor_out"
quantized.yaml
7 - func: quantized_decomposed::choose_qparams.Tensor_out(Tensor input, int quant_min, int quant_max,…
19 - func: quantized_decomposed::dequantize_per_tensor.Tensor_out(Tensor input, Tensor scale, Tensor z…
91 - func: quantized_decomposed::quantize_per_tensor.Tensor_out(Tensor input, Tensor scale, Tensor zer…
/aosp_15_r20/external/pytorch/test/edge/
selected_operators.yaml
68 aten::bitwise_and.Tensor_out:
146 aten::eq.Tensor_out:
188 aten::index.Tensor_out:
344 aten::slice_copy.Tensor_out:
362 aten::split_copy.Tensor_out:
416 aten::unsafe_split.Tensor_out:
/aosp_15_r20/external/tensorflow/tensorflow/lite/kernels/
batch_matmul.cc
393 const scalar* input, TfLiteTensor* tensor_out, in TransposeRowsColumnsImpl() argument
413 TfLiteTensor* tensor_out) { in TransposeRowsColumns() argument
416 tensor_out, in TransposeRowsColumns()
417 GetTensorData<float>(tensor_out)); in TransposeRowsColumns()
421 tensor_in, GetTensorData<int8_t>(tensor_in), tensor_out, in TransposeRowsColumns()
422 GetTensorData<int8_t>(tensor_out)); in TransposeRowsColumns()
426 tensor_in, GetTensorData<int16_t>(tensor_in), tensor_out, in TransposeRowsColumns()
427 GetTensorData<int16_t>(tensor_out)); in TransposeRowsColumns()
/aosp_15_r20/external/executorch/kernels/optimized/cpu/
op_le.cpp
46 Bool, out_type, ctx, "le.Tensor_out", CTYPE, [&]() { in opt_le_tensor_out()
57 Bool, a_type, ctx, "le.Tensor_out", CTYPE_A, [&]() { in opt_le_tensor_out()
59 Bool, b_type, ctx, "le.Tensor_out", CTYPE_B, [&]() { in opt_le_tensor_out()
66 Bool, out_type, ctx, "le.Tensor_out", CTYPE_OUT, [&]() { in opt_le_tensor_out()
/aosp_15_r20/external/pytorch/aten/src/ATen/core/
NamedRegistrations.cpp
103 m.impl("clamp.Tensor_out", CppFunction::makeFallthrough()); in TORCH_LIBRARY_IMPL()
109 m.impl("clamp_max.Tensor_out", CppFunction::makeFallthrough()); in TORCH_LIBRARY_IMPL()
115 m.impl("clamp_min.Tensor_out", CppFunction::makeFallthrough()); in TORCH_LIBRARY_IMPL()
167 m.impl("eq.Tensor_out", CppFunction::makeFallthrough()); in TORCH_LIBRARY_IMPL()
207 m.impl("ge.Tensor_out", CppFunction::makeFallthrough()); in TORCH_LIBRARY_IMPL()
212 m.impl("gt.Tensor_out", CppFunction::makeFallthrough()); in TORCH_LIBRARY_IMPL()
253 m.impl("le.Tensor_out", CppFunction::makeFallthrough()); in TORCH_LIBRARY_IMPL()
299 m.impl("lt.Tensor_out", CppFunction::makeFallthrough()); in TORCH_LIBRARY_IMPL()
355 m.impl("ne.Tensor_out", CppFunction::makeFallthrough()); in TORCH_LIBRARY_IMPL()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
native_functions.yaml
1105 autogen: bernoulli.Tensor, bernoulli.Tensor_out
1493 structured_delegate: clamp.Tensor_out
1505 structured_delegate: clamp.Tensor_out
1518 - func: clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Ten…
1535 structured_delegate: clamp_max.Tensor_out
1546 structured_delegate: clamp_max.Tensor_out
1558 - func: clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!)
1575 structured_delegate: clamp_min.Tensor_out
1586 structured_delegate: clamp_min.Tensor_out
1598 - func: clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!)
[all …]
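These entries show the functional and in-place clamp variants declaring a structured_delegate on their Tensor_out schema, so the out kernel carries the single implementation. A minimal usage sketch, assuming stock PyTorch, where tensor-valued bounds plus an explicit out should resolve to the clamp.Tensor_out schema listed above:

```python
import torch

x = torch.tensor([-2.0, 0.5, 3.0])
lo = torch.full_like(x, -1.0)   # tensor-valued min
hi = torch.full_like(x, 1.0)    # tensor-valued max
out = torch.empty_like(x)

# Tensor bounds + explicit `out` select the Tensor_out overload; the
# functional and in-place forms delegate to the same structured kernel.
torch.clamp(x, min=lo, max=hi, out=out)
print(out)  # tensor([-1.0000,  0.5000,  1.0000])
```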
/aosp_15_r20/external/executorch/kernels/portable/cpu/
op_split_copy.cpp
29 * split_copy.Tensor_out(Tensor input, int split_size, int dim=0, *,
62 Bool, in_type, ctx, "split_copy.Tensor_out", CTYPE_IN, [&]() { in split_copy_Tensor_out()
64 Bool, out_type, ctx, "split_copy.Tensor_out", CTYPE_OUT, [&]() { in split_copy_Tensor_out()
/aosp_15_r20/external/pytorch/test/jit/
test_complexity.py
47 tensor_out = False
50 tensor_out = True
52 num_non_tensor += int(not tensor_out)
