
Searched full:argmax (Results 1 – 25 of 715) sorted by relevance


/aosp_15_r20/external/tensorflow/tensorflow/python/kernel_tests/math_ops/
argmax_op_test.py
63 # Check that argmin and argmax match numpy along the primary axis
64 self._testBothArg(math_ops.argmax, x, 0, x.argmax())
70 # Check that argmin and argmax match numpy along the primary axis for
72 self._testBothArg(math_ops.argmax, x, 0, x.argmax())
75 # Check that argmin and argmax match numpy along axis=1 for
78 self._testBothArg(math_ops.argmax, x, 1, x.argmax(axis=1))
89 # Check that argmin and argmax match numpy along all axes
91 self._testBothArg(math_ops.argmax, x, axis, x.argmax(axis))
101 expected_values = x.argmax()
103 ans = math_ops.argmax(x, axis=0, output_type=dtypes.int32)
[all …]
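
The argmax_op_test.py excerpts above compare tf.math.argmax against NumPy's ndarray.argmax along each axis, including the int32 output_type path. A minimal standalone sketch of that comparison (assuming an eager TensorFlow 2.x environment; the array shape is illustrative, not taken from the test):

import numpy as np
import tensorflow as tf

x = np.random.rand(3, 4, 5).astype(np.float32)

# Check every axis, mirroring the "match numpy along all axes" test above.
for axis in range(x.ndim):
    tf_idx = tf.math.argmax(x, axis=axis, output_type=tf.int32)
    np.testing.assert_array_equal(tf_idx.numpy(), x.argmax(axis=axis))
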
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/
argmax_op.cc
102 errors::InvalidArgument("Argmax and Argmin only support up " in Compute()
116 : public ArgOp<Device, T, Tout, functor::ArgMax<Device, T, Tout> > {
119 : ArgOp<Device, T, Tout, functor::ArgMax<Device, T, Tout> >(context) {} in ArgMaxOp()
131 REGISTER_KERNEL_BUILDER(Name("ArgMax") \
143 REGISTER_KERNEL_BUILDER(Name("ArgMax") \
155 REGISTER_KERNEL_BUILDER(Name("ArgMax") \
161 REGISTER_KERNEL_BUILDER(Name("ArgMax") \
179 void ArgMax<GPUDevice, T, Tout>::Reduce##Dims( \
204 extern template struct ArgMax<GPUDevice, T, int64_t>; \
206 extern template struct ArgMax<GPUDevice, T, int32>; \
[all …]
maxpooling_op.cc
951 const Tensor& input, Tensor* output, Tensor* argmax, in launch()
955 context, output, argmax, /*input_backprop=*/nullptr, input, unused, in launch()
1005 Tensor* argmax = nullptr; in Compute() local
1006 OP_REQUIRES_OK(context, context->allocate_output(1, out_shape, &argmax)); in Compute()
1009 context, params, tensor_in, output, argmax, propagate_nans_, in Compute()
1030 const Tensor& grad_in, const Tensor& argmax, in launch()
1035 auto shard = [&grad_in, &argmax, &grad_out, include_batch_in_index]( in launch()
1045 auto argmax_flat = argmax.flat<int64_t>(); in launch()
1057 if (index >= argmax.NumElements()) { in launch()
1081 // TODO(b/175733711): Support int32 argmax type in MaxPoolGradWithArgmax op.
[all …]
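
maxpooling_op.cc above implements MaxPoolWithArgmax and its gradient, with argmax carrying flattened indices of the max locations (accessed as flat<int64_t>() in the excerpt). A hedged sketch of the public API that exposes those indices (assuming TensorFlow 2.x; shapes are illustrative):

import tensorflow as tf

x = tf.random.uniform([1, 4, 4, 1])            # NHWC input
out, argmax = tf.nn.max_pool_with_argmax(
    x, ksize=2, strides=2, padding="SAME")

# By default each index is flattened within its image as (y * W + x) * C + c;
# include_batch_in_index=True folds the batch index in as well.
print(out.shape, argmax.dtype)                 # (1, 2, 2, 1) <dtype: 'int64'>
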
/aosp_15_r20/external/pytorch/test/torch_np/
test_ndarray_methods.py
221 @parametrize("method", [np.argmax, np.argmin])
304 @parametrize("method", ["argmax", "argmin"])
330 @parametrize("method", ["argmax", "argmin"])
339 "arr_method, np_method", [("argmax", np.argmax), ("argmin", np.argmin)]
342 # make sure both ndarray.argmax/argmin and
343 # numpy.argmax/argmin support out/axis args
355 "arr_method, np_method", [("argmax", np.argmax), ("argmin", np.argmin)]
463 assert_equal(np.argmax(arr), pos) # , err_msg="%r" % arr)
464 assert_equal(arr[np.argmax(arr)], val) # , err_msg="%r" % arr)
469 assert_equal(np.argmax(rarr), rpos, err_msg=f"{rarr!r}")
[all …]
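
test_ndarray_methods.py checks that numpy.argmax/argmin and the corresponding ndarray methods agree and that both accept out and axis arguments. A minimal NumPy sketch of that contract (values are illustrative):

import numpy as np

arr = np.array([[1, 9, 3],
                [7, 2, 8]])

out = np.empty(arr.shape[0], dtype=np.intp)
np.argmax(arr, axis=1, out=out)            # free function with axis/out
assert (out == arr.argmax(axis=1)).all()   # ndarray method agrees: [1, 2]
assert np.argmax(arr) == 1                 # no axis: index into the flattened array
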
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
AdaptiveMaxPooling3d.cu
43 * 4D input, 4D output, 4D argmax x and y
99 int64_t argmax = istartT*isizeH*isizeW + istartH*isizeW + istartW; in adaptivemaxpool() local
109 argmax = (it+istartT)*isizeH*isizeW + (ih+istartH)*isizeW + iw+istartW; in adaptivemaxpool()
115 // Update output and argmax in adaptivemaxpool()
117 *ptr_ind = argmax; in adaptivemaxpool()
157 * each input pixel can only be argmax of one output pixel.
192 // Compute the gradients for the argmax input pixel in adaptivemaxgradinput()
196 int argmax = (*ptr_ind); in adaptivemaxgradinput() local
197 gradInput_d[argmax] += grad_delta; in adaptivemaxgradinput()
268 // Compute the gradients for the argmax input pixel in atomicadaptivemaxgradinput()
[all …]
AdaptiveMaxPooling2d.cu
43 * 4D input, 4D output, 4D argmax x and y
87 int argmax = istartH * isizeW + istartW; in adaptivemaxpool() local
95 argmax = (ih+istartH)*isizeW + iw+istartW; in adaptivemaxpool()
100 // Update output and argmax in adaptivemaxpool()
102 *ptr_ind = argmax; in adaptivemaxpool()
146 int argmax = (*ptr_ind); in adaptivemaxgradinput() local
148 gradInput[argmax] += z; in adaptivemaxgradinput()
193 int argmax = (*ptr_ind); in atomicadaptivemaxgradinput() local
196 gpuAtomicAddNoReturn(&(gradInput[argmax]), z); in atomicadaptivemaxgradinput()
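
The AdaptiveMaxPooling3d.cu and AdaptiveMaxPooling2d.cu kernels above store each argmax flattened within the input's spatial plane (for 2d, ih*isizeW + iw) and scatter gradients back to those positions. A hedged PyTorch sketch of the corresponding public API (assuming torch is installed; shapes are illustrative):

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 8, 8, requires_grad=True)    # NCHW input
out, idx = F.adaptive_max_pool2d(x, output_size=(2, 2), return_indices=True)

# idx holds per-plane flattened positions (ih * W + iw), matching the kernels above.
out.sum().backward()
print(idx.shape, int(x.grad.count_nonzero()))       # torch.Size([1, 3, 2, 2]) 12
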
/aosp_15_r20/external/tensorflow/tensorflow/core/ops/compat/ops_history_v2/
ArgMax.pbtxt
2 name: "ArgMax"
52 name: "ArgMax"
115 name: "ArgMax"
180 name: "ArgMax"
246 name: "ArgMax"
312 name: "ArgMax"
379 name: "ArgMax"
448 name: "ArgMax"
MaxPoolGradWithArgmax.pbtxt
12 name: "argmax"
76 name: "argmax"
144 name: "argmax"
214 name: "argmax"
285 name: "argmax"
356 name: "argmax"
MaxPoolWithArgmax.pbtxt
12 name: "argmax"
75 name: "argmax"
142 name: "argmax"
211 name: "argmax"
281 name: "argmax"
351 name: "argmax"
MaxPoolGradGradWithArgmax.pbtxt
12 name: "argmax"
80 name: "argmax"
150 name: "argmax"
221 name: "argmax"
292 name: "argmax"
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/
softargmax.c
39 … "failed to create Soft ArgMax operator with %zu channels: number of channels must be non-zero", in pytorch_qnnp_create_softargmax_nc_q8()
46 … "failed to create Soft ArgMax operator with %.7g input scale: scale must be finite and positive", in pytorch_qnnp_create_softargmax_nc_q8()
53 … "failed to create Soft ArgMax operator with %.7g output scale: scale must be finite and positive", in pytorch_qnnp_create_softargmax_nc_q8()
62 …"failed to create Soft ArgMax operator with %.7g output scale: only output scale of 1/256 is suppo… in pytorch_qnnp_create_softargmax_nc_q8()
69 "failed to create Soft ArgMax operator with %" PRIu8 in pytorch_qnnp_create_softargmax_nc_q8()
88 "failed to allocate 256 bytes for Soft ArgMax lookup table"); in pytorch_qnnp_create_softargmax_nc_q8()
/aosp_15_r20/external/executorch/examples/mediatek/executor_runner/llama_runner/
Utils.h
70 static uint64_t argmax(const void* logits_buffer, const size_t vocab_size) { in argmax() function
83 static uint64_t argmax( in argmax() function
89 return argmax<int16_t>(logits_buffer, vocab_size); in argmax()
91 return argmax<__fp16>(logits_buffer, vocab_size); in argmax()
93 return argmax<float>(logits_buffer, vocab_size); in argmax()
97 "Unsupported logits type for argmax: %s", in argmax()
/aosp_15_r20/external/tensorflow/tensorflow/compiler/tests/
argminmax_test.py
15 """Functional tests for ArgMin and ArgMax Ops."""
33 op: argmin or argmax operator to test.
48 # Complex numbers do not support argmin/argmax.
56 math_ops.argmax,
62 math_ops.argmax,
68 math_ops.argmax,
/aosp_15_r20/external/tensorflow/tensorflow/core/ops/compat/ops_history_v1/
ArgMax.pbtxt
2 name: "ArgMax"
52 name: "ArgMax"
115 name: "ArgMax"
180 name: "ArgMax"
246 name: "ArgMax"
312 name: "ArgMax"
MaxPoolGradWithArgmax.pbtxt
12 name: "argmax"
76 name: "argmax"
144 name: "argmax"
214 name: "argmax"
285 name: "argmax"
356 name: "argmax"
MaxPoolWithArgmax.pbtxt
12 name: "argmax"
75 name: "argmax"
142 name: "argmax"
211 name: "argmax"
281 name: "argmax"
351 name: "argmax"
MaxPoolGradGradWithArgmax.pbtxt
12 name: "argmax"
80 name: "argmax"
150 name: "argmax"
221 name: "argmax"
292 name: "argmax"
/aosp_15_r20/packages/modules/NeuralNetworks/runtime/test/generated/spec_V1_2/
argmax_2.example.cpp
44 .type = TestOperationType::ARGMAX, in get_test_model()
135 .type = TestOperationType::ARGMAX, in get_test_model_all_inputs_as_internal()
192 .type = TestOperationType::ARGMAX, in get_test_model_relaxed()
283 .type = TestOperationType::ARGMAX, in get_test_model_relaxed_all_inputs_as_internal()
340 .type = TestOperationType::ARGMAX, in get_test_model_float16()
431 .type = TestOperationType::ARGMAX, in get_test_model_float16_all_inputs_as_internal()
488 .type = TestOperationType::ARGMAX, in get_test_model_int32()
545 .type = TestOperationType::ARGMAX, in get_test_model_quant8()
636 .type = TestOperationType::ARGMAX, in get_test_model_quant8_all_inputs_as_internal()
argmax_3.example.cpp
44 .type = TestOperationType::ARGMAX, in get_test_model()
135 .type = TestOperationType::ARGMAX, in get_test_model_all_inputs_as_internal()
192 .type = TestOperationType::ARGMAX, in get_test_model_relaxed()
283 .type = TestOperationType::ARGMAX, in get_test_model_relaxed_all_inputs_as_internal()
340 .type = TestOperationType::ARGMAX, in get_test_model_float16()
431 .type = TestOperationType::ARGMAX, in get_test_model_float16_all_inputs_as_internal()
488 .type = TestOperationType::ARGMAX, in get_test_model_int32()
545 .type = TestOperationType::ARGMAX, in get_test_model_quant8()
636 .type = TestOperationType::ARGMAX, in get_test_model_quant8_all_inputs_as_internal()
argmax_1.example.cpp
44 .type = TestOperationType::ARGMAX, in get_test_model()
135 .type = TestOperationType::ARGMAX, in get_test_model_all_inputs_as_internal()
192 .type = TestOperationType::ARGMAX, in get_test_model_relaxed()
283 .type = TestOperationType::ARGMAX, in get_test_model_relaxed_all_inputs_as_internal()
340 .type = TestOperationType::ARGMAX, in get_test_model_float16()
431 .type = TestOperationType::ARGMAX, in get_test_model_float16_all_inputs_as_internal()
488 .type = TestOperationType::ARGMAX, in get_test_model_int32()
545 .type = TestOperationType::ARGMAX, in get_test_model_quant8()
636 .type = TestOperationType::ARGMAX, in get_test_model_quant8_all_inputs_as_internal()
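
The generated NNAPI models above run the same ARGMAX graph over float32, relaxed, float16, int32, and quant8 inputs (the spec_V1_3 quant8_signed variants further down do the same); the expected indices coincide because a quantization with positive scale is order-preserving, so it can only change the argmax where rounding introduces ties. A quick NumPy check of that property (scale and zero point are illustrative):

import numpy as np

x = np.array([[0.1, 2.5, -1.0],
              [3.2, 0.0,  3.1]], dtype=np.float32)

scale, zero_point = 0.05, 12                   # illustrative quant8 parameters
q = np.clip(np.rint(x / scale) + zero_point, 0, 255).astype(np.uint8)

# Positive scale preserves ordering, so the per-row argmax indices match here.
assert (np.argmax(x, axis=1) == np.argmax(q, axis=1)).all()
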
/aosp_15_r20/external/tensorflow/tensorflow/python/kernel_tests/nn_ops/
pooling_ops_test.py
935 argmax = self.evaluate(argmax_op)
937 out_op = gen_nn_ops.max_pool_grad_with_argmax(t, grad_in, argmax, ksize,
967 argmax = self.evaluate(argmax_op)
970 t, grad_in, argmax, ksize, strides, padding)
994 "Config", ["use_gpu", "include_batch_in_index", "argmax", "Targmax"])
1014 out, argmax = self.evaluate([out_op, argmax_op])
1016 self.assertShapeEqual(argmax, argmax_op)
1019 self.assertAllEqual(argmax.ravel(), config.argmax)
1029 "Config", ["use_gpu", "include_batch_in_index", "argmax"])
1042 config.argmax, shape=[2, 2, 2, 1], dtype=dtypes.int64)
[all …]
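
pooling_ops_test.py above feeds the argmax emitted by MaxPoolWithArgmax into max_pool_grad_with_argmax, with and without include_batch_in_index. The flattened index layout those tests depend on can be decoded like this (a sketch based on the documented MaxPoolWithArgmax index formula; the helper name is illustrative):

def decode_argmax(flat_idx, height, width, channels, include_batch_in_index):
    # MaxPoolWithArgmax flattens position [b, y, x, c] as
    #   ((b * H + y) * W + x) * C + c   when include_batch_in_index is True,
    #   (y * W + x) * C + c             otherwise (b must then be tracked separately).
    c = flat_idx % channels
    yx = flat_idx // channels
    x = yx % width
    y = (yx // width) % height
    b = yx // (width * height) if include_batch_in_index else None
    return b, y, x, c

print(decode_argmax(13, height=4, width=4, channels=2, include_batch_in_index=True))  # (0, 1, 2, 1)
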
/aosp_15_r20/external/pytorch/test/
test_reductions.py
483 ops = [torch.norm, torch.argmax, torch.argmin]
1804 self.compare_with_numpy(torch.argmax, np.argmax, t)
1810 self.compare_with_numpy(torch.argmax, np.argmax, t)
1837 self.compare_with_numpy(torch.argmax, np.argmax, x, device=None, dtype=None)
1844 … lambda x: np.argmax(x, axis=rand_dim), x, device=None, dtype=None)
1849 # Argmax
1850 torch_fn = partial(torch.argmax, dim=1)
1851 np_fn = partial(np.argmax, axis=1)
2112 self.assertEqual(x.argmax().item(), 0)
2113 self.assertEqual(x.argmax(dim=None).item(), 0)
[all …]
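
test_reductions.py above compares torch.argmax against np.argmax, including dim=None, where the result indexes the flattened tensor. A minimal sketch of that equivalence (assuming torch and numpy; values are illustrative):

import numpy as np
import torch

x = torch.tensor([[1.0, 9.0, 3.0],
                  [7.0, 2.0, 8.0]])

assert torch.argmax(x).item() == np.argmax(x.numpy())               # flattened: 1
assert torch.equal(torch.argmax(x, dim=1),
                   torch.from_numpy(np.argmax(x.numpy(), axis=1)))  # per-row: [1, 2]
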
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/service/
reduce_decomposer_test.cc
89 argmax { in TEST_F()
114 ) reduce(arg0, idxs, constant0,constant1), dimensions={3}, to_apply=argmax in TEST_F()
133 argmax { in TEST_F()
158 ) reduce(arg0, idxs, constant0,constant1), dimensions={3}, to_apply=argmax in TEST_F()
169 argmax { in TEST_F()
194 ) reduce(arg0, idxs, constant0,constant1), dimensions={3}, to_apply=argmax in TEST_F()
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/client/lib/
arithmetic.cc
160 XlaOp argmax = GetTupleElement(max_argmax, 1); in ArgMinMax() local
162 argmax = ConvertElementType(argmax, output_type); in ArgMinMax()
164 return argmax; in ArgMinMax()
168 XlaOp ArgMax(XlaOp input, PrimitiveType output_type, int axis) { in ArgMax() function
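
Both the HLO in reduce_decomposer_test.cc and ArgMinMax in arithmetic.cc express argmax as a variadic reduce over (value, index) pairs, then return tuple element 1, the index. A small pure-Python sketch of that combiner pattern (not the XLA builder API, just the reduction logic it encodes; ties here break toward the smaller index, which also keeps the combiner associative for a tree-shaped reduce):

from functools import reduce

def argmax_by_reduce(values):
    # Keep the (value, index) pair with the larger value; on ties, the smaller index.
    def combine(a, b):
        (va, ia), (vb, ib) = a, b
        return (vb, ib) if vb > va or (vb == va and ib < ia) else (va, ia)
    _, max_idx = reduce(combine, zip(values, range(len(values))))
    return max_idx

print(argmax_by_reduce([3.0, 7.0, 7.0, 1.0]))   # 1
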
/aosp_15_r20/packages/modules/NeuralNetworks/runtime/test/generated/spec_V1_3/
argmax_quant8_signed.example.cpp
44 .type = TestOperationType::ARGMAX, in get_test_model_quant8_signed()
135 .type = TestOperationType::ARGMAX, in get_test_model_quant8_signed_all_inputs_as_internal()
192 .type = TestOperationType::ARGMAX, in get_test_model_quant8_signed_2()
283 .type = TestOperationType::ARGMAX, in get_test_model_quant8_signed_all_inputs_as_internal_2()
340 .type = TestOperationType::ARGMAX, in get_test_model_quant8_signed_3()
431 .type = TestOperationType::ARGMAX, in get_test_model_quant8_signed_all_inputs_as_internal_3()
