/aosp_15_r20/external/XNNPACK/eval/

f16-f32-cvt.cc
    30  std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> inputs(kBlockSize);   in TEST() local
    34  inputs[i] = n + i;   in TEST()
    36  xnn_math_f16_f32_cvt__sse2_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data());   in TEST()
    38  const uint32_t reference_output = float_as_uint32(fp16_ieee_to_fp32_value(inputs[i]));   in TEST()
    40  … << "input = 0x" << std::hex << std::setw(4) << std::setfill('0') << float_as_uint32(inputs[i])   in TEST()
    48  std::vector<uint16_t, AlignedAllocator<uint16_t, 64>> inputs(kBlockSize);   in TEST() local
    52  inputs[i] = n + i;   in TEST()
    54  xnn_math_f16_f32_cvt__sse2_int16(kBlockSize * sizeof(float), inputs.data(), outputs.data());   in TEST()
    56  const uint32_t reference_output = float_as_uint32(fp16_ieee_to_fp32_value(inputs[i]));   in TEST()
    58  … << "input = 0x" << std::hex << std::setw(4) << std::setfill('0') << float_as_uint32(inputs[i])   in TEST()
    [all …]

f32-roundd.cc
    29  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);   in TEST() local
    31  std::fill(inputs.begin(), inputs.end(), UINT32_C(0x00000000));   in TEST()
    32  xnn_math_f32_roundd__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());   in TEST()
    33  const uint32_t reference_output = float_as_uint32(std::floor(inputs[0]));   in TEST()
    35  << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(inputs[0])   in TEST()
    41  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);   in TEST() local
    43  std::fill(inputs.begin(), inputs.end(), UINT32_C(0x80000000));   in TEST()
    44  xnn_math_f32_roundd__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());   in TEST()
    45  const uint32_t reference_output = float_as_uint32(std::floor(inputs[0]));   in TEST()
    47  << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(inputs[0])   in TEST()
    [all …]

f32-roundu.cc
    29  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);   in TEST() local
    31  std::fill(inputs.begin(), inputs.end(), UINT32_C(0x00000000));   in TEST()
    32  xnn_math_f32_roundu__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());   in TEST()
    33  const uint32_t reference_output = float_as_uint32(std::ceil(inputs[0]));   in TEST()
    35  << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(inputs[0])   in TEST()
    41  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);   in TEST() local
    43  std::fill(inputs.begin(), inputs.end(), UINT32_C(0x80000000));   in TEST()
    44  xnn_math_f32_roundu__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());   in TEST()
    45  const uint32_t reference_output = float_as_uint32(std::ceil(inputs[0]));   in TEST()
    47  << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(inputs[0])   in TEST()
    [all …]

f32-roundz.cc
    29  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);   in TEST() local
    33  inputs[i] = uint32_as_float(n + i);   in TEST()
    35  xnn_math_f32_roundz__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());   in TEST()
    37  const uint32_t reference_output = float_as_uint32(std::trunc(inputs[i]));   in TEST()
    39  … << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(inputs[i])   in TEST()
    47  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);   in TEST() local
    51  inputs[i] = uint32_as_float(n + i);   in TEST()
    53  xnn_math_f32_roundz__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());   in TEST()
    55  const uint32_t reference_output = float_as_uint32(std::trunc(inputs[i]));   in TEST()
    57  … << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(inputs[i])   in TEST()
    [all …]

f32-roundne.cc
    29  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);   in TEST() local
    33  inputs[i] = uint32_as_float(n + i);   in TEST()
    35  xnn_math_f32_roundne__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());   in TEST()
    37  const uint32_t reference_output = float_as_uint32(std::nearbyint(inputs[i]));   in TEST()
    39  … << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(inputs[i])   in TEST()
    47  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);   in TEST() local
    51  inputs[i] = uint32_as_float(n + i);   in TEST()
    53  xnn_math_f32_roundne__sse_addsub(kBlockSize * sizeof(float), inputs.data(), outputs.data());   in TEST()
    55  const uint32_t reference_output = float_as_uint32(std::nearbyint(inputs[i]));   in TEST()
    57  … << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(inputs[i])   in TEST()
    [all …]

f32-f16-cvt.cc
    30  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);   in TEST() local
    34  inputs[i] = uint32_as_float(n + i);   in TEST()
    36  xnn_math_f32_f16_cvt__sse2(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());   in TEST()
    38  const uint16_t reference_output = fp16_ieee_from_fp32_value(inputs[i]);   in TEST()
    40  … << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(inputs[i])   in TEST()
    48  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);   in TEST() local
    52  inputs[i] = uint32_as_float(n + i);   in TEST()
    54  xnn_math_f32_f16_cvt__sse2(kBlockSize * sizeof(uint16_t), inputs.data(), outputs.data());   in TEST()
    56  const uint16_t reference_output = fp16_ieee_from_fp32_value(inputs[i]);   in TEST()
    58  … << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(inputs[i])   in TEST()
    [all …]

f32-exp.cc
    33  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);   in TEST() local
    35  std::fill(inputs.begin(), inputs.end(), -0.0f);   in TEST()
    36  … xnn_math_f32_exp__neonfma_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());   in TEST()
    39  << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(inputs[0])   in TEST()
    47  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);   in TEST() local
    49  std::fill(inputs.begin(), inputs.end(), +0.0f);   in TEST()
    50  … xnn_math_f32_exp__neonfma_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.data());   in TEST()
    53  << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(inputs[0])   in TEST()
    61  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);   in TEST() local
    65  inputs[i] = uint32_as_float(std::min<uint32_t>(n + i, UINT32_C(0xFF800000)));   in TEST()
    [all …]

f32-expm1minus.cc
    33  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);   in TEST() local
    35  std::fill(inputs.begin(), inputs.end(), -0.0f);   in TEST()
    36  …xnn_math_f32_expm1minus__neon_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data…   in TEST()
    39  << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(inputs[0])   in TEST()
    47  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);   in TEST() local
    51  inputs[i] = uint32_as_float(std::min<uint32_t>(n + i, UINT32_C(0xFF800000)));   in TEST()
    53  …xnn_math_f32_expm1minus__neon_rr2_lut16_p3(kBlockSize * sizeof(float), inputs.data(), outputs.data…   in TEST()
    57  … << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(inputs[i])   in TEST()
    67  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);   in TEST() local
    71  inputs[i] = uint32_as_float(std::min<uint32_t>(UINT32_C(0x7FFFFFFF), n + i));   in TEST()
    [all …]

f32-expminus.cc
    33  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);   in TEST() local
    35  std::fill(inputs.begin(), inputs.end(), -0.0f);   in TEST()
    36  …xnn_math_f32_expminus__neonfma_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.dat…   in TEST()
    39  << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(inputs[0])   in TEST()
    47  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);   in TEST() local
    49  std::fill(inputs.begin(), inputs.end(), +0.0f);   in TEST()
    50  …xnn_math_f32_expminus__neonfma_rr2_lut64_p2(kBlockSize * sizeof(float), inputs.data(), outputs.dat…   in TEST()
    53  << "input = 0x" << std::hex << std::setw(8) << std::setfill('0') << float_as_uint32(inputs[0])   in TEST()
    61  std::vector<float, AlignedAllocator<float, 64>> inputs(kBlockSize);   in TEST() local
    65  inputs[i] = uint32_as_float(std::min<uint32_t>(n + i, UINT32_C(0xFF800000)));   in TEST()
    [all …]

u64-sqrt.cc
    48  std::vector<uint64_t, AlignedAllocator<uint64_t, 64>> inputs(kBlockSize);   in TEST() local
    52  inputs[i] = UINT64_C(0x0010000000000000) << s;   in TEST()
    54  …xnn_math_u64_sqrt__scalar_cvtu32_sqrt_cvtsatu32f64(kBlockSize * sizeof(uint64_t), inputs.data(), o…   in TEST()
    56  const uint64_t input = inputs[i];   in TEST()
    64  std::vector<uint64_t, AlignedAllocator<uint64_t, 64>> inputs(kBlockSize);   in TEST() local
    68  inputs[i] = UINT64_C(0x001FFFFFFFFFFFFF) << (s - 1);   in TEST()
    70  …xnn_math_u64_sqrt__scalar_cvtu32_sqrt_cvtsatu32f64(kBlockSize * sizeof(uint64_t), inputs.data(), o…   in TEST()
    72  const uint64_t input = inputs[i];   in TEST()
    80  std::vector<uint64_t, AlignedAllocator<uint64_t, 64>> inputs(kBlockSize);   in TEST() local
    84  inputs[i] = UINT64_C(0x0020000000000001) << (s - 1);   in TEST()
    [all …]
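
Every file in this eval/ directory follows the same harness shape: fill a 64-byte-aligned block of inputs with consecutive bit patterns (or a fixed special value), run one vectorized math kernel over the block, and compare each output bit-exactly against a scalar reference. The sketch below reproduces that shape in plain C++; kBlockSize and the helper names mirror the snippets, but the kernel is a scalar stand-in rather than a real xnn_math_* SIMD routine, the alignment requirement is dropped, and the sweep covers only a small bit-pattern range.

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Bit-cast helpers matching the float_as_uint32 / uint32_as_float calls above.
static uint32_t float_as_uint32(float f) { uint32_t u; std::memcpy(&u, &f, sizeof u); return u; }
static float uint32_as_float(uint32_t u) { float f; std::memcpy(&f, &u, sizeof f); return f; }

// Stand-in for a vectorized xnn_math_f32_round* kernel; same contract, scalar body.
static void roundz_kernel(size_t bytes, const float* input, float* output) {
  for (size_t i = 0; i < bytes / sizeof(float); i++) output[i] = std::trunc(input[i]);
}

int main() {
  const uint32_t kBlockSize = 1024;
  std::vector<float> inputs(kBlockSize), outputs(kBlockSize);
  int errors = 0;
  // Sweep a sub-range of positive-float bit patterns block by block; the real
  // tests iterate all the way to the infinity/NaN boundary at 0x7F800000.
  for (uint32_t n = 0; n < (UINT32_C(1) << 24); n += kBlockSize) {
    for (uint32_t i = 0; i < kBlockSize; i++) inputs[i] = uint32_as_float(n + i);
    roundz_kernel(kBlockSize * sizeof(float), inputs.data(), outputs.data());
    for (uint32_t i = 0; i < kBlockSize; i++) {
      const uint32_t reference_output = float_as_uint32(std::trunc(inputs[i]));
      if (reference_output != float_as_uint32(outputs[i]) && errors++ < 8) {
        std::printf("mismatch: input = 0x%08X\n", n + i);  // report first few mismatches
      }
    }
  }
  return errors == 0 ? 0 : 1;
}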

/aosp_15_r20/external/tensorflow/tensorflow/lite/tools/versioning/

op_version.cc
    36  if (op_sig.inputs.size() < 2) {   in NeedBroadcastForBinaryInputs()
    39  return (op_sig.inputs.at(0).dims != op_sig.inputs.at(1).dims);   in NeedBroadcastForBinaryInputs()
    44  for (auto& input : op_sig.inputs) {   in GetInputMaxDims()
    60  // If the op has signed int16 op_sig.inputs and op_sig.outputs, its   in GetBuiltinOperatorVersion()
    62  if (op_sig.inputs.at(0).type == kTfLiteInt16 &&   in GetBuiltinOperatorVersion()
    63  op_sig.inputs.at(1).type == kTfLiteInt16 &&   in GetBuiltinOperatorVersion()
    68  // If the op has signed int8 op_sig.inputs and op_sig.outputs, its   in GetBuiltinOperatorVersion()
    70  if (op_sig.inputs.at(0).type == kTfLiteInt8 &&   in GetBuiltinOperatorVersion()
    71  op_sig.inputs.at(1).type == kTfLiteInt8 &&   in GetBuiltinOperatorVersion()
    77  if (op_sig.inputs.at(0).type == kTfLiteFloat32 &&   in GetBuiltinOperatorVersion()
    [all …]
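
op_version.cc derives a per-operator version from the signature's input and output tensor specs; the int16/int8 branches and the broadcast check above are the core of it. A hedged, self-contained sketch of that dispatch shape follows: the type and struct names mimic the snippets but are local stand-ins, and the returned version numbers are illustrative rather than TFLite's actual table.

#include <cstdio>
#include <vector>

// Minimal stand-ins for the TFLite types referenced above.
enum TfLiteType { kTfLiteFloat32, kTfLiteInt8, kTfLiteInt16 };
struct OpSignatureTensorSpec { TfLiteType type; std::vector<int> dims; };
struct OpSignature { std::vector<OpSignatureTensorSpec> inputs, outputs; };

static bool NeedBroadcastForBinaryInputs(const OpSignature& op_sig) {
  if (op_sig.inputs.size() < 2) return false;
  return op_sig.inputs.at(0).dims != op_sig.inputs.at(1).dims;
}

// Shape of the type-driven dispatch in GetBuiltinOperatorVersion(): narrower
// integer input/output types and broadcasting force newer op versions.
static int BinaryOpVersion(const OpSignature& op_sig) {
  if (op_sig.inputs.at(0).type == kTfLiteInt16 &&
      op_sig.inputs.at(1).type == kTfLiteInt16 &&
      op_sig.outputs.at(0).type == kTfLiteInt16) {
    return 4;  // illustrative version number
  }
  if (op_sig.inputs.at(0).type == kTfLiteInt8 &&
      op_sig.inputs.at(1).type == kTfLiteInt8 &&
      op_sig.outputs.at(0).type == kTfLiteInt8) {
    return 3;  // illustrative
  }
  return NeedBroadcastForBinaryInputs(op_sig) ? 2 : 1;  // illustrative
}

int main() {
  OpSignature sig;
  sig.inputs = {{kTfLiteInt8, {1, 8}}, {kTfLiteInt8, {1, 8}}};
  sig.outputs = {{kTfLiteInt8, {1, 8}}};
  std::printf("version = %d\n", BinaryOpVersion(sig));  // prints 3
  return 0;
}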

op_version_test.cc
    105  .inputs = CreateOpSignatureTensorSpecs(   in TEST()
    112  .inputs = CreateOpSignatureTensorSpecs(   in TEST()
    119  .inputs = CreateOpSignatureTensorSpecs(   in TEST()
    126  .inputs = CreateOpSignatureTensorSpecs(   in TEST()
    137  .inputs = CreateOpSignatureTensorSpecs(kTfLiteInt8),   in SimpleVersioningTest()
    143  .inputs = CreateOpSignatureTensorSpecs(kTfLiteUInt8),   in SimpleVersioningTest()
    153  .inputs = CreateOpSignatureTensorSpecs(kTfLiteInt16),   in SimpleVersioningTestExtended()
    164  .inputs = std::vector<OpSignatureTensorSpec>{},   in SimpleOutputVersioningTest()
    171  .inputs = std::vector<OpSignatureTensorSpec>{},   in SimpleOutputVersioningTest()
    181  .inputs = CreateOpSignatureTensorSpecs(kTfLiteString),   in TEST()
    [all …]

/aosp_15_r20/external/tensorflow/tensorflow/lite/toco/

model.h
    404  std::vector<std::string> inputs;
    406  // Output activation arrays. Same comments as for inputs apply here too.
    463  // Inputs:
    464  // inputs[0]: required: the input activations array
    465  // inputs[1]: required: the Conv weights
    466  // inputs[2]: optional: the bias vector, specifying the biases for each output
    490  // Inputs:
    491  // inputs[0]: required: the logits.
    492  // inputs[1]: required: sequence length.
    493  // inputs[2]: optional: beam width.
    [all …]
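
The model.h matches show toco's convention: an operator names its input and output arrays in plain string vectors, and per-operator comments pin down what each input index means (for Conv: activations, weights, optional bias). A simplified sketch of that convention, with hypothetical accessor names that are not part of the real class:

#include <string>
#include <vector>

// Simplified sketch: toco operators reference arrays by name, and the meaning
// of each input slot is fixed per operator type by the comments in model.h.
struct OperatorSketch {
  std::vector<std::string> inputs;   // input activation/weight/bias array names
  std::vector<std::string> outputs;  // output activation array names
};

// Conv convention from the comments above:
//   inputs[0]: required: the input activations array
//   inputs[1]: required: the Conv weights
//   inputs[2]: optional: the bias vector
struct ConvOperatorSketch : OperatorSketch {
  const std::string& activations() const { return inputs[0]; }
  const std::string& weights() const { return inputs[1]; }
  bool has_bias() const { return inputs.size() > 2; }
  const std::string& bias() const { return inputs[2]; }
};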

/aosp_15_r20/packages/modules/NeuralNetworks/runtime/test/fuzzing/operation_signatures/

Convolutions.cpp
    30  int32_t paddingLeft = op->inputs[3]->value<int32_t>();   in conv2DExplicitConstructor()
    31  int32_t paddingRight = op->inputs[4]->value<int32_t>();   in conv2DExplicitConstructor()
    32  int32_t paddingTop = op->inputs[5]->value<int32_t>();   in conv2DExplicitConstructor()
    33  int32_t paddingBottom = op->inputs[6]->value<int32_t>();   in conv2DExplicitConstructor()
    34  int32_t strideWidth = op->inputs[7]->value<int32_t>();   in conv2DExplicitConstructor()
    35  int32_t strideHeight = op->inputs[8]->value<int32_t>();   in conv2DExplicitConstructor()
    38  if (op->inputs.size() > 10) {   in conv2DExplicitConstructor()
    39  useNchw = op->inputs[10]->value<bool8>();   in conv2DExplicitConstructor()
    40  if (op->inputs.size() > 11) {   in conv2DExplicitConstructor()
    41  dilationWidth = op->inputs[11]->value<int32_t>();   in conv2DExplicitConstructor()
    [all …]
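
conv2DExplicitConstructor reads the explicit paddings, strides, layout flag, and optional dilations out of the scalar operands in op->inputs and then constrains the output spatial size. The relation it has to enforce is the usual explicitly padded convolution formula, sketched here on plain integers rather than the fuzzer's RandomVariable dimensions:

#include <cstdint>
#include <cstdio>

// Output extent along one spatial axis of an explicitly padded convolution.
// The dilated filter covers (filter - 1) * dilation + 1 input elements.
static int32_t ConvOutputExtent(int32_t input, int32_t filter, int32_t padBefore,
                                int32_t padAfter, int32_t stride, int32_t dilation) {
  const int32_t effectiveFilter = (filter - 1) * dilation + 1;
  return (input - effectiveFilter + padBefore + padAfter) / stride + 1;
}

int main() {
  // 224x224 input, 3x3 filter, padding 1 on each side, stride 2, no dilation.
  const int32_t outHeight = ConvOutputExtent(224, 3, 1, 1, 2, 1);
  const int32_t outWidth = ConvOutputExtent(224, 3, 1, 1, 2, 1);
  std::printf("%d x %d\n", outHeight, outWidth);  // prints 112 x 112
  return 0;
}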

Reshape.cpp
    30  if (op->inputs.size() > 2) useNchw = op->inputs[2]->value<bool8>();   in spaceToDepthConstructor()
    35  op->inputs[0]->dimensions = {RandomVariableType::FREE, RandomVariableType::FREE,   in spaceToDepthConstructor()
    37  int32_t blockSize = op->inputs[1]->value<int32_t>();   in spaceToDepthConstructor()
    38  auto outHeight = op->inputs[0]->dimensions[heightIndex].exactDiv(blockSize);   in spaceToDepthConstructor()
    39  auto outWidth = op->inputs[0]->dimensions[widthIndex].exactDiv(blockSize);   in spaceToDepthConstructor()
    40  auto outDepth = op->inputs[0]->dimensions[depthIndex] * (blockSize * blockSize);   in spaceToDepthConstructor()
    43  op->outputs[0]->dimensions = {op->inputs[0]->dimensions[0], outDepth, outHeight, outWidth};   in spaceToDepthConstructor()
    45  op->outputs[0]->dimensions = {op->inputs[0]->dimensions[0], outHeight, outWidth, outDepth};   in spaceToDepthConstructor()
    47  setSameQuantization(op->outputs[0], op->inputs[0]);   in spaceToDepthConstructor()
    56  .inputs = {INPUT_DEFAULT, PARAMETER_RANGE(TestOperandType::INT32, 1, 5)}, \
    [all …]
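
spaceToDepthConstructor does nothing more exotic than tie the output shape to the input shape and the block size (exactDiv also forces the spatial dimensions to be divisible by the block). The shape rule it encodes, in plain integers:

#include <cstdint>

struct Shape { int32_t batch, height, width, depth; };  // NHWC order

// SPACE_TO_DEPTH shape rule from the constructor above: spatial dimensions
// shrink by blockSize, depth grows by blockSize^2, batch is unchanged. (The
// NCHW branch in the snippet stores the same values in a permuted order.)
static Shape SpaceToDepthOutput(const Shape& in, int32_t blockSize) {
  return {in.batch, in.height / blockSize, in.width / blockSize,
          in.depth * blockSize * blockSize};
}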

Selection.cpp
    28  setFreeDimensions(op->inputs[0], /*rank=*/1);   in embeddingLookupConstructor()
    29  setFreeDimensions(op->inputs[1], rank);   in embeddingLookupConstructor()
    31  op->outputs[0]->dimensions[0] = op->inputs[0]->dimensions[0];   in embeddingLookupConstructor()
    33  op->outputs[0]->dimensions[i] = op->inputs[1]->dimensions[i];   in embeddingLookupConstructor()
    35  setSameQuantization(op->outputs[0], op->inputs[1]);   in embeddingLookupConstructor()
    39  uint32_t dimValue = op->inputs[1]->dimensions[0].getValue();   in embeddingLookupFinalizer()
    40  uint32_t numElements = op->inputs[0]->getNumberOfElements();   in embeddingLookupFinalizer()
    43  op->inputs[0]->value<int32_t>(i) = getUniform<int32_t>(0, dimValue - 1);   in embeddingLookupFinalizer()
    53  .inputs = {PARAMETER_NONE(TestOperandType::TENSOR_INT32), INPUT_DEFAULT}, \
    65  op->inputs[0]->dimensions = {RandomVariableType::FREE};   in hashtableLookupConstructor()
    [all …]

/aosp_15_r20/external/cronet/net/http/

http_chunked_decoder_unittest.cc
    27  void RunTest(const char* const inputs[],   in RunTest() argument
    38  std::string input = inputs[i];   in RunTest()
    50  // Feed the inputs to the decoder, until it returns an error.
    51  void RunTestUntilFailure(const char* const inputs[],   in RunTestUntilFailure() argument
    58  std::string input = inputs[i];   in RunTestUntilFailure()
    70  const char* const inputs[] = {   in TEST() local
    73  RunTest(inputs, std::size(inputs), "hello hello", true, 0);   in TEST()
    77  const char* const inputs[] = {   in TEST() local
    80  RunTest(inputs, std::size(inputs), "hello", false, 0);   in TEST()
    84  const char* const inputs[] = {   in TEST() local
    [all …]
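
The unittest's RunTest() helper walks the inputs[] array, feeds each buffer to an HttpChunkedDecoder, appends whatever the decoder keeps, and finally compares against the expected body and EOF state. The sketch below reproduces only that driver shape against a hypothetical decoder type; the Filter() method here is a passthrough stub and its signature is an assumption, not net::HttpChunkedDecoder's real API.

#include <string>
#include <vector>

// Hypothetical stand-in for net::HttpChunkedDecoder. A real decoder would
// strip the hex chunk-size lines and trailers in place; this stub keeps the
// buffer untouched so the driver loop below stays runnable on its own.
struct StubChunkedDecoder {
  int Filter(std::string& buf) { return static_cast<int>(buf.size()); }  // assumed shape
  bool reached_eof() const { return true; }                              // assumed shape
};

// Mirrors the structure of RunTest() in the snippets: feed each input, keep
// the filtered bytes, then check the reassembled body and the EOF flag.
static bool RunChunkedTest(const std::vector<std::string>& inputs,
                           const std::string& expected_output,
                           bool expected_eof) {
  StubChunkedDecoder decoder;
  std::string result;
  for (std::string input : inputs) {
    const int filtered = decoder.Filter(input);
    if (filtered < 0) return false;  // decoder reported a malformed chunk
    result.append(input, 0, static_cast<size_t>(filtered));
  }
  return result == expected_output && decoder.reached_eof() == expected_eof;
}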

/aosp_15_r20/external/pytorch/test/autograd/

test_functional.py
    174  TypeError, "The inputs given to vjp must be either a Tensor"
    250  inputs = ctors.rand(4, 4)
    253  res = autogradF.vjp(reducer, inputs, v)
    258  inputs.requires_grad_()
    261  res = autogradF.vjp(reducer, inputs, v, create_graph=True)
    271  inputs = ctors.rand(4, 4)
    273  res = autogradF.vjp(reducer, inputs, v)
    274  self._assert_same_struct(res[1], inputs)
    281  inputs = (ctors.rand(2), ctors.rand(2))
    283  out, vjp_val = autogradF.vjp(adder, inputs, v)
    [all …]

/aosp_15_r20/external/pytorch/torch/autograd/

functional.py
    69  def _grad_preprocess(inputs, create_graph, need_graph):   argument
    70  # Preprocess the inputs to make sure they require gradient
    71  # inputs is a tuple of Tensors to preprocess
    72  # create_graph specifies if the user wants gradients to flow back to the Tensors in inputs
    75  # inputs given as arguments and the same Tensors automatically captured by the user function.
    78  for inp in inputs:
    92  def _grad_postprocess(inputs, create_graph):   argument
    95  if isinstance(inputs[0], torch.Tensor):
    97  return tuple(inp.detach() for inp in inputs)
    99  return inputs
    [all …]

/aosp_15_r20/external/tensorflow/tensorflow/compiler/tests/

lstm_layer_inference.pbtxt
    287  name: "inputs/random_uniform/shape"
    312  name: "inputs/random_uniform/min"
    334  name: "inputs/random_uniform/max"
    356  name: "inputs/random_uniform/RandomUniform"
    358  input: "inputs/random_uniform/shape"
    386  name: "inputs/random_uniform/sub"
    388  input: "inputs/random_uniform/max"
    389  input: "inputs/random_uniform/min"
    399  name: "inputs/random_uniform/mul"
    401  input: "inputs/random_uniform/RandomUniform"
    [all …]

/aosp_15_r20/external/tensorflow/tensorflow/python/data/experimental/kernel_tests/

csv_dataset_test.py
    39  def _setup_files(self, inputs, linebreak='\n', compression_type=None):   argument
    41  for i, file_rows in enumerate(inputs):
    59  def _make_test_datasets(self, inputs, **kwargs):   argument
    61  filenames = self._setup_files(inputs)
    68  def _test_by_comparison(self, inputs, **kwargs):   argument
    71  inputs, **kwargs)
    76  inputs,   argument
    84  filenames = self._setup_files(inputs, linebreak, compression_type)
    103  inputs = [['1,2,3,4']]
    104  self._test_by_comparison(inputs, record_defaults=record_defaults)
    [all …]

/aosp_15_r20/external/pytorch/torch/_inductor/

mkldnn_ir.py
    42  This function is a helper function to prepare inputs, layout and constant args
    44  layout (channels first or channels last), realizing inputs and make them etc. The
    166  inputs = [x, weight]
    179  inputs.append(bias)
    182  return inputs, constant_args, kernel_layout, req_stride_order
    192  This function is a helper function to prepare inputs, layout and constant args
    211  inputs = [x, weight]
    223  inputs.append(bias)
    226  return inputs, constant_args, kernel_layout, req_stride_order
    233  inputs,   argument
    [all …]

/aosp_15_r20/external/deqp/modules/gles31/functional/

es31fShaderPackingFunctionTests.cpp
    133  m_spec.inputs.push_back(Symbol("in0", glu::VarType(glu::TYPE_FLOAT_VEC2, precision)));   in PackSnorm2x16Case()
    142  std::vector<tcu::Vec2> inputs;   in iterate() local
    149  inputs.push_back(tcu::Vec2(0.0f, 0.0f));   in iterate()
    150  inputs.push_back(tcu::Vec2(-1.0f, 1.0f));   in iterate()
    151  inputs.push_back(tcu::Vec2(0.5f, -0.5f));   in iterate()
    152  inputs.push_back(tcu::Vec2(-1.5f, 1.5f));   in iterate()
    153  inputs.push_back(tcu::Vec2(0.25f, -0.75f));   in iterate()
    160  inputs.push_back(tcu::Vec2(x, y));   in iterate()
    168  inputs.push_back(tcu::Vec2(x, y));   in iterate()
    171  outputs.resize(inputs.size());   in iterate()
    [all …]
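
Both this file and the Vulkan variant below drive packSnorm2x16 with a handful of edge values plus random Vec2 data and then validate against a CPU reference. The reference itself is small; the sketch below follows the GLSL spec formula (clamp to [-1, 1], scale by 32767, round, pack x into the low half and y into the high half) outside the deqp harness, so the rounding tolerance the tests allow is not modeled here.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

// packSnorm2x16 reference per the GLSL spec formula:
//   round(clamp(c, -1, +1) * 32767.0), packed as two signed 16-bit halves.
static uint32_t packSnorm2x16(float x, float y) {
  const auto toSnorm16 = [](float v) {
    const float clamped = std::min(std::max(v, -1.0f), 1.0f);
    const long rounded = std::lround(clamped * 32767.0f);
    return static_cast<uint16_t>(static_cast<int16_t>(rounded));
  };
  return static_cast<uint32_t>(toSnorm16(x)) |
         (static_cast<uint32_t>(toSnorm16(y)) << 16);
}

int main() {
  // Same style of edge input the tests push first: out-of-range values clamp to +/-1.
  std::printf("0x%08X\n", packSnorm2x16(-1.5f, 1.5f));  // prints 0x7FFF8001
  return 0;
}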

/aosp_15_r20/external/deqp/external/vulkancts/modules/vulkan/shaderexecutor/

vktShaderPackingFunctionTests.cpp
    151  std::vector<tcu::Vec2> inputs;   in iterate() local
    158  inputs.push_back(tcu::Vec2(0.0f, 0.0f));   in iterate()
    159  inputs.push_back(tcu::Vec2(-1.0f, 1.0f));   in iterate()
    160  inputs.push_back(tcu::Vec2(0.5f, -0.5f));   in iterate()
    161  inputs.push_back(tcu::Vec2(-1.5f, 1.5f));   in iterate()
    162  inputs.push_back(tcu::Vec2(0.25f, -0.75f));   in iterate()
    167  inputs.push_back(tcu::randomVector<float, 2>(rnd, tcu::Vec2(-1.25f), tcu::Vec2(1.25f)));   in iterate()
    173  … inputs.push_back(tcu::randomVector<float, 2>(rnd, tcu::Vec2(-0.5e6f), tcu::Vec2(0.5e6f)));   in iterate()
    176  outputs.resize(inputs.size());   in iterate()
    178  …m_testCtx.getLog() << TestLog::Message << "Executing shader for " << inputs.size() << " input valu…   in iterate()
    [all …]

/aosp_15_r20/external/tensorflow/tensorflow/compiler/mlir/tfrt/benchmarks/

transpose_op_benchmark.cc
    58  return [perm](llvm::ArrayRef<Tensor> inputs,   in Shuffle()
    62  shuffled[d] = inputs[0].dim_size(perm[d]);   in Shuffle()
    66  auto in0 = inputs[0].tensor<float, size>();   in Shuffle()
    77  static llvm::SmallVector<InputTensorSpec> Inputs(llvm::ArrayRef<ssize_t> dims) {   in Inputs() function
    84  BM(Jitrt(Transpose_small_1x0, Transpose2D(), "compute", Inputs({128, 128})));
    85  BM(JitrtV(Transpose_small_1x0, Transpose2D(), "compute", Inputs({128, 128})));
    86  BM(Tfrt(Transpose_small_1x0, Transpose2D(), "compute", Inputs({128, 128})));
    87  BM(Eigen(Transpose_small_1x0, Shuffle<2>({1, 0}), Inputs({128, 128})));
    91  Inputs({32, 32, 16})));
    93  Inputs({32, 32, 16})));
    [all …]
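
The Eigen baseline in this benchmark is built from Tensor::shuffle with the requested permutation, applied to the first input tensor. A standalone version of that reference, using the unsupported Eigen Tensor module directly and a tiny 2x3 example instead of the benchmark's 128x128 inputs:

#include <unsupported/Eigen/CXX11/Tensor>

#include <iostream>

int main() {
  // 2x3 input transposed to 3x2 via shuffle({1, 0}), the same operation the
  // Shuffle<2>({1, 0}) baseline above performs on the benchmark inputs.
  Eigen::Tensor<float, 2> input(2, 3);
  input.setValues({{1, 2, 3}, {4, 5, 6}});

  const Eigen::array<int, 2> perm = {1, 0};
  const Eigen::Tensor<float, 2> shuffled = input.shuffle(perm);

  std::cout << "dims: " << shuffled.dimension(0) << "x" << shuffled.dimension(1)
            << ", shuffled(2, 1) = " << shuffled(2, 1) << "\n";  // 3x2, 6
  return 0;
}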