/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/delegates/gpu/common/tasks/softmax_test_util.h"

#include <cmath>
#include <memory>
#include <vector>

#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/task/testing_util.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/softmax.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/softmax1x1.h"

namespace tflite {
namespace gpu {

absl::Status SoftmaxTest(TestExecutionEnvironment* env) {
  TensorFloat32 src_tensor;
  src_tensor.shape = BHWC(1, 2, 1, 2);
  src_tensor.data = {std::log(1.0f), std::log(2.0f), std::log(3.0f),
                     std::log(4.0f)};

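  // The inputs are logs, so after exponentiation the channel values are
  // {1, 2} at the first HxW location and {3, 4} at the second. Softmax
  // normalizes each location over its channels (as the expected values
  // below imply), giving {1/3, 2/3} and {3/7, 4/7}.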
  for (auto precision : env->GetSupportedPrecisions()) {
    auto data_type = DeduceDataTypeFromPrecision(precision);
    for (auto storage : env->GetSupportedStorages(data_type)) {
      const float eps = precision == CalculationsPrecision::F32 ? 1e-6f : 1e-3f;
      OperationDef op_def;
      op_def.precision = precision;
      op_def.src_tensors.push_back({data_type, storage, Layout::HWC});
      op_def.dst_tensors.push_back({data_type, storage, Layout::HWC});
      TensorFloat32 dst_tensor;
      GPUOperation operation = CreateSoftmax(op_def);
      RETURN_IF_ERROR(env->ExecuteGPUOperation(
          src_tensor, std::make_unique<GPUOperation>(std::move(operation)),
          BHWC(1, 2, 1, 2), &dst_tensor));
      RETURN_IF_ERROR(
          PointWiseNear({1.0f / 3.0f, 2.0f / 3.0f, 3.0f / 7.0f, 4.0f / 7.0f},
                        dst_tensor.data, eps));
    }
  }
  return absl::OkStatus();
}

absl::Status SoftmaxBigNumberTest(TestExecutionEnvironment* env) {
  TensorFloat32 src_tensor;
  src_tensor.shape = BHWC(1, 2, 1, 2);
  double doubles[4] = {1.0, 2.0, 3.0, 100.0};
  // exp(100) is inf in float (32 bit) but representable in double (64 bit)
  src_tensor.data.resize(4);
  src_tensor.data[0] = doubles[0];
  src_tensor.data[1] = doubles[1];
  src_tensor.data[2] = doubles[2];
  src_tensor.data[3] = doubles[3];
  if (!std::isinf(std::exp(src_tensor.data[3]))) {
    return absl::InternalError("exp(100.0f) not inf in float (32 bit)");
  }
  if (std::isinf(std::exp(doubles[3]))) {
    return absl::InternalError("exp(100.0) inf in double (64 bit)");
  }
  double s0 = std::exp(doubles[0]) + std::exp(doubles[1]);
  double s1 = std::exp(doubles[2]) + std::exp(doubles[3]);

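  // A naive float32 reference (exponentiate, then normalize) would overflow
  // to inf for the exp(100) entry, so the expected values are derived in
  // double and the kernel is required to stay finite. Subtracting the
  // maximum over the channels before exponentiation is the usual way to
  // achieve this; that is a general softmax trick, not necessarily how this
  // particular kernel is written.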
  for (auto precision : env->GetSupportedPrecisions()) {
    auto data_type = DeduceDataTypeFromPrecision(precision);
    for (auto storage : env->GetSupportedStorages(data_type)) {
      const float eps = precision == CalculationsPrecision::F32 ? 1e-6f : 1e-3f;
      OperationDef op_def;
      op_def.precision = precision;
      op_def.src_tensors.push_back({data_type, storage, Layout::HWC});
      op_def.dst_tensors.push_back({data_type, storage, Layout::HWC});
      TensorFloat32 dst_tensor;
      GPUOperation operation = CreateSoftmax(op_def);
      RETURN_IF_ERROR(env->ExecuteGPUOperation(
          src_tensor, std::make_unique<GPUOperation>(std::move(operation)),
          BHWC(1, 2, 1, 2), &dst_tensor));
      RETURN_IF_ERROR(
          PointWiseNear({static_cast<float>(std::exp(doubles[0]) / s0),
                         static_cast<float>(std::exp(doubles[1]) / s0),
                         static_cast<float>(std::exp(doubles[2]) / s1),
                         static_cast<float>(std::exp(doubles[3]) / s1)},
                        dst_tensor.data, eps));
    }
  }
  return absl::OkStatus();
}

absl::Status Softmax1x1Test(TestExecutionEnvironment* env) {
  TensorFloat32 src_tensor;
  src_tensor.shape = BHWC(1, 1, 1, 4);
  src_tensor.data = {std::log(1.0f), std::log(2.0f), std::log(3.0f),
                     std::log(4.0f)};

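  // Single 1x1 location with four channels: after exponentiation the values
  // are {1, 2, 3, 4}, which sum to 10, so the expected softmax output is
  // {0.1, 0.2, 0.3, 0.4}.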
  for (auto precision : env->GetSupportedPrecisions()) {
    auto data_type = DeduceDataTypeFromPrecision(precision);
    for (auto storage : env->GetSupportedStorages(data_type)) {
      const float eps = precision == CalculationsPrecision::F32 ? 1e-6f : 1e-3f;
      OperationDef op_def;
      op_def.precision = precision;
      op_def.src_tensors.push_back({data_type, storage, Layout::HWC});
      op_def.dst_tensors.push_back({data_type, storage, Layout::HWC});
      TensorFloat32 dst_tensor;
      Softmax1x1 operation = CreateSoftmax1x1(op_def);
      RETURN_IF_ERROR(env->ExecuteGPUOperation(
          src_tensor, std::make_unique<Softmax1x1>(std::move(operation)),
          BHWC(1, 1, 1, 4), &dst_tensor));
      RETURN_IF_ERROR(
          PointWiseNear({0.1f, 0.2f, 0.3f, 0.4f}, dst_tensor.data, eps));
    }
  }
  return absl::OkStatus();
}

absl::Status Softmax1x1BigNumberTest(TestExecutionEnvironment* env) {
  TensorFloat32 src_tensor;
  src_tensor.shape = BHWC(1, 1, 1, 4);
  double doubles[4] = {1.0, 2.0, 3.0, 100.0};
  // exp(100) is inf in float (32 bit) but representable in double (64 bit)
  src_tensor.data.resize(4);
  src_tensor.data[0] = doubles[0];
  src_tensor.data[1] = doubles[1];
  src_tensor.data[2] = doubles[2];
  src_tensor.data[3] = doubles[3];
  if (!std::isinf(std::exp(src_tensor.data[3]))) {
    return absl::InternalError("exp(100.0f) not inf in float (32 bit)");
  }
  if (std::isinf(std::exp(doubles[3]))) {
    return absl::InternalError("exp(100.0) inf in double (64 bit)");
  }
  double s0 = std::exp(doubles[0]) + std::exp(doubles[1]) +
              std::exp(doubles[2]) + std::exp(doubles[3]);

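  // Same overflow scenario as SoftmaxBigNumberTest, except all four values
  // share one normalization sum s0, which exp(100) dominates; within the
  // test tolerance the expected output is roughly {0, 0, 0, 1}.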
  for (auto precision : env->GetSupportedPrecisions()) {
    auto data_type = DeduceDataTypeFromPrecision(precision);
    for (auto storage : env->GetSupportedStorages(data_type)) {
      const float eps = precision == CalculationsPrecision::F32 ? 1e-6f : 1e-3f;
      OperationDef op_def;
      op_def.precision = precision;
      op_def.src_tensors.push_back({data_type, storage, Layout::HWC});
      op_def.dst_tensors.push_back({data_type, storage, Layout::HWC});
      TensorFloat32 dst_tensor;
      Softmax1x1 operation = CreateSoftmax1x1(op_def);
      RETURN_IF_ERROR(env->ExecuteGPUOperation(
          src_tensor, std::make_unique<Softmax1x1>(std::move(operation)),
          BHWC(1, 1, 1, 4), &dst_tensor));
      RETURN_IF_ERROR(
          PointWiseNear({static_cast<float>(std::exp(doubles[0]) / s0),
                         static_cast<float>(std::exp(doubles[1]) / s0),
                         static_cast<float>(std::exp(doubles[2]) / s0),
                         static_cast<float>(std::exp(doubles[3]) / s0)},
                        dst_tensor.data, eps));
    }
  }
  return absl::OkStatus();
}

}  // namespace gpu
}  // namespace tflite