# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
import os
import stat
import numpy as np

import pytest
import pyarmnn as ann


def test_optimizer_options_default_values():
    opt = ann.OptimizerOptions()
    assert opt.m_ReduceFp32ToFp16 == False
    assert opt.m_Debug == False
    assert opt.m_ReduceFp32ToBf16 == False
    assert opt.m_ImportEnabled == False
    assert opt.m_shapeInferenceMethod == ann.ShapeInferenceMethod_ValidateOnly


def test_optimizer_options_set_values1():
    opt = ann.OptimizerOptions(True, True)
    assert opt.m_ReduceFp32ToFp16 == True
    assert opt.m_Debug == True
    assert opt.m_ReduceFp32ToBf16 == False
    assert opt.m_ImportEnabled == False
    assert opt.m_shapeInferenceMethod == ann.ShapeInferenceMethod_ValidateOnly


def test_optimizer_options_set_values2():
    opt = ann.OptimizerOptions(False, False, True)
    assert opt.m_ReduceFp32ToFp16 == False
    assert opt.m_Debug == False
    assert opt.m_ReduceFp32ToBf16 == True
    assert opt.m_ImportEnabled == False
    assert opt.m_shapeInferenceMethod == ann.ShapeInferenceMethod_ValidateOnly


def test_optimizer_options_set_values3():
    opt = ann.OptimizerOptions(False, False, True, ann.ShapeInferenceMethod_InferAndValidate, True)
    assert opt.m_ReduceFp32ToFp16 == False
    assert opt.m_Debug == False
    assert opt.m_ReduceFp32ToBf16 == True
    assert opt.m_ImportEnabled == True
    assert opt.m_shapeInferenceMethod == ann.ShapeInferenceMethod_InferAndValidate


@pytest.fixture(scope="function")
def get_runtime(shared_data_folder, network_file):
    # Yields (preferred_backends, network, runtime); the tests below unpack the tuple by index.
    parser = ann.ITfLiteParser()
    preferred_backends = [ann.BackendId('CpuAcc'), ann.BackendId('CpuRef')]
    network = parser.CreateNetworkFromBinaryFile(os.path.join(shared_data_folder, network_file))
    options = ann.CreationOptions()
    runtime = ann.IRuntime(options)

    yield preferred_backends, network, runtime


@pytest.mark.parametrize("network_file",
                         [
                             'mock_model.tflite',
                         ],
                         ids=['mock_model'])
def test_optimize_executes_successfully(network_file, get_runtime):
    preferred_backends = [ann.BackendId('CpuRef')]
    network = get_runtime[1]
    runtime = get_runtime[2]

    opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())

    assert len(messages) == 0, 'With only CpuRef, there should be no warnings regardless of architecture.'
    assert opt_network


@pytest.mark.parametrize("network_file",
                         [
                             'mock_model.tflite',
                         ],
                         ids=['mock_model'])
def test_optimize_owned_by_python(network_file, get_runtime):
    preferred_backends = get_runtime[0]
    network = get_runtime[1]
    runtime = get_runtime[2]

    opt_network, _ = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
    assert opt_network.thisown


@pytest.mark.aarch64
@pytest.mark.parametrize("network_file",
                         [
                             'mock_model.tflite'
                         ],
                         ids=['mock_model'])
def test_optimize_executes_successfully_for_neon_backend_only(network_file, get_runtime):
    preferred_backends = [ann.BackendId('CpuAcc')]
    network = get_runtime[1]
    runtime = get_runtime[2]

    opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
    assert 0 == len(messages)
    assert opt_network


@pytest.mark.parametrize("network_file",
                         [
                             'mock_model.tflite'
                         ],
                         ids=['mock_model'])
def test_optimize_fails_for_invalid_backends(network_file, get_runtime):
    invalid_backends = [ann.BackendId('Unknown')]
    network = get_runtime[1]
    runtime = get_runtime[2]

    with pytest.raises(RuntimeError) as err:
        ann.Optimize(network, invalid_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())

    expected_error_message = "None of the preferred backends [Unknown ] are supported."
    assert expected_error_message in str(err.value)


@pytest.mark.parametrize("network_file",
                         [
                             'mock_model.tflite'
                         ],
                         ids=['mock_model'])
def test_optimize_fails_for_no_backends_specified(network_file, get_runtime):
    empty_backends = []
    network = get_runtime[1]
    runtime = get_runtime[2]

    with pytest.raises(RuntimeError) as err:
        ann.Optimize(network, empty_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())

    expected_error_message = "Invoked Optimize with no backends specified"
    assert expected_error_message in str(err.value)


@pytest.mark.parametrize("network_file",
                         [
                             'mock_model.tflite'
                         ],
                         ids=['mock_model'])
def test_serialize_to_dot(network_file, get_runtime, tmpdir):
    preferred_backends = get_runtime[0]
    network = get_runtime[1]
    runtime = get_runtime[2]
    opt_network, _ = ann.Optimize(network, preferred_backends,
                                  runtime.GetDeviceSpec(), ann.OptimizerOptions())
    dot_file_path = os.path.join(tmpdir, 'mock_model.dot')
    # Check that serialized file does not exist at the start, gets created after SerializeToDot and is not empty.
    assert not os.path.exists(dot_file_path)
    opt_network.SerializeToDot(dot_file_path)

    assert os.path.exists(dot_file_path)

    with open(dot_file_path) as res_file:
        expected_data = res_file.read()
        assert len(expected_data) > 1
        assert '[label=< [1,28,28,1] >]' in expected_data


@pytest.mark.x86_64
@pytest.mark.parametrize("network_file",
                         [
                             'mock_model.tflite'
                         ],
                         ids=['mock_model'])
def test_serialize_to_dot_mode_readonly(network_file, get_runtime, tmpdir):
    preferred_backends = get_runtime[0]
    network = get_runtime[1]
    runtime = get_runtime[2]
    opt_network, _ = ann.Optimize(network, preferred_backends,
                                  runtime.GetDeviceSpec(), ann.OptimizerOptions())
    # Create file, write to it and change mode to read-only.
    dot_file_path = os.path.join(tmpdir, 'mock_model.dot')
    f = open(dot_file_path, "w+")
    f.write("test")
    f.close()
    os.chmod(dot_file_path, stat.S_IREAD)
    assert os.path.exists(dot_file_path)

    with pytest.raises(RuntimeError) as err:
        opt_network.SerializeToDot(dot_file_path)

    expected_error_message = "Failed to open dot file"
    assert expected_error_message in str(err.value)


@pytest.mark.parametrize("method", [
    'AddActivationLayer',
    'AddAdditionLayer',
    'AddArgMinMaxLayer',
    'AddBatchNormalizationLayer',
    'AddBatchToSpaceNdLayer',
    'AddCastLayer',
    'AddChannelShuffleLayer',
    'AddComparisonLayer',
    'AddConcatLayer',
    'AddConstantLayer',
    'AddConvolution2dLayer',
    'AddConvolution3dLayer',
    'AddDepthToSpaceLayer',
    'AddDepthwiseConvolution2dLayer',
    'AddDequantizeLayer',
    'AddDetectionPostProcessLayer',
    'AddDivisionLayer',
    'AddElementwiseUnaryLayer',
    'AddFloorLayer',
    'AddFillLayer',
    'AddFullyConnectedLayer',
    'AddGatherLayer',
    'AddGatherNdLayer',
    'AddInputLayer',
    'AddInstanceNormalizationLayer',
    'AddLogicalBinaryLayer',
    'AddLogSoftmaxLayer',
    'AddL2NormalizationLayer',
    'AddLstmLayer',
    'AddMaximumLayer',
    'AddMeanLayer',
    'AddMergeLayer',
    'AddMinimumLayer',
    'AddMultiplicationLayer',
    'AddNormalizationLayer',
    'AddOutputLayer',
    'AddPadLayer',
    'AddPermuteLayer',
    'AddPooling2dLayer',
    'AddPooling3dLayer',
    'AddPreluLayer',
    'AddQuantizeLayer',
    'AddQuantizedLstmLayer',
    'AddRankLayer',
    'AddReduceLayer',
    'AddReshapeLayer',
    'AddResizeLayer',
    'AddShapeLayer',
    'AddSliceLayer',
    'AddSoftmaxLayer',
    'AddSpaceToBatchNdLayer',
    'AddSpaceToDepthLayer',
    'AddSplitterLayer',
    'AddStackLayer',
    'AddStandInLayer',
    'AddStridedSliceLayer',
    'AddSubtractionLayer',
    'AddSwitchLayer',
    'AddTransposeConvolution2dLayer',
    'AddTransposeLayer'
])
def test_network_method_exists(method):
    assert getattr(ann.INetwork, method, None)


def test_Convolution2d_layer_optional_none():
    net = ann.INetwork()
    layer = net.AddConvolution2dLayer(convolution2dDescriptor=ann.Convolution2dDescriptor())

    assert layer


def test_Convolution2d_layer_all_args():
    net = ann.INetwork()
    layer = net.AddConvolution2dLayer(convolution2dDescriptor=ann.Convolution2dDescriptor(),
                                      name='NAME1')

    assert layer
    assert 'NAME1' == layer.GetName()

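
# The test below builds a small graph by hand (an input layer plus constant weights and constant
# bias all feeding a FullyConnectedLayer, followed by an output layer), optimizes it for CpuRef
# and runs it. The expected values are the usual fully connected result,
# output = input x weights + bias, written out term by term so the arithmetic can be checked by eye.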
def test_add_constant_layer_to_fully_connected():

    inputWidth = 1
    inputHeight = 1
    inputChannels = 5
    inputNum = 2

    outputChannels = 3
    outputNum = 2

    inputShape = (inputNum, inputChannels, inputHeight, inputWidth)
    outputShape = (outputNum, outputChannels)
    weightsShape = (inputChannels, outputChannels)
    biasShape = (outputChannels,)

    input = np.array([
        [1.0, 2.0, 3.0, 4.0, 5.0],
        [5.0, 4.0, 3.0, 2.0, 1.0]
    ], dtype=np.float32)

    weights = np.array([
        [.5, 2., .5],
        [.5, 2., 1.],
        [.5, 2., 2.],
        [.5, 2., 3.],
        [.5, 2., 4.]
    ], dtype=np.float32)

    biasValues = np.array([10, 20, 30], dtype=np.float32)

    expectedOutput = np.array([
        [0.5 + 1.0 + 1.5 + 2.0 + 2.5 + biasValues[0],
         2.0 + 4.0 + 6.0 + 8.0 + 10. + biasValues[1],
         0.5 + 2.0 + 6.0 + 12. + 20. + biasValues[2]],
        [2.5 + 2.0 + 1.5 + 1.0 + 0.5 + biasValues[0],
         10.0 + 8.0 + 6.0 + 4.0 + 2. + biasValues[1],
         2.5 + 4.0 + 6.0 + 6. + 4. + biasValues[2]]
    ], dtype=np.float32)

    network = ann.INetwork()

    input_info = ann.TensorInfo(ann.TensorShape(inputShape), ann.DataType_Float32, 0, 0, True)
    input_tensor = ann.ConstTensor(input_info, input)
    input_layer = network.AddInputLayer(0, "input")

    w_info = ann.TensorInfo(ann.TensorShape(weightsShape), ann.DataType_Float32, 0, 0, True)
    w_tensor = ann.ConstTensor(w_info, weights)
    w_layer = network.AddConstantLayer(w_tensor, "weights")

    b_info = ann.TensorInfo(ann.TensorShape(biasShape), ann.DataType_Float32, 0, 0, True)
    b_tensor = ann.ConstTensor(b_info, biasValues)
    b_layer = network.AddConstantLayer(b_tensor, "bias")

    fc_descriptor = ann.FullyConnectedDescriptor()
    fc_descriptor.m_BiasEnabled = True
    fc_descriptor.m_ConstantWeights = True
    fully_connected = network.AddFullyConnectedLayer(fc_descriptor, "fc")

    output_info = ann.TensorInfo(ann.TensorShape(outputShape), ann.DataType_Float32)
    output_tensor = ann.Tensor(output_info, np.zeros([1, 1], dtype=np.float32))
    output = network.AddOutputLayer(0, "output")

    input_layer.GetOutputSlot(0).Connect(fully_connected.GetInputSlot(0))
    w_layer.GetOutputSlot(0).Connect(fully_connected.GetInputSlot(1))
    b_layer.GetOutputSlot(0).Connect(fully_connected.GetInputSlot(2))
    fully_connected.GetOutputSlot(0).Connect(output.GetInputSlot(0))

    input_layer.GetOutputSlot(0).SetTensorInfo(input_info)
    w_layer.GetOutputSlot(0).SetTensorInfo(w_info)
    b_layer.GetOutputSlot(0).SetTensorInfo(b_info)
    fully_connected.GetOutputSlot(0).SetTensorInfo(output_info)

    preferred_backends = [ann.BackendId('CpuRef')]
    options = ann.CreationOptions()
    runtime = ann.IRuntime(options)
    opt_network, messages = ann.Optimize(network, preferred_backends, runtime.GetDeviceSpec(), ann.OptimizerOptions())
    net_id, messages = runtime.LoadNetwork(opt_network)

    input_tensors = [(0, input_tensor)]
    output_tensors = [(0, output_tensor)]
    runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)

    output_vectors = ann.workload_tensors_to_ndarray(output_tensors)

    assert (output_vectors == expectedOutput).all()
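

# A minimal, NumPy-only sketch cross-checking the hand-computed expectedOutput values used above:
# for a fully connected layer, output = input x weights + bias. This check is not part of the
# original suite and exercises no pyarmnn APIs, so a failure here points at the reference
# constants rather than at the runtime.
def test_fully_connected_reference_arithmetic():
    input_values = np.array([[1.0, 2.0, 3.0, 4.0, 5.0],
                             [5.0, 4.0, 3.0, 2.0, 1.0]], dtype=np.float32)
    weights = np.array([[.5, 2., .5],
                        [.5, 2., 1.],
                        [.5, 2., 2.],
                        [.5, 2., 3.],
                        [.5, 2., 4.]], dtype=np.float32)
    bias = np.array([10, 20, 30], dtype=np.float32)

    # Recompute output = input x weights + bias with plain NumPy.
    reference = input_values @ weights + bias
    expected = np.array([[17.5, 50.0, 70.5],
                         [17.5, 50.0, 52.5]], dtype=np.float32)

    assert np.allclose(reference, expected)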