1# MIT License 2# 3# Copyright (c) 2021 VeriSilicon, INC. 4# Copyright (c) 2023 Tomeu Vizoso 5# 6# Permission is hereby granted, free of charge, to any person obtaining a copy 7# of this software and associated documentation files (the "Software"), to deal 8# in the Software without restriction, including without limitation the rights 9# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10# copies of the Software, and to permit persons to whom the Software is 11# furnished to do so, subject to the following conditions: 12# 13# The above copyright notice and this permission notice shall be included in all 14# copies or substantial portions of the Software. 15# 16# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22# SOFTWARE. 23 24import math 25import os 26import os.path 27import re 28import sys 29import tempfile 30import time 31 32import numpy as np 33import pytest 34import json 35 36import tensorflow as tf 37from tensorflow import keras 38 39MODEL_PATH = "conv2d.tflite" 40 41def create_model_keras(batch_size, in_w, in_h, k_w, k_h, in_ch, out_ch, stride, padding, signed, seed, depthwise): 42 tf.random.set_seed(seed) 43 44 input_shape = [batch_size, in_h, in_w, in_ch] 45 out_channel = out_ch 46 kernel_shape = [k_w, k_h] 47 input_dtype = tf.float32 48 49 if depthwise: 50 conv = keras.layers.DepthwiseConv2D(kernel_size=kernel_shape, strides=stride, padding=padding, depth_multiplier=1) 51 else: 52 conv = keras.layers.Conv2D(filters=out_channel, kernel_size=kernel_shape, strides=stride, padding=padding) 53 54 model = keras.models.Sequential([ 55 keras.layers.InputLayer(input_shape=input_shape[1:], batch_size=input_shape[0]), 56 conv 57 ]) 58 model.build(input_shape=input_shape) 59 60 if depthwise: 61 weight_shape = [k_w, k_h, in_ch, 1] 62 else: 63 weight_shape = [k_w, k_h, in_ch, out_ch] 64 65 weight_data = tf.random.normal(weight_shape, 0, 127, input_dtype, seed=seed) 66 bias_data = tf.random.normal((out_ch, ), 0, 127, input_dtype, seed=seed) 67 model.set_weights([np.asarray(weight_data, dtype=np.float32), np.asarray(bias_data, dtype=np.float32)]) 68 69 tmp = tempfile.NamedTemporaryFile(delete=False, prefix="conv2d-", suffix=".h5", mode="w") 70 model.save(tmp.name) 71 tmp.close() 72 converter = tf.compat.v1.lite.TFLiteConverter.from_keras_model_file(tmp.name) 73 os.unlink(tmp.name) 74 75 converter.quantized_input_stats = {model.layers[0].input.name: (128, 128.0)} 76 converter.default_ranges_stats = (0.0, 6.0) 77 78 if signed: 79 converter.inference_input_type = tf.int8 80 converter.inference_output_type = tf.int8 81 converter.inference_type = tf.int8 82 else: 83 converter.inference_input_type = tf.uint8 84 converter.inference_output_type = tf.uint8 85 converter.inference_type = tf.uint8 86 87 tflite_model = converter.convert() 88 89 fp = open(MODEL_PATH, "wb") 90 fp.write(tflite_model) 91 fp.flush() 92 93 tf.lite.experimental.Analyzer.analyze(model_path=MODEL_PATH, gpu_compatibility=True) 94 95 return MODEL_PATH 96 97def tflite_to_json(file_path): 98 ret = os.system("flatc --json src/gallium/frontends/teflon/tests/tflite_schema.fbs -- " + file_path) 99 assert(ret == 0) 100 return os.path.splitext(file_path)[0] + ".json" 101 102WEIGHTS_BUFFER = 2 103BIAS_BUFFER = 3 104VERSION_BUFFER = 5 105 106def zero_irrelevant_values(file_path, signed): 107 model_data = open(file_path).read() 108 model_data = re.sub("(\\\"(.*?)\\\"|(\\w+))(\\s*:\\s*(\\\".*?\\\"|.))", "\"\\2\\3\"\\4", model_data) 109 model = json.loads(model_data) 110 #print(json.dumps(model, indent=4)) 111 if "version" in model["operator_codes"][0].keys(): 112 del model["operator_codes"][0]["version"] 113 for subgraph in model["subgraphs"]: 114 for tensor in subgraph["tensors"]: 115 tensor["name"] = "" 116 if signed: 117 tensor["quantization"]["scale"] = [0.0] * len(tensor["quantization"]["scale"]) 118 else: 119 tensor["quantization"]["scale"] = [0.0] 120 if signed: 121 tensor["quantization"]["zero_point"] = [0] * len(tensor["quantization"]["zero_point"]) 122 else: 123 tensor["quantization"]["zero_point"] = [0] 124 125 model["buffers"][BIAS_BUFFER]["data"] = [0] * len(model["buffers"][BIAS_BUFFER]["data"]) 126 model["buffers"][WEIGHTS_BUFFER]["data"] = [0] * len(model["buffers"][WEIGHTS_BUFFER]["data"]) 127 model["buffers"][VERSION_BUFFER]["data"] = [0] 128 129 if "signature_defs" in model: 130 del model["signature_defs"] 131 132 open(file_path, "w").write(json.dumps(model, indent=4)) 133 134def diff(file_1, file_2): 135 ret = os.system("diff -U30 -u " + file_1 + " " + file_2) 136 assert(ret == 0) 137 138def create_model(batch_size, in_w, in_h, k_w, k_h, in_ch, out_ch, stride, padding, signed, seed, depthwise): 139 args = ['build/src/gallium/targets/teflon/test_teflon', 140 'generate_model', 141 str(in_w), 142 str(k_w), 143 str(in_ch), 144 str(out_ch), 145 str(stride), 146 "1" if padding == "same" else "0", 147 str(int(signed)), 148 str(int(depthwise)), 149 str(seed)] 150 print(' '.join(args)) 151 os.system(' '.join(args)) 152 return "model.tflite" 153 154def convolution(batch_size, input_size, weight_size, in_ch, out_ch, stride, padding, signed, seed, depthwise): 155 156 in_w = input_size 157 in_h = input_size 158 k_w = weight_size 159 k_h = weight_size 160 161 # Depthwise convolutions require the out channels to be a multiple of input channels 162 assert not (depthwise and out_ch % in_ch != 0) 163 164 # Depthwise convolutions with a single IFM don't make sense 165 assert not (depthwise and in_ch == 1) 166 167 # Depthwise convolutions with IFM != OFM are not supported 168 assert not (depthwise and out_ch != in_ch) 169 170 np.random.seed(seed) 171 172 model_file = create_model_keras(batch_size, in_w, in_h, k_w, k_h, in_ch, out_ch, stride, padding, signed, seed, depthwise) 173 model_file_2 = create_model(batch_size, in_w, in_h, k_w, k_h, in_ch, out_ch, stride, padding, signed, seed, depthwise) 174 175 json_file = tflite_to_json(model_file) 176 json_file_2 = tflite_to_json(model_file_2) 177 178 os.unlink(model_file) 179 os.unlink(model_file_2) 180 181 zero_irrelevant_values(json_file, signed) 182 zero_irrelevant_values(json_file_2, signed) 183 184 #print(json.dumps(json.loads(open(json_file).read()), indent=4)) 185 186 diff(json_file, json_file_2) 187 188 os.unlink(json_file) 189 os.unlink(json_file_2) 190 191@pytest.mark.parametrize("batch_size", [1]) 192@pytest.mark.parametrize("input_size", [4, 112]) 193@pytest.mark.parametrize("weight_size", [1, 3]) 194@pytest.mark.parametrize("in_ch", [1, 32, 120, 128, 256]) 195@pytest.mark.parametrize("out_ch", [1, 32, 120, 128, 256, 480]) 196@pytest.mark.parametrize("stride", [1, 2]) 197@pytest.mark.parametrize("padding", ["valid", "same"]) 198@pytest.mark.parametrize("signed", [False]) 199@pytest.mark.parametrize("seed", [4, 5]) 200def test_conv2d(batch_size, input_size, weight_size, in_ch, out_ch, stride, padding, signed, seed): 201 s = "%r-%r-%s-%r-%r-%r-%r-%r-%r" % (seed, signed, padding, stride, out_ch, in_ch, weight_size, input_size, batch_size) 202 print(s, file=sys.stderr) 203 convolution(batch_size, input_size, weight_size, in_ch, out_ch, stride, padding, signed, seed, depthwise=False) 204 205@pytest.mark.parametrize("batch_size", [1]) 206@pytest.mark.parametrize("input_size", [4, 112]) 207@pytest.mark.parametrize("weight_size", [3]) 208@pytest.mark.parametrize("channels", [32, 128, 256]) 209@pytest.mark.parametrize("stride", [1, 2]) 210@pytest.mark.parametrize("padding", ["valid", "same"]) 211@pytest.mark.parametrize("signed", [False]) 212@pytest.mark.parametrize("seed", [4, 5]) 213def test_depthwise(batch_size, input_size, weight_size, channels, stride, padding, signed, seed): 214 s = "%r-%s-%r-%r-%r-%r-%r-%r" % (seed, signed, padding, stride, channels, weight_size, input_size, batch_size) 215 print(s, file=sys.stderr) 216 convolution(batch_size, input_size, weight_size, channels, channels, stride, padding, signed, seed, depthwise=True) 217 218test_conv2d(1, 80, 5, 16, 128, 2, "same", False, 4)