#!/bin/bash
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

set -exu

# shellcheck source=/dev/null
source "$(dirname "${BASH_SOURCE[0]}")/utils.sh"

MODEL_NAME=$1
if [[ -z "${MODEL_NAME:-}" ]]; then
  echo "Missing model name, exiting..."
  exit 1
fi

BUILD_TOOL=$2
if [[ -z "${BUILD_TOOL:-}" ]]; then
  echo "Missing build tool (requires buck2 or cmake), exiting..."
  exit 1
fi

BACKEND=$3
if [[ -z "${BACKEND:-}" ]]; then
  echo "Missing backend (requires portable or xnnpack), exiting..."
  exit 1
fi

UPLOAD_DIR=${4:-}
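
# Example invocation (the upload dir is illustrative; mv2, cmake, and xnnpack
# are values this script already handles):
#   bash .ci/scripts/test_model.sh mv2 cmake xnnpack /tmp/artifacts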

if [[ -z "${PYTHON_EXECUTABLE:-}" ]]; then
  PYTHON_EXECUTABLE=python3
fi
which "${PYTHON_EXECUTABLE}"

# Just set this variable here; it's cheap even if we use buck2
CMAKE_OUTPUT_DIR=cmake-out
EXPORTED_MODEL=${MODEL_NAME}

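# Zip the exported model and stage it in UPLOAD_DIR for CI to pick up.
# No-op when no upload directory was passed as the fourth argument.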
prepare_artifacts_upload() {
  if [ -n "$UPLOAD_DIR" ]; then
    echo "Preparing to upload generated artifacts"
    zip -j model.zip "${EXPORTED_MODEL}"
    mkdir -p "${UPLOAD_DIR}"
    mv model.zip "${UPLOAD_DIR}"
  fi
}

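# Build the portable executor_runner with CMake; the binary lands at
# ${CMAKE_OUTPUT_DIR}/executor_runner (the path invoked further below).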
build_cmake_executor_runner() {
  echo "Building executor_runner"
  rm -rf ${CMAKE_OUTPUT_DIR}
  cmake -DCMAKE_BUILD_TYPE=Debug \
      -DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
      -DPYTHON_EXECUTABLE="$PYTHON_EXECUTABLE" \
      -B${CMAKE_OUTPUT_DIR} .

  cmake --build ${CMAKE_OUTPUT_DIR} -j4 --config Debug
}

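# Run the exported ${MODEL_NAME}.pte through the portable executor_runner,
# building the runner first when the cmake toolchain is selected.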
run_portable_executor_runner() {
  # Run test model
  if [[ "${BUILD_TOOL}" == "buck2" ]]; then
    buck2 run //examples/portable/executor_runner:executor_runner -- --model_path "./${MODEL_NAME}.pte"
  elif [[ "${BUILD_TOOL}" == "cmake" ]]; then
    build_cmake_executor_runner
    ./${CMAKE_OUTPUT_DIR}/executor_runner --model_path "./${MODEL_NAME}.pte"
  else
    echo "Invalid build tool ${BUILD_TOOL}. Only buck2 and cmake are supported at the moment"
    exit 1
  fi
}

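# Export MODEL_NAME via the portable export script (installing any
# model-specific requirements first) and run it on the portable runner.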
test_model() {
  if [[ "${MODEL_NAME}" == "llama2" ]]; then
    # Install requirements for export_llama
    bash examples/models/llama/install_requirements.sh
    # Test export_llama script: python3 -m examples.models.llama.export_llama
    "${PYTHON_EXECUTABLE}" -m examples.models.llama.export_llama --model "${MODEL_NAME}" -c examples/models/llama/params/demo_rand_params.pth -p examples/models/llama/params/demo_config.json
    run_portable_executor_runner
    rm "./${MODEL_NAME}.pte"
  fi
  STRICT="--strict"
  if [[ "${MODEL_NAME}" == "llava" ]]; then
    # Install requirements for llava
    bash examples/models/llava/install_requirements.sh
    STRICT="--no-strict"
  fi
  if [[ "$MODEL_NAME" == "llama3_2_vision_encoder" || "$MODEL_NAME" == "llama3_2_text_decoder" ]]; then
    # Install requirements for llama vision.
    bash examples/models/llama3_2_vision/install_requirements.sh
  fi
  # python3 -m examples.portable.scripts.export --model_name="llama2" should work too
  "${PYTHON_EXECUTABLE}" -m examples.portable.scripts.export --model_name="${MODEL_NAME}" "${STRICT}"
  run_portable_executor_runner
}

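# Build the XNNPACK-enabled runner with CMake; the binary lands at
# ${CMAKE_OUTPUT_DIR}/backends/xnnpack/xnn_executor_runner (checked below
# before rebuilding).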
build_cmake_xnn_executor_runner() {
  echo "Building xnn_executor_runner"
  SITE_PACKAGES="$(${PYTHON_EXECUTABLE} -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')"
  CMAKE_PREFIX_PATH="${SITE_PACKAGES}/torch"

  (rm -rf ${CMAKE_OUTPUT_DIR} \
    && mkdir ${CMAKE_OUTPUT_DIR} \
    && cd ${CMAKE_OUTPUT_DIR} \
    && retry cmake -DCMAKE_BUILD_TYPE=Release \
      -DEXECUTORCH_BUILD_XNNPACK=ON \
      -DCMAKE_PREFIX_PATH="$CMAKE_PREFIX_PATH" \
      -DPYTHON_EXECUTABLE="$PYTHON_EXECUTABLE" ..)

  cmake --build ${CMAKE_OUTPUT_DIR} -j4
}

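# Export MODEL_NAME through the XNNPACK flow and run it. Takes two booleans,
# quantization and delegation; for example, `test_model_with_xnnpack true false`
# exercises quantization without delegation.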
test_model_with_xnnpack() {
  WITH_QUANTIZATION=$1
  WITH_DELEGATION=$2

  # Quantization-only
  if [[ ${WITH_QUANTIZATION} == true ]] && [[ ${WITH_DELEGATION} == false ]]; then
    bash examples/xnnpack/quantization/test_quantize.sh "${BUILD_TOOL}" "${MODEL_NAME}"
    return 0
  fi

  # Delegation
  if [[ ${WITH_QUANTIZATION} == true ]]; then
    SUFFIX="q8"
    "${PYTHON_EXECUTABLE}" -m examples.xnnpack.aot_compiler --model_name="${MODEL_NAME}" --delegate --quantize
  else
    SUFFIX="fp32"
    "${PYTHON_EXECUTABLE}" -m examples.xnnpack.aot_compiler --model_name="${MODEL_NAME}" --delegate
  fi

  OUTPUT_MODEL_PATH="${MODEL_NAME}_xnnpack_${SUFFIX}.pte"
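  # For example: mv2_xnnpack_q8.pte when MODEL_NAME=mv2 and quantization is on.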
  EXPORTED_MODEL=${OUTPUT_MODEL_PATH}

  # Run test model
  if [[ "${BUILD_TOOL}" == "buck2" ]]; then
    buck2 run //examples/xnnpack:xnn_executor_runner -- --model_path "${OUTPUT_MODEL_PATH}"
  elif [[ "${BUILD_TOOL}" == "cmake" ]]; then
    if [[ ! -f ${CMAKE_OUTPUT_DIR}/backends/xnnpack/xnn_executor_runner ]]; then
      build_cmake_xnn_executor_runner
    fi
    ./${CMAKE_OUTPUT_DIR}/backends/xnnpack/xnn_executor_runner --model_path "${OUTPUT_MODEL_PATH}"
  else
    echo "Invalid build tool ${BUILD_TOOL}. Only buck2 and cmake are supported at the moment"
    exit 1
  fi
}

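# Compile MODEL_NAME ahead-of-time for the Qualcomm QNN backend.
# build-qnn-sdk.sh (sourced below) is expected to provide QNN_SDK_ROOT,
# ANDROID_NDK_ROOT, and EXECUTORCH_ROOT.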
test_model_with_qnn() {
  source "$(dirname "${BASH_SOURCE[0]}")/build-qnn-sdk.sh"
  echo "ANDROID_NDK_ROOT: $ANDROID_NDK_ROOT"
  echo "QNN_SDK_ROOT: $QNN_SDK_ROOT"
  echo "EXECUTORCH_ROOT: $EXECUTORCH_ROOT"

  export LD_LIBRARY_PATH=$QNN_SDK_ROOT/lib/x86_64-linux-clang/
  export PYTHONPATH=$EXECUTORCH_ROOT/..

  if [[ "${MODEL_NAME}" == "dl3" ]]; then
    EXPORT_SCRIPT=deeplab_v3
  elif [[ "${MODEL_NAME}" == "mv3" ]]; then
    EXPORT_SCRIPT=mobilenet_v3
  elif [[ "${MODEL_NAME}" == "mv2" ]]; then
    EXPORT_SCRIPT=mobilenet_v2
  elif [[ "${MODEL_NAME}" == "ic4" ]]; then
    EXPORT_SCRIPT=inception_v4
  elif [[ "${MODEL_NAME}" == "ic3" ]]; then
    EXPORT_SCRIPT=inception_v3
  elif [[ "${MODEL_NAME}" == "vit" ]]; then
    EXPORT_SCRIPT=torchvision_vit
  else
    # Fail loudly rather than tripping set -u on an unset EXPORT_SCRIPT below.
    echo "Unsupported model ${MODEL_NAME} for the qnn backend, exiting..."
    exit 1
  fi

  # Use SM8450 for S22, SM8550 for S23, and SM8650 for S24
  # TODO(guangyang): Make the QNN chipset match the target device
  QNN_CHIPSET=SM8450

  "${PYTHON_EXECUTABLE}" -m examples.qualcomm.scripts.${EXPORT_SCRIPT} -b ${CMAKE_OUTPUT_DIR} -m ${QNN_CHIPSET} --compile_only
  EXPORTED_MODEL=$(find "./${EXPORT_SCRIPT}" -type f -name "${MODEL_NAME}*.pte" -print -quit)
}

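# Export MODEL_NAME with the Core ML backend (cmake only) and rename the
# resulting .pte to record the compute precision.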
test_model_with_coreml() {
  if [[ "${BUILD_TOOL}" == "buck2" ]]; then
    echo "coreml doesn't support buck2."
    exit 1
  fi

  DTYPE=float16

  "${PYTHON_EXECUTABLE}" -m examples.apple.coreml.scripts.export --model_name="${MODEL_NAME}" --compute_precision "${DTYPE}"
  EXPORTED_MODEL=$(find "." -type f -name "${MODEL_NAME}*.pte" -print -quit)
  # TODO:
  if [ -n "$EXPORTED_MODEL" ]; then
    EXPORTED_MODEL_WITH_DTYPE="${EXPORTED_MODEL%.pte}_${DTYPE}.pte"
    mv "$EXPORTED_MODEL" "$EXPORTED_MODEL_WITH_DTYPE"
    EXPORTED_MODEL="$EXPORTED_MODEL_WITH_DTYPE"
    echo "Renamed file path: $EXPORTED_MODEL"
  else
    echo "No .pte file found"
    exit 1
  fi
}

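# Export MODEL_NAME with the MPS backend in fp16 and rename the resulting
# .pte to record the dtype.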
test_model_with_mps() {
  "${PYTHON_EXECUTABLE}" -m examples.apple.mps.scripts.mps_example --model_name="${MODEL_NAME}" --use_fp16
  # --use_fp16 exports in fp16; set DTYPE here so the rename below works even
  # when test_model_with_coreml (which also sets DTYPE) never ran (set -u).
  DTYPE=float16
  EXPORTED_MODEL=$(find "." -type f -name "${MODEL_NAME}*.pte" -print -quit)
  # TODO:
  if [ -n "$EXPORTED_MODEL" ]; then
    EXPORTED_MODEL_WITH_DTYPE="${EXPORTED_MODEL%.pte}_${DTYPE}.pte"
    mv "$EXPORTED_MODEL" "$EXPORTED_MODEL_WITH_DTYPE"
    EXPORTED_MODEL="$EXPORTED_MODEL_WITH_DTYPE"
    echo "Renamed file path: $EXPORTED_MODEL"
  else
    echo "No .pte file found"
    exit 1
  fi
}

if [[ "${BACKEND}" == "portable" ]]; then
  echo "Testing ${MODEL_NAME} with portable kernels..."
  test_model
elif [[ "${BACKEND}" == "qnn" ]]; then
  echo "Testing ${MODEL_NAME} with qnn..."
  test_model_with_qnn
  if [[ $? -eq 0 ]]; then
    prepare_artifacts_upload
  fi
elif [[ "${BACKEND}" == "coreml" ]]; then
  echo "Testing ${MODEL_NAME} with coreml..."
  test_model_with_coreml
  if [[ $? -eq 0 ]]; then
    prepare_artifacts_upload
  fi
elif [[ "${BACKEND}" == "mps" ]]; then
  echo "Testing ${MODEL_NAME} with mps..."
  test_model_with_mps
  if [[ $? -eq 0 ]]; then
    prepare_artifacts_upload
  fi
elif [[ "${BACKEND}" == "xnnpack" ]]; then
  echo "Testing ${MODEL_NAME} with xnnpack..."
  WITH_QUANTIZATION=true
  WITH_DELEGATION=true
  if [[ "$MODEL_NAME" == "mobilebert" ]]; then
    # TODO(T197452682)
    WITH_QUANTIZATION=false
  fi
  test_model_with_xnnpack "${WITH_QUANTIZATION}" "${WITH_DELEGATION}"
  if [[ $? -eq 0 ]]; then
    prepare_artifacts_upload
  fi
else
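  # Any other BACKEND value is treated as a combination string: substring
  # matches on "quantization" and "delegation" pick which XNNPACK modes run
  # (e.g. a BACKEND of "xnnpack-quantization-delegation" runs all three).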
  set +e
  if [[ "${BACKEND}" == *"quantization"* ]]; then
    echo "::group::Testing ${MODEL_NAME} with XNNPACK quantization only..."
    test_model_with_xnnpack true false || Q_ERROR="error"
    echo "::endgroup::"
  fi
  if [[ "${BACKEND}" == *"delegation"* ]]; then
    echo "::group::Testing ${MODEL_NAME} with XNNPACK delegation only..."
    test_model_with_xnnpack false true || D_ERROR="error"
    echo "::endgroup::"
  fi
  if [[ "${BACKEND}" == *"quantization"* ]] && [[ "${BACKEND}" == *"delegation"* ]]; then
    echo "::group::Testing ${MODEL_NAME} with XNNPACK quantization and delegation..."
    test_model_with_xnnpack true true || Q_D_ERROR="error"
    echo "::endgroup::"
  fi
  set -e
  if [[ -n "${Q_ERROR:-}" ]] || [[ -n "${D_ERROR:-}" ]] || [[ -n "${Q_D_ERROR:-}" ]]; then
    echo "Portable q8 ${Q_ERROR:-ok}," "Delegation fp32 ${D_ERROR:-ok}," "Delegation q8 ${Q_D_ERROR:-ok}"
    exit 1
  else
    prepare_artifacts_upload
  fi
fi