xref: /aosp_15_r20/external/executorch/.ci/scripts/build_llama_android.sh (revision 523fa7a60841cd1ecfb9cc4201f1ca8b03ed023a)
1#!/bin/bash
2# Copyright (c) Meta Platforms, Inc. and affiliates.
3# All rights reserved.
4#
5# This source code is licensed under the BSD-style license found in the
6# LICENSE file in the root directory of this source tree.
7
8set -exu
9
10# shellcheck source=/dev/null
11source "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
12
#######################################
# Cross-compile the executorch core runtime plus the XNNPACK backend for
# Android and install the resulting libraries into cmake-android-out/.
# Globals:   ANDROID_NDK, BUCK2, ANDROID_ABI (read if preset, else defaulted
#            and exported to later steps such as build_llama_runner)
# Arguments: none
# Outputs:   build artifacts under cmake-android-out/
#######################################
install_executorch_and_backend_lib() {
  echo "Installing executorch and xnnpack backend"
  # Start from a clean tree so stale CMake caches cannot leak into the build.
  rm -rf cmake-android-out && mkdir cmake-android-out
  # Honor values already provided by the CI environment; fall back to the
  # historical hard-coded defaults so existing callers see no change.
  ANDROID_NDK="${ANDROID_NDK:-/opt/ndk}"
  BUCK2="${BUCK2:-buck2}"
  ANDROID_ABI="${ANDROID_ABI:-arm64-v8a}"
  cmake -DBUCK2="${BUCK2}" \
    -DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK}/build/cmake/android.toolchain.cmake" \
    -DANDROID_ABI="${ANDROID_ABI}" \
    -DCMAKE_INSTALL_PREFIX=cmake-android-out \
    -DCMAKE_BUILD_TYPE=Release \
    -DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON \
    -DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \
    -DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \
    -DEXECUTORCH_BUILD_XNNPACK=ON \
    -DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
    -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
    -DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
    -DXNNPACK_ENABLE_ARM_BF16=OFF \
    -Bcmake-android-out .

  cmake --build cmake-android-out -j4 --target install --config Release
}
36
#######################################
# Cross-compile the llama example runner for Android against the libraries
# installed by install_executorch_and_backend_lib.
# Globals:   ANDROID_NDK, BUCK2, ANDROID_ABI (read if preset, else defaulted)
# Arguments: none
# Outputs:   runner binary under cmake-android-out/examples/models/llama
#######################################
build_llama_runner() {
  echo "Building llama runner for Android..."
  # Default the toolchain variables here too: previously BUCK2/ANDROID_NDK
  # were only set inside install_executorch_and_backend_lib, so under
  # `set -u` this function failed unless that function ran first in the
  # same shell. The defaults remove that hidden ordering dependency.
  ANDROID_NDK="${ANDROID_NDK:-/opt/ndk}"
  BUCK2="${BUCK2:-buck2}"
  ANDROID_ABI="${ANDROID_ABI:-arm64-v8a}"
  cmake -DBUCK2="${BUCK2}" \
    -DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK}/build/cmake/android.toolchain.cmake" \
    -DANDROID_ABI="${ANDROID_ABI}" \
    -DCMAKE_INSTALL_PREFIX=cmake-android-out \
    -DCMAKE_BUILD_TYPE=Release \
    -DPYTHON_EXECUTABLE=python \
    -DEXECUTORCH_BUILD_XNNPACK=ON \
    -DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
    -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
    -DEXECUTORCH_BUILD_KERNELS_CUSTOM=ON \
    -Bcmake-android-out/examples/models/llama examples/models/llama

  cmake --build cmake-android-out/examples/models/llama -j4 --config Release
}
# Entry point: build flatc, then the core runtime + backend libraries,
# then the llama example runner.
main() {
  install_flatc_from_source
  install_executorch_and_backend_lib
  build_llama_runner
}

main "$@"
56