# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# Copyright 2024 Arm Limited and/or its affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# Maps a short model name (the key accepted by the example scripts) to a
# (module_name, class_name) pair: module_name is a subdirectory of
# examples/models, and class_name is the model wrapper class it exports.
MODEL_NAME_TO_MODEL = {
    "mul": ("toy_model", "MulModule"),
    "linear": ("toy_model", "LinearModule"),
    "add": ("toy_model", "AddModule"),
    "add_mul": ("toy_model", "AddMulModule"),
    "softmax": ("toy_model", "SoftmaxModule"),
    "dl3": ("deeplab_v3", "DeepLabV3ResNet50Model"),
    "edsr": ("edsr", "EdsrModel"),
    "emformer_transcribe": ("emformer_rnnt", "EmformerRnntTranscriberModel"),
    "emformer_predict": ("emformer_rnnt", "EmformerRnntPredictorModel"),
    "emformer_join": ("emformer_rnnt", "EmformerRnntJoinerModel"),
    "llama2": ("llama", "Llama2Model"),
    "llama": ("llama", "Llama2Model"),  # alias for "llama2"
    "llama3_2_vision_encoder": ("llama3_2_vision", "FlamingoVisionEncoderModel"),
    # "llama3_2_text_decoder": ("llama3_2_vision", "Llama3_2Decoder"),
    "lstm": ("lstm", "LSTMModel"),
    "mobilebert": ("mobilebert", "MobileBertModelExample"),
    "mv2": ("mobilenet_v2", "MV2Model"),
    "mv2_untrained": ("mobilenet_v2", "MV2UntrainedModel"),
    "mv3": ("mobilenet_v3", "MV3Model"),
    "vit": ("torchvision_vit", "TorchVisionViTModel"),
    "w2l": ("wav2letter", "Wav2LetterModel"),
    "ic3": ("inception_v3", "InceptionV3Model"),
    "ic4": ("inception_v4", "InceptionV4Model"),
    "resnet18": ("resnet", "ResNet18Model"),
    "resnet50": ("resnet", "ResNet50Model"),
    "llava": ("llava", "LlavaModel"),
}

__all__ = [
    "MODEL_NAME_TO_MODEL",
]
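

# A minimal usage sketch, not part of the upstream module's API: it shows how
# a MODEL_NAME_TO_MODEL entry can be resolved to its model class via dynamic
# import. The "executorch.examples.models" package path is an assumption based
# on this file's location; the example scripts resolve these entries through
# their own model factory.
def _load_model_class(name: str):
    """Illustrative helper (hypothetical): resolve a registry entry to a class."""
    import importlib

    # Look up the (module directory, class name) pair for the given model name.
    module_name, class_name = MODEL_NAME_TO_MODEL[name]
    # Import the sibling module and fetch the wrapper class it exports,
    # e.g. _load_model_class("mv2") -> mobilenet_v2.MV2Model.
    module = importlib.import_module(f"executorch.examples.models.{module_name}")
    return getattr(module, class_name)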