1 /*
2 * Copyright (c) Meta Platforms, Inc. and affiliates.
3 * All rights reserved.
4 *
5 * This source code is licensed under the BSD-style license found in the
6 * LICENSE file in the root directory of this source tree.
7 */
8
9 #include <executorch/extension/data_loader/file_data_loader.h>
10 #include <executorch/runtime/executor/method.h>
11 #include <executorch/runtime/executor/program.h>
12 #include <executorch/runtime/platform/log.h>
13 #include <executorch/runtime/platform/profiler.h>
14 #include <executorch/runtime/platform/runtime.h>
15 #include <stdio.h>
16
17 using namespace torch::executor;
18 using torch::executor::util::FileDataLoader;
19
// Statically-allocated pool backing the MemoryAllocator used for Method
// bookkeeping structures (and the output EValue list below). 1 KiB is
// sized for the tiny demo model this runner targets.
static uint8_t method_allocator_pool[1024];
// Statically-allocated arena for memory-planned (activation) tensor data.
// The loaded program's memory plan must fit within these 512 bytes.
static uint8_t activation_pool[512];
22
main(int argc,char ** argv)23 int main(int argc, char** argv) {
24 runtime_init();
25
26 ET_CHECK_MSG(argc == 2, "Expected model file argument.");
27
28 MemoryAllocator method_allocator(
29 sizeof(method_allocator_pool), method_allocator_pool);
30 method_allocator.enable_profiling("method allocator");
31
32 Span<uint8_t> memory_planned_buffers[1]{
33 {activation_pool, sizeof(activation_pool)}};
34 HierarchicalAllocator planned_memory({memory_planned_buffers, 1});
35
36 MemoryManager memory_manager(&method_allocator, &planned_memory);
37
38 Result<FileDataLoader> loader = FileDataLoader::from(argv[1]);
39 ET_CHECK_MSG(
40 loader.ok(),
41 "FileDataLoader::from() failed: 0x%" PRIx32,
42 static_cast<uint32_t>(loader.error()));
43
44 uint32_t prof_tok = EXECUTORCH_BEGIN_PROF("de-serialize model");
45 const auto program = Program::load(&loader.get());
46 EXECUTORCH_END_PROF(prof_tok);
47 ET_CHECK_MSG(
48 program.ok(),
49 "Program::load() failed: 0x%" PRIx32,
50 static_cast<uint32_t>(program.error()));
51 ET_LOG(Info, "Program file %s loaded.", argv[1]);
52
53 // Use the first method in the program.
54 const char* method_name = nullptr;
55 {
56 const auto method_name_result = program->get_method_name(0);
57 ET_CHECK_MSG(method_name_result.ok(), "Program has no methods");
58 method_name = *method_name_result;
59 }
60 ET_LOG(Info, "Loading method %s", method_name);
61
62 prof_tok = EXECUTORCH_BEGIN_PROF("load model");
63 Result<Method> method = program->load_method(method_name, &memory_manager);
64 EXECUTORCH_END_PROF(prof_tok);
65 ET_CHECK(method.ok());
66
67 ET_LOG(Info, "Method loaded.");
68
69 // Prepare for inputs
70 // It assumes the input is one tensor.
71 float data[] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0};
72 Tensor::SizesType sizes[] = {6};
73 Tensor::DimOrderType dim_order[] = {0};
74 TensorImpl impl(ScalarType::Float, 1, sizes, data, dim_order);
75 Tensor t(&impl);
76 Error set_input_error = method->set_input(t, 0);
77 ET_CHECK(set_input_error == Error::Ok);
78
79 ET_LOG(Info, "Inputs prepared.");
80
81 prof_tok = EXECUTORCH_BEGIN_PROF("run model");
82 Error status = method->execute();
83 EXECUTORCH_END_PROF(prof_tok);
84 ET_CHECK(status == Error::Ok);
85 ET_LOG(Info, "Model executed successfully.");
86
87 // print output
88 auto output_list =
89 method_allocator.allocateList<EValue>(method->outputs_size());
90
91 status = method->get_outputs(output_list, method->outputs_size());
92 ET_CHECK(status == Error::Ok);
93
94 // It assumes the outputs are all tensors.
95 for (size_t i = 0; i < method->outputs_size(); i++) {
96 auto output_tensor = output_list[i].toTensor();
97 auto data_output = output_tensor.const_data_ptr<float>();
98 for (size_t j = 0; j < output_list[i].toTensor().numel(); ++j) {
99 ET_LOG(Info, "%f", data_output[j]);
100 }
101 }
102 prof_result_t prof_result;
103 EXECUTORCH_DUMP_PROFILE_RESULTS(&prof_result);
104 if (prof_result.num_bytes != 0) {
105 FILE* ptr = fopen("prof_result.bin", "w+");
106 fwrite(prof_result.prof_data, 1, prof_result.num_bytes, ptr);
107 fclose(ptr);
108 }
109
110 return 0;
111 }
112