1 /*
2 * Copyright (c) Meta Platforms, Inc. and affiliates.
3 * All rights reserved.
4 *
5 * This source code is licensed under the BSD-style license found in the
6 * LICENSE file in the root directory of this source tree.
7 */
8
9 /**
10 * @file
11 *
12 * This tool can run ExecuTorch model files that only use operators that
13 * are covered by the portable kernels, with possible delegate to the
14 * test_backend_compiler_lib.
15 *
16 * It sets all input tensor data to ones, and assumes that the outputs are
17 * all fp32 tensors.
18 */
19
#include <array>
#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <memory>
#include <vector>

#include <gflags/gflags.h>

#include <executorch/devtools/bundled_program/bundled_program.h>
#include <executorch/devtools/etdump/etdump_flatcc.h>
#include <executorch/extension/data_loader/buffer_data_loader.h>
#include <executorch/runtime/executor/method.h>
#include <executorch/runtime/executor/program.h>
#include <executorch/runtime/platform/log.h>
#include <executorch/runtime/platform/runtime.h>
32
33 static std::array<uint8_t, 4 * 1024U * 1024U> method_allocator_pool; // 4MB
34
35 DEFINE_string(
36 bundled_program_path,
37 "model_bundled.bpte",
38 "Model serialized in flatbuffer format.");
39
40 DEFINE_int32(
41 testset_idx,
42 0,
43 "Index of bundled verification set to be run "
44 "by bundled model for verification");
45
46 DEFINE_string(
47 etdump_path,
48 "etdump.etdp",
49 "If etdump generation is enabled an etdump will be written out to this path");
50
51 DEFINE_bool(
52 output_verification,
53 false,
54 "Comapre the model output to the reference outputs present in the BundledProgram.");
55
56 DEFINE_bool(
57 print_output,
58 false,
59 "Print the output of the ET model to stdout, if needs.");
60
61 DEFINE_bool(dump_outputs, false, "Dump outputs to etdump file");
62
63 DEFINE_bool(
64 dump_intermediate_outputs,
65 false,
66 "Dump intermediate outputs to etdump file.");
67
68 DEFINE_string(
69 debug_output_path,
70 "debug_output.bin",
71 "Path to dump debug outputs to.");
72
73 DEFINE_int32(
74 debug_buffer_size,
75 262144, // 256 KB
76 "Size of the debug buffer in bytes to allocate for intermediate outputs and program outputs logging.");
77
78 using executorch::etdump::ETDumpGen;
79 using executorch::etdump::ETDumpResult;
80 using executorch::extension::BufferDataLoader;
81 using executorch::runtime::Error;
82 using executorch::runtime::EValue;
83 using executorch::runtime::EventTracerDebugLogLevel;
84 using executorch::runtime::HierarchicalAllocator;
85 using executorch::runtime::MemoryAllocator;
86 using executorch::runtime::MemoryManager;
87 using executorch::runtime::Method;
88 using executorch::runtime::MethodMeta;
89 using executorch::runtime::Program;
90 using executorch::runtime::Result;
91 using executorch::runtime::Span;
92
load_file_or_die(const char * path)93 std::vector<uint8_t> load_file_or_die(const char* path) {
94 std::ifstream file(path, std::ios::binary | std::ios::ate);
95 const size_t nbytes = file.tellg();
96 file.seekg(0, std::ios::beg);
97 auto file_data = std::vector<uint8_t>(nbytes);
98 ET_CHECK_MSG(
99 file.read(reinterpret_cast<char*>(file_data.data()), nbytes),
100 "Could not load contents of file '%s'",
101 path);
102 return file_data;
103 }
104
main(int argc,char ** argv)105 int main(int argc, char** argv) {
106 executorch::runtime::runtime_init();
107
108 gflags::ParseCommandLineFlags(&argc, &argv, true);
109 if (argc != 1) {
110 std::string msg = "Extra commandline args:";
111 for (int i = 1 /* skip argv[0] (program name) */; i < argc; i++) {
112 msg += std::string(" ") + argv[i];
113 }
114 ET_LOG(Error, "%s", msg.c_str());
115 return 1;
116 }
117
118 // Read in the entire file.
119 const char* bundled_program_path = FLAGS_bundled_program_path.c_str();
120 std::vector<uint8_t> file_data = load_file_or_die(bundled_program_path);
121
122 // Find the offset to the embedded Program.
123 const void* program_data;
124 size_t program_data_len;
125 Error status = executorch::bundled_program::get_program_data(
126 reinterpret_cast<void*>(file_data.data()),
127 file_data.size(),
128 &program_data,
129 &program_data_len);
130 ET_CHECK_MSG(
131 status == Error::Ok,
132 "get_program_data() failed on file '%s': 0x%x",
133 bundled_program_path,
134 (unsigned int)status);
135
136 auto buffer_data_loader = BufferDataLoader(program_data, program_data_len);
137
138 // Parse the program file. This is immutable, and can also be reused
139 // between multiple execution invocations across multiple threads.
140 Result<Program> program = Program::load(&buffer_data_loader);
141 if (!program.ok()) {
142 ET_LOG(Error, "Failed to parse model file %s", bundled_program_path);
143 return 1;
144 }
145 ET_LOG(Info, "Model file %s is loaded.", bundled_program_path);
146
147 // Use the first method in the program.
148 const char* method_name = nullptr;
149 {
150 const auto method_name_result = program->get_method_name(0);
151 ET_CHECK_MSG(method_name_result.ok(), "Program has no methods");
152 method_name = *method_name_result;
153 }
154 ET_LOG(Info, "Running method %s", method_name);
155
156 // MethodMeta describes the memory requirements of the method.
157 Result<MethodMeta> method_meta = program->method_meta(method_name);
158 ET_CHECK_MSG(
159 method_meta.ok(),
160 "Failed to get method_meta for %s: 0x%x",
161 method_name,
162 (unsigned int)method_meta.error());
163
164 //
165 // The runtime does not use malloc/new; it allocates all memory using the
166 // MemoryManger provided by the client. Clients are responsible for allocating
167 // the memory ahead of time, or providing MemoryAllocator subclasses that can
168 // do it dynamically.
169 //
170
171 // The method allocator is used to allocate all dynamic C++ metadata/objects
172 // used to represent the loaded method. This allocator is only used during
173 // loading a method of the program, which will return an error if there was
174 // not enough memory.
175 //
176 // The amount of memory required depends on the loaded method and the runtime
177 // code itself. The amount of memory here is usually determined by running the
178 // method and seeing how much memory is actually used, though it's possible to
179 // subclass MemoryAllocator so that it calls malloc() under the hood (see
180 // MallocMemoryAllocator).
181 //
182 // In this example we use a statically allocated memory pool.
183 MemoryAllocator method_allocator{MemoryAllocator(
184 sizeof(method_allocator_pool), method_allocator_pool.data())};
185
186 // The memory-planned buffers will back the mutable tensors used by the
187 // method. The sizes of these buffers were determined ahead of time during the
188 // memory-planning pasees.
189 //
190 // Each buffer typically corresponds to a different hardware memory bank. Most
191 // mobile environments will only have a single buffer. Some embedded
192 // environments may have more than one for, e.g., slow/large DRAM and
193 // fast/small SRAM, or for memory associated with particular cores.
194 std::vector<std::unique_ptr<uint8_t[]>> planned_buffers; // Owns the memory
195 std::vector<Span<uint8_t>> planned_spans; // Passed to the allocator
196 size_t num_memory_planned_buffers = method_meta->num_memory_planned_buffers();
197 for (size_t id = 0; id < num_memory_planned_buffers; ++id) {
198 // .get() will always succeed because id < num_memory_planned_buffers.
199 size_t buffer_size =
200 static_cast<size_t>(method_meta->memory_planned_buffer_size(id).get());
201 ET_LOG(Info, "Setting up planned buffer %zu, size %zu.", id, buffer_size);
202 planned_buffers.push_back(std::make_unique<uint8_t[]>(buffer_size));
203 planned_spans.push_back({planned_buffers.back().get(), buffer_size});
204 }
205 HierarchicalAllocator planned_memory(
206 {planned_spans.data(), planned_spans.size()});
207
208 // Assemble all of the allocators into the MemoryManager that the Executor
209 // will use.
210 MemoryManager memory_manager(&method_allocator, &planned_memory);
211
212 //
213 // Load the method from the program, using the provided allocators. Running
214 // the method can mutate the memory-planned buffers, so the method should only
215 // be used by a single thread at at time, but it can be reused.
216 //
217 ETDumpGen etdump_gen;
218 Result<Method> method =
219 program->load_method(method_name, &memory_manager, &etdump_gen);
220 ET_CHECK_MSG(
221 method.ok(),
222 "Loading of method %s failed with status 0x%" PRIx32,
223 method_name,
224 static_cast<int>(method.error()));
225 ET_LOG(Info, "Method loaded.");
226
227 void* debug_buffer = malloc(FLAGS_debug_buffer_size);
228 if (FLAGS_dump_intermediate_outputs) {
229 Span<uint8_t> buffer((uint8_t*)debug_buffer, FLAGS_debug_buffer_size);
230 etdump_gen.set_debug_buffer(buffer);
231 etdump_gen.set_event_tracer_debug_level(
232 EventTracerDebugLogLevel::kIntermediateOutputs);
233 } else if (FLAGS_dump_outputs) {
234 Span<uint8_t> buffer((uint8_t*)debug_buffer, FLAGS_debug_buffer_size);
235 etdump_gen.set_debug_buffer(buffer);
236 etdump_gen.set_event_tracer_debug_level(
237 EventTracerDebugLogLevel::kProgramOutputs);
238 }
239 // Use the inputs embedded in the bundled program.
240 status = executorch::bundled_program::load_bundled_input(
241 *method, file_data.data(), FLAGS_testset_idx);
242 ET_CHECK_MSG(
243 status == Error::Ok,
244 "LoadBundledInput failed with status 0x%" PRIx32,
245 static_cast<int>(status));
246
247 ET_LOG(Info, "Inputs prepared.");
248
249 // Run the model.
250 status = method->execute();
251 ET_CHECK_MSG(
252 status == Error::Ok,
253 "Execution of method %s failed with status 0x%" PRIx32,
254 method_name,
255 static_cast<int>(status));
256 ET_LOG(Info, "Model executed successfully.");
257
258 // Print the outputs.
259 if (FLAGS_print_output) {
260 std::vector<EValue> outputs(method->outputs_size());
261 status = method->get_outputs(outputs.data(), outputs.size());
262 ET_CHECK(status == Error::Ok);
263 for (EValue& output : outputs) {
264 // TODO(T159700776): This assumes that all outputs are fp32 tensors. Add
265 // support for other EValues and Tensor dtypes, and print tensors in a
266 // more readable way.
267 auto output_tensor = output.toTensor();
268 auto data_output = output_tensor.const_data_ptr<float>();
269 for (size_t j = 0; j < output_tensor.numel(); ++j) {
270 ET_LOG(Info, "%f", data_output[j]);
271 }
272 }
273 }
274
275 // Dump the etdump data containing profiling/debugging data to the specified
276 // file.
277 ETDumpResult result = etdump_gen.get_etdump_data();
278 if (result.buf != nullptr && result.size > 0) {
279 FILE* f = fopen(FLAGS_etdump_path.c_str(), "w+");
280 fwrite((uint8_t*)result.buf, 1, result.size, f);
281 fclose(f);
282 free(result.buf);
283 }
284
285 if (FLAGS_output_verification) {
286 // Verify the outputs.
287 status = executorch::bundled_program::verify_method_outputs(
288 *method,
289 file_data.data(),
290 FLAGS_testset_idx,
291 1e-3, // rtol
292 1e-5 // atol
293 );
294 ET_CHECK_MSG(
295 status == Error::Ok,
296 "Bundle verification failed with status 0x%" PRIx32,
297 static_cast<int>(status));
298 ET_LOG(Info, "Model verified successfully.");
299 }
300
301 if (FLAGS_dump_outputs || FLAGS_dump_intermediate_outputs) {
302 FILE* f = fopen(FLAGS_debug_output_path.c_str(), "w+");
303 fwrite((uint8_t*)debug_buffer, 1, FLAGS_debug_buffer_size, f);
304 fclose(f);
305 }
306 free(debug_buffer);
307
308 return 0;
309 }
310