xref: /aosp_15_r20/system/update_engine/payload_generator/full_update_generator.cc (revision 5a9231315b4521097b8dc3750bc806fcafe0c72f)
//
// Copyright (C) 2012 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "update_engine/payload_generator/full_update_generator.h"

#include <fcntl.h>
#include <inttypes.h>

#include <algorithm>
#include <deque>
#include <memory>

#include <base/format_macros.h>
#include <android-base/stringprintf.h>
#include <base/synchronization/lock.h>
#include <base/threading/simple_thread.h>
#include <brillo/secure_blob.h>

#include "update_engine/common/utils.h"
#include "update_engine/payload_generator/delta_diff_utils.h"

using std::vector;

namespace chromeos_update_engine {

namespace {

const size_t kDefaultFullChunkSize = 1024 * 1024;  // 1 MiB

// This class encapsulates the work performed by a full-update chunk-processing
// thread. The processor reads a chunk of data from the input file descriptor
// and compresses it. The processor is owned by the caller, which keeps it
// alive until all the work is done.
class ChunkProcessor : public base::DelegateSimpleThread::Delegate {
 public:
  // Read a chunk of |size| bytes from |fd| starting at offset |offset|.
  ChunkProcessor(const PayloadVersion& version,
                 int fd,
                 off_t offset,
                 size_t size,
                 BlobFileWriter* blob_file,
                 AnnotatedOperation* aop)
      : version_(version),
        fd_(fd),
        offset_(offset),
        size_(size),
        blob_file_(blob_file),
        aop_(aop) {}
  // We use a default move constructor since all the data members are POD types.
  ChunkProcessor(ChunkProcessor&&) = default;
  ~ChunkProcessor() override = default;

  // Overrides DelegateSimpleThread::Delegate.
  // Run() handles the read from |fd| in a thread-safe way, and stores the new
  // operation that generates the region starting at |offset| of size |size| in
  // the output operation |aop|. The associated blob data is written through
  // |blob_file|.
  void Run() override;

 private:
  bool ProcessChunk();

  // Work parameters.
  const PayloadVersion& version_;
  int fd_;
  off_t offset_;
  size_t size_;
  BlobFileWriter* blob_file_;
  AnnotatedOperation* aop_;

  DISALLOW_COPY_AND_ASSIGN(ChunkProcessor);
};

void ChunkProcessor::Run() {
  if (!ProcessChunk()) {
    LOG(ERROR) << "Error processing region at " << offset_ << " of size "
               << size_;
  }
}

bool ChunkProcessor::ProcessChunk() {
  brillo::Blob buffer_in_(size_);
  brillo::Blob op_blob;
  ssize_t bytes_read = -1;
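  // The read below is performed at an explicit offset rather than through a
  // shared file position, which is what makes it safe for several
  // ChunkProcessor threads to read from the same |fd_| concurrently (see the
  // class comment above).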
  TEST_AND_RETURN_FALSE(utils::PReadAll(
      fd_, buffer_in_.data(), buffer_in_.size(), offset_, &bytes_read));
  TEST_AND_RETURN_FALSE(bytes_read == static_cast<ssize_t>(size_));

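  // Pick the full-operation encoding for this chunk. GenerateBestFullOperation()
  // is expected to choose between a raw replace and the compressed replace
  // variants supported by |version_|, keeping whichever result is smallest.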
  InstallOperation::Type op_type;
  TEST_AND_RETURN_FALSE(diff_utils::GenerateBestFullOperation(
      buffer_in_, version_, &op_blob, &op_type));

  aop_->op.set_type(op_type);
  TEST_AND_RETURN_FALSE(aop_->SetOperationBlob(op_blob, blob_file_));
  return true;
}

}  // namespace

bool FullUpdateGenerator::GenerateOperations(
    const PayloadGenerationConfig& config,
    const PartitionConfig& old_part,
    const PartitionConfig& new_part,
    BlobFileWriter* blob_file,
    vector<AnnotatedOperation>* aops) {
  TEST_AND_RETURN_FALSE(new_part.ValidateExists());

  // FullUpdateGenerator requires a positive chunk_size; otherwise there would
  // be a single operation spanning the whole partition, which is not allowed.
  // For performance reasons, we force a small default hard limit of 1 MiB. This
  // limit can be changed in the config, and we will use the smaller of the two
  // soft/hard limits.
  size_t full_chunk_size;
  if (config.hard_chunk_size >= 0) {
    full_chunk_size = std::min(static_cast<size_t>(config.hard_chunk_size),
                               config.soft_chunk_size);
  } else {
    full_chunk_size = std::min(kDefaultFullChunkSize, config.soft_chunk_size);
    LOG(INFO) << "No chunk_size provided, using the default chunk_size for the "
              << "full operations: " << full_chunk_size << " bytes.";
  }
  TEST_AND_RETURN_FALSE(full_chunk_size > 0);
  TEST_AND_RETURN_FALSE(full_chunk_size % config.block_size == 0);

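  // As an illustration, assuming the common 4096-byte block size, the default
  // 1 MiB chunk covers 256 blocks, so a 1 GiB partition is split into 1024
  // full operations.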
  size_t chunk_blocks = full_chunk_size / config.block_size;
  size_t max_threads = diff_utils::GetMaxThreads();
  LOG(INFO) << "Compressing partition " << new_part.name << " from "
            << new_part.path << " splitting in chunks of " << chunk_blocks
            << " blocks (" << config.block_size << " bytes each) using "
            << max_threads << " threads";

  int in_fd = open(new_part.path.c_str(), O_RDONLY, 0);
  TEST_AND_RETURN_FALSE(in_fd >= 0);
  ScopedFdCloser in_fd_closer(&in_fd);
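  // ScopedFdCloser closes |in_fd| when it goes out of scope, covering both the
  // normal return and the early TEST_AND_RETURN_FALSE exits below.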

  // We potentially have all the ChunkProcessors in memory, but only
  // |max_threads| of them will actually hold a chunk buffer in memory while we
  // process.
  size_t partition_blocks = new_part.size / config.block_size;
  size_t num_chunks = utils::DivRoundUp(partition_blocks, chunk_blocks);
  aops->resize(num_chunks);
  vector<ChunkProcessor> chunk_processors;
  chunk_processors.reserve(num_chunks);
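  // Announce the expected number of blobs up front so that |blob_file| can
  // account for progress as the workers store each chunk's blob.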
  blob_file->IncTotalBlobs(num_chunks);

  for (size_t i = 0; i < num_chunks; ++i) {
    size_t start_block = i * chunk_blocks;
    // The last chunk could be smaller.
    size_t num_blocks =
        std::min(chunk_blocks, partition_blocks - i * chunk_blocks);

    // Preset all the static information about the operations. The
    // ChunkProcessor will set the rest.
    AnnotatedOperation* aop = aops->data() + i;
    aop->name = android::base::StringPrintf(
        "<%s-operation-%" PRIuS ">", new_part.name.c_str(), i);
    Extent* dst_extent = aop->op.add_dst_extents();
    dst_extent->set_start_block(start_block);
    dst_extent->set_num_blocks(num_blocks);

    chunk_processors.emplace_back(
        config.version,
        in_fd,
        static_cast<off_t>(start_block) * config.block_size,
        num_blocks * config.block_size,
        blob_file,
        aop);
  }

  // Thread pool used for worker threads.
  base::DelegateSimpleThreadPool thread_pool("full-update-generator",
                                             max_threads);
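  // The pool runs at most |max_threads| workers concurrently; JoinAll() below
  // returns only after every queued ChunkProcessor has finished.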
  thread_pool.Start();
  for (ChunkProcessor& processor : chunk_processors)
    thread_pool.AddWork(&processor);
  thread_pool.JoinAll();

  // All the operations must have a type set at this point. Otherwise, a
  // ChunkProcessor failed to complete.
  for (const AnnotatedOperation& aop : *aops) {
    if (!aop.op.has_type())
      return false;
  }
  return true;
}

}  // namespace chromeos_update_engine