1 // Copyright (C) 2018 The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //      http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 #include <android-base/strings.h>
16 #include <gflags/gflags.h>
17 
18 #include <fcntl.h>
19 #include <linux/fs.h>
20 #include <sys/ioctl.h>
21 #include <sys/stat.h>
22 #include <sys/syscall.h>
23 #include <sys/types.h>
24 #include <unistd.h>
25 
26 #include <chrono>
27 #include <iostream>
28 #include <memory>
29 #include <string_view>
30 
31 #include <android-base/file.h>
32 #include <android-base/properties.h>
33 #include <android-base/unique_fd.h>
34 #include <fs_mgr/file_wait.h>
35 #include <gtest/gtest.h>
36 #include <libdm/dm.h>
37 #include <libdm/loop_control.h>
38 #include <libsnapshot/cow_writer.h>
39 #include <snapuserd/dm_user_block_server.h>
40 #include <storage_literals/storage_literals.h>
41 #include "handler_manager.h"
42 #include "merge_worker.h"
43 #include "read_worker.h"
44 #include "snapuserd_core.h"
45 #include "testing/dm_user_harness.h"
46 #include "testing/host_harness.h"
47 #include "testing/temp_device.h"
48 #include "utility.h"
49 
50 namespace android {
51 namespace snapshot {
52 
53 using namespace android::storage_literals;
54 using android::base::unique_fd;
55 using LoopDevice = android::dm::LoopDevice;
56 using namespace std::chrono_literals;
57 using namespace android::dm;
58 using namespace std;
59 using testing::AssertionFailure;
60 using testing::AssertionResult;
61 using testing::AssertionSuccess;
62 using ::testing::TestWithParam;
63 
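// Parameters each test variant is instantiated with: whether io_uring and
// O_DIRECT are exercised, plus the compression algorithm, compression factor
// (block size) and thread count used when writing v3 COWs, and the
// cow_op_merge_size value passed through to the snapshot handler.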
64 struct TestParam {
65     bool io_uring;
66     bool o_direct;
67     std::string compression;
68     int block_size;
69     int num_threads;
70     uint32_t cow_op_merge_size;
71 };
72 
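// Base fixture shared by all suites: owns the backing (base) device filled
// with random data, the temporary COW file, and the expected-data buffer
// (orig_buffer_) used for validation.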
73 class SnapuserdTestBase : public ::testing::TestWithParam<TestParam> {
74   protected:
75     virtual void SetUp() override;
76     void TearDown() override;
77     void CreateBaseDevice();
78     void CreateCowDevice();
79     void SetDeviceControlName();
80     std::unique_ptr<ICowWriter> CreateCowDeviceInternal();
81     std::unique_ptr<ICowWriter> CreateV3Cow();
82 
83     unique_fd GetCowFd() { return unique_fd{dup(cow_system_->fd)}; }
84 
85     bool ShouldSkipSetUp();
86 
87     std::unique_ptr<ITestHarness> harness_;
88     size_t size_ = 10_MiB;
89     int total_base_size_ = 0;
90     std::string system_device_ctrl_name_;
91     std::string system_device_name_;
92 
93     unique_ptr<IBackingDevice> base_dev_;
94     unique_fd base_fd_;
95 
96     std::unique_ptr<TemporaryFile> cow_system_;
97 
98     std::unique_ptr<uint8_t[]> orig_buffer_;
99 };
100 
101 void SnapuserdTestBase::SetUp() {
102     if (ShouldSkipSetUp()) {
103         GTEST_SKIP() << "snapuserd not supported on this device";
104     }
105 
106 #if __ANDROID__
107     harness_ = std::make_unique<DmUserTestHarness>();
108 #else
109     harness_ = std::make_unique<HostTestHarness>();
110 #endif
111 }
112 
113 bool SnapuserdTestBase::ShouldSkipSetUp() {
114 #ifdef __ANDROID__
115     if (!android::snapshot::CanUseUserspaceSnapshots() ||
116         android::snapshot::IsVendorFromAndroid12()) {
117         return true;
118     }
119 #endif
120     return false;
121 }
122 
123 void SnapuserdTestBase::TearDown() {
124     cow_system_ = nullptr;
125 }
126 
127 void SnapuserdTestBase::CreateBaseDevice() {
128     total_base_size_ = (size_ * 5);
129 
130     base_dev_ = harness_->CreateBackingDevice(total_base_size_);
131     ASSERT_NE(base_dev_, nullptr);
132 
133     base_fd_.reset(open(base_dev_->GetPath().c_str(), O_RDWR | O_CLOEXEC));
134     ASSERT_GE(base_fd_, 0);
135 
136     unique_fd rnd_fd(open("/dev/random", O_RDONLY));
137     ASSERT_GE(rnd_fd, 0);
138 
139     std::unique_ptr<uint8_t[]> random_buffer = std::make_unique<uint8_t[]>(1_MiB);
140 
141     for (size_t j = 0; j < ((total_base_size_) / 1_MiB); j++) {
142         ASSERT_EQ(ReadFullyAtOffset(rnd_fd, (char*)random_buffer.get(), 1_MiB, 0), true);
143         ASSERT_EQ(android::base::WriteFully(base_fd_, random_buffer.get(), 1_MiB), true);
144     }
145 
146     ASSERT_EQ(lseek(base_fd_, 0, SEEK_SET), 0);
147 }
148 
149 std::unique_ptr<ICowWriter> SnapuserdTestBase::CreateCowDeviceInternal() {
150     std::string path = android::base::GetExecutableDirectory();
151     cow_system_ = std::make_unique<TemporaryFile>(path);
152 
153     CowOptions options;
154     options.compression = "gz";
155 
156     return CreateCowWriter(2, options, GetCowFd());
157 }
158 
159 std::unique_ptr<ICowWriter> SnapuserdTestBase::CreateV3Cow() {
160     const TestParam params = GetParam();
161 
162     CowOptions options;
163     options.op_count_max = 100000;
164     options.compression = params.compression;
165     options.num_compress_threads = params.num_threads;
166     options.batch_write = true;
167     options.compression_factor = params.block_size;
168 
169     std::string path = android::base::GetExecutableDirectory();
170     cow_system_ = std::make_unique<TemporaryFile>(path);
171 
172     return CreateCowWriter(3, options, GetCowFd());
173 }
174 
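// Build a v2 COW with five regions of num_blocks each (COPY, REPLACE, ZERO,
// REPLACE, XOR) and fill orig_buffer_ with the data the snapshot device is
// expected to return for each region.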
175 void SnapuserdTestBase::CreateCowDevice() {
176     unique_fd rnd_fd;
177     loff_t offset = 0;
178 
179     auto writer = CreateCowDeviceInternal();
180     ASSERT_NE(writer, nullptr);
181 
182     rnd_fd.reset(open("/dev/random", O_RDONLY));
183     ASSERT_TRUE(rnd_fd > 0);
184 
185     std::unique_ptr<uint8_t[]> random_buffer_1_ = std::make_unique<uint8_t[]>(size_);
186 
187     // Fill random data
188     for (size_t j = 0; j < (size_ / 1_MiB); j++) {
189         ASSERT_EQ(ReadFullyAtOffset(rnd_fd, (char*)random_buffer_1_.get() + offset, 1_MiB, 0),
190                   true);
191 
192         offset += 1_MiB;
193     }
194 
195     size_t num_blocks = size_ / writer->GetBlockSize();
196     size_t blk_end_copy = num_blocks * 2;
197     size_t source_blk = num_blocks - 1;
198     size_t blk_src_copy = blk_end_copy - 1;
199 
200     uint32_t sequence[num_blocks * 2];
201     // Sequence for Copy ops
202     for (int i = 0; i < num_blocks; i++) {
203         sequence[i] = num_blocks - 1 - i;
204     }
205     // Sequence for Xor ops
206     for (int i = 0; i < num_blocks; i++) {
207         sequence[num_blocks + i] = 5 * num_blocks - 1 - i;
208     }
209     ASSERT_TRUE(writer->AddSequenceData(2 * num_blocks, sequence));
210 
211     size_t x = num_blocks;
212     while (1) {
213         ASSERT_TRUE(writer->AddCopy(source_blk, blk_src_copy));
214         x -= 1;
215         if (x == 0) {
216             break;
217         }
218         source_blk -= 1;
219         blk_src_copy -= 1;
220     }
221 
222     source_blk = num_blocks;
223     blk_src_copy = blk_end_copy;
224 
225     ASSERT_TRUE(writer->AddRawBlocks(source_blk, random_buffer_1_.get(), size_));
226 
227     size_t blk_zero_copy_start = source_blk + num_blocks;
228     size_t blk_zero_copy_end = blk_zero_copy_start + num_blocks;
229 
230     ASSERT_TRUE(writer->AddZeroBlocks(blk_zero_copy_start, num_blocks));
231 
232     size_t blk_random2_replace_start = blk_zero_copy_end;
233 
234     ASSERT_TRUE(writer->AddRawBlocks(blk_random2_replace_start, random_buffer_1_.get(), size_));
235 
236     size_t blk_xor_start = blk_random2_replace_start + num_blocks;
237     size_t xor_offset = BLOCK_SZ / 2;
238     ASSERT_TRUE(writer->AddXorBlocks(blk_xor_start, random_buffer_1_.get(), size_, num_blocks,
239                                      xor_offset));
240 
241     // Flush operations
242     ASSERT_TRUE(writer->Finalize());
243     // Construct the buffer required for validation
244     orig_buffer_ = std::make_unique<uint8_t[]>(total_base_size_);
245     std::string zero_buffer(size_, 0);
246     ASSERT_EQ(android::base::ReadFullyAtOffset(base_fd_, orig_buffer_.get(), size_, size_), true);
247     memcpy((char*)orig_buffer_.get() + size_, random_buffer_1_.get(), size_);
248     memcpy((char*)orig_buffer_.get() + (size_ * 2), (void*)zero_buffer.c_str(), size_);
249     memcpy((char*)orig_buffer_.get() + (size_ * 3), random_buffer_1_.get(), size_);
250     ASSERT_EQ(android::base::ReadFullyAtOffset(base_fd_, &orig_buffer_.get()[size_ * 4], size_,
251                                                size_ + xor_offset),
252               true);
253     for (int i = 0; i < size_; i++) {
254         orig_buffer_.get()[(size_ * 4) + i] =
255                 (uint8_t)(orig_buffer_.get()[(size_ * 4) + i] ^ random_buffer_1_.get()[i]);
256     }
257 }
258 
259 void SnapuserdTestBase::SetDeviceControlName() {
260     system_device_name_.clear();
261     system_device_ctrl_name_.clear();
262 
263     std::string str(cow_system_->path);
264     std::size_t found = str.find_last_of("/\\");
265     ASSERT_NE(found, std::string::npos);
266     system_device_name_ = str.substr(found + 1);
267 
268     system_device_ctrl_name_ = system_device_name_ + "-ctrl";
269 }
270 
271 class SnapuserdTest : public SnapuserdTestBase {
272   public:
273     void SetupDefault();
274     void SetupOrderedOps();
275     void SetupOrderedOpsInverted();
276     void SetupCopyOverlap_1();
277     void SetupCopyOverlap_2();
278     void SetupDeviceForPassthrough();
279     bool Merge();
280     void ValidateMerge();
281     void ReadSnapshotDeviceAndValidate();
282     void ReadSnapshotAndValidateOverlappingBlocks();
283     void Shutdown();
284     void MergeInterrupt();
285     void MergeInterruptFixed(int duration);
286     void MergeInterruptAndValidate(int duration);
287     void MergeInterruptRandomly(int max_duration);
288     bool StartMerge();
289     void CheckMergeCompletion();
290 
291     static const uint64_t kSectorSize = 512;
292 
293   protected:
294     void SetUp() override;
295     void TearDown() override;
296 
297     void SetupImpl();
298 
299     void SimulateDaemonRestart();
300 
301     void CreateCowDeviceWithNoBlockChanges();
302     void ValidateDeviceWithNoBlockChanges();
303 
304     void CreateCowDeviceOrderedOps();
305     void CreateCowDeviceOrderedOpsInverted();
306     void CreateCowDeviceWithCopyOverlap_1();
307     void CreateCowDeviceWithCopyOverlap_2();
308     void SetupDaemon();
309     void InitCowDevice();
310     void InitDaemon();
311     void CreateUserDevice();
312 
313     unique_ptr<IUserDevice> dmuser_dev_;
314 
315     std::unique_ptr<uint8_t[]> merged_buffer_;
316     std::unique_ptr<SnapshotHandlerManager> handlers_;
317     int cow_num_sectors_;
318 };
319 
320 void SnapuserdTest::SetUp() {
321     if (ShouldSkipSetUp()) {
322         GTEST_SKIP() << "snapuserd not supported on this device";
323     }
324     ASSERT_NO_FATAL_FAILURE(SnapuserdTestBase::SetUp());
325     handlers_ = std::make_unique<SnapshotHandlerManager>();
326 }
327 
328 void SnapuserdTest::TearDown() {
329     SnapuserdTestBase::TearDown();
330     Shutdown();
331 }
332 
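// Destroy the dm-user device (if any), delete the snapshot handler and wait
// for its control device to disappear, then recreate the handler manager so a
// subsequent setup starts from scratch.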
333 void SnapuserdTest::Shutdown() {
334     if (!handlers_) {
335         return;
336     }
337     if (dmuser_dev_) {
338         ASSERT_TRUE(dmuser_dev_->Destroy());
339     }
340 
341     auto misc_device = "/dev/dm-user/" + system_device_ctrl_name_;
342     ASSERT_TRUE(handlers_->DeleteHandler(system_device_ctrl_name_));
343     ASSERT_TRUE(android::fs_mgr::WaitForFileDeleted(misc_device, 10s));
344     handlers_->TerminateMergeThreads();
345     handlers_->JoinAllThreads();
346     handlers_ = std::make_unique<SnapshotHandlerManager>();
347 }
348 
349 void SnapuserdTest::SetupDefault() {
350     ASSERT_NO_FATAL_FAILURE(SetupImpl());
351 }
352 
353 void SnapuserdTest::SetupOrderedOps() {
354     ASSERT_NO_FATAL_FAILURE(CreateBaseDevice());
355     ASSERT_NO_FATAL_FAILURE(CreateCowDeviceOrderedOps());
356     ASSERT_NO_FATAL_FAILURE(SetupDaemon());
357 }
358 
359 void SnapuserdTest::SetupDeviceForPassthrough() {
360     ASSERT_NO_FATAL_FAILURE(CreateBaseDevice());
361     ASSERT_NO_FATAL_FAILURE(CreateCowDeviceWithNoBlockChanges());
362     ASSERT_NO_FATAL_FAILURE(SetupDaemon());
363 }
364 
365 void SnapuserdTest::SetupOrderedOpsInverted() {
366     ASSERT_NO_FATAL_FAILURE(CreateBaseDevice());
367     ASSERT_NO_FATAL_FAILURE(CreateCowDeviceOrderedOpsInverted());
368     ASSERT_NO_FATAL_FAILURE(SetupDaemon());
369 }
370 
371 void SnapuserdTest::SetupCopyOverlap_1() {
372     ASSERT_NO_FATAL_FAILURE(CreateBaseDevice());
373     ASSERT_NO_FATAL_FAILURE(CreateCowDeviceWithCopyOverlap_1());
374     ASSERT_NO_FATAL_FAILURE(SetupDaemon());
375 }
376 
377 void SnapuserdTest::SetupCopyOverlap_2() {
378     ASSERT_NO_FATAL_FAILURE(CreateBaseDevice());
379     ASSERT_NO_FATAL_FAILURE(CreateCowDeviceWithCopyOverlap_2());
380     ASSERT_NO_FATAL_FAILURE(SetupDaemon());
381 }
382 
383 void SnapuserdTest::SetupDaemon() {
384     SetDeviceControlName();
385 
386     ASSERT_NO_FATAL_FAILURE(CreateUserDevice());
387     ASSERT_NO_FATAL_FAILURE(InitCowDevice());
388     ASSERT_NO_FATAL_FAILURE(InitDaemon());
389 }
390 
391 void SnapuserdTest::ReadSnapshotDeviceAndValidate() {
392     unique_fd fd(open(dmuser_dev_->GetPath().c_str(), O_RDONLY));
393     ASSERT_GE(fd, 0);
394     std::unique_ptr<uint8_t[]> snapuserd_buffer = std::make_unique<uint8_t[]>(size_);
395 
396     // COPY
397     loff_t offset = 0;
398     ASSERT_EQ(ReadFullyAtOffset(fd, snapuserd_buffer.get(), size_, offset), true);
399     ASSERT_EQ(memcmp(snapuserd_buffer.get(), orig_buffer_.get(), size_), 0);
400 
401     // REPLACE
402     offset += size_;
403     ASSERT_EQ(ReadFullyAtOffset(fd, snapuserd_buffer.get(), size_, offset), true);
404     ASSERT_EQ(memcmp(snapuserd_buffer.get(), (char*)orig_buffer_.get() + size_, size_), 0);
405 
406     // ZERO
407     offset += size_;
408     ASSERT_EQ(ReadFullyAtOffset(fd, snapuserd_buffer.get(), size_, offset), true);
409     ASSERT_EQ(memcmp(snapuserd_buffer.get(), (char*)orig_buffer_.get() + (size_ * 2), size_), 0);
410 
411     // REPLACE
412     offset += size_;
413     ASSERT_EQ(ReadFullyAtOffset(fd, snapuserd_buffer.get(), size_, offset), true);
414     ASSERT_EQ(memcmp(snapuserd_buffer.get(), (char*)orig_buffer_.get() + (size_ * 3), size_), 0);
415 
416     // XOR
417     offset += size_;
418     ASSERT_EQ(ReadFullyAtOffset(fd, snapuserd_buffer.get(), size_, offset), true);
419     ASSERT_EQ(memcmp(snapuserd_buffer.get(), (char*)orig_buffer_.get() + (size_ * 4), size_), 0);
420 }
421 
422 void SnapuserdTest::ReadSnapshotAndValidateOverlappingBlocks() {
423     // Open COW device
424     unique_fd fd(open(cow_system_->path, O_RDONLY));
425     ASSERT_GE(fd, 0);
426 
427     CowReader reader;
428     ASSERT_TRUE(reader.Parse(fd));
429 
430     const auto& header = reader.GetHeader();
431     size_t total_mapped_addr_length = header.prefix.header_size + BUFFER_REGION_DEFAULT_SIZE;
432 
433     ASSERT_GE(header.prefix.major_version, 2);
434 
435     void* mapped_addr = mmap(NULL, total_mapped_addr_length, PROT_READ, MAP_SHARED, fd.get(), 0);
436     ASSERT_NE(mapped_addr, MAP_FAILED);
437 
438     bool populate_data_from_scratch = false;
439     struct BufferState* ra_state =
440             reinterpret_cast<struct BufferState*>((char*)mapped_addr + header.prefix.header_size);
441     if (ra_state->read_ahead_state == kCowReadAheadDone) {
442         populate_data_from_scratch = true;
443     }
444 
445     size_t num_merge_ops = header.num_merge_ops;
446     // Some of the merge operations have already completed.
447     // To test the merge-resume path, forcefully corrupt the data of the base
448     // device for the offsets where the merge is still pending.
449     if (num_merge_ops && populate_data_from_scratch) {
450         std::string corrupt_buffer(4096, 0);
451         // Corrupt two blocks from the point where the merge has to be resumed by
452         // writing zeroes.
453         //
454         // Now, since this is a merge-resume path, the "correct" data should be
455         // in the scratch space of the COW device. When there is an I/O request
456         // from the snapshot device, the data has to be retrieved from the
457         // scratch space. If not and I/O is routed to the base device, we
458         // may end up with corruption.
459         off_t corrupt_offset = (num_merge_ops + 2) * 4096;
460 
461         if (corrupt_offset < size_) {
462             ASSERT_EQ(android::base::WriteFullyAtOffset(base_fd_, (void*)corrupt_buffer.c_str(),
463                                                         4096, corrupt_offset),
464                       true);
465             corrupt_offset -= 4096;
466             ASSERT_EQ(android::base::WriteFullyAtOffset(base_fd_, (void*)corrupt_buffer.c_str(),
467                                                         4096, corrupt_offset),
468                       true);
469             fsync(base_fd_.get());
470         }
471     }
472 
473     // Time to read the snapshot device.
474     unique_fd snapshot_fd(open(dmuser_dev_->GetPath().c_str(), O_RDONLY | O_DIRECT | O_SYNC));
475     ASSERT_GE(snapshot_fd, 0);
476 
477     void* buff_addr;
478     ASSERT_EQ(posix_memalign(&buff_addr, 4096, size_), 0);
479 
480     std::unique_ptr<void, decltype(&::free)> snapshot_buffer(buff_addr, ::free);
481 
482     // Scan the entire snapshot device and read the data and verify data
483     // integrity. Since the base device was forcefully corrupted, the data from
484     // this scan should be retrieved from scratch space of the COW partition.
485     //
486     // Furthermore, after the merge is complete, base device data is again
487     // verified as the aforementioned corrupted blocks aren't persisted.
488     ASSERT_EQ(ReadFullyAtOffset(snapshot_fd, snapshot_buffer.get(), size_, 0), true);
489     ASSERT_EQ(memcmp(snapshot_buffer.get(), orig_buffer_.get(), size_), 0);
490 }
491 
492 void SnapuserdTest::CreateCowDeviceWithCopyOverlap_2() {
493     auto writer = CreateCowDeviceInternal();
494     ASSERT_NE(writer, nullptr);
495 
496     size_t num_blocks = size_ / writer->GetBlockSize();
497     size_t x = num_blocks;
498     size_t blk_src_copy = 0;
499 
500     // Create overlapping copy operations
501     while (1) {
502         ASSERT_TRUE(writer->AddCopy(blk_src_copy, blk_src_copy + 1));
503         x -= 1;
504         if (x == 1) {
505             break;
506         }
507         blk_src_copy += 1;
508     }
509 
510     // Flush operations
511     ASSERT_TRUE(writer->Finalize());
512 
513     // Construct the buffer required for validation
514     orig_buffer_ = std::make_unique<uint8_t[]>(total_base_size_);
515 
516     // Read the entire base device
517     ASSERT_EQ(android::base::ReadFullyAtOffset(base_fd_, orig_buffer_.get(), total_base_size_, 0),
518               true);
519 
520     // Merged operations required for validation
521     int block_size = 4096;
522     x = num_blocks;
523     loff_t src_offset = block_size;
524     loff_t dest_offset = 0;
525 
526     while (1) {
527         memmove((char*)orig_buffer_.get() + dest_offset, (char*)orig_buffer_.get() + src_offset,
528                 block_size);
529         x -= 1;
530         if (x == 1) {
531             break;
532         }
533         src_offset += block_size;
534         dest_offset += block_size;
535     }
536 }
537 
538 void SnapuserdTest::CreateCowDeviceWithNoBlockChanges() {
539     auto writer = CreateCowDeviceInternal();
540     ASSERT_NE(writer, nullptr);
541 
542     std::unique_ptr<uint8_t[]> buffer = std::make_unique<uint8_t[]>(BLOCK_SZ);
543     std::memset(buffer.get(), 'A', BLOCK_SZ);
544 
545     // This test focuses on changing only a couple of blocks, thereby validating
546     // the pass-through I/O path.
547 
548     // Replace the first block
549     ASSERT_TRUE(writer->AddRawBlocks(1, buffer.get(), BLOCK_SZ));
550 
551     // Zero out Block 3
552     ASSERT_TRUE(writer->AddZeroBlocks(3, 1));
553 
554     ASSERT_TRUE(writer->Finalize());
555     orig_buffer_ = std::make_unique<uint8_t[]>(total_base_size_);
556 
557     // Read the entire base device
558     ASSERT_EQ(android::base::ReadFullyAtOffset(base_fd_, orig_buffer_.get(), total_base_size_, 0),
559               true);
560 
561     off_t offset = BLOCK_SZ;
562     std::memcpy(orig_buffer_.get() + offset, buffer.get(), BLOCK_SZ);
563     offset = 3 * BLOCK_SZ;
564     std::memset(orig_buffer_.get() + offset, 0, BLOCK_SZ);
565 }
566 
567 void SnapuserdTest::ValidateDeviceWithNoBlockChanges() {
568     unique_fd fd(open(dmuser_dev_->GetPath().c_str(), O_RDONLY));
569     ASSERT_GE(fd, 0);
570     std::unique_ptr<uint8_t[]> snapshot_buffer = std::make_unique<uint8_t[]>(size_);
571     std::memset(snapshot_buffer.get(), 'B', size_);
572 
573     // All I/O requests should pass through to the base device except for
574     // Block 1 and Block 3.
575     ASSERT_EQ(ReadFullyAtOffset(fd, snapshot_buffer.get(), size_, 0), true);
576     ASSERT_EQ(memcmp(snapshot_buffer.get(), orig_buffer_.get(), size_), 0);
577 }
578 
579 void SnapuserdTest::CreateCowDeviceWithCopyOverlap_1() {
580     auto writer = CreateCowDeviceInternal();
581     ASSERT_NE(writer, nullptr);
582 
583     size_t num_blocks = size_ / writer->GetBlockSize();
584     size_t x = num_blocks;
585     size_t blk_src_copy = num_blocks - 1;
586 
587     // Create overlapping copy operations
588     while (1) {
589         ASSERT_TRUE(writer->AddCopy(blk_src_copy + 1, blk_src_copy));
590         x -= 1;
591         if (x == 0) {
592             ASSERT_EQ(blk_src_copy, 0);
593             break;
594         }
595         blk_src_copy -= 1;
596     }
597 
598     // Flush operations
599     ASSERT_TRUE(writer->Finalize());
600 
601     // Construct the buffer required for validation
602     orig_buffer_ = std::make_unique<uint8_t[]>(total_base_size_);
603 
604     // Read the entire base device
605     ASSERT_EQ(android::base::ReadFullyAtOffset(base_fd_, orig_buffer_.get(), total_base_size_, 0),
606               true);
607 
608     // Merged operations
609     ASSERT_EQ(android::base::ReadFullyAtOffset(base_fd_, orig_buffer_.get(), writer->GetBlockSize(),
610                                                0),
611               true);
612     ASSERT_EQ(android::base::ReadFullyAtOffset(
613                       base_fd_, (char*)orig_buffer_.get() + writer->GetBlockSize(), size_, 0),
614               true);
615 }
616 
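// Same as CreateCowDeviceOrderedOps, but the COPY and XOR ops are added in
// reverse block order; orig_buffer_ again holds the expected merged result.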
617 void SnapuserdTest::CreateCowDeviceOrderedOpsInverted() {
618     unique_fd rnd_fd;
619     loff_t offset = 0;
620 
621     auto writer = CreateCowDeviceInternal();
622     ASSERT_NE(writer, nullptr);
623 
624     rnd_fd.reset(open("/dev/random", O_RDONLY));
625     ASSERT_TRUE(rnd_fd > 0);
626 
627     std::unique_ptr<uint8_t[]> random_buffer_1_ = std::make_unique<uint8_t[]>(size_);
628 
629     // Fill random data
630     for (size_t j = 0; j < (size_ / 1_MiB); j++) {
631         ASSERT_EQ(ReadFullyAtOffset(rnd_fd, (char*)random_buffer_1_.get() + offset, 1_MiB, 0),
632                   true);
633 
634         offset += 1_MiB;
635     }
636 
637     size_t num_blocks = size_ / writer->GetBlockSize();
638     size_t blk_end_copy = num_blocks * 3;
639     size_t source_blk = num_blocks - 1;
640     size_t blk_src_copy = blk_end_copy - 1;
641     uint16_t xor_offset = 5;
642 
643     size_t x = num_blocks;
644     while (1) {
645         ASSERT_TRUE(writer->AddCopy(source_blk, blk_src_copy));
646         x -= 1;
647         if (x == 0) {
648             break;
649         }
650         source_blk -= 1;
651         blk_src_copy -= 1;
652     }
653 
654     for (size_t i = num_blocks; i > 0; i--) {
655         ASSERT_TRUE(writer->AddXorBlocks(
656                 num_blocks + i - 1, &random_buffer_1_.get()[writer->GetBlockSize() * (i - 1)],
657                 writer->GetBlockSize(), 2 * num_blocks + i - 1, xor_offset));
658     }
659     // Flush operations
660     ASSERT_TRUE(writer->Finalize());
661     // Construct the buffer required for validation
662     orig_buffer_ = std::make_unique<uint8_t[]>(total_base_size_);
663     // Read the entire base device
664     ASSERT_EQ(android::base::ReadFullyAtOffset(base_fd_, orig_buffer_.get(), total_base_size_, 0),
665               true);
666     // Merged Buffer
667     memmove(orig_buffer_.get(), (char*)orig_buffer_.get() + 2 * size_, size_);
668     memmove(orig_buffer_.get() + size_, (char*)orig_buffer_.get() + 2 * size_ + xor_offset, size_);
669     for (int i = 0; i < size_; i++) {
670         orig_buffer_.get()[size_ + i] ^= random_buffer_1_.get()[i];
671     }
672 }
673 
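// Build a COW whose ops must be merged in order: a run of COPY ops followed by
// an XOR region, with orig_buffer_ holding the expected post-merge contents of
// the base device.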
674 void SnapuserdTest::CreateCowDeviceOrderedOps() {
675     unique_fd rnd_fd;
676     loff_t offset = 0;
677 
678     auto writer = CreateCowDeviceInternal();
679     ASSERT_NE(writer, nullptr);
680 
681     rnd_fd.reset(open("/dev/random", O_RDONLY));
682     ASSERT_TRUE(rnd_fd > 0);
683 
684     std::unique_ptr<uint8_t[]> random_buffer_1_ = std::make_unique<uint8_t[]>(size_);
685 
686     // Fill random data
687     for (size_t j = 0; j < (size_ / 1_MiB); j++) {
688         ASSERT_EQ(ReadFullyAtOffset(rnd_fd, (char*)random_buffer_1_.get() + offset, 1_MiB, 0),
689                   true);
690 
691         offset += 1_MiB;
692     }
693     memset(random_buffer_1_.get(), 0, size_);
694 
695     size_t num_blocks = size_ / writer->GetBlockSize();
696     size_t x = num_blocks;
697     size_t source_blk = 0;
698     size_t blk_src_copy = 2 * num_blocks;
699     uint16_t xor_offset = 5;
700 
701     while (1) {
702         ASSERT_TRUE(writer->AddCopy(source_blk, blk_src_copy));
703 
704         x -= 1;
705         if (x == 0) {
706             break;
707         }
708         source_blk += 1;
709         blk_src_copy += 1;
710     }
711 
712     ASSERT_TRUE(writer->AddXorBlocks(num_blocks, random_buffer_1_.get(), size_, 2 * num_blocks,
713                                      xor_offset));
714     // Flush operations
715     ASSERT_TRUE(writer->Finalize());
716     // Construct the buffer required for validation
717     orig_buffer_ = std::make_unique<uint8_t[]>(total_base_size_);
718     // Read the entire base device
719     ASSERT_EQ(android::base::ReadFullyAtOffset(base_fd_, orig_buffer_.get(), total_base_size_, 0),
720               true);
721     // Merged Buffer
722     memmove(orig_buffer_.get(), (char*)orig_buffer_.get() + 2 * size_, size_);
723     memmove(orig_buffer_.get() + size_, (char*)orig_buffer_.get() + 2 * size_ + xor_offset, size_);
724     for (int i = 0; i < size_; i++) {
725         orig_buffer_.get()[size_ + i] ^= random_buffer_1_.get()[i];
726     }
727 }
728 
729 void SnapuserdTest::InitCowDevice() {
730     auto factory = harness_->GetBlockServerFactory();
731     auto opener = factory->CreateOpener(system_device_ctrl_name_);
732     handlers_->DisableVerification();
733     const TestParam params = GetParam();
734     auto handler = handlers_->AddHandler(
735             system_device_ctrl_name_, cow_system_->path, base_dev_->GetPath(), base_dev_->GetPath(),
736             opener, 1, params.io_uring, params.o_direct, params.cow_op_merge_size);
737     ASSERT_NE(handler, nullptr);
738     ASSERT_NE(handler->snapuserd(), nullptr);
739 #ifdef __ANDROID__
740     ASSERT_NE(handler->snapuserd()->GetNumSectors(), 0);
741 #endif
742 }
743 
744 void SnapuserdTest::CreateUserDevice() {
745     auto dev_sz = base_dev_->GetSize();
746     ASSERT_NE(dev_sz, 0);
747 
748     cow_num_sectors_ = dev_sz >> 9;
749 
750     dmuser_dev_ = harness_->CreateUserDevice(system_device_name_, system_device_ctrl_name_,
751                                              cow_num_sectors_);
752     ASSERT_NE(dmuser_dev_, nullptr);
753 }
754 
755 void SnapuserdTest::InitDaemon() {
756     ASSERT_TRUE(handlers_->StartHandler(system_device_ctrl_name_));
757 }
758 
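// Poll the handler manager until the reported merge percentage reaches 100.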
759 void SnapuserdTest::CheckMergeCompletion() {
760     while (true) {
761         double percentage = handlers_->GetMergePercentage();
762         if ((int)percentage == 100) {
763             break;
764         }
765 
766         std::this_thread::sleep_for(1s);
767     }
768 }
769 
770 void SnapuserdTest::SetupImpl() {
771     ASSERT_NO_FATAL_FAILURE(CreateBaseDevice());
772     ASSERT_NO_FATAL_FAILURE(CreateCowDevice());
773 
774     SetDeviceControlName();
775 
776     ASSERT_NO_FATAL_FAILURE(CreateUserDevice());
777     ASSERT_NO_FATAL_FAILURE(InitCowDevice());
778     ASSERT_NO_FATAL_FAILURE(InitDaemon());
779 }
780 
781 bool SnapuserdTest::Merge() {
782     if (!StartMerge()) {
783         return false;
784     }
785     CheckMergeCompletion();
786     return true;
787 }
788 
789 bool SnapuserdTest::StartMerge() {
790     return handlers_->InitiateMerge(system_device_ctrl_name_);
791 }
792 
793 void SnapuserdTest::ValidateMerge() {
794     merged_buffer_ = std::make_unique<uint8_t[]>(total_base_size_);
795     ASSERT_EQ(android::base::ReadFullyAtOffset(base_fd_, merged_buffer_.get(), total_base_size_, 0),
796               true);
797     ASSERT_EQ(memcmp(merged_buffer_.get(), orig_buffer_.get(), total_base_size_), 0);
798 }
799 
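// Simulate a snapuserd crash and restart: shut down the handler and dm-user
// device, then recreate them against the same COW so the merge can resume.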
800 void SnapuserdTest::SimulateDaemonRestart() {
801     ASSERT_NO_FATAL_FAILURE(Shutdown());
802     std::this_thread::sleep_for(500ms);
803     SetDeviceControlName();
804     ASSERT_NO_FATAL_FAILURE(CreateUserDevice());
805     ASSERT_NO_FATAL_FAILURE(InitCowDevice());
806     ASSERT_NO_FATAL_FAILURE(InitDaemon());
807 }
808 
809 void SnapuserdTest::MergeInterruptRandomly(int max_duration) {
810     std::srand(std::time(nullptr));
811     ASSERT_TRUE(StartMerge());
812 
813     for (int i = 0; i < 20; i++) {
814         int duration = std::rand() % max_duration;
815         std::this_thread::sleep_for(std::chrono::milliseconds(duration));
816         ASSERT_NO_FATAL_FAILURE(SimulateDaemonRestart());
817         ASSERT_TRUE(StartMerge());
818     }
819 
820     ASSERT_NO_FATAL_FAILURE(SimulateDaemonRestart());
821     ASSERT_TRUE(Merge());
822 }
823 
824 void SnapuserdTest::MergeInterruptFixed(int duration) {
825     ASSERT_TRUE(StartMerge());
826 
827     for (int i = 0; i < 25; i++) {
828         std::this_thread::sleep_for(std::chrono::milliseconds(duration));
829         ASSERT_NO_FATAL_FAILURE(SimulateDaemonRestart());
830         ASSERT_TRUE(StartMerge());
831     }
832 
833     ASSERT_NO_FATAL_FAILURE(SimulateDaemonRestart());
834     ASSERT_TRUE(Merge());
835 }
836 
837 void SnapuserdTest::MergeInterruptAndValidate(int duration) {
838     ASSERT_TRUE(StartMerge());
839 
840     for (int i = 0; i < 15; i++) {
841         std::this_thread::sleep_for(std::chrono::milliseconds(duration));
842         ASSERT_NO_FATAL_FAILURE(SimulateDaemonRestart());
843         ReadSnapshotAndValidateOverlappingBlocks();
844         ASSERT_TRUE(StartMerge());
845     }
846 
847     ASSERT_NO_FATAL_FAILURE(SimulateDaemonRestart());
848     ASSERT_TRUE(Merge());
849 }
850 
851 void SnapuserdTest::MergeInterrupt() {
852     // Interrupt merge at various intervals
853     ASSERT_TRUE(StartMerge());
854     std::this_thread::sleep_for(250ms);
855     ASSERT_NO_FATAL_FAILURE(SimulateDaemonRestart());
856 
857     ASSERT_TRUE(StartMerge());
858     std::this_thread::sleep_for(250ms);
859     ASSERT_NO_FATAL_FAILURE(SimulateDaemonRestart());
860 
861     ASSERT_TRUE(StartMerge());
862     std::this_thread::sleep_for(150ms);
863     ASSERT_NO_FATAL_FAILURE(SimulateDaemonRestart());
864 
865     ASSERT_TRUE(StartMerge());
866     std::this_thread::sleep_for(100ms);
867     ASSERT_NO_FATAL_FAILURE(SimulateDaemonRestart());
868 
869     ASSERT_TRUE(StartMerge());
870     std::this_thread::sleep_for(800ms);
871     ASSERT_NO_FATAL_FAILURE(SimulateDaemonRestart());
872 
873     ASSERT_TRUE(StartMerge());
874     std::this_thread::sleep_for(600ms);
875     ASSERT_NO_FATAL_FAILURE(SimulateDaemonRestart());
876 
877     ASSERT_TRUE(Merge());
878 }
879 
880 TEST_P(SnapuserdTest, Snapshot_Passthrough) {
881     if (!harness_->HasUserDevice()) {
882         GTEST_SKIP() << "Skipping snapshot read; not supported";
883     }
884     ASSERT_NO_FATAL_FAILURE(SetupDeviceForPassthrough());
885     // I/O before merge
886     ASSERT_NO_FATAL_FAILURE(ValidateDeviceWithNoBlockChanges());
887     ASSERT_TRUE(Merge());
888     ValidateMerge();
889     // I/O after merge - daemon should read directly
890     // from base device
891     ASSERT_NO_FATAL_FAILURE(ValidateDeviceWithNoBlockChanges());
892 }
893 
894 TEST_P(SnapuserdTest, Snapshot_IO_TEST) {
895     if (!harness_->HasUserDevice()) {
896         GTEST_SKIP() << "Skipping snapshot read; not supported";
897     }
898     ASSERT_NO_FATAL_FAILURE(SetupDefault());
899     // I/O before merge
900     ASSERT_NO_FATAL_FAILURE(ReadSnapshotDeviceAndValidate());
901     ASSERT_TRUE(Merge());
902     ValidateMerge();
903     // I/O after merge - daemon should read directly
904     // from base device
905     ASSERT_NO_FATAL_FAILURE(ReadSnapshotDeviceAndValidate());
906 }
907 
908 TEST_P(SnapuserdTest, Snapshot_MERGE_IO_TEST) {
909     if (!harness_->HasUserDevice()) {
910         GTEST_SKIP() << "Skipping snapshot read; not supported";
911     }
912     ASSERT_NO_FATAL_FAILURE(SetupDefault());
913     // Issue I/O before merge begins
914     auto read_future =
915             std::async(std::launch::async, &SnapuserdTest::ReadSnapshotDeviceAndValidate, this);
916     // Start the merge
917     ASSERT_TRUE(Merge());
918     ValidateMerge();
919     read_future.wait();
920 }
921 
922 TEST_P(SnapuserdTest, Snapshot_MERGE_IO_TEST_1) {
923     if (!harness_->HasUserDevice()) {
924         GTEST_SKIP() << "Skipping snapshot read; not supported";
925     }
926     ASSERT_NO_FATAL_FAILURE(SetupDefault());
927     // Start the merge
928     ASSERT_TRUE(StartMerge());
929     // Issue I/O in parallel when merge is in-progress
930     auto read_future =
931             std::async(std::launch::async, &SnapuserdTest::ReadSnapshotDeviceAndValidate, this);
932     CheckMergeCompletion();
933     ValidateMerge();
934     read_future.wait();
935 }
936 
937 TEST_P(SnapuserdTest, Snapshot_Merge_Resume) {
938     ASSERT_NO_FATAL_FAILURE(SetupDefault());
939     ASSERT_NO_FATAL_FAILURE(MergeInterrupt());
940     ValidateMerge();
941 }
942 
943 TEST_P(SnapuserdTest, Snapshot_COPY_Overlap_TEST_1) {
944     ASSERT_NO_FATAL_FAILURE(SetupCopyOverlap_1());
945     ASSERT_TRUE(Merge());
946     ValidateMerge();
947 }
948 
949 TEST_P(SnapuserdTest, Snapshot_COPY_Overlap_TEST_2) {
950     ASSERT_NO_FATAL_FAILURE(SetupCopyOverlap_2());
951     ASSERT_TRUE(Merge());
952     ValidateMerge();
953 }
954 
955 TEST_P(SnapuserdTest, Snapshot_COPY_Overlap_Merge_Resume_TEST) {
956     ASSERT_NO_FATAL_FAILURE(SetupCopyOverlap_1());
957     ASSERT_NO_FATAL_FAILURE(MergeInterrupt());
958     ValidateMerge();
959 }
960 
961 TEST_P(SnapuserdTest, Snapshot_COPY_Overlap_Merge_Resume_IO_Validate_TEST) {
962     if (!harness_->HasUserDevice()) {
963         GTEST_SKIP() << "Skipping snapshot read; not supported";
964     }
965     ASSERT_NO_FATAL_FAILURE(SetupCopyOverlap_2());
966     ASSERT_NO_FATAL_FAILURE(MergeInterruptFixed(300));
967     ValidateMerge();
968 }
969 
970 TEST_P(SnapuserdTest, Snapshot_Merge_Crash_Fixed_Ordered) {
971     ASSERT_NO_FATAL_FAILURE(SetupOrderedOps());
972     ASSERT_NO_FATAL_FAILURE(MergeInterruptFixed(300));
973     ValidateMerge();
974 }
975 
976 TEST_P(SnapuserdTest, Snapshot_Merge_Crash_Random_Ordered) {
977     ASSERT_NO_FATAL_FAILURE(SetupOrderedOps());
978     ASSERT_NO_FATAL_FAILURE(MergeInterruptRandomly(500));
979     ValidateMerge();
980 }
981 
982 TEST_P(SnapuserdTest, Snapshot_Merge_Crash_Fixed_Inverted) {
983     ASSERT_NO_FATAL_FAILURE(SetupOrderedOpsInverted());
984     ASSERT_NO_FATAL_FAILURE(MergeInterruptFixed(50));
985     ValidateMerge();
986 }
987 
988 TEST_P(SnapuserdTest, Snapshot_Merge_Crash_Random_Inverted) {
989     ASSERT_NO_FATAL_FAILURE(SetupOrderedOpsInverted());
990     ASSERT_NO_FATAL_FAILURE(MergeInterruptRandomly(50));
991     ValidateMerge();
992 }
993 
994 class SnapuserdVariableBlockSizeTest : public SnapuserdTest {
995   public:
996     void SetupCowV3ForVariableBlockSize();
997     void ReadSnapshotWithVariableBlockSize();
998 
999   protected:
1000     void SetUp() override;
1001     void TearDown() override;
1002 
1003     void CreateV3CowDeviceForVariableBlockSize();
1004 };
1005 
1006 void SnapuserdVariableBlockSizeTest::SetupCowV3ForVariableBlockSize() {
1007     ASSERT_NO_FATAL_FAILURE(CreateBaseDevice());
1008     ASSERT_NO_FATAL_FAILURE(CreateV3CowDeviceForVariableBlockSize());
1009     ASSERT_NO_FATAL_FAILURE(SetupDaemon());
1010 }
1011 
1012 void SnapuserdVariableBlockSizeTest::CreateV3CowDeviceForVariableBlockSize() {
1013     auto writer = CreateV3Cow();
1014 
1015     ASSERT_NE(writer, nullptr);
1016     size_t total_data_to_write = size_;
1017 
1018     size_t total_blocks_to_write = total_data_to_write / BLOCK_SZ;
1019     size_t num_blocks_per_op = total_blocks_to_write / 4;
1020     size_t source_block = 0;
1021 
1022     size_t seq_len = num_blocks_per_op;
1023     uint32_t sequence[seq_len];
1024     size_t xor_block_start = seq_len * 3;
1025     for (size_t i = 0; i < seq_len; i++) {
1026         sequence[i] = xor_block_start + i;
1027     }
1028     ASSERT_TRUE(writer->AddSequenceData(seq_len, sequence));
1029 
1030     size_t total_replace_blocks = num_blocks_per_op;
1031     // Write some data which can be compressed
1032     std::string data;
1033     data.resize(total_replace_blocks * BLOCK_SZ, '\0');
1034     for (size_t i = 0; i < data.size(); i++) {
1035         data[i] = static_cast<char>('A' + i / BLOCK_SZ);
1036     }
1037     // REPLACE ops
1038     ASSERT_TRUE(writer->AddRawBlocks(source_block, data.data(), data.size()));
1039 
1040     total_blocks_to_write -= total_replace_blocks;
1041     source_block = source_block + total_replace_blocks;
1042 
1043     // ZERO ops
1044     size_t total_zero_blocks = total_blocks_to_write / 3;
1045     ASSERT_TRUE(writer->AddZeroBlocks(source_block, total_zero_blocks));
1046 
1047     total_blocks_to_write -= total_zero_blocks;
1048     source_block = source_block + total_zero_blocks;
1049 
1050     // Generate some random data wherein a few blocks cannot be compressed.
1051     // This is to test the I/O path for those blocks which aren't compressed.
1052     size_t total_random_data_blocks = total_blocks_to_write / 2;
1053     unique_fd rnd_fd(open("/dev/random", O_RDONLY));
1054 
1055     ASSERT_GE(rnd_fd, 0);
1056     std::string random_buffer;
1057     random_buffer.resize(total_random_data_blocks * BLOCK_SZ, '\0');
1058     ASSERT_EQ(
1059             android::base::ReadFullyAtOffset(rnd_fd, random_buffer.data(), random_buffer.size(), 0),
1060             true);
1061     // REPLACE ops
1062     ASSERT_TRUE(writer->AddRawBlocks(source_block, random_buffer.data(), random_buffer.size()));
1063 
1064     total_blocks_to_write -= total_random_data_blocks;
1065     source_block = source_block + total_random_data_blocks;
1066 
1067     // XOR ops will always be 4k blocks
1068     std::string xor_buffer;
1069     xor_buffer.resize(total_blocks_to_write * BLOCK_SZ, '\0');
1070     for (size_t i = 0; i < xor_buffer.size(); i++) {
1071         xor_buffer[i] = static_cast<char>('C' + i / BLOCK_SZ);
1072     }
1073     size_t xor_offset = 21;
1074     std::string source_buffer;
1075     source_buffer.resize(total_blocks_to_write * BLOCK_SZ, '\0');
1076     ASSERT_EQ(android::base::ReadFullyAtOffset(base_fd_, source_buffer.data(), source_buffer.size(),
1077                                                size_ + xor_offset),
1078               true);
1079     for (size_t i = 0; i < xor_buffer.size(); i++) {
1080         xor_buffer[i] ^= source_buffer[i];
1081     }
1082 
1083     ASSERT_EQ(xor_block_start, source_block);
1084 
1085     ASSERT_TRUE(writer->AddXorBlocks(source_block, xor_buffer.data(), xor_buffer.size(),
1086                                      (size_ / BLOCK_SZ), xor_offset));
1087     // Flush operations
1088     ASSERT_TRUE(writer->Finalize());
1089 
1090     // Construct the buffer required for validation
1091     orig_buffer_ = std::make_unique<uint8_t[]>(total_base_size_);
1092 
1093     // Read the entire base device
1094     ASSERT_EQ(android::base::ReadFullyAtOffset(base_fd_, orig_buffer_.get(), total_base_size_, 0),
1095               true);
1096 
1097     // REPLACE ops which are compressed
1098     std::memcpy(orig_buffer_.get(), data.data(), data.size());
1099     size_t offset = data.size();
1100 
1101     // ZERO ops
1102     std::string zero_buffer(total_zero_blocks * BLOCK_SZ, 0);
1103     std::memcpy((char*)orig_buffer_.get() + offset, (void*)zero_buffer.c_str(), zero_buffer.size());
1104     offset += zero_buffer.size();
1105 
1106     // REPLACE ops - Random buffers which aren't compressed
1107     std::memcpy((char*)orig_buffer_.get() + offset, random_buffer.c_str(), random_buffer.size());
1108     offset += random_buffer.size();
1109 
1110     // XOR Ops which default to 4k block size compression irrespective of
1111     // compression factor
1112     ASSERT_EQ(android::base::ReadFullyAtOffset(base_fd_, (char*)orig_buffer_.get() + offset,
1113                                                xor_buffer.size(), size_ + xor_offset),
1114               true);
1115     for (size_t i = 0; i < xor_buffer.size(); i++) {
1116         orig_buffer_.get()[offset + i] = (uint8_t)(orig_buffer_.get()[offset + i] ^ xor_buffer[i]);
1117     }
1118 }
1119 
1120 void SnapuserdVariableBlockSizeTest::ReadSnapshotWithVariableBlockSize() {
1121     unique_fd fd(open(dmuser_dev_->GetPath().c_str(), O_RDONLY | O_DIRECT));
1122     ASSERT_GE(fd, 0);
1123 
1124     void* addr;
1125     ssize_t page_size = getpagesize();
1126     ASSERT_EQ(posix_memalign(&addr, page_size, size_), 0);
1127     std::unique_ptr<void, decltype(&::free)> snapshot_buffer(addr, ::free);
1128 
1129     const TestParam params = GetParam();
1130 
1131     // Issue I/O request with various block sizes
1132     size_t num_blocks = size_ / params.block_size;
1133     off_t offset = 0;
1134     for (size_t i = 0; i < num_blocks; i++) {
1135         ASSERT_EQ(ReadFullyAtOffset(fd, (char*)snapshot_buffer.get() + offset, params.block_size,
1136                                     offset),
1137                   true);
1138         offset += params.block_size;
1139     }
1140     // Validate buffer
1141     ASSERT_EQ(memcmp(snapshot_buffer.get(), orig_buffer_.get(), size_), 0);
1142 
1143     // Reset the buffer
1144     std::memset(snapshot_buffer.get(), 0, size_);
1145 
1146     // Read one full chunk in a single shot and re-validate.
1147     ASSERT_EQ(ReadFullyAtOffset(fd, snapshot_buffer.get(), size_, 0), true);
1148     ASSERT_EQ(memcmp(snapshot_buffer.get(), orig_buffer_.get(), size_), 0);
1149 
1150     // Reset the buffer
1151     std::memset(snapshot_buffer.get(), 0, size_);
1152 
1153     // Buffered I/O test
1154     fd.reset(open(dmuser_dev_->GetPath().c_str(), O_RDONLY));
1155     ASSERT_GE(fd, 0);
1156 
1157     // Try not to cache
1158     posix_fadvise(fd.get(), 0, size_, POSIX_FADV_DONTNEED);
1159 
1160     size_t num_blocks_per_op = (size_ / BLOCK_SZ) / 4;
1161     offset = num_blocks_per_op * BLOCK_SZ;
1162     size_t read_size = 1019;  // bytes
1163     offset -= 111;
1164 
1165     // Issue an unaligned read which crosses the boundary between a REPLACE block and a ZERO
1166     // block.
1167     ASSERT_EQ(ReadFullyAtOffset(fd, snapshot_buffer.get(), read_size, offset), true);
1168 
1169     // Validate the data
1170     ASSERT_EQ(std::memcmp(snapshot_buffer.get(), (char*)orig_buffer_.get() + offset, read_size), 0);
1171 
1172     offset = (num_blocks_per_op * 3) * BLOCK_SZ;
1173     offset -= (BLOCK_SZ - 119);
1174     read_size = 8111;
1175 
1176     // Issue an unaligned read which crosses the boundary between a REPLACE block of random
1177     // un-compressed data and a XOR block
1178     ASSERT_EQ(ReadFullyAtOffset(fd, snapshot_buffer.get(), read_size, offset), true);
1179 
1180     // Validate the data
1181     ASSERT_EQ(std::memcmp(snapshot_buffer.get(), (char*)orig_buffer_.get() + offset, read_size), 0);
1182 
1183     // Reset the buffer
1184     std::memset(snapshot_buffer.get(), 0, size_);
1185 
1186     // Read just one byte at an odd offset which is a REPLACE op
1187     offset = 19;
1188     read_size = 1;
1189     ASSERT_EQ(ReadFullyAtOffset(fd, snapshot_buffer.get(), read_size, offset), true);
1190     // Validate the data
1191     ASSERT_EQ(std::memcmp(snapshot_buffer.get(), (char*)orig_buffer_.get() + offset, read_size), 0);
1192 
1193     // Reset the buffer
1194     std::memset(snapshot_buffer.get(), 0, size_);
1195 
1196     // Read a block which has no mapping to a COW operation. This read should be
1197     // a pass-through to the underlying base device.
1198     offset = size_ + 9342;
1199     read_size = 30;
1200     ASSERT_EQ(ReadFullyAtOffset(fd, snapshot_buffer.get(), read_size, offset), true);
1201     // Validate the data
1202     ASSERT_EQ(std::memcmp(snapshot_buffer.get(), (char*)orig_buffer_.get() + offset, read_size), 0);
1203 }
1204 
1205 void SnapuserdVariableBlockSizeTest::SetUp() {
1206     if (ShouldSkipSetUp()) {
1207         GTEST_SKIP() << "snapuserd not supported on this device";
1208     }
1209     ASSERT_NO_FATAL_FAILURE(SnapuserdTest::SetUp());
1210 }
1211 
1212 void SnapuserdVariableBlockSizeTest::TearDown() {
1213     SnapuserdTest::TearDown();
1214 }
1215 
1216 TEST_P(SnapuserdVariableBlockSizeTest, Snapshot_Test_Variable_Block_Size) {
1217     if (!harness_->HasUserDevice()) {
1218         GTEST_SKIP() << "Skipping snapshot read; not supported";
1219     }
1220     ASSERT_NO_FATAL_FAILURE(SetupCowV3ForVariableBlockSize());
1221     ASSERT_NO_FATAL_FAILURE(ReadSnapshotWithVariableBlockSize());
1222     ASSERT_TRUE(StartMerge());
1223     CheckMergeCompletion();
1224     ValidateMerge();
1225     ASSERT_NO_FATAL_FAILURE(ReadSnapshotWithVariableBlockSize());
1226 }
1227 
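// Tests that drive SnapshotHandler and ReadWorker directly through a test
// block server, without creating a dm-user device for the snapshot.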
1228 class HandlerTest : public SnapuserdTestBase {
1229   protected:
1230     void SetUp() override;
1231     void TearDown() override;
1232 
1233     void SetUpV2Cow();
1234     void InitializeDevice();
1235     AssertionResult ReadSectors(sector_t sector, uint64_t size, void* buffer);
1236 
1237     TestBlockServerFactory factory_;
1238     std::shared_ptr<TestBlockServerOpener> opener_;
1239     std::shared_ptr<SnapshotHandler> handler_;
1240     std::unique_ptr<ReadWorker> read_worker_;
1241     TestBlockServer* block_server_;
1242     std::future<bool> handler_thread_;
1243 };
1244 
1245 void HandlerTest::SetUpV2Cow() {
1246     ASSERT_NO_FATAL_FAILURE(CreateCowDevice());
1247 }
1248 
1249 void HandlerTest::InitializeDevice() {
1250     ASSERT_NO_FATAL_FAILURE(SetDeviceControlName());
1251 
1252     opener_ = factory_.CreateTestOpener(system_device_ctrl_name_);
1253     ASSERT_NE(opener_, nullptr);
1254 
1255     const TestParam params = GetParam();
1256     handler_ = std::make_shared<SnapshotHandler>(
1257             system_device_ctrl_name_, cow_system_->path, base_dev_->GetPath(), base_dev_->GetPath(),
1258             opener_, 1, false, false, params.o_direct, params.cow_op_merge_size);
1259     ASSERT_TRUE(handler_->InitCowDevice());
1260     ASSERT_TRUE(handler_->InitializeWorkers());
1261 
1262     read_worker_ = std::make_unique<ReadWorker>(cow_system_->path, base_dev_->GetPath(),
1263                                                 system_device_ctrl_name_, base_dev_->GetPath(),
1264                                                 handler_->GetSharedPtr(), opener_);
1265     ASSERT_TRUE(read_worker_->Init());
1266     block_server_ = static_cast<TestBlockServer*>(read_worker_->block_server());
1267 
1268     handler_thread_ = std::async(std::launch::async, &SnapshotHandler::Start, handler_.get());
1269 }
1270 
1271 void HandlerTest::SetUp() {
1272     if (ShouldSkipSetUp()) {
1273         GTEST_SKIP() << "snapuserd not supported on this device";
1274     }
1275     ASSERT_NO_FATAL_FAILURE(SnapuserdTestBase::SetUp());
1276     ASSERT_NO_FATAL_FAILURE(CreateBaseDevice());
1277     ASSERT_NO_FATAL_FAILURE(SetUpV2Cow());
1278     ASSERT_NO_FATAL_FAILURE(InitializeDevice());
1279 }
1280 
1281 void HandlerTest::TearDown() {
1282     if (ShouldSkipSetUp()) {
1283         return;
1284     }
1285     ASSERT_TRUE(factory_.DeleteQueue(system_device_ctrl_name_));
1286     ASSERT_TRUE(handler_thread_.get());
1287     SnapuserdTestBase::TearDown();
1288 }
1289 
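// Request |size| bytes starting at |sector| through the read worker and copy
// the data captured by the test block server into |buffer|.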
1290 AssertionResult HandlerTest::ReadSectors(sector_t sector, uint64_t size, void* buffer) {
1291     if (!read_worker_->RequestSectors(sector, size)) {
1292         return AssertionFailure() << "request sectors failed";
1293     }
1294 
1295     std::string result = std::move(block_server_->sent_io());
1296     if (result.size() != size) {
1297         return AssertionFailure() << "size mismatch in result, got " << result.size()
1298                                   << ", expected " << size;
1299     }
1300 
1301     memcpy(buffer, result.data(), size);
1302     return AssertionSuccess();
1303 }
1304 
1305 // This test mirrors ReadSnapshotDeviceAndValidate.
1306 TEST_P(HandlerTest, Read) {
1307     std::unique_ptr<uint8_t[]> snapuserd_buffer = std::make_unique<uint8_t[]>(size_);
1308 
1309     // COPY
1310     loff_t offset = 0;
1311     ASSERT_TRUE(ReadSectors(offset / SECTOR_SIZE, size_, snapuserd_buffer.get()));
1312     ASSERT_EQ(memcmp(snapuserd_buffer.get(), orig_buffer_.get(), size_), 0);
1313 
1314     // REPLACE
1315     offset += size_;
1316     ASSERT_TRUE(ReadSectors(offset / SECTOR_SIZE, size_, snapuserd_buffer.get()));
1317     ASSERT_EQ(memcmp(snapuserd_buffer.get(), (char*)orig_buffer_.get() + size_, size_), 0);
1318 
1319     // ZERO
1320     offset += size_;
1321     ASSERT_TRUE(ReadSectors(offset / SECTOR_SIZE, size_, snapuserd_buffer.get()));
1322     ASSERT_EQ(memcmp(snapuserd_buffer.get(), (char*)orig_buffer_.get() + (size_ * 2), size_), 0);
1323 
1324     // REPLACE
1325     offset += size_;
1326     ASSERT_TRUE(ReadSectors(offset / SECTOR_SIZE, size_, snapuserd_buffer.get()));
1327     ASSERT_EQ(memcmp(snapuserd_buffer.get(), (char*)orig_buffer_.get() + (size_ * 3), size_), 0);
1328 
1329     // XOR
1330     offset += size_;
1331     ASSERT_TRUE(ReadSectors(offset / SECTOR_SIZE, size_, snapuserd_buffer.get()));
1332     ASSERT_EQ(memcmp(snapuserd_buffer.get(), (char*)orig_buffer_.get() + (size_ * 4), size_), 0);
1333 }
1334 
1335 TEST_P(HandlerTest, ReadUnalignedSector) {
1336     std::unique_ptr<uint8_t[]> snapuserd_buffer = std::make_unique<uint8_t[]>(BLOCK_SZ);
1337 
1338     ASSERT_TRUE(ReadSectors(1, BLOCK_SZ, snapuserd_buffer.get()));
1339     ASSERT_EQ(memcmp(snapuserd_buffer.get(), orig_buffer_.get() + SECTOR_SIZE, BLOCK_SZ), 0);
1340 }
1341 
1342 TEST_P(HandlerTest, ReadUnalignedSize) {
1343     std::unique_ptr<uint8_t[]> snapuserd_buffer = std::make_unique<uint8_t[]>(SECTOR_SIZE);
1344 
1345     ASSERT_TRUE(ReadSectors(0, SECTOR_SIZE, snapuserd_buffer.get()));
1346     ASSERT_EQ(memcmp(snapuserd_buffer.get(), orig_buffer_.get(), SECTOR_SIZE), 0);
1347 }
1348 
1349 class HandlerTestV3 : public HandlerTest {
1350   public:
1351     void ReadSnapshotWithVariableBlockSize();
1352 
1353   protected:
1354     void SetUp() override;
1355     void TearDown() override;
1356     void SetUpV3Cow();
1357 };
1358 
1359 void HandlerTestV3::SetUp() {
1360     if (ShouldSkipSetUp()) {
1361         GTEST_SKIP() << "snapuserd not supported on this device";
1362     }
1363     ASSERT_NO_FATAL_FAILURE(SnapuserdTestBase::SetUp());
1364     ASSERT_NO_FATAL_FAILURE(CreateBaseDevice());
1365     ASSERT_NO_FATAL_FAILURE(SetUpV3Cow());
1366     ASSERT_NO_FATAL_FAILURE(InitializeDevice());
1367 }
1368 
1369 void HandlerTestV3::TearDown() {
1370     ASSERT_NO_FATAL_FAILURE(HandlerTest::TearDown());
1371 }
1372 
1373 void HandlerTestV3::SetUpV3Cow() {
1374     auto writer = CreateV3Cow();
1375 
1376     ASSERT_NE(writer, nullptr);
1377     size_t total_data_to_write = size_;
1378 
1379     size_t total_blocks_to_write = total_data_to_write / BLOCK_SZ;
1380     size_t num_blocks_per_op = total_blocks_to_write / 4;
1381     size_t source_block = 0;
1382 
1383     size_t total_replace_blocks = num_blocks_per_op;
1384     // Write some data which can be compressed
1385     std::string data;
1386     data.resize(total_replace_blocks * BLOCK_SZ, '\0');
1387     for (size_t i = 0; i < data.size(); i++) {
1388         data[i] = static_cast<char>('A' + i / BLOCK_SZ);
1389     }
1390     // REPLACE ops
1391     ASSERT_TRUE(writer->AddRawBlocks(source_block, data.data(), data.size()));
1392 
1393     total_blocks_to_write -= total_replace_blocks;
1394     source_block = source_block + total_replace_blocks;
1395 
1396     // ZERO ops
1397     size_t total_zero_blocks = total_blocks_to_write / 3;
1398     ASSERT_TRUE(writer->AddZeroBlocks(source_block, total_zero_blocks));
1399 
1400     total_blocks_to_write -= total_zero_blocks;
1401     source_block = source_block + total_zero_blocks;
1402 
1403     // Generate some random data wherein a few blocks cannot be compressed.
1404     // This is to test the I/O path for those blocks which aren't compressed.
1405     size_t total_random_data_blocks = total_blocks_to_write;
1406     unique_fd rnd_fd(open("/dev/random", O_RDONLY));
1407 
1408     ASSERT_GE(rnd_fd, 0);
1409     std::string random_buffer;
1410     random_buffer.resize(total_random_data_blocks * BLOCK_SZ, '\0');
1411     ASSERT_EQ(
1412             android::base::ReadFullyAtOffset(rnd_fd, random_buffer.data(), random_buffer.size(), 0),
1413             true);
1414     // REPLACE ops
1415     ASSERT_TRUE(writer->AddRawBlocks(source_block, random_buffer.data(), random_buffer.size()));
1416     // Flush operations
1417     ASSERT_TRUE(writer->Finalize());
1418 
1419     // Construct the buffer required for validation
1420     orig_buffer_ = std::make_unique<uint8_t[]>(total_base_size_);
1421 
1422     // Read the entire base device
1423     ASSERT_EQ(android::base::ReadFullyAtOffset(base_fd_, orig_buffer_.get(), total_base_size_, 0),
1424               true);
1425 
1426     // REPLACE ops which are compressed
1427     std::memcpy(orig_buffer_.get(), data.data(), data.size());
1428     size_t offset = data.size();
1429 
1430     // ZERO ops
1431     std::string zero_buffer(total_zero_blocks * BLOCK_SZ, 0);
1432     std::memcpy((char*)orig_buffer_.get() + offset, (void*)zero_buffer.c_str(), zero_buffer.size());
1433     offset += zero_buffer.size();
1434 
1435     // REPLACE ops - Random buffers which aren't compressed
1436     std::memcpy((char*)orig_buffer_.get() + offset, random_buffer.c_str(), random_buffer.size());
1437 }
1438 
1439 TEST_P(HandlerTestV3, Read) {
1440     std::unique_ptr<uint8_t[]> snapuserd_buffer = std::make_unique<uint8_t[]>(size_);
1441 
1442     size_t read_size = SECTOR_SIZE;
1443     off_t offset = 0;
1444     // Read the first sector
1445     ASSERT_TRUE(ReadSectors(1, read_size, snapuserd_buffer.get()));
1446     // Validate the data
1447     ASSERT_EQ(std::memcmp(snapuserd_buffer.get(), orig_buffer_.get(), read_size), 0);
1448 
1449     // Read the second block at offset 7680 (Sector 15). This will map to the
1450     // first COW operation for variable block size
1451     offset += (((BLOCK_SZ * 2) - SECTOR_SIZE));
1452     read_size = BLOCK_SZ;  // Span across two REPLACE ops
1453     ASSERT_TRUE(ReadSectors(offset / SECTOR_SIZE, read_size, snapuserd_buffer.get()));
1454     // Validate the data
1455     ASSERT_EQ(std::memcmp(snapuserd_buffer.get(), (char*)orig_buffer_.get() + offset, read_size),
1456               0);
1457 
1458     // Fill some other data since we are going to read zero blocks
1459     std::memset(snapuserd_buffer.get(), 'Z', size_);
1460 
1461     size_t num_blocks_per_op = (size_ / BLOCK_SZ) / 4;
1462     offset = num_blocks_per_op * BLOCK_SZ;
1463     // Issue read spanning between a REPLACE op and ZERO ops. The starting point
1464     // is the last REPLACE op at sector 5118
1465     offset -= (SECTOR_SIZE * 2);
1466     // This will make sure it falls back to aligned reads after reading the
1467     // first unaligned block
1468     read_size = BLOCK_SZ * 6;
1469     ASSERT_TRUE(ReadSectors(offset / SECTOR_SIZE, read_size, snapuserd_buffer.get()));
1470     // Validate the data
1471     ASSERT_EQ(std::memcmp(snapuserd_buffer.get(), (char*)orig_buffer_.get() + offset, read_size),
1472               0);
1473 
1474     // Issue I/O request at the last block. The first chunk of (SECTOR_SIZE * 2)
1475     // will be from REPLACE op which has random buffers
1476     offset = (size_ - (SECTOR_SIZE * 2));
1477     // Request will span beyond the COW mapping, thereby fetching data from base
1478     // device.
1479     read_size = BLOCK_SZ * 8;
1480     ASSERT_TRUE(ReadSectors(offset / SECTOR_SIZE, read_size, snapuserd_buffer.get()));
1481     // Validate the data
1482     ASSERT_EQ(std::memcmp(snapuserd_buffer.get(), (char*)orig_buffer_.get() + offset, read_size),
1483               0);
1484 
1485     // Issue an I/O request which is not mapped to any COW operation
1486     offset = (size_ + (SECTOR_SIZE * 3));
1487     read_size = BLOCK_SZ * 3;
1488     ASSERT_TRUE(ReadSectors(offset / SECTOR_SIZE, read_size, snapuserd_buffer.get()));
1489     // Validate the data
1490     ASSERT_EQ(std::memcmp(snapuserd_buffer.get(), (char*)orig_buffer_.get() + offset, read_size),
1491               0);
1492 }
1493 
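// io_uring is only exercised when the ro.virtual_ab.io_uring.enabled property
// is set and the kernel supports io_uring; otherwise only the non-uring path
// is tested.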
1494 std::vector<bool> GetIoUringConfigs() {
1495 #if __ANDROID__
1496     if (!android::base::GetBoolProperty("ro.virtual_ab.io_uring.enabled", false)) {
1497         return {false};
1498     }
1499 #endif
1500     if (!KernelSupportsIoUring()) {
1501         return {false};
1502     }
1503     return {false, true};
1504 }
1505 
1506 std::vector<TestParam> GetTestConfigs() {
1507     std::vector<TestParam> testParams;
1508     std::vector<bool> uring_configs = GetIoUringConfigs();
1509 
1510     for (bool config : uring_configs) {
1511         TestParam param;
1512         param.io_uring = config;
1513         param.o_direct = false;
1514         testParams.push_back(std::move(param));
1515     }
1516 
1517     for (bool config : uring_configs) {
1518         TestParam param;
1519         param.io_uring = config;
1520         param.o_direct = true;
1521         testParams.push_back(std::move(param));
1522     }
1523     return testParams;
1524 }
1525 
1526 std::vector<TestParam> GetVariableBlockTestConfigs() {
1527     std::vector<TestParam> testParams;
1528 
1529     std::vector<int> block_sizes = {4096, 8192, 16384, 32768, 65536, 131072};
1530     std::vector<std::string> compression_algo = {"none", "lz4", "zstd", "gz"};
1531     std::vector<int> threads = {1, 2};
1532     std::vector<bool> uring_configs = GetIoUringConfigs();
1533 
1534     // This should test 96 combinations and validate the I/O path
1535     for (auto block : block_sizes) {
1536         for (auto compression : compression_algo) {
1537             for (auto thread : threads) {
1538                 for (auto io_uring : uring_configs) {
1539                     TestParam param;
1540                     param.block_size = block;
1541                     param.compression = compression;
1542                     param.num_threads = thread;
1543                     param.io_uring = io_uring;
1544                     param.o_direct = false;
1545                     param.cow_op_merge_size = 0;
1546                     testParams.push_back(std::move(param));
1547                 }
1548             }
1549         }
1550     }
1551 
1552     return testParams;
1553 }
1554 
1555 INSTANTIATE_TEST_SUITE_P(Io, SnapuserdVariableBlockSizeTest,
1556                          ::testing::ValuesIn(GetVariableBlockTestConfigs()));
1557 INSTANTIATE_TEST_SUITE_P(Io, HandlerTestV3, ::testing::ValuesIn(GetVariableBlockTestConfigs()));
1558 INSTANTIATE_TEST_SUITE_P(Io, SnapuserdTest, ::testing::ValuesIn(GetTestConfigs()));
1559 INSTANTIATE_TEST_SUITE_P(Io, HandlerTest, ::testing::ValuesIn(GetTestConfigs()));
1560 
1561 }  // namespace snapshot
1562 }  // namespace android
1563 
1564 int main(int argc, char** argv) {
1565     ::testing::InitGoogleTest(&argc, argv);
1566 
1567     gflags::ParseCommandLineFlags(&argc, &argv, false);
1568 
1569     return RUN_ALL_TESTS();
1570 }
1571