# Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import itertools
import logging
import os
import shutil
import struct
import zipfile

import ota_metadata_pb2
import common
import fnmatch
from common import (ZipDelete, DoesInputFileContain, ReadBytesFromInputFile, OPTIONS, MakeTempFile,
                    ZipWriteStr, BuildInfo, LoadDictionaryFromFile,
                    SignFile, PARTITIONS_WITH_BUILD_PROP, PartitionBuildProps,
                    GetRamdiskFormat, ParseUpdateEngineConfig)
import payload_signer
from payload_signer import PayloadSigner, AddSigningArgumentParse, GeneratePayloadProperties


logger = logging.getLogger(__name__)

OPTIONS.no_signing = False
OPTIONS.force_non_ab = False
OPTIONS.wipe_user_data = False
OPTIONS.downgrade = False
OPTIONS.key_passwords = {}
OPTIONS.incremental_source = None
OPTIONS.retrofit_dynamic_partitions = False
OPTIONS.output_metadata_path = None
OPTIONS.boot_variable_file = None

METADATA_NAME = 'META-INF/com/android/metadata'
METADATA_PROTO_NAME = 'META-INF/com/android/metadata.pb'
UNZIP_PATTERN = ['IMAGES/*', 'META/*', 'OTA/*',
                 'RADIO/*', '*/build.prop', '*/default.prop', '*/build.default', "*/etc/vintf/*"]
SECURITY_PATCH_LEVEL_PROP_NAME = "ro.build.version.security_patch"
TARGET_FILES_IMAGES_SUBDIR = ["IMAGES", "PREBUILT_IMAGES", "RADIO"]


# Key is the compression algorithm; value is the minimum API level required to
# use this compression algorithm for VABC OTA on device.
VABC_COMPRESSION_PARAM_SUPPORT = {
    "gz": 31,
    "brotli": 31,
    "none": 31,
    # lz4 support is added in Android U
    "lz4": 34,
    # zstd support is added in Android V
    "zstd": 35,
}
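

# A minimal sketch (illustrative only, not used by the build flow) of how the
# table above can be consulted, assuming the caller knows the device's launch
# API level:
#
#   def vabc_compression_supported(algo, device_api_level):
#     min_api = VABC_COMPRESSION_PARAM_SUPPORT.get(algo)
#     return min_api is not None and device_api_level >= min_api
#
#   vabc_compression_supported("lz4", 34)   # True
#   vabc_compression_supported("zstd", 34)  # False; zstd needs API 35+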


def FinalizeMetadata(metadata, input_file, output_file, needed_property_files=None, package_key=None, pw=None):
  """Finalizes the metadata and signs an A/B OTA package.

  In order to stream an A/B OTA package, we need 'ota-streaming-property-files'
  that contains the offsets and sizes for the ZIP entries. An example
  property-files string is as follows.

    "payload.bin:679:343,payload_properties.txt:378:45,metadata:69:379"

  The OTA server can pass down this string, in addition to the package URL, to
  the system update client. The system update client can then fetch individual
  ZIP entries (ZIP_STORED) directly at the given offset of the URL.

  Args:
    metadata: The metadata dict for the package.
    input_file: The input ZIP filename that doesn't contain the package
        METADATA entry yet.
    output_file: The final output ZIP filename.
    needed_property_files: The list of PropertyFiles instances to be generated.
        Defaults to [AbOtaPropertyFiles(), StreamingPropertyFiles()].
    package_key: The key used to sign this OTA package.
    pw: Password for the package_key.
  """
  no_signing = package_key is None

  if needed_property_files is None:
    # AbOtaPropertyFiles intends to replace StreamingPropertyFiles, as it
    # covers all the info of the latter. However, system updaters and OTA
    # servers need to take time to switch to the new flag. We keep both of the
    # flags for the P timeframe, and will remove StreamingPropertyFiles in a
    # later release.
    needed_property_files = (
        AbOtaPropertyFiles(),
        StreamingPropertyFiles(),
    )

  def ComputeAllPropertyFiles(input_file, needed_property_files):
    # Write the current metadata entry with placeholders.
    with zipfile.ZipFile(input_file, 'r', allowZip64=True) as input_zip:
      for property_files in needed_property_files:
        metadata.property_files[property_files.name] = property_files.Compute(
            input_zip)

    ZipDelete(input_file, [METADATA_NAME, METADATA_PROTO_NAME], True)
    with zipfile.ZipFile(input_file, 'a', allowZip64=True) as output_zip:
      WriteMetadata(metadata, output_zip)

    if no_signing:
      return input_file

    prelim_signing = MakeTempFile(suffix='.zip')
    SignOutput(input_file, prelim_signing, package_key, pw)
    return prelim_signing

  def FinalizeAllPropertyFiles(prelim_signing, needed_property_files):
    with zipfile.ZipFile(prelim_signing, 'r', allowZip64=True) as prelim_signing_zip:
      for property_files in needed_property_files:
        metadata.property_files[property_files.name] = property_files.Finalize(
            prelim_signing_zip,
            len(metadata.property_files[property_files.name]))

  # SignOutput(), which in turn calls signapk.jar, will possibly reorder the
  # ZIP entries, as well as padding the entry headers. We do a preliminary
  # signing (with an incomplete metadata entry) to allow that to happen. Then
  # compute the ZIP entry offsets, write back the final metadata and do the
  # final signing.
  prelim_signing = ComputeAllPropertyFiles(input_file, needed_property_files)
  try:
    FinalizeAllPropertyFiles(prelim_signing, needed_property_files)
  except PropertyFiles.InsufficientSpaceException:
    # Even with the preliminary signing, the entry orders may change
    # dramatically, which leads to insufficiently reserved space during the
    # first call to ComputeAllPropertyFiles(). In that case, we redo all the
    # preliminary signing work, based on the already ordered ZIP entries, to
    # address the issue.
    prelim_signing = ComputeAllPropertyFiles(
        prelim_signing, needed_property_files)
    FinalizeAllPropertyFiles(prelim_signing, needed_property_files)

  # Replace the METADATA entry.
  ZipDelete(prelim_signing, [METADATA_NAME, METADATA_PROTO_NAME])
  with zipfile.ZipFile(prelim_signing, 'a', allowZip64=True) as output_zip:
    WriteMetadata(metadata, output_zip)

  # Re-sign the package after updating the metadata entry.
  if no_signing:
    logger.info(f"Signing disabled for output file {output_file}")
    shutil.copy(prelim_signing, output_file)
  else:
    logger.info(
        f"Signing the output file {output_file} with key {package_key}")
    SignOutput(prelim_signing, output_file, package_key, pw)

  # Reopen the final signed zip to double check the streaming metadata.
  with zipfile.ZipFile(output_file, allowZip64=True) as output_zip:
    for property_files in needed_property_files:
      property_files.Verify(
          output_zip, metadata.property_files[property_files.name].strip())

  # If requested, dump the metadata to a separate file.
  output_metadata_path = OPTIONS.output_metadata_path
  if output_metadata_path:
    WriteMetadata(metadata, output_metadata_path)
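

# A minimal sketch (illustrative only) of how a client could parse a
# property-files string produced above into an {entry_name: (offset, size)}
# mapping; the input string is a made-up example:
#
#   def ParsePropertyFiles(s):
#     result = {}
#     for token in s.strip().split(','):
#       name, offset, size = token.rsplit(':', 2)
#       result[name] = (int(offset), int(size))
#     return result
#
#   ParsePropertyFiles("payload.bin:679:343,metadata:69:379")
#   # => {'payload.bin': (679, 343), 'metadata': (69, 379)}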


def WriteMetadata(metadata_proto, output):
  """Writes the metadata to the zip archive or a file.

  Args:
    metadata_proto: The metadata protobuf for the package.
    output: A ZipFile object or a string of the output file path. If a string
        path is given, the metadata in the protobuf format will be written to
        {output}.pb, e.g. ota_metadata.pb
  """

  metadata_dict = BuildLegacyOtaMetadata(metadata_proto)
  legacy_metadata = "".join(["%s=%s\n" % kv for kv in
                             sorted(metadata_dict.items())])
  if isinstance(output, zipfile.ZipFile):
    ZipWriteStr(output, METADATA_PROTO_NAME, metadata_proto.SerializeToString(),
                compress_type=zipfile.ZIP_STORED)
    ZipWriteStr(output, METADATA_NAME, legacy_metadata,
                compress_type=zipfile.ZIP_STORED)
    return

  with open('{}.pb'.format(output), 'wb') as f:
    f.write(metadata_proto.SerializeToString())
  with open(output, 'w') as f:
    f.write(legacy_metadata)
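

# A minimal sketch (illustrative only) of reading the metadata written by
# WriteMetadata() back out of a finished package ("ota.zip" is a placeholder):
#
#   with zipfile.ZipFile("ota.zip", allowZip64=True) as zfp:
#     metadata = ota_metadata_pb2.OtaMetadata()
#     metadata.ParseFromString(zfp.read(METADATA_PROTO_NAME))
#     print(metadata.postcondition.build)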


def UpdateDeviceState(device_state, build_info, boot_variable_values,
                      is_post_build):
  """Update the fields of the DeviceState proto with build info."""

  def UpdatePartitionStates(partition_states):
    """Update the per-partition state according to its build.prop"""
    if not build_info.is_ab:
      return
    build_info_set = ComputeRuntimeBuildInfos(build_info,
                                              boot_variable_values)
    assert "ab_partitions" in build_info.info_dict,\
        "ab_partitions property required for ab update."
    ab_partitions = set(build_info.info_dict.get("ab_partitions"))

    # delta_generator will error out on unused timestamps,
    # so only generate timestamps for dynamic partitions
    # used in OTA update.
    for partition in sorted(set(PARTITIONS_WITH_BUILD_PROP) & ab_partitions):
      partition_prop = build_info.info_dict.get(
          '{}.build.prop'.format(partition))
      # Skip if the partition is missing, or it doesn't have a build.prop
      if not partition_prop or not partition_prop.build_props:
        continue

      partition_state = partition_states.add()
      partition_state.partition_name = partition
      # Update the partition's runtime device names and fingerprints
      partition_devices = set()
      partition_fingerprints = set()
      for runtime_build_info in build_info_set:
        partition_devices.add(
            runtime_build_info.GetPartitionBuildProp('ro.product.device',
                                                     partition))
        partition_fingerprints.add(
            runtime_build_info.GetPartitionFingerprint(partition))

      partition_state.device.extend(sorted(partition_devices))
      partition_state.build.extend(sorted(partition_fingerprints))

      # TODO(xunchang) set the boot image's version with kmi. Note the boot
      # image doesn't have a file map.
      partition_state.version = build_info.GetPartitionBuildProp(
          'ro.build.date.utc', partition)

  # TODO(xunchang) we can save a call to ComputeRuntimeBuildInfos.
  build_devices, build_fingerprints = \
      CalculateRuntimeDevicesAndFingerprints(build_info, boot_variable_values)
  device_state.device.extend(sorted(build_devices))
  device_state.build.extend(sorted(build_fingerprints))
  device_state.build_incremental = build_info.GetBuildProp(
      'ro.build.version.incremental')

  UpdatePartitionStates(device_state.partition_state)

  if is_post_build:
    device_state.sdk_level = build_info.GetBuildProp(
        'ro.build.version.sdk')
    device_state.security_patch_level = build_info.GetBuildProp(
        'ro.build.version.security_patch')
    # Use the actual post-timestamp, even for a downgrade case.
    device_state.timestamp = int(build_info.GetBuildProp('ro.build.date.utc'))


def GetPackageMetadata(target_info, source_info=None):
  """Generates and returns the metadata proto.

  It generates an ota_metadata protobuf that contains the info to be written
  into an OTA package (META-INF/com/android/metadata.pb). It also handles the
  detection of downgrade / data wipe based on the global options.

  Args:
    target_info: The BuildInfo instance that holds the target build info.
    source_info: The BuildInfo instance that holds the source build info, or
        None if generating full OTA.

  Returns:
    A protobuf to be written into the package metadata entry.
  """
  assert isinstance(target_info, BuildInfo)
  assert source_info is None or isinstance(source_info, BuildInfo)

  boot_variable_values = {}
  if OPTIONS.boot_variable_file:
    d = LoadDictionaryFromFile(OPTIONS.boot_variable_file)
    for key, values in d.items():
      boot_variable_values[key] = [val.strip() for val in values.split(',')]

  metadata_proto = ota_metadata_pb2.OtaMetadata()
  # TODO(xunchang) some fields, e.g. post-device, aren't necessary. We can
  # consider skipping them if they aren't used by clients.
  UpdateDeviceState(metadata_proto.postcondition, target_info,
                    boot_variable_values, True)

  if target_info.is_ab and not OPTIONS.force_non_ab:
    metadata_proto.type = ota_metadata_pb2.OtaMetadata.AB
    metadata_proto.required_cache = 0
  else:
    metadata_proto.type = ota_metadata_pb2.OtaMetadata.BLOCK
    # cache requirement will be updated by the non-A/B code.

  if OPTIONS.wipe_user_data:
    metadata_proto.wipe = True

  if OPTIONS.retrofit_dynamic_partitions:
    metadata_proto.retrofit_dynamic_partitions = True

  is_incremental = source_info is not None
  if is_incremental:
    UpdateDeviceState(metadata_proto.precondition, source_info,
                      boot_variable_values, False)
  else:
    metadata_proto.precondition.device.extend(
        metadata_proto.postcondition.device)

  # Detect downgrades and set up downgrade flags accordingly.
  if is_incremental:
    HandleDowngradeMetadata(metadata_proto, target_info, source_info)

  return metadata_proto
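

# Illustrative example of the --boot_variable_file input consumed above,
# assuming the usual key=value dictionary format parsed by
# LoadDictionaryFromFile; each value lists the runtime choices for one
# ro.boot.* property:
#
#   ro.boot.sku=sku_a,sku_b
#   ro.boot.carrier=generic,vzw
#
# ComputeRuntimeBuildInfos() later expands these into all value combinations
# when computing the runtime device names and fingerprints.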
324 """ 325 326 separator = '|' 327 328 metadata_dict = {} 329 if metadata_proto.type == ota_metadata_pb2.OtaMetadata.AB: 330 metadata_dict['ota-type'] = 'AB' 331 elif metadata_proto.type == ota_metadata_pb2.OtaMetadata.BLOCK: 332 metadata_dict['ota-type'] = 'BLOCK' 333 if metadata_proto.wipe: 334 metadata_dict['ota-wipe'] = 'yes' 335 if metadata_proto.retrofit_dynamic_partitions: 336 metadata_dict['ota-retrofit-dynamic-partitions'] = 'yes' 337 if metadata_proto.downgrade: 338 metadata_dict['ota-downgrade'] = 'yes' 339 340 metadata_dict['ota-required-cache'] = str(metadata_proto.required_cache) 341 342 post_build = metadata_proto.postcondition 343 metadata_dict['post-build'] = separator.join(post_build.build) 344 metadata_dict['post-build-incremental'] = post_build.build_incremental 345 metadata_dict['post-sdk-level'] = post_build.sdk_level 346 metadata_dict['post-security-patch-level'] = post_build.security_patch_level 347 metadata_dict['post-timestamp'] = str(post_build.timestamp) 348 349 pre_build = metadata_proto.precondition 350 metadata_dict['pre-device'] = separator.join(pre_build.device) 351 # incremental updates 352 if len(pre_build.build) != 0: 353 metadata_dict['pre-build'] = separator.join(pre_build.build) 354 metadata_dict['pre-build-incremental'] = pre_build.build_incremental 355 356 if metadata_proto.spl_downgrade: 357 metadata_dict['spl-downgrade'] = 'yes' 358 metadata_dict.update(metadata_proto.property_files) 359 360 return metadata_dict 361 362 363def HandleDowngradeMetadata(metadata_proto, target_info, source_info): 364 # Only incremental OTAs are allowed to reach here. 365 assert OPTIONS.incremental_source is not None 366 367 # used for logging upon errors 368 log_downgrades = [] 369 log_upgrades = [] 370 371 post_timestamp = target_info.GetBuildProp("ro.build.date.utc") 372 pre_timestamp = source_info.GetBuildProp("ro.build.date.utc") 373 if int(post_timestamp) < int(pre_timestamp): 374 logger.info(f"ro.build.date.utc pre timestamp: {pre_timestamp}, " 375 f"post timestamp: {post_timestamp}. Downgrade detected.") 376 log_downgrades.append(f"ro.build.date.utc pre: {pre_timestamp} post: {post_timestamp}") 377 else: 378 logger.info(f"ro.build.date.utc pre timestamp: {pre_timestamp}, " 379 f"post timestamp: {post_timestamp}.") 380 log_upgrades.append(f"ro.build.date.utc pre: {pre_timestamp} post: {post_timestamp}") 381 382 # When merging system and vendor target files, it is not enough 383 # to check ro.build.date.utc, the timestamp for each partition must 384 # be checked. 385 if source_info.is_ab: 386 ab_partitions = set(source_info.get("ab_partitions")) 387 for partition in sorted(set(PARTITIONS_WITH_BUILD_PROP) & ab_partitions): 388 389 partition_prop = source_info.get('{}.build.prop'.format(partition)) 390 # Skip if the partition is missing, or it doesn't have a build.prop 391 if not partition_prop or not partition_prop.build_props: 392 continue 393 partition_prop = target_info.get('{}.build.prop'.format(partition)) 394 # Skip if the partition is missing, or it doesn't have a build.prop 395 if not partition_prop or not partition_prop.build_props: 396 continue 397 398 post_timestamp = target_info.GetPartitionBuildProp( 399 'ro.build.date.utc', partition) 400 pre_timestamp = source_info.GetPartitionBuildProp( 401 'ro.build.date.utc', partition) 402 if int(post_timestamp) < int(pre_timestamp): 403 logger.info(f"Partition {partition} pre timestamp: {pre_timestamp}, " 404 f"post time: {post_timestamp}. 
Downgrade detected.") 405 log_downgrades.append(f"{partition} pre: {pre_timestamp} post: {post_timestamp}") 406 else: 407 logger.info(f"Partition {partition} pre timestamp: {pre_timestamp}, " 408 f"post timestamp: {post_timestamp}.") 409 log_upgrades.append(f"{partition} pre: {pre_timestamp} post: {post_timestamp}") 410 411 if OPTIONS.spl_downgrade: 412 metadata_proto.spl_downgrade = True 413 414 if OPTIONS.downgrade: 415 if len(log_downgrades) == 0: 416 raise RuntimeError( 417 "--downgrade or --override_timestamp specified but no downgrade " 418 "detected. Current values for ro.build.date.utc: " + ', '.join(log_upgrades)) 419 metadata_proto.downgrade = True 420 else: 421 if len(log_downgrades) != 0: 422 raise RuntimeError( 423 "Downgrade detected based on timestamp check in ro.build.date.utc. " 424 "Need to specify --override_timestamp OR --downgrade to allow " 425 "building the incremental. Downgrades detected for: " 426 + ', '.join(log_downgrades)) 427 428def ComputeRuntimeBuildInfos(default_build_info, boot_variable_values): 429 """Returns a set of build info objects that may exist during runtime.""" 430 431 build_info_set = {default_build_info} 432 if not boot_variable_values: 433 return build_info_set 434 435 # Calculate all possible combinations of the values for the boot variables. 436 keys = boot_variable_values.keys() 437 value_list = boot_variable_values.values() 438 combinations = [dict(zip(keys, values)) 439 for values in itertools.product(*value_list)] 440 for placeholder_values in combinations: 441 # Reload the info_dict as some build properties may change their values 442 # based on the value of ro.boot* properties. 443 info_dict = copy.deepcopy(default_build_info.info_dict) 444 for partition in PARTITIONS_WITH_BUILD_PROP: 445 partition_prop_key = "{}.build.prop".format(partition) 446 input_file = info_dict[partition_prop_key].input_file 447 ramdisk = GetRamdiskFormat(info_dict) 448 if isinstance(input_file, zipfile.ZipFile): 449 with zipfile.ZipFile(input_file.filename, allowZip64=True) as input_zip: 450 info_dict[partition_prop_key] = \ 451 PartitionBuildProps.FromInputFile(input_zip, partition, 452 placeholder_values, 453 ramdisk) 454 else: 455 info_dict[partition_prop_key] = \ 456 PartitionBuildProps.FromInputFile(input_file, partition, 457 placeholder_values, 458 ramdisk) 459 info_dict["build.prop"] = info_dict["system.build.prop"] 460 build_info_set.add(BuildInfo(info_dict, default_build_info.oem_dicts)) 461 462 return build_info_set 463 464 465def CalculateRuntimeDevicesAndFingerprints(default_build_info, 466 boot_variable_values): 467 """Returns a tuple of sets for runtime devices and fingerprints""" 468 469 device_names = set() 470 fingerprints = set() 471 build_info_set = ComputeRuntimeBuildInfos(default_build_info, 472 boot_variable_values) 473 for runtime_build_info in build_info_set: 474 device_names.add(runtime_build_info.device) 475 fingerprints.add(runtime_build_info.fingerprint) 476 return device_names, fingerprints 477 478 479def GetZipEntryOffset(zfp, entry_info): 480 """Get offset to a beginning of a particular zip entry 481 Args: 482 fp: zipfile.ZipFile 483 entry_info: zipfile.ZipInfo 484 485 Returns: 486 (offset, size) tuple 487 """ 488 # Don't use len(entry_info.extra). Because that returns size of extra 489 # fields in central directory. We need to look at local file directory, 490 # as these two might have different sizes. 491 492 # We cannot work with zipfile.ZipFile instances, we need a |fp| for the underlying file. 


def ComputeRuntimeBuildInfos(default_build_info, boot_variable_values):
  """Returns a set of build info objects that may exist during runtime."""

  build_info_set = {default_build_info}
  if not boot_variable_values:
    return build_info_set

  # Calculate all possible combinations of the values for the boot variables.
  keys = boot_variable_values.keys()
  value_list = boot_variable_values.values()
  combinations = [dict(zip(keys, values))
                  for values in itertools.product(*value_list)]
  for placeholder_values in combinations:
    # Reload the info_dict as some build properties may change their values
    # based on the value of ro.boot* properties.
    info_dict = copy.deepcopy(default_build_info.info_dict)
    for partition in PARTITIONS_WITH_BUILD_PROP:
      partition_prop_key = "{}.build.prop".format(partition)
      input_file = info_dict[partition_prop_key].input_file
      ramdisk = GetRamdiskFormat(info_dict)
      if isinstance(input_file, zipfile.ZipFile):
        with zipfile.ZipFile(input_file.filename, allowZip64=True) as input_zip:
          info_dict[partition_prop_key] = \
              PartitionBuildProps.FromInputFile(input_zip, partition,
                                                placeholder_values,
                                                ramdisk)
      else:
        info_dict[partition_prop_key] = \
            PartitionBuildProps.FromInputFile(input_file, partition,
                                              placeholder_values,
                                              ramdisk)
    info_dict["build.prop"] = info_dict["system.build.prop"]
    build_info_set.add(BuildInfo(info_dict, default_build_info.oem_dicts))

  return build_info_set


def CalculateRuntimeDevicesAndFingerprints(default_build_info,
                                           boot_variable_values):
  """Returns a tuple of sets for runtime devices and fingerprints"""

  device_names = set()
  fingerprints = set()
  build_info_set = ComputeRuntimeBuildInfos(default_build_info,
                                            boot_variable_values)
  for runtime_build_info in build_info_set:
    device_names.add(runtime_build_info.device)
    fingerprints.add(runtime_build_info.fingerprint)
  return device_names, fingerprints


def GetZipEntryOffset(zfp, entry_info):
  """Gets the offset to the beginning of a particular zip entry.

  Args:
    zfp: A zipfile.ZipFile instance.
    entry_info: The zipfile.ZipInfo instance of the entry.

  Returns:
    (offset, size) tuple
  """
  # Don't use len(entry_info.extra), because that returns the size of the
  # extra fields in the central directory. We need to look at the local file
  # header, as the two might have different sizes.

  # We cannot work with the zipfile.ZipFile instance directly; we need an |fp|
  # for the underlying file.
  zfp = zfp.fp
  zfp.seek(entry_info.header_offset)
  data = zfp.read(zipfile.sizeFileHeader)
  fheader = struct.unpack(zipfile.structFileHeader, data)
  # The last two fields of the local file header are the filename length and
  # the extra length.
  filename_len = fheader[-2]
  extra_len = fheader[-1]
  offset = entry_info.header_offset
  offset += zipfile.sizeFileHeader
  offset += filename_len + extra_len
  size = entry_info.file_size
  return (offset, size)
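

# Example (illustrative only): because streaming entries are ZIP_STORED, the
# offset returned by GetZipEntryOffset() points at the entry's raw bytes, the
# same byte range a streaming client would fetch ("ota.zip" is a placeholder):
#
#   with zipfile.ZipFile("ota.zip", allowZip64=True) as zfp:
#     offset, size = GetZipEntryOffset(zfp, zfp.getinfo('payload.bin'))
#     zfp.fp.seek(offset)
#     magic = zfp.fp.read(4)  # b'CrAU' for a payload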


class PropertyFiles(object):
  """A class that computes the property-files string for an OTA package.

  A property-files string is a comma-separated string that contains the
  offset/size info for an OTA package. The entries, which must be ZIP_STORED,
  can be fetched directly with the package URL along with the offset/size info.
  These strings can be used for streaming A/B OTAs, or for allowing an updater
  to download the package metadata entry directly, without paying the cost of
  downloading the entire package.

  Computing the final property-files string requires two passes, because doing
  the whole package signing (with signapk.jar) will possibly reorder the ZIP
  entries, which may in turn invalidate earlier computed ZIP entry offset/size
  values.

  This class provides functions to be called for each pass. The general flow is
  as follows.

    property_files = PropertyFiles()
    # The first pass, which writes placeholders before doing initial signing.
    property_files.Compute()
    SignOutput()

    # The second pass, by replacing the placeholders with actual data.
    property_files.Finalize()
    SignOutput()

  And the caller can additionally verify the final result.

    property_files.Verify()
  """

  def __init__(self):
    self.name = None
    self.required = ()
    self.optional = ()

  def Compute(self, input_zip):
    """Computes and returns a property-files string with placeholders.

    We reserve extra space for the offset and size of the metadata entry
    itself, although we don't know the final values until the package gets
    signed.

    Args:
      input_zip: The input ZIP file.

    Returns:
      A string with placeholders for the metadata offset/size info, e.g.
      "payload.bin:679:343,payload_properties.txt:378:45,metadata:        ".
    """
    return self.GetPropertyFilesString(input_zip, reserve_space=True)

  class InsufficientSpaceException(Exception):
    pass

  def Finalize(self, input_zip, reserved_length):
    """Finalizes a property-files string with actual METADATA offset/size info.

    The input ZIP file has been signed, with the ZIP entries in the desired
    place (signapk.jar will possibly reorder the ZIP entries). Now we compute
    the ZIP entry offsets and construct the property-files string with actual
    data. Note that during this process, we must pad the property-files string
    to the reserved length, so that the METADATA entry size remains the same.
    Otherwise the entries' offsets and sizes may change again.

    Args:
      input_zip: The input ZIP file.
      reserved_length: The reserved length of the property-files string during
          the call to Compute(). The final string must be no more than this
          size.

    Returns:
      A property-files string including the metadata offset/size info, e.g.
      "payload.bin:679:343,payload_properties.txt:378:45,metadata:69:379  ".

    Raises:
      InsufficientSpaceException: If the reserved length is insufficient to
          hold the final string.
    """
    result = self.GetPropertyFilesString(input_zip, reserve_space=False)
    if len(result) > reserved_length:
      raise self.InsufficientSpaceException(
          'Insufficient reserved space: reserved={}, actual={}'.format(
              reserved_length, len(result)))

    result += ' ' * (reserved_length - len(result))
    return result

  def Verify(self, input_zip, expected):
    """Verifies the input ZIP file contains the expected property-files string.

    Args:
      input_zip: The input ZIP file.
      expected: The property-files string that's computed from Finalize().

    Raises:
      AssertionError: On finding a mismatch.
    """
    actual = self.GetPropertyFilesString(input_zip)
    assert actual == expected, \
        "Mismatching streaming metadata: {} vs {}.".format(actual, expected)

  def GetPropertyFilesString(self, zip_file, reserve_space=False):
    """Constructs the property-files string per request.

    Args:
      zip_file: The input ZIP file.
      reserve_space: Whether to reserve placeholder space for the metadata
          entries, instead of computing their actual offset/size.

    Returns:
      A property-files string including the metadata offset/size info, e.g.
      "payload.bin:679:343,payload_properties.txt:378:45,metadata:        ".
    """

    def ComputeEntryOffsetSize(name):
      """Computes the zip entry offset and size."""
      info = zip_file.getinfo(name)
      (offset, size) = GetZipEntryOffset(zip_file, info)
      return '%s:%d:%d' % (os.path.basename(name), offset, size)

    tokens = []
    tokens.extend(self._GetPrecomputed(zip_file))
    for entry in self.required:
      tokens.append(ComputeEntryOffsetSize(entry))
    for entry in self.optional:
      if entry in zip_file.namelist():
        tokens.append(ComputeEntryOffsetSize(entry))

    # 'META-INF/com/android/metadata' is required. We don't know its actual
    # offset and length (as well as the values for other entries). So we
    # reserve 15 bytes as a placeholder ('offset:length'), which is sufficient
    # to cover the space for the metadata entry: 'offset' allows a max of 10
    # digits (i.e. ~9 GiB), with a max of 4 digits for the length. Note that
    # all the reserved space serves the metadata entry only.
    if reserve_space:
      tokens.append('metadata:' + ' ' * 15)
      tokens.append('metadata.pb:' + ' ' * 15)
    else:
      tokens.append(ComputeEntryOffsetSize(METADATA_NAME))
      if METADATA_PROTO_NAME in zip_file.namelist():
        tokens.append(ComputeEntryOffsetSize(METADATA_PROTO_NAME))

    return ','.join(tokens)

  def _GetPrecomputed(self, input_zip):
    """Computes the additional tokens to be included into the property-files.

    This applies to tokens without actual ZIP entries, such as
    payload_metadata.bin. We want to expose the offset/size to updaters, so
    that they can download the payload metadata directly with the info.

    Args:
      input_zip: The input zip file.

    Returns:
      A list of strings (tokens) to be added to the property-files string.
    """
    # pylint: disable=no-self-use
    # pylint: disable=unused-argument
    return []
665 """ 666 # pylint: disable=no-self-use 667 # pylint: disable=unused-argument 668 return [] 669 670 671def SignOutput(temp_zip_name, output_zip_name, package_key=None, pw=None): 672 if package_key is None: 673 package_key = OPTIONS.package_key 674 if pw is None and OPTIONS.key_passwords: 675 pw = OPTIONS.key_passwords[package_key] 676 677 SignFile(temp_zip_name, output_zip_name, package_key, pw, 678 whole_file=True) 679 680 681def ConstructOtaApexInfo(target_zip, source_file=None): 682 """If applicable, add the source version to the apex info.""" 683 684 def _ReadApexInfo(input_zip): 685 if not DoesInputFileContain(input_zip, "META/apex_info.pb"): 686 logger.warning("target_file doesn't contain apex_info.pb %s", input_zip) 687 return None 688 return ReadBytesFromInputFile(input_zip, "META/apex_info.pb") 689 690 target_apex_string = _ReadApexInfo(target_zip) 691 # Return early if the target apex info doesn't exist or is empty. 692 if not target_apex_string: 693 return target_apex_string 694 695 # If the source apex info isn't available, just return the target info 696 if not source_file: 697 return target_apex_string 698 699 source_apex_string = _ReadApexInfo(source_file) 700 if not source_apex_string: 701 return target_apex_string 702 703 source_apex_proto = ota_metadata_pb2.ApexMetadata() 704 source_apex_proto.ParseFromString(source_apex_string) 705 source_apex_versions = {apex.package_name: apex.version for apex in 706 source_apex_proto.apex_info} 707 708 # If the apex package is available in the source build, initialize the source 709 # apex version. 710 target_apex_proto = ota_metadata_pb2.ApexMetadata() 711 target_apex_proto.ParseFromString(target_apex_string) 712 for target_apex in target_apex_proto.apex_info: 713 name = target_apex.package_name 714 if name in source_apex_versions: 715 target_apex.source_version = source_apex_versions[name] 716 717 return target_apex_proto.SerializeToString() 718 719 720def IsLz4diffCompatible(source_file: str, target_file: str): 721 """Check whether lz4diff versions in two builds are compatible 722 723 Args: 724 source_file: Path to source build's target_file.zip 725 target_file: Path to target build's target_file.zip 726 727 Returns: 728 bool true if and only if lz4diff versions are compatible 729 """ 730 if source_file is None or target_file is None: 731 return False 732 # Right now we enable lz4diff as long as source build has liblz4.so. 733 # In the future we might introduce version system to lz4diff as well. 


def IsZucchiniCompatible(source_file: str, target_file: str):
  """Checks whether the zucchini versions in two builds are compatible.

  Args:
    source_file: Path to the source build's target_files.zip
    target_file: Path to the target build's target_files.zip

  Returns:
    bool, True if and only if the zucchini versions are compatible
  """
  if source_file is None or target_file is None:
    return False
  assert os.path.exists(source_file)
  assert os.path.exists(target_file)

  assert zipfile.is_zipfile(source_file) or os.path.isdir(source_file)
  assert zipfile.is_zipfile(target_file) or os.path.isdir(target_file)
  _ZUCCHINI_CONFIG_ENTRY_NAME = "META/zucchini_config.txt"

  def ReadEntry(path, entry):
    # Read an entry inside a .zip file or an extracted dir of a .zip file.
    if zipfile.is_zipfile(path):
      with zipfile.ZipFile(path, "r", allowZip64=True) as zfp:
        if entry in zfp.namelist():
          return zfp.read(entry).decode()
    else:
      entry_path = os.path.join(path, entry)
      if os.path.exists(entry_path):
        with open(entry_path, "r") as fp:
          return fp.read()
    return False

  source_entry = ReadEntry(source_file, _ZUCCHINI_CONFIG_ENTRY_NAME)
  target_entry = ReadEntry(target_file, _ZUCCHINI_CONFIG_ENTRY_NAME)
  return source_entry and target_entry and source_entry == target_entry


def ExtractTargetFiles(path: str):
  if os.path.isdir(path):
    logger.info("target files %s is already extracted", path)
    return path
  extracted_dir = common.MakeTempDir("target_files")
  logger.info(f"Extracting target files {path} to {extracted_dir}")
  common.UnzipToDir(path, extracted_dir, UNZIP_PATTERN + [""])
  for subdir in TARGET_FILES_IMAGES_SUBDIR:
    image_dir = os.path.join(extracted_dir, subdir)
    if not os.path.exists(image_dir):
      continue
    for filename in os.listdir(image_dir):
      if not filename.endswith(".img"):
        continue
      common.UnsparseImage(os.path.join(image_dir, filename))

  return extracted_dir


def LocatePartitionPath(target_files_dir: str, partition: str, allow_empty):
  for subdir in TARGET_FILES_IMAGES_SUBDIR:
    path = os.path.join(target_files_dir, subdir, partition + ".img")
    if os.path.exists(path):
      return path
  if allow_empty:
    return ""
  raise common.ExternalError(
      "Partition {} not found in target files {}".format(
          partition, target_files_dir))


def GetPartitionImages(target_files_dir: str, ab_partitions, allow_empty=True):
  assert os.path.isdir(target_files_dir)
  return ":".join([LocatePartitionPath(target_files_dir, partition, allow_empty)
                   for partition in ab_partitions])


def LocatePartitionMap(target_files_dir: str, partition: str):
  for subdir in TARGET_FILES_IMAGES_SUBDIR:
    path = os.path.join(target_files_dir, subdir, partition + ".map")
    if os.path.exists(path):
      return path
  return ""


def GetPartitionMaps(target_files_dir: str, ab_partitions):
  assert os.path.isdir(target_files_dir)
  return ":".join([LocatePartitionMap(target_files_dir, partition)
                   for partition in ab_partitions])
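

# Example (illustrative): for ab_partitions = ["boot", "system"], the helpers
# above return ':'-joined lists in partition order, e.g.
#
#   GetPartitionImages(d, ["boot", "system"])
#   # => "<d>/IMAGES/boot.img:<d>/IMAGES/system.img"
#
# matching the delta_generator convention that --partition_names,
# --new_partitions and --new_mapfiles are parallel colon-separated lists (an
# empty element means no image/map for that partition).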


class PayloadGenerator(object):
  """Manages the creation and the signing of an A/B OTA Payload."""

  PAYLOAD_BIN = payload_signer.PAYLOAD_BIN
  PAYLOAD_PROPERTIES_TXT = payload_signer.PAYLOAD_PROPERTIES_TXT
  SECONDARY_PAYLOAD_BIN = 'secondary/payload.bin'
  SECONDARY_PAYLOAD_PROPERTIES_TXT = 'secondary/payload_properties.txt'

  def __init__(self, secondary=False, wipe_user_data=False, minor_version=None,
               is_partial_update=False, spl_downgrade=False):
    """Initializes a Payload instance.

    Args:
      secondary: Whether it's generating a secondary payload (default: False).
    """
    self.payload_file = None
    self.payload_properties = None
    self.secondary = secondary
    self.wipe_user_data = wipe_user_data
    self.minor_version = minor_version
    self.is_partial_update = is_partial_update
    self.spl_downgrade = spl_downgrade

  def _Run(self, cmd, **kwargs):  # pylint: disable=no-self-use
    # Don't pipe (buffer) the output if verbose is set. Let the invoked tool
    # (delta_generator) write to stdout/stderr directly, so its progress can
    # be monitored.
    if OPTIONS.verbose:
      common.RunAndCheckOutput(cmd, stdout=None, stderr=None, **kwargs)
    else:
      common.RunAndCheckOutput(cmd, **kwargs)

  def Generate(self, target_file, source_file=None, additional_args=None, **kwargs):
    """Generates a payload from the given target-files zip(s).

    Args:
      target_file: The filename of the target build target-files zip.
      source_file: The filename of the source build target-files zip; or None
          if generating a full OTA.
      additional_args: A list of additional args that should be passed to the
          delta_generator binary; or None.
      kwargs: Any additional args to pass to subprocess.Popen
    """
    if additional_args is None:
      additional_args = []

    payload_file = common.MakeTempFile(prefix="payload-", suffix=".bin")
    target_dir = ExtractTargetFiles(target_file)
    cmd = ["delta_generator",
           "--out_file", payload_file]
    with open(os.path.join(target_dir, "META", "ab_partitions.txt"), "r") as fp:
      ab_partitions = fp.read().strip().splitlines()
    cmd.extend(["--partition_names", ":".join(ab_partitions)])
    cmd.extend(
        ["--new_partitions", GetPartitionImages(target_dir, ab_partitions, False)])
    cmd.extend(
        ["--new_mapfiles", GetPartitionMaps(target_dir, ab_partitions)])
    if source_file is not None:
      source_dir = ExtractTargetFiles(source_file)
      cmd.extend(
          ["--old_partitions", GetPartitionImages(source_dir, ab_partitions, True)])
      cmd.extend(
          ["--old_mapfiles", GetPartitionMaps(source_dir, ab_partitions)])

    if OPTIONS.disable_fec_computation:
      cmd.extend(["--disable_fec_computation=true"])
    if OPTIONS.disable_verity_computation:
      cmd.extend(["--disable_verity_computation=true"])
    postinstall_config = os.path.join(
        target_dir, "META", "postinstall_config.txt")

    if os.path.exists(postinstall_config):
      cmd.extend(["--new_postinstall_config_file", postinstall_config])
    dynamic_partition_info = os.path.join(
        target_dir, "META", "dynamic_partitions_info.txt")

    if os.path.exists(dynamic_partition_info):
      cmd.extend(["--dynamic_partition_info_file", dynamic_partition_info])

    apex_info = os.path.join(
        target_dir, "META", "apex_info.pb")
    if os.path.exists(apex_info):
      cmd.extend(["--apex_info_file", apex_info])

    major_version, minor_version = ParseUpdateEngineConfig(
        os.path.join(target_dir, "META", "update_engine_config.txt"))
    if source_file:
      # For incremental OTAs, use the update engine config from the source
      # build.
      major_version, minor_version = ParseUpdateEngineConfig(
          os.path.join(source_dir, "META", "update_engine_config.txt"))
    if self.minor_version:
      minor_version = self.minor_version
    cmd.extend(["--major_version", str(major_version)])
    if source_file is not None or self.is_partial_update:
      cmd.extend(["--minor_version", str(minor_version)])
    if self.is_partial_update:
      cmd.extend(["--is_partial_update=true"])
    cmd.extend(additional_args)
    self._Run(cmd, **kwargs)

    self.payload_file = payload_file
    self.payload_properties = None
"update_engine_config.txt")) 914 if self.minor_version: 915 minor_version = self.minor_version 916 cmd.extend(["--major_version", str(major_version)]) 917 if source_file is not None or self.is_partial_update: 918 cmd.extend(["--minor_version", str(minor_version)]) 919 if self.is_partial_update: 920 cmd.extend(["--is_partial_update=true"]) 921 cmd.extend(additional_args) 922 self._Run(cmd, **kwargs) 923 924 self.payload_file = payload_file 925 self.payload_properties = None 926 927 def Sign(self, payload_signer): 928 """Generates and signs the hashes of the payload and metadata. 929 930 Args: 931 payload_signer: A PayloadSigner() instance that serves the signing work. 932 933 Raises: 934 AssertionError: On any failure when calling brillo_update_payload script. 935 """ 936 assert isinstance(payload_signer, PayloadSigner) 937 938 signed_payload_file = payload_signer.SignPayload(self.payload_file) 939 940 self.payload_file = signed_payload_file 941 942 def WriteToZip(self, output_zip): 943 """Writes the payload to the given zip. 944 945 Args: 946 output_zip: The output ZipFile instance. 947 """ 948 assert self.payload_file is not None 949 # 4. Dump the signed payload properties. 950 properties_file = GeneratePayloadProperties(self.payload_file) 951 952 953 with open(properties_file, "a") as f: 954 if self.wipe_user_data: 955 f.write("POWERWASH=1\n") 956 if self.secondary: 957 f.write("SWITCH_SLOT_ON_REBOOT=0\n") 958 if self.spl_downgrade: 959 f.write("SPL_DOWNGRADE=1\n") 960 961 962 self.payload_properties = properties_file 963 964 if self.secondary: 965 payload_arcname = PayloadGenerator.SECONDARY_PAYLOAD_BIN 966 payload_properties_arcname = PayloadGenerator.SECONDARY_PAYLOAD_PROPERTIES_TXT 967 else: 968 payload_arcname = PayloadGenerator.PAYLOAD_BIN 969 payload_properties_arcname = PayloadGenerator.PAYLOAD_PROPERTIES_TXT 970 971 # Add the signed payload file and properties into the zip. In order to 972 # support streaming, we pack them as ZIP_STORED. So these entries can be 973 # read directly with the offset and length pairs. 974 common.ZipWrite(output_zip, self.payload_file, arcname=payload_arcname, 975 compress_type=zipfile.ZIP_STORED) 976 common.ZipWrite(output_zip, self.payload_properties, 977 arcname=payload_properties_arcname, 978 compress_type=zipfile.ZIP_STORED) 979 980 981class StreamingPropertyFiles(PropertyFiles): 982 """A subclass for computing the property-files for streaming A/B OTAs.""" 983 984 def __init__(self): 985 super(StreamingPropertyFiles, self).__init__() 986 self.name = 'ota-streaming-property-files' 987 self.required = ( 988 # payload.bin and payload_properties.txt must exist. 989 'payload.bin', 990 'payload_properties.txt', 991 ) 992 self.optional = ( 993 # apex_info.pb isn't directly used in the update flow 994 'apex_info.pb', 995 # care_map is available only if dm-verity is enabled. 996 'care_map.pb', 997 'care_map.txt', 998 # compatibility.zip is available only if target supports Treble. 999 'compatibility.zip', 1000 ) 1001 1002 1003class AbOtaPropertyFiles(StreamingPropertyFiles): 1004 """The property-files for A/B OTA that includes payload_metadata.bin info. 1005 1006 Since P, we expose one more token (aka property-file), in addition to the ones 1007 for streaming A/B OTA, for a virtual entry of 'payload_metadata.bin'. 1008 'payload_metadata.bin' is the header part of a payload ('payload.bin'), which 1009 doesn't exist as a separate ZIP entry, but can be used to verify if the 1010 payload can be applied on the given device. 


class StreamingPropertyFiles(PropertyFiles):
  """A subclass for computing the property-files for streaming A/B OTAs."""

  def __init__(self):
    super(StreamingPropertyFiles, self).__init__()
    self.name = 'ota-streaming-property-files'
    self.required = (
        # payload.bin and payload_properties.txt must exist.
        'payload.bin',
        'payload_properties.txt',
    )
    self.optional = (
        # apex_info.pb isn't directly used in the update flow
        'apex_info.pb',
        # care_map is available only if dm-verity is enabled.
        'care_map.pb',
        'care_map.txt',
        # compatibility.zip is available only if target supports Treble.
        'compatibility.zip',
    )


class AbOtaPropertyFiles(StreamingPropertyFiles):
  """The property-files for A/B OTA that includes payload_metadata.bin info.

  Since P, we expose one more token (aka property-file), in addition to the
  ones for streaming A/B OTA, for a virtual entry of 'payload_metadata.bin'.
  'payload_metadata.bin' is the header part of a payload ('payload.bin'), which
  doesn't exist as a separate ZIP entry, but can be used to verify if the
  payload can be applied on the given device.

  For backward compatibility, we keep both of the 'ota-streaming-property-files'
  and the newly added 'ota-property-files' in P. The new token will only be
  available in 'ota-property-files'.
  """

  def __init__(self):
    super(AbOtaPropertyFiles, self).__init__()
    self.name = 'ota-property-files'

  def _GetPrecomputed(self, input_zip):
    offset, size = self._GetPayloadMetadataOffsetAndSize(input_zip)
    return ['payload_metadata.bin:{}:{}'.format(offset, size)]

  @staticmethod
  def _GetPayloadMetadataOffsetAndSize(input_zip):
    """Computes the offset and size of the payload metadata for a given package.

    (From system/update_engine/update_metadata.proto)
    A delta update file contains all the deltas needed to update a system from
    one specific version to another specific version. The update format is
    represented by this struct pseudocode:

      struct delta_update_file {
        char magic[4] = "CrAU";
        uint64 file_format_version;
        uint64 manifest_size;  // Size of protobuf DeltaArchiveManifest

        // Only present if format_version > 1:
        uint32 metadata_signature_size;

        // The Bzip2 compressed DeltaArchiveManifest
        char manifest[manifest_size];

        // The signature of the metadata (from the beginning of the payload up
        // to this location, not including the signature itself). This is a
        // serialized Signatures message.
        char metadata_signature_message[metadata_signature_size];

        // Data blobs for files, no specific format. The specific offset
        // and length of each data blob is recorded in the
        // DeltaArchiveManifest.
        struct {
          char data[];
        } blobs[];

        // These two are not signed:
        uint64 payload_signatures_message_size;
        char payload_signatures_message[];
      };

    'payload_metadata.bin' contains all the bytes from the beginning of the
    payload, till the end of 'metadata_signature_message'.
    """
    payload_info = input_zip.getinfo('payload.bin')
    (payload_offset, payload_size) = GetZipEntryOffset(input_zip, payload_info)

    # Read the underlying raw zipfile at the specified offset.
    payload_fp = input_zip.fp
    payload_fp.seek(payload_offset)
    header_bin = payload_fp.read(24)

    # network byte order (big-endian)
    header = struct.unpack("!IQQL", header_bin)

    # 'CrAU'
    magic = header[0]
    assert magic == 0x43724155, "Invalid magic: {:x}, computed offset {}" \
        .format(magic, payload_offset)

    manifest_size = header[2]
    metadata_signature_size = header[3]
    metadata_total = 24 + manifest_size + metadata_signature_size
    assert metadata_total <= payload_size

    return (payload_offset, metadata_total)
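

# A minimal sketch (illustrative only) of dumping the virtual
# 'payload_metadata.bin' entry using the offset/size computed above
# ("ota.zip" is a placeholder):
#
#   with zipfile.ZipFile("ota.zip", allowZip64=True) as zfp:
#     offset, size = AbOtaPropertyFiles._GetPayloadMetadataOffsetAndSize(zfp)
#     zfp.fp.seek(offset)
#     with open("payload_metadata.bin", "wb") as out:
#       out.write(zfp.fp.read(size))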
1063 """ 1064 payload_info = input_zip.getinfo('payload.bin') 1065 (payload_offset, payload_size) = GetZipEntryOffset(input_zip, payload_info) 1066 1067 # Read the underlying raw zipfile at specified offset 1068 payload_fp = input_zip.fp 1069 payload_fp.seek(payload_offset) 1070 header_bin = payload_fp.read(24) 1071 1072 # network byte order (big-endian) 1073 header = struct.unpack("!IQQL", header_bin) 1074 1075 # 'CrAU' 1076 magic = header[0] 1077 assert magic == 0x43724155, "Invalid magic: {:x}, computed offset {}" \ 1078 .format(magic, payload_offset) 1079 1080 manifest_size = header[2] 1081 metadata_signature_size = header[3] 1082 metadata_total = 24 + manifest_size + metadata_signature_size 1083 assert metadata_total <= payload_size 1084 1085 return (payload_offset, metadata_total) 1086 1087 1088def Fnmatch(filename, pattersn): 1089 return any([fnmatch.fnmatch(filename, pat) for pat in pattersn]) 1090 1091 1092def CopyTargetFilesDir(input_dir): 1093 output_dir = common.MakeTempDir("target_files") 1094 1095 def SymlinkIfNotSparse(src, dst): 1096 if common.IsSparseImage(src): 1097 return common.UnsparseImage(src, dst) 1098 else: 1099 return os.symlink(os.path.realpath(src), dst) 1100 1101 for subdir in TARGET_FILES_IMAGES_SUBDIR: 1102 if not os.path.exists(os.path.join(input_dir, subdir)): 1103 continue 1104 shutil.copytree(os.path.join(input_dir, subdir), os.path.join( 1105 output_dir, subdir), dirs_exist_ok=True, copy_function=SymlinkIfNotSparse) 1106 shutil.copytree(os.path.join(input_dir, "META"), os.path.join( 1107 output_dir, "META"), dirs_exist_ok=True) 1108 1109 for (dirpath, _, filenames) in os.walk(input_dir): 1110 for filename in filenames: 1111 path = os.path.join(dirpath, filename) 1112 relative_path = path.removeprefix(input_dir).removeprefix("/") 1113 if not Fnmatch(relative_path, UNZIP_PATTERN): 1114 continue 1115 target_path = os.path.join( 1116 output_dir, relative_path) 1117 if os.path.exists(target_path): 1118 continue 1119 os.makedirs(os.path.dirname(target_path), exist_ok=True) 1120 shutil.copy(path, target_path) 1121 return output_dir 1122