# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import base64
import collections
import copy
import datetime
import errno
import fnmatch
import getopt
import getpass
import gzip
import imp
import json
import logging
import logging.config
import os
import platform
import re
import shlex
import shutil
import subprocess
import stat
import sys
import tempfile
import threading
import time
import zipfile

from typing import Iterable, Callable
from dataclasses import dataclass
from hashlib import sha1, sha256

import images
import sparse_img
from blockimgdiff import BlockImageDiff

logger = logging.getLogger(__name__)


@dataclass
class OptionHandler:
  extra_long_opts: Iterable[str]
  handler: Callable

class Options(object):

  def __init__(self):
    # Set up the search path, in order to find framework/ and lib64/. At the
    # time of running this function, the user-supplied search path (`--path`)
    # isn't available yet. So the value set here is the default, which might
    # be overridden by a command-line flag later.
    exec_path = os.path.realpath(sys.argv[0])
    if exec_path.endswith('.py'):
      script_name = os.path.basename(exec_path)
      # The logger hasn't been initialized yet at this point. Use print to
      # output warnings.
      print(
          'Warning: releasetools script should be invoked as hermetic Python '
          'executable -- build and run `{}` directly.'.format(
              script_name[:-3]),
          file=sys.stderr)
    self.search_path = os.path.dirname(os.path.dirname(exec_path))

    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
    if not os.path.exists(os.path.join(self.search_path, self.signapk_path)):
      if "ANDROID_HOST_OUT" in os.environ:
        self.search_path = os.environ["ANDROID_HOST_OUT"]
    self.signapk_shared_library_path = "lib64"   # Relative to search_path
    self.extra_signapk_args = []
    self.aapt2_path = "aapt2"
    self.java_path = "java"  # Use the one on the path by default.
    self.java_args = ["-Xmx4096m"]  # The default JVM args.
    self.android_jar_path = None
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"
    self.verbose = False
    self.tempfiles = []
    self.device_specific = None
    self.extras = {}
    self.info_dict = None
    self.source_info_dict = None
    self.target_info_dict = None
    self.worker_threads = None
    # Stash size cannot exceed cache_size * threshold.
    self.cache_size = None
    self.stash_threshold = 0.8
    self.logfile = None


OPTIONS = Options()

# The block size that's used across the releasetools scripts.
BLOCK_SIZE = 4096

# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")

# The partitions allowed to be signed by AVB (Android Verified Boot 2.0). Note
# that system_other is not in the list because we don't want to include its
# descriptor into vbmeta.img. When adding a new entry here,
# AVB_FOOTER_ARGS_BY_PARTITION in sign_target_files_apks needs to be updated
# accordingly.
AVB_PARTITIONS = ('boot', 'init_boot', 'dtbo', 'odm', 'product', 'pvmfw',
                  'recovery', 'system', 'system_ext', 'vendor', 'vendor_boot',
                  'vendor_kernel_boot', 'vendor_dlkm', 'odm_dlkm',
                  'system_dlkm')

# Chained VBMeta partitions.
AVB_VBMETA_PARTITIONS = ('vbmeta_system', 'vbmeta_vendor')

# avbtool argument names.
AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG = '--include_descriptors_from_image'
AVB_ARG_NAME_CHAIN_PARTITION = '--chain_partition'

# Partitions that should have their care_map added to META/care_map.pb.
PARTITIONS_WITH_CARE_MAP = [
    'system',
    'vendor',
    'product',
    'system_ext',
    'odm',
    'vendor_dlkm',
    'odm_dlkm',
    'system_dlkm',
]

# Partitions with a build.prop file.
PARTITIONS_WITH_BUILD_PROP = PARTITIONS_WITH_CARE_MAP + ['boot', 'init_boot']

# See sysprop.mk. If the file is moved, add new search paths here; don't
# remove existing search paths.
RAMDISK_BUILD_PROP_REL_PATHS = ['system/etc/ramdisk/build.prop']


@dataclass
class AvbChainedPartitionArg:
  """The required arguments for avbtool --chain_partition."""
  partition: str
  rollback_index_location: int
  pubkey_path: str

  def to_string(self):
    """Converts to a string command argument."""
    return '{}:{}:{}'.format(
        self.partition, self.rollback_index_location, self.pubkey_path)

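# Illustrative sketch (hypothetical key path): the string form above is what
# avbtool's --chain_partition flag expects, e.g.
#   AvbChainedPartitionArg('vbmeta_system', 1, 'keys/system_avb.pem').to_string()
# evaluates to 'vbmeta_system:1:keys/system_avb.pem'.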

class ErrorCode(object):
  """Defines the error codes for failures that happen during the actual
  update package installation.

  Error codes 0-999 are reserved for failures before the package
  installation (e.g. low battery, package verification failure).
  Detailed codes are in 'bootable/recovery/error_code.h'."""

  SYSTEM_VERIFICATION_FAILURE = 1000
  SYSTEM_UPDATE_FAILURE = 1001
  SYSTEM_UNEXPECTED_CONTENTS = 1002
  SYSTEM_NONZERO_CONTENTS = 1003
  SYSTEM_RECOVER_FAILURE = 1004
  VENDOR_VERIFICATION_FAILURE = 2000
  VENDOR_UPDATE_FAILURE = 2001
  VENDOR_UNEXPECTED_CONTENTS = 2002
  VENDOR_NONZERO_CONTENTS = 2003
  VENDOR_RECOVER_FAILURE = 2004
  OEM_PROP_MISMATCH = 3000
  FINGERPRINT_MISMATCH = 3001
  THUMBPRINT_MISMATCH = 3002
  OLDER_BUILD = 3003
  DEVICE_MISMATCH = 3004
  BAD_PATCH_FILE = 3005
  INSUFFICIENT_CACHE_SPACE = 3006
  TUNE_PARTITION_FAILURE = 3007
  APPLY_PATCH_FAILURE = 3008


class ExternalError(RuntimeError):
  pass


def InitLogging():
  DEFAULT_LOGGING_CONFIG = {
      'version': 1,
      'disable_existing_loggers': False,
      'formatters': {
          'standard': {
              'format':
                  '%(asctime)s - %(filename)s - %(levelname)-8s: %(message)s',
              'datefmt': '%Y-%m-%d %H:%M:%S',
          },
      },
      'handlers': {
          'default': {
              'class': 'logging.StreamHandler',
              'formatter': 'standard',
              'level': 'WARNING',
          },
      },
      'loggers': {
          '': {
              'handlers': ['default'],
              'propagate': True,
              'level': 'NOTSET',
          }
      }
  }
  env_config = os.getenv('LOGGING_CONFIG')
  if env_config:
    with open(env_config) as f:
      config = json.load(f)
  else:
    config = DEFAULT_LOGGING_CONFIG

    # Increase the logging level for verbose mode.
    if OPTIONS.verbose:
      config = copy.deepcopy(config)
      config['handlers']['default']['level'] = 'INFO'

    if OPTIONS.logfile:
      config = copy.deepcopy(config)
      config['handlers']['logfile'] = {
          'class': 'logging.FileHandler',
          'formatter': 'standard',
          'level': 'INFO',
          'mode': 'w',
          'filename': OPTIONS.logfile,
      }
      config['loggers']['']['handlers'].append('logfile')

  logging.config.dictConfig(config)

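# Illustrative sketch (hypothetical file contents): a LOGGING_CONFIG override
# pointed to by the environment variable follows the standard
# logging.config.dictConfig schema, e.g.:
#
#   {
#     "version": 1,
#     "handlers": {"default": {"class": "logging.StreamHandler",
#                              "level": "DEBUG"}},
#     "loggers": {"": {"handlers": ["default"], "level": "NOTSET"}}
#   }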

def FindHostToolPath(tool_name):
  """Finds the path to the host tool.

  Args:
    tool_name: name of the tool to find.
  Returns:
    path to the tool if found in the same directory as this binary; otherwise,
    tool_name unchanged.
  """
  my_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
  tool_path = os.path.join(my_dir, tool_name)
  if os.path.exists(tool_path):
    return tool_path

  return tool_name


def Run(args, verbose=None, **kwargs):
  """Creates and returns a subprocess.Popen object.

  Args:
    args: The command represented as a list of strings.
    verbose: Whether the commands should be shown. Defaults to the global
        verbosity if unspecified.
    kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
        stdin, etc. stdout and stderr will default to subprocess.PIPE and
        subprocess.STDOUT respectively, unless the caller specifies any of them.
        universal_newlines will default to True, as most of the users in
        releasetools expect string output.

  Returns:
    A subprocess.Popen object.
  """
  if 'stdout' not in kwargs and 'stderr' not in kwargs:
    kwargs['stdout'] = subprocess.PIPE
    kwargs['stderr'] = subprocess.STDOUT
  if 'universal_newlines' not in kwargs:
    kwargs['universal_newlines'] = True

  if args:
    # Make a copy of args in case the caller relies on the content of args
    # later.
    args = args[:]
    args[0] = FindHostToolPath(args[0])

  if verbose is None:
    verbose = OPTIONS.verbose

  # Don't log anything if the caller explicitly says so.
  if verbose:
    logger.info("  Running: \"%s\"", " ".join(args))
  return subprocess.Popen(args, **kwargs)


def RunAndCheckOutput(args, verbose=None, **kwargs):
  """Runs the given command and returns the output.

  Args:
    args: The command represented as a list of strings.
    verbose: Whether the commands should be shown. Defaults to the global
        verbosity if unspecified.
    kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
        stdin, etc. stdout and stderr will default to subprocess.PIPE and
        subprocess.STDOUT respectively, unless the caller specifies any of them.

  Returns:
    The output string.

  Raises:
    ExternalError: On non-zero exit from the command.
  """
  if verbose is None:
    verbose = OPTIONS.verbose
  proc = Run(args, verbose=verbose, **kwargs)
  output, _ = proc.communicate()
  if output is None:
    output = ""
  # Don't log anything if the caller explicitly says so.
  if verbose:
    logger.info("%s", output.rstrip())
  if proc.returncode != 0:
    raise ExternalError(
        "Failed to run command '{}' (exit code {}):\n{}".format(
            args, proc.returncode, output))
  return output

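# Illustrative usage sketch (the avbtool invocation is an assumption, not part
# of this module):
#
#   version = RunAndCheckOutput(["avbtool", "version"]).strip()
#
# A non-zero exit raises ExternalError, so callers don't need to check return
# codes themselves.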

def RoundUpTo4K(value):
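  """Rounds value up to the next multiple of 4096. A sketch of its behavior in
  doctest form:

  >>> RoundUpTo4K(0)
  0
  >>> RoundUpTo4K(1)
  4096
  >>> RoundUpTo4K(4097)
  8192
  """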
  rounded_up = value + 4095
  return rounded_up - (rounded_up % 4096)


def CloseInheritedPipes():
  """gmake on macOS leaks file descriptors (pipes). Close those fds before
  doing other work."""
  if platform.system() != "Darwin":
    return
  for d in range(3, 1025):
    try:
      st = os.fstat(d)
      if st is not None:
        # 0x1000 is the FIFO (pipe) type bit in st_mode.
        pipebit = st[0] & 0x1000
        if pipebit != 0:
          os.close(d)
    except OSError:
      pass


class BuildInfo(object):
  """A class that holds the information for a given build.

  This class wraps up the property querying for a given source or target build.
  It abstracts away the logic of handling OEM-specific properties, and caches
  commonly used properties such as the fingerprint.

  There are two types of info dicts: a) the build-time info dict, which is
  generated at build time (i.e. included in a target_files zip); b) the OEM
  info dict, which is specified at package generation time (via the command
  line argument '--oem_settings'). If a build doesn't use OEM-specific
  properties (i.e. it doesn't have "oem_fingerprint_properties" in the
  build-time info dict), all queries are answered from the build-time info
  dict only. Otherwise, when using OEM-specific properties, some of the values
  are calculated from the two info dicts.

  Users can query properties as with a dict() (e.g. info['fstab']), or query
  build properties via GetBuildProp() or GetPartitionBuildProp().

  Attributes:
    info_dict: The build-time info dict.
    is_ab: Whether it's a build that uses A/B OTA.
    oem_dicts: A list of OEM dicts.
    oem_props: A list of OEM properties that should be read from OEM dicts;
        None if the build doesn't use any OEM-specific property.
    fingerprint: The fingerprint of the build, which would be calculated based
        on OEM properties if applicable.
    device: The device name, which could come from OEM dicts if applicable.
  """

  _RO_PRODUCT_RESOLVE_PROPS = ["ro.product.brand", "ro.product.device",
                               "ro.product.manufacturer", "ro.product.model",
                               "ro.product.name"]
  _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_CURRENT = [
      "product", "odm", "vendor", "system_ext", "system"]
  _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_ANDROID_10 = [
      "product", "product_services", "odm", "vendor", "system"]
  _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_LEGACY = []

  # The length of the vbmeta digest to append to the fingerprint.
  _VBMETA_DIGEST_SIZE_USED = 8

  def __init__(self, info_dict, oem_dicts=None, use_legacy_id=False):
    """Initializes a BuildInfo instance with the given dicts.

    Note that it only wraps up the given dicts, without making copies.

    Arguments:
      info_dict: The build-time info dict.
      oem_dicts: A list of OEM dicts (which is parsed from --oem_settings). Note
          that it always uses the first dict to calculate the fingerprint or the
          device name. The rest would be used for asserting OEM properties only
          (e.g. one package can be installed on one of these devices).
      use_legacy_id: Use the legacy build id to construct the fingerprint. This
          is used when we need a BuildInfo class, while the vbmeta digest is
          unavailable.

    Raises:
      ValueError: On invalid inputs.
    """
    self.info_dict = info_dict
    self.oem_dicts = oem_dicts

    self._is_ab = info_dict.get("ab_update") == "true"
    self.use_legacy_id = use_legacy_id

    # Skip _oem_props if oem_dicts is None, so that BuildInfo is usable in
    # sign_target_files_apks.
    if self.oem_dicts:
      self._oem_props = info_dict.get("oem_fingerprint_properties")
    else:
      self._oem_props = None

    def check_fingerprint(fingerprint):
      if (" " in fingerprint or any(ord(ch) > 127 for ch in fingerprint)):
        raise ValueError(
            'Invalid build fingerprint: "{}". See the requirement in Android CDD '
            "3.2.2. Build Parameters.".format(fingerprint))

    self._partition_fingerprints = {}
    for partition in PARTITIONS_WITH_BUILD_PROP:
      try:
        fingerprint = self.CalculatePartitionFingerprint(partition)
        check_fingerprint(fingerprint)
        self._partition_fingerprints[partition] = fingerprint
      except ExternalError:
        continue
    if "system" in self._partition_fingerprints:
      # system_other is not included in PARTITIONS_WITH_BUILD_PROP, but it does
      # need a fingerprint when creating the image.
      self._partition_fingerprints[
          "system_other"] = self._partition_fingerprints["system"]

    # These two should be computed only after setting self._oem_props.
    self._device = self.GetOemProperty("ro.product.device")
    self._fingerprint = self.CalculateFingerprint()
    check_fingerprint(self._fingerprint)

  @property
  def is_ab(self):
    return self._is_ab

  @property
  def device(self):
    return self._device

  @property
  def fingerprint(self):
    return self._fingerprint

  @property
  def is_vabc(self):
    return self.info_dict.get("virtual_ab_compression") == "true"

  @property
  def is_android_r(self):
    system_prop = self.info_dict.get("system.build.prop")
    return system_prop and system_prop.GetProp("ro.build.version.release") == "11"

  @property
  def is_release_key(self):
    system_prop = self.info_dict.get("build.prop")
    return system_prop and system_prop.GetProp("ro.build.tags") == "release-key"

  @property
  def vabc_compression_param(self):
    return self.get("virtual_ab_compression_method", "")

  @property
  def vabc_cow_version(self):
    return self.get("virtual_ab_cow_version", "")

  @property
  def vendor_api_level(self):
    vendor_prop = self.info_dict.get("vendor.build.prop")
    if not vendor_prop:
      return -1

    props = [
        "ro.board.first_api_level",
        "ro.product.first_api_level",
    ]
    for prop in props:
      value = vendor_prop.GetProp(prop)
      try:
        return int(value)
      except (TypeError, ValueError):
        pass
    return -1

  @property
  def is_vabc_xor(self):
    vendor_prop = self.info_dict.get("vendor.build.prop")
    vabc_xor_enabled = vendor_prop and \
        vendor_prop.GetProp("ro.virtual_ab.compression.xor.enabled") == "true"
    return vabc_xor_enabled

  @property
  def vendor_suppressed_vabc(self):
    vendor_prop = self.info_dict.get("vendor.build.prop")
    vabc_suppressed = vendor_prop and \
        vendor_prop.GetProp("ro.vendor.build.dont_use_vabc")
    return vabc_suppressed and vabc_suppressed.lower() == "true"

  @property
  def oem_props(self):
    return self._oem_props

  def __getitem__(self, key):
    return self.info_dict[key]

  def __setitem__(self, key, value):
    self.info_dict[key] = value

  def get(self, key, default=None):
    return self.info_dict.get(key, default)

  def items(self):
    return self.info_dict.items()

  def _GetRawBuildProp(self, prop, partition):
    prop_file = '{}.build.prop'.format(
        partition) if partition else 'build.prop'
    partition_props = self.info_dict.get(prop_file)
    if not partition_props:
      return None
    return partition_props.GetProp(prop)

  def GetPartitionBuildProp(self, prop, partition):
    """Returns the inquired build property for the provided partition."""

    # The boot and init_boot images use ro.[product.]bootimage instead of
    # boot. This comes from the generic ramdisk.
    prop_partition = "bootimage" if partition == "boot" or partition == "init_boot" else partition

    # If provided a partition for this property, only look within that
    # partition's build.prop.
    if prop in BuildInfo._RO_PRODUCT_RESOLVE_PROPS:
      prop = prop.replace("ro.product", "ro.product.{}".format(prop_partition))
    else:
      prop = prop.replace("ro.", "ro.{}.".format(prop_partition))

    prop_val = self._GetRawBuildProp(prop, partition)
    if prop_val is not None:
      return prop_val
    raise ExternalError("couldn't find %s in %s.build.prop" %
                        (prop, partition))
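
  # Illustrative examples of the renaming above (property/partition pairs are
  # hypothetical but follow the patterns in this method):
  #   GetPartitionBuildProp("ro.product.model", "vendor") looks up
  #   "ro.product.vendor.model", while
  #   GetPartitionBuildProp("ro.build.fingerprint", "odm") looks up
  #   "ro.odm.build.fingerprint".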

  def GetBuildProp(self, prop):
    """Returns the inquired build property from the standard build.prop file."""
    if prop in BuildInfo._RO_PRODUCT_RESOLVE_PROPS:
      return self._ResolveRoProductBuildProp(prop)

    if prop == "ro.build.id":
      return self._GetBuildId()

    prop_val = self._GetRawBuildProp(prop, None)
    if prop_val is not None:
      return prop_val

    raise ExternalError("couldn't find %s in build.prop" % (prop,))

  def _ResolveRoProductBuildProp(self, prop):
    """Resolves the inquired ro.product.* build property."""
    prop_val = self._GetRawBuildProp(prop, None)
    if prop_val:
      return prop_val

    default_source_order = self._GetRoProductPropsDefaultSourceOrder()
    source_order_val = self._GetRawBuildProp(
        "ro.product.property_source_order", None)
    if source_order_val:
      source_order = source_order_val.split(",")
    else:
      source_order = default_source_order

    # Check that all sources in ro.product.property_source_order are valid.
    if any(x not in default_source_order for x in source_order):
      raise ExternalError(
          "Invalid ro.product.property_source_order '{}'".format(source_order))

    for source_partition in source_order:
      source_prop = prop.replace(
          "ro.product", "ro.product.{}".format(source_partition), 1)
      prop_val = self._GetRawBuildProp(source_prop, source_partition)
      if prop_val:
        return prop_val

    raise ExternalError("couldn't resolve {}".format(prop))

  def _GetRoProductPropsDefaultSourceOrder(self):
    # NOTE: refer to CDDs and android.os.Build.VERSION for the definition and
    # values of these properties for each Android release.
    android_codename = self._GetRawBuildProp("ro.build.version.codename", None)
    if android_codename == "REL":
      android_version = self._GetRawBuildProp("ro.build.version.release", None)
      if android_version == "10":
        return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_ANDROID_10
      # NOTE: float() conversion of android_version will have rounding error.
      # We are checking for "9" or less, and using "< 10" is well outside of
      # possible floating point rounding.
      try:
        android_version_val = float(android_version)
      except ValueError:
        android_version_val = 0
      if android_version_val < 10:
        return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_LEGACY
    return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_CURRENT

  def _GetPlatformVersion(self):
    version_sdk = self.GetBuildProp("ro.build.version.sdk")
    # init code switches to version_release_or_codename (see b/158483506). After
    # API finalization, release_or_codename will be the same as release. This
    # is the best effort to support pre-S dev stage builds.
    if int(version_sdk) >= 30:
      try:
        return self.GetBuildProp("ro.build.version.release_or_codename")
      except ExternalError:
        logger.warning('Failed to find ro.build.version.release_or_codename')

    return self.GetBuildProp("ro.build.version.release")

  def _GetBuildId(self):
    build_id = self._GetRawBuildProp("ro.build.id", None)
    if build_id:
      return build_id

    legacy_build_id = self.GetBuildProp("ro.build.legacy.id")
    if not legacy_build_id:
      raise ExternalError("Couldn't find build id in property file")

    if self.use_legacy_id:
      return legacy_build_id

    # Append the top 8 chars of the vbmeta digest to the existing build id. The
    # logic needs to match the one in init, so that OTAs can be delivered
    # correctly.
    avb_enable = self.info_dict.get("avb_enable") == "true"
    if not avb_enable:
      raise ExternalError("AVB isn't enabled when using legacy build id")

    vbmeta_digest = self.info_dict.get("vbmeta_digest")
    if not vbmeta_digest:
      raise ExternalError("Vbmeta digest isn't provided when using legacy build"
                          " id")
    if len(vbmeta_digest) < self._VBMETA_DIGEST_SIZE_USED:
      raise ExternalError("Invalid vbmeta digest " + vbmeta_digest)

    digest_prefix = vbmeta_digest[:self._VBMETA_DIGEST_SIZE_USED]
    return legacy_build_id + '.' + digest_prefix

  def _GetPartitionPlatformVersion(self, partition):
    try:
      return self.GetPartitionBuildProp("ro.build.version.release_or_codename",
                                        partition)
    except ExternalError:
      return self.GetPartitionBuildProp("ro.build.version.release",
                                        partition)

  def GetOemProperty(self, key):
    if self.oem_props is not None and key in self.oem_props:
      return self.oem_dicts[0][key]
    return self.GetBuildProp(key)

  def GetPartitionFingerprint(self, partition):
    return self._partition_fingerprints.get(partition, None)

  def CalculatePartitionFingerprint(self, partition):
    try:
      return self.GetPartitionBuildProp("ro.build.fingerprint", partition)
    except ExternalError:
      return "{}/{}/{}:{}/{}/{}:{}/{}".format(
          self.GetPartitionBuildProp("ro.product.brand", partition),
          self.GetPartitionBuildProp("ro.product.name", partition),
          self.GetPartitionBuildProp("ro.product.device", partition),
          self._GetPartitionPlatformVersion(partition),
          self.GetPartitionBuildProp("ro.build.id", partition),
          self.GetPartitionBuildProp(
              "ro.build.version.incremental", partition),
          self.GetPartitionBuildProp("ro.build.type", partition),
          self.GetPartitionBuildProp("ro.build.tags", partition))

  def CalculateFingerprint(self):
    if self.oem_props is None:
      try:
        return self.GetBuildProp("ro.build.fingerprint")
      except ExternalError:
        return "{}/{}/{}:{}/{}/{}:{}/{}".format(
            self.GetBuildProp("ro.product.brand"),
            self.GetBuildProp("ro.product.name"),
            self.GetBuildProp("ro.product.device"),
            self._GetPlatformVersion(),
            self.GetBuildProp("ro.build.id"),
            self.GetBuildProp("ro.build.version.incremental"),
            self.GetBuildProp("ro.build.type"),
            self.GetBuildProp("ro.build.tags"))
    return "%s/%s/%s:%s" % (
        self.GetOemProperty("ro.product.brand"),
        self.GetOemProperty("ro.product.name"),
        self.GetOemProperty("ro.product.device"),
        self.GetBuildProp("ro.build.thumbprint"))

  def WriteMountOemScript(self, script):
    assert self.oem_props is not None
    recovery_mount_options = self.info_dict.get("recovery_mount_options")
    script.Mount("/oem", recovery_mount_options)

  def WriteDeviceAssertions(self, script, oem_no_mount):
    # Read the property directly if not using OEM properties.
    if not self.oem_props:
      script.AssertDevice(self.device)
      return

    # Otherwise assert OEM properties.
    if not self.oem_dicts:
      raise ExternalError(
          "No OEM file provided to answer expected assertions")

    for prop in self.oem_props.split():
      values = []
      for oem_dict in self.oem_dicts:
        if prop in oem_dict:
          values.append(oem_dict[prop])
      if not values:
        raise ExternalError(
            "The OEM file is missing the property %s" % (prop,))
      script.AssertOemProperty(prop, values, oem_no_mount)

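# Illustrative sketch (all values hypothetical) of the fingerprint format
# produced by CalculateFingerprint() above:
#   google/raven/raven:14/AP1A.240101.001/11223344:user/release-keys
# i.e. brand/name/device:version/build-id/incremental:type/tags.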

def DoesInputFileContain(input_file, fn):
  """Checks whether the input target_files.zip contains an entry `fn`."""
  if isinstance(input_file, zipfile.ZipFile):
    return fn in input_file.namelist()
  elif zipfile.is_zipfile(input_file):
    with zipfile.ZipFile(input_file, "r", allowZip64=True) as zfp:
      return fn in zfp.namelist()
  else:
    if not os.path.isdir(input_file):
      raise ValueError(
          "Invalid input_file; accepted inputs are a ZipFile object, a path "
          "to a .zip file on disk, or a path to an extracted directory. "
          "Actual: " + input_file)
    path = os.path.join(input_file, *fn.split("/"))
    return os.path.exists(path)

def ReadBytesFromInputFile(input_file, fn):
  """Reads the bytes of fn from the input zipfile or directory."""
  if isinstance(input_file, zipfile.ZipFile):
    return input_file.read(fn)
  elif zipfile.is_zipfile(input_file):
    with zipfile.ZipFile(input_file, "r", allowZip64=True) as zfp:
      return zfp.read(fn)
  else:
    if not os.path.isdir(input_file):
      raise ValueError(
          "Invalid input_file; accepted inputs are a ZipFile object, a path "
          "to a .zip file on disk, or a path to an extracted directory. "
          "Actual: " + input_file)
    path = os.path.join(input_file, *fn.split("/"))
    try:
      with open(path, "rb") as f:
        return f.read()
    except IOError as e:
      if e.errno == errno.ENOENT:
        raise KeyError(fn)
      raise


def ReadFromInputFile(input_file, fn):
  """Reads the str contents of fn from the input zipfile or directory."""
  return ReadBytesFromInputFile(input_file, fn).decode()


def WriteBytesToInputFile(input_file, fn, data):
  """Writes the bytes |data| to fn of the input zipfile or directory."""
  if isinstance(input_file, zipfile.ZipFile):
    with input_file.open(fn, "w") as entry_fp:
      return entry_fp.write(data)
  elif zipfile.is_zipfile(input_file):
    # Writing a new entry requires the archive to be opened for append.
    with zipfile.ZipFile(input_file, "a", allowZip64=True) as zfp:
      with zfp.open(fn, "w") as entry_fp:
        return entry_fp.write(data)
  else:
    if not os.path.isdir(input_file):
      raise ValueError(
          "Invalid input_file; accepted inputs are a ZipFile object, a path "
          "to a .zip file on disk, or a path to an extracted directory. "
          "Actual: " + input_file)
    path = os.path.join(input_file, *fn.split("/"))
    try:
      with open(path, "wb") as f:
        return f.write(data)
    except IOError as e:
      if e.errno == errno.ENOENT:
        raise KeyError(fn)
      raise


def WriteToInputFile(input_file, fn, data: str):
  """Writes the str content to fn of the input file or directory."""
  return WriteBytesToInputFile(input_file, fn, data.encode())


def ExtractFromInputFile(input_file, fn):
  """Extracts the contents of fn from the input zipfile or directory into a file."""
  if isinstance(input_file, zipfile.ZipFile):
    tmp_file = MakeTempFile(os.path.basename(fn))
    with open(tmp_file, 'wb') as f:
      f.write(input_file.read(fn))
    return tmp_file
  elif zipfile.is_zipfile(input_file):
    with zipfile.ZipFile(input_file, "r", allowZip64=True) as zfp:
      tmp_file = MakeTempFile(os.path.basename(fn))
      with open(tmp_file, "wb") as fp:
        fp.write(zfp.read(fn))
      return tmp_file
  else:
    if not os.path.isdir(input_file):
      raise ValueError(
          "Invalid input_file; accepted inputs are a ZipFile object, a path "
          "to a .zip file on disk, or a path to an extracted directory. "
          "Actual: " + input_file)
    path = os.path.join(input_file, *fn.split("/"))
    if not os.path.exists(path):
      raise KeyError(fn)
    return path


class RamdiskFormat(object):
  LZ4 = 1
  GZ = 2


def GetRamdiskFormat(info_dict):
  if info_dict.get('lz4_ramdisks') == 'true':
    ramdisk_format = RamdiskFormat.LZ4
  else:
    ramdisk_format = RamdiskFormat.GZ
  return ramdisk_format


def LoadInfoDict(input_file, repacking=False):
  """Loads the key/value pairs from the given input target_files.

  It reads the `META/misc_info.txt` file in the target_files input, does
  validation checks and returns the parsed key/value pairs for the given build.
  It's usually called early when working on input target_files files, e.g. when
  generating OTAs, or signing builds. Note that the function may be called
  against an old target_files file (e.g. from past dessert releases). So the
  property parsing needs to be backward compatible.

  In a `META/misc_info.txt`, a few properties are stored as links to the files
  in the PRODUCT_OUT directory. That works fine with the build system. However,
  they are no longer available when (re)generating images from a target_files
  zip. When `repacking` is True, redirect these properties to the actual files
  in the unzipped directory.

  Args:
    input_file: The input target_files file, which could be an open
        zipfile.ZipFile instance, or a str for the dir that contains the files
        unzipped from a target_files file.
    repacking: Whether it's trying to repack a target_files file after loading
        the info dict (default: False). If so, it will rewrite a few loaded
        properties (e.g. selinux_fc, root_dir) to point to the actual files in
        the target_files file. When doing repacking, `input_file` must be a
        dir.

  Returns:
    A dict that contains the parsed key/value pairs.

  Raises:
    AssertionError: On invalid input arguments.
    ValueError: On malformed input values.
  """
  if repacking:
    assert isinstance(input_file, str), \
        "input_file must be a path str when doing repacking"

  def read_helper(fn):
    return ReadFromInputFile(input_file, fn)

  try:
    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  except KeyError:
    raise ValueError("Failed to find META/misc_info.txt in input target-files")

  if "recovery_api_version" not in d:
    raise ValueError("Failed to find 'recovery_api_version'")
  if "fstab_version" not in d:
    raise ValueError("Failed to find 'fstab_version'")

  if repacking:
    # "selinux_fc" properties should point to the file_contexts files
    # (file_contexts.bin) under META/.
    for key in d:
      if key.endswith("selinux_fc"):
        fc_basename = os.path.basename(d[key])
        fc_config = os.path.join(input_file, "META", fc_basename)
        assert os.path.exists(fc_config), "{} does not exist".format(fc_config)

        d[key] = fc_config

    # Similarly we need to redirect "root_dir" and "root_fs_config".
    d["root_dir"] = os.path.join(input_file, "ROOT")
    d["root_fs_config"] = os.path.join(
        input_file, "META", "root_filesystem_config.txt")

    partitions = ["system", "vendor", "system_ext", "product", "odm",
                  "vendor_dlkm", "odm_dlkm", "system_dlkm"]
    # Redirect {partition}_base_fs_file for each of the named partitions.
    for part_name in partitions:
      key_name = part_name + "_base_fs_file"
      if key_name not in d:
        continue
      basename = os.path.basename(d[key_name])
      base_fs_file = os.path.join(input_file, "META", basename)
      if os.path.exists(base_fs_file):
        d[key_name] = base_fs_file
      else:
        logger.warning(
            "Failed to find %s base fs file: %s", part_name, base_fs_file)
        del d[key_name]

    # Redirecting helper for optional properties like erofs_compress_hints.
    def redirect_file(prop, filename):
      if prop not in d:
        return
      config_file = os.path.join(input_file, "META/" + filename)
      if os.path.exists(config_file):
        d[prop] = config_file
      else:
        logger.warning(
            "Failed to find %s for %s", filename, prop)
        del d[prop]

    # Redirect erofs_[default_]compress_hints files.
    redirect_file("erofs_default_compress_hints",
                  "erofs_default_compress_hints.txt")
    for part in partitions:
      redirect_file(part + "_erofs_compress_hints",
                    part + "_erofs_compress_hints.txt")

  def makeint(key):
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("fstab_version")

  boot_images = "boot.img"
  if "boot_images" in d:
    boot_images = d["boot_images"]
  for b in boot_images.split():
    makeint(b.replace(".img", "_size"))

  # Load recovery fstab if applicable.
  d["fstab"] = _FindAndLoadRecoveryFstab(d, input_file, read_helper)
  ramdisk_format = GetRamdiskFormat(d)

  # Tries to load the build props for all partitions that have one, including
  # system and vendor.
  for partition in PARTITIONS_WITH_BUILD_PROP:
    partition_prop = "{}.build.prop".format(partition)
    d[partition_prop] = PartitionBuildProps.FromInputFile(
        input_file, partition, ramdisk_format=ramdisk_format)
  d["build.prop"] = d["system.build.prop"]

  if d.get("avb_enable") == "true":
    build_info = BuildInfo(d, use_legacy_id=True)
    # Set up the salt for partitions without a build.prop.
    if build_info.fingerprint:
      if "fingerprint" not in d:
        d["fingerprint"] = build_info.fingerprint
      if "avb_salt" not in d:
        d["avb_salt"] = sha256(build_info.fingerprint.encode()).hexdigest()
    # Set the vbmeta digest if it exists.
    try:
      d["vbmeta_digest"] = read_helper("META/vbmeta_digest.txt").rstrip()
    except KeyError:
      pass

  try:
    d["ab_partitions"] = read_helper("META/ab_partitions.txt").split("\n")
  except KeyError:
    logger.warning("Can't find META/ab_partitions.txt")
  return d

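# Illustrative usage sketch (the extracted path is hypothetical):
#
#   OPTIONS.info_dict = LoadInfoDict("/tmp/target_files_extracted",
#                                    repacking=True)
#   fstab = OPTIONS.info_dict["fstab"]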

def LoadListFromFile(file_path):
  with open(file_path) as f:
    return f.read().splitlines()


def LoadDictionaryFromFile(file_path):
  lines = LoadListFromFile(file_path)
  return LoadDictionaryFromLines(lines)


def LoadDictionaryFromLines(lines):
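  """Parses 'name=value' lines into a dict, skipping blanks and '#' comments.

  A doctest-style sketch of its behavior:

  >>> LoadDictionaryFromLines(['# comment', '', 'a=1', 'b=c=d'])
  {'a': '1', 'b': 'c=d'}
  """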
  d = {}
  for line in lines:
    line = line.strip()
    if not line or line.startswith("#"):
      continue
    if "=" in line:
      name, value = line.split("=", 1)
      d[name] = value
  return d


class PartitionBuildProps(object):
  """The class holds the build prop of a particular partition.

  This class loads the build.prop and holds the build properties for a given
  partition. It also partially recognizes the 'import' statement in the
  build.prop; and calculates alternative values of some specific build
  properties during runtime.

  Attributes:
    input_file: a zipped target-file or an unzipped target-file directory.
    partition: name of the partition.
    props_allow_override: a list of build properties to search for the
        alternative values during runtime.
    build_props: a dict of build properties for the given partition.
    prop_overrides: a set of props that are overridden by import.
    placeholder_values: A dict of runtime variables' values to replace the
        placeholders in the build.prop file. We expect exactly one value for
        each of the variables.
    ramdisk_format: If the partition name is "boot", the format of the ramdisk
        inside the boot image; otherwise, its value is ignored. Defaults to
        lz4 for decompression; if its value is gzip, gzip is used.
  """

  def __init__(self, input_file, name, placeholder_values=None):
    self.input_file = input_file
    self.partition = name
    self.props_allow_override = [props.format(name) for props in [
        'ro.product.{}.brand', 'ro.product.{}.name', 'ro.product.{}.device']]
    self.build_props = {}
    self.prop_overrides = set()
    self.placeholder_values = {}
    if placeholder_values:
      self.placeholder_values = copy.deepcopy(placeholder_values)

  @staticmethod
  def FromDictionary(name, build_props):
    """Constructs an instance from a build prop dictionary."""

    props = PartitionBuildProps("unknown", name)
    props.build_props = build_props.copy()
    return props

  @staticmethod
  def FromInputFile(input_file, name, placeholder_values=None, ramdisk_format=RamdiskFormat.LZ4):
    """Loads the build.prop file and builds the attributes."""

    if name in ("boot", "init_boot"):
      data = PartitionBuildProps._ReadBootPropFile(
          input_file, name, ramdisk_format=ramdisk_format)
    else:
      data = PartitionBuildProps._ReadPartitionPropFile(input_file, name)

    props = PartitionBuildProps(input_file, name, placeholder_values)
    props._LoadBuildProp(data)
    return props

  @staticmethod
  def _ReadBootPropFile(input_file, partition_name, ramdisk_format):
    """Reads build.prop for the boot image from input_file.

    Returns an empty string if not found.
    """
    image_path = 'IMAGES/' + partition_name + '.img'
    try:
      boot_img = ExtractFromInputFile(input_file, image_path)
    except KeyError:
      logger.warning('Failed to read %s', image_path)
      return ''
    prop_file = GetBootImageBuildProp(boot_img, ramdisk_format=ramdisk_format)
    if prop_file is None:
      return ''
    with open(prop_file, "r") as f:
      return f.read()

  @staticmethod
  def _ReadPartitionPropFile(input_file, name):
    """Reads build.prop for the partition `name` from input_file.

    Returns an empty string if not found.
    """
    data = ''
    for prop_file in ['{}/etc/build.prop'.format(name.upper()),
                      '{}/build.prop'.format(name.upper())]:
      try:
        data = ReadFromInputFile(input_file, prop_file)
        break
      except KeyError:
        logger.warning('Failed to read %s', prop_file)
    if data == '':
      logger.warning("Failed to read build.prop for partition %s", name)
    return data

  @staticmethod
  def FromBuildPropFile(name, build_prop_file):
    """Constructs an instance from a build prop file."""

    props = PartitionBuildProps("unknown", name)
    with open(build_prop_file) as f:
      props._LoadBuildProp(f.read())
    return props

  def _LoadBuildProp(self, data):
    for line in data.split('\n'):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      if line.startswith("import"):
        overrides = self._ImportParser(line)
        duplicates = self.prop_overrides.intersection(overrides.keys())
        if duplicates:
          raise ValueError('prop {} is overridden multiple times'.format(
              ','.join(duplicates)))
        self.prop_overrides = self.prop_overrides.union(overrides.keys())
        self.build_props.update(overrides)
      elif "=" in line:
        name, value = line.split("=", 1)
        if name in self.prop_overrides:
          raise ValueError('prop {} is set again after overridden by import '
                           'statement'.format(name))
        self.build_props[name] = value

  def _ImportParser(self, line):
    """Parses the build prop in a given import statement."""

    tokens = line.split()
    if tokens[0] != 'import' or (len(tokens) != 2 and len(tokens) != 3):
      raise ValueError('Unrecognized import statement {}'.format(line))

    if len(tokens) == 3:
      logger.info("Import %s from %s, skip", tokens[2], tokens[1])
      return {}

    import_path = tokens[1]
    if not re.match(r'^/{}/.*\.prop$'.format(self.partition), import_path):
      logger.warning('Unrecognized import path {}'.format(line))
      return {}

    # We only recognize a subset of the import statements that the init process
    # supports, and we can loosen the restriction based on how the dynamic
    # fingerprint is used in practice. The placeholder format should be
    # ${placeholder}, and its value should be provided by the caller through
    # placeholder_values.
    for prop, value in self.placeholder_values.items():
      prop_place_holder = '${{{}}}'.format(prop)
      if prop_place_holder in import_path:
        import_path = import_path.replace(prop_place_holder, value)
    if '$' in import_path:
      logger.info('Unresolved placeholder in import path %s', import_path)
      return {}

    import_path = import_path.replace('/{}'.format(self.partition),
                                      self.partition.upper())
    logger.info('Parsing build props override from %s', import_path)

    lines = ReadFromInputFile(self.input_file, import_path).split('\n')
    d = LoadDictionaryFromLines(lines)
    return {key: val for key, val in d.items()
            if key in self.props_allow_override}
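
  # Illustrative sketch (hypothetical paths and values): with
  # placeholder_values = {'ro.boot.product.device_name': 'redfin'}, the line
  #   import /vendor/etc/build_${ro.boot.product.device_name}.prop
  # resolves to VENDOR/etc/build_redfin.prop inside the target_files.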

  def __getstate__(self):
    state = self.__dict__.copy()
    # ZipFile objects aren't picklable; store the filename instead.
    if "input_file" in state and isinstance(state["input_file"], zipfile.ZipFile):
      state["input_file"] = state["input_file"].filename
    return state

  def GetProp(self, prop):
    return self.build_props.get(prop)


def LoadRecoveryFSTab(read_helper, fstab_version, recovery_fstab_path):
  class Partition(object):
    def __init__(self, mount_point, fs_type, device, length, context, slotselect):
      self.mount_point = mount_point
      self.fs_type = fs_type
      self.device = device
      self.length = length
      self.context = context
      self.slotselect = slotselect

  try:
    data = read_helper(recovery_fstab_path)
  except KeyError:
    logger.warning("Failed to find %s", recovery_fstab_path)
    data = ""

  assert fstab_version == 2

  d = {}
  for line in data.split("\n"):
    line = line.strip()
    if not line or line.startswith("#"):
      continue

    # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
    pieces = line.split()
    if len(pieces) != 5:
      raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))

    # Ignore entries that are managed by vold.
    options = pieces[4]
    if "voldmanaged=" in options:
      continue

    # It's a good line, parse it.
    length = 0
    slotselect = False
    options = options.split(",")
    for i in options:
      if i.startswith("length="):
        length = int(i[7:])
      elif i == "slotselect":
        slotselect = True
      else:
        # Ignore all unknown options in the unified fstab.
        continue

    mount_flags = pieces[3]
    # Honor the SELinux context if present.
    context = None
    for i in mount_flags.split(","):
      if i.startswith("context="):
        context = i

    mount_point = pieces[1]
    d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
                               device=pieces[0], length=length, context=context,
                               slotselect=slotselect)

  return d


def _FindAndLoadRecoveryFstab(info_dict, input_file, read_helper):
  """Finds the path to recovery fstab and loads its contents."""
  # The recovery fstab is only meaningful when installing an update via
  # recovery (i.e. a non-A/B OTA). Skip loading the fstab if the device uses
  # A/B OTA.
  if info_dict.get('ab_update') == 'true' and \
     info_dict.get("allow_non_ab") != "true":
    return None

  # We changed the recovery.fstab path in Q, from ../RAMDISK/etc/recovery.fstab
  # to ../RAMDISK/system/etc/recovery.fstab. This function has to handle both
  # cases, since it may load the info_dict from an old build (e.g. when
  # generating incremental OTAs from that build).
  if info_dict.get('no_recovery') != 'true':
    recovery_fstab_path = 'RECOVERY/RAMDISK/system/etc/recovery.fstab'
    if not DoesInputFileContain(input_file, recovery_fstab_path):
      recovery_fstab_path = 'RECOVERY/RAMDISK/etc/recovery.fstab'
    return LoadRecoveryFSTab(
        read_helper, info_dict['fstab_version'], recovery_fstab_path)

  if info_dict.get('recovery_as_boot') == 'true':
    recovery_fstab_path = 'BOOT/RAMDISK/system/etc/recovery.fstab'
    if not DoesInputFileContain(input_file, recovery_fstab_path):
      recovery_fstab_path = 'BOOT/RAMDISK/etc/recovery.fstab'
    return LoadRecoveryFSTab(
        read_helper, info_dict['fstab_version'], recovery_fstab_path)

  return None


def DumpInfoDict(d):
  for k, v in sorted(d.items()):
    logger.info("%-25s = (%s) %s", k, type(v).__name__, v)


def MergeDynamicPartitionInfoDicts(framework_dict, vendor_dict):
  """Merges dynamic partition info variables.

  Args:
    framework_dict: The dictionary of dynamic partition info variables from the
      partial framework target files.
    vendor_dict: The dictionary of dynamic partition info variables from the
      partial vendor target files.

  Returns:
    The merged dynamic partition info dictionary.
  """

  def uniq_concat(a, b):
    combined = set(a.split())
    combined.update(set(b.split()))
    combined = [item.strip() for item in combined if item.strip()]
    return " ".join(sorted(combined))

  if (framework_dict.get("use_dynamic_partitions") !=
          "true") or (vendor_dict.get("use_dynamic_partitions") != "true"):
    raise ValueError("Both dictionaries must have use_dynamic_partitions=true")

  merged_dict = {"use_dynamic_partitions": "true"}
  # For key-value pairs that are the same, copy to the merged dict.
  for key in vendor_dict.keys():
    if key in framework_dict and framework_dict[key] == vendor_dict[key]:
      merged_dict[key] = vendor_dict[key]

  merged_dict["dynamic_partition_list"] = uniq_concat(
      framework_dict.get("dynamic_partition_list", ""),
      vendor_dict.get("dynamic_partition_list", ""))

  # Super block devices are defined by the vendor dict.
  if "super_block_devices" in vendor_dict:
    merged_dict["super_block_devices"] = vendor_dict["super_block_devices"]
    for block_device in merged_dict["super_block_devices"].split():
      key = "super_%s_device_size" % block_device
      if key not in vendor_dict:
        raise ValueError("Vendor dict does not contain required key %s." % key)
      merged_dict[key] = vendor_dict[key]

  # Partition groups and group sizes are defined by the vendor dict because
  # these values may vary for each board that uses a shared system image.
  merged_dict["super_partition_groups"] = vendor_dict["super_partition_groups"]
  for partition_group in merged_dict["super_partition_groups"].split():
    # Set the partition group's size using the value from the vendor dict.
    key = "super_%s_group_size" % partition_group
    if key not in vendor_dict:
      raise ValueError("Vendor dict does not contain required key %s." % key)
    merged_dict[key] = vendor_dict[key]

    # Set the partition group's partition list using a concatenation of the
    # framework and vendor partition lists.
    key = "super_%s_partition_list" % partition_group
    merged_dict[key] = uniq_concat(
        framework_dict.get(key, ""), vendor_dict.get(key, ""))
  # In case the vendor is on an S build but is taking a v3 -> v3 VABC OTA, we
  # want to fall back to v2.
  if "vabc_cow_version" not in vendor_dict or "vabc_cow_version" not in framework_dict:
    merged_dict["vabc_cow_version"] = '2'
  else:
    merged_dict["vabc_cow_version"] = min(vendor_dict["vabc_cow_version"],
                                          framework_dict["vabc_cow_version"])
  # Various other flags should be copied from the vendor dict, if defined.
  for key in ("virtual_ab", "virtual_ab_retrofit", "lpmake",
              "super_metadata_device", "super_partition_error_limit",
              "super_partition_size"):
    if key in vendor_dict.keys():
      merged_dict[key] = vendor_dict[key]

  return merged_dict

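# Illustrative sketch of the uniq_concat() helper above:
#   uniq_concat("system vendor", "vendor product") -> "product system vendor"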

def PartitionMapFromTargetFiles(target_files_dir):
  """Builds a map from partition -> path within an extracted target files directory."""
  # Keep possible_subdirs in sync with build/make/core/board_config.mk.
  possible_subdirs = {
      "system": ["SYSTEM"],
      "vendor": ["VENDOR", "SYSTEM/vendor"],
      "product": ["PRODUCT", "SYSTEM/product"],
      "system_ext": ["SYSTEM_EXT", "SYSTEM/system_ext"],
      "odm": ["ODM", "VENDOR/odm", "SYSTEM/vendor/odm"],
      "vendor_dlkm": [
          "VENDOR_DLKM", "VENDOR/vendor_dlkm", "SYSTEM/vendor/vendor_dlkm"
      ],
      "odm_dlkm": ["ODM_DLKM", "VENDOR/odm_dlkm", "SYSTEM/vendor/odm_dlkm"],
      "system_dlkm": ["SYSTEM_DLKM", "SYSTEM/system_dlkm"],
  }
  partition_map = {}
  for partition, subdirs in possible_subdirs.items():
    for subdir in subdirs:
      if os.path.exists(os.path.join(target_files_dir, subdir)):
        partition_map[partition] = subdir
        break
  return partition_map


def SharedUidPartitionViolations(uid_dict, partition_groups):
  """Checks for APK sharedUserIds that cross partition group boundaries.

  This uses a single or merged build's shareduid_violation_modules.json
  output file, as generated by find_shareduid_violation.py or
  core/tasks/find-shareduid-violation.mk.

  An error is defined as a sharedUserId that is found in a set of partitions
  that span more than one partition group.

  Args:
    uid_dict: A dictionary created by using the standard json module to read a
      complete shareduid_violation_modules.json file.
    partition_groups: A list of groups, where each group is a list of
      partitions.

  Returns:
    A list of error messages.
  """
  errors = []
  for uid, partitions in uid_dict.items():
    found_in_groups = [
        group for group in partition_groups
        if set(partitions.keys()) & set(group)
    ]
    if len(found_in_groups) > 1:
      errors.append(
          "APK sharedUserId \"%s\" found across partition groups in partitions \"%s\""
          % (uid, ",".join(sorted(partitions.keys()))))
  return errors

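# Illustrative sketch (hypothetical data): with partition_groups set to
# [["system", "system_ext"], ["vendor", "odm"]], a sharedUserId that appears
# in both "system" and "vendor" spans two groups and produces one error entry.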

def RunHostInitVerifier(product_out, partition_map):
  """Runs host_init_verifier on the init rc files within partitions.

  host_init_verifier searches the etc/init path within each partition.

  Args:
    product_out: PRODUCT_OUT directory, containing partition directories.
    partition_map: A map of partition name -> relative path within product_out.
  """
  allowed_partitions = ("system", "system_ext", "product", "vendor", "odm")
  cmd = ["host_init_verifier"]
  for partition, path in partition_map.items():
    if partition not in allowed_partitions:
      raise ExternalError("Unable to call host_init_verifier for partition %s" %
                          partition)
    cmd.extend(["--out_%s" % partition, os.path.join(product_out, path)])
    # Add --property-contexts if the file exists on the partition.
    property_contexts = "%s_property_contexts" % (
        "plat" if partition == "system" else partition)
    property_contexts_path = os.path.join(product_out, path, "etc", "selinux",
                                          property_contexts)
    if os.path.exists(property_contexts_path):
      cmd.append("--property-contexts=%s" % property_contexts_path)
    # Add the passwd file if the file exists on the partition.
    passwd_path = os.path.join(product_out, path, "etc", "passwd")
    if os.path.exists(passwd_path):
      cmd.extend(["-p", passwd_path])
  return RunAndCheckOutput(cmd)

1442
1443def AppendAVBSigningArgs(cmd, partition, avb_salt=None):
1444  """Append signing arguments for avbtool."""
1445  # e.g., "--key path/to/signing_key --algorithm SHA256_RSA4096"
1446  key_path = ResolveAVBSigningPathArgs(
1447      OPTIONS.info_dict.get("avb_" + partition + "_key_path"))
1448  algorithm = OPTIONS.info_dict.get("avb_" + partition + "_algorithm")
1449  if key_path and algorithm:
1450    cmd.extend(["--key", key_path, "--algorithm", algorithm])
1451  if avb_salt is None:
1452    avb_salt = OPTIONS.info_dict.get("avb_salt")
1453  # make_vbmeta_image doesn't like "--salt" (and it's not needed).
1454  if avb_salt and not partition.startswith("vbmeta"):
1455    cmd.extend(["--salt", avb_salt])
1456
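# An illustrative sketch only: assuming OPTIONS.info_dict carries the usual
# avb_boot_key_path / avb_boot_algorithm / avb_salt entries, this shows the
# kind of argument list the helper appends (the values are hypothetical).
def _ExampleAppendAvbSigningArgs():
  cmd = ["avbtool", "add_hash_footer"]
  AppendAVBSigningArgs(cmd, "boot")
  # cmd might now be ["avbtool", "add_hash_footer", "--key",
  #                   "path/to/boot_key.pem", "--algorithm",
  #                   "SHA256_RSA4096", "--salt", "deadbeef"]
  return cmd
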
1457
1458def ResolveAVBSigningPathArgs(split_args):
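  """Resolves file paths in AVB signing args against OPTIONS.search_path.

  Accepts either a list of avbtool args, in which case the path following
  '--signing_helper' is resolved, or a single key path string.
  """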
1459
1460  def ResolveBinaryPath(path):
1461    if os.path.exists(path):
1462      return path
1463    if OPTIONS.search_path:
1464      new_path = os.path.join(OPTIONS.search_path, path)
1465      if os.path.exists(new_path):
1466        return new_path
1467    raise ExternalError(
1468        "Failed to find {}".format(path))
1469
1470  if not split_args:
1471    return split_args
1472
1473  if isinstance(split_args, list):
1474    for index, arg in enumerate(split_args[:-1]):
1475      if arg == '--signing_helper':
1476        signing_helper_path = split_args[index + 1]
1477        split_args[index + 1] = ResolveBinaryPath(signing_helper_path)
1478        break
1479  elif isinstance(split_args, str):
1480    split_args = ResolveBinaryPath(split_args)
1481
1482  return split_args
1483
1484
1485def GetAvbPartitionArg(partition, image, info_dict=None):
1486  """Returns the VBMeta arguments for one partition.
1487
1488  It sets up the VBMeta argument by including the partition descriptor from the
1489  given 'image', or by configuring the partition as a chained partition.
1490
1491  Args:
1492    partition: The name of the partition (e.g. "system").
1493    image: The path to the partition image.
1494    info_dict: A dict returned by common.LoadInfoDict(). Will use
1495        OPTIONS.info_dict if None has been given.
1496
1497  Returns:
1498    A list of VBMeta arguments for one partition.
1499  """
1500  if info_dict is None:
1501    info_dict = OPTIONS.info_dict
1502
1503  # Check if chain partition is used.
1504  key_path = info_dict.get("avb_" + partition + "_key_path")
1505  if not key_path:
1506    return [AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG, image]
1507
1508  # For a non-A/B device, we neither chain /recovery nor include its descriptor
1509  # into vbmeta.img. The recovery image will be configured on an independent
1510  # boot chain, to be verified with AVB_SLOT_VERIFY_FLAGS_NO_VBMETA_PARTITION.
1511  # See details at
1512  # https://android.googlesource.com/platform/external/avb/+/master/README.md#booting-into-recovery.
1513  if info_dict.get("ab_update") != "true" and partition == "recovery":
1514    return []
1515
1516  # Otherwise chain the partition into vbmeta.
1517  chained_partition_arg = GetAvbChainedPartitionArg(partition, info_dict)
1518  return [AVB_ARG_NAME_CHAIN_PARTITION, chained_partition_arg]
1519
1520
1521def GetAvbPartitionsArg(partitions,
1522                        resolve_rollback_index_location_conflict=False,
1523                        info_dict=None):
1524  """Returns the VBMeta arguments for all AVB partitions.
1525
1526  It sets up the VBMeta argument by calling GetAvbPartitionArg of all
1527  partitions.
1528
1529  Args:
1530    partitions: A dict of all AVB partitions.
1531    resolve_rollback_index_location_conflict: If true, resolve conflicting avb
1532        rollback index locations by assigning the smallest unused value.
1533    info_dict: A dict returned by common.LoadInfoDict().
1534
1535  Returns:
1536    A list of VBMeta arguments for all partitions.
1537  """
1538  # An AVB partition will be linked into a vbmeta partition by either
1539  # AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG or AVB_ARG_NAME_CHAIN_PARTITION; there
1540  # should be no other cases.
1541  valid_args = {
1542      AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG: [],
1543      AVB_ARG_NAME_CHAIN_PARTITION: []
1544  }
1545
1546  for partition, path in sorted(partitions.items()):
1547    avb_partition_arg = GetAvbPartitionArg(partition, path, info_dict)
1548    if not avb_partition_arg:
1549      continue
1550    arg_name, arg_value = avb_partition_arg
1551    assert arg_name in valid_args
1552    valid_args[arg_name].append(arg_value)
1553
1554  # Copy the arguments for non-chained AVB partitions directly without
1555  # intervention.
1556  avb_args = []
1557  for image in valid_args[AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG]:
1558    avb_args.extend([AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG, image])
1559
1560  # Handle chained AVB partitions. The rollback index location might be
1561  # adjusted if two partitions use the same value. This may happen when mixing
1562  # a shared system image with other vendor images.
1563  used_index_loc = set()
1564  for chained_partition_arg in valid_args[AVB_ARG_NAME_CHAIN_PARTITION]:
1565    if resolve_rollback_index_location_conflict:
1566      while chained_partition_arg.rollback_index_location in used_index_loc:
1567        chained_partition_arg.rollback_index_location += 1
1568
1569    used_index_loc.add(chained_partition_arg.rollback_index_location)
1570    avb_args.extend([AVB_ARG_NAME_CHAIN_PARTITION,
1571                     chained_partition_arg.to_string()])
1572
1573  return avb_args
1574
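# A self-contained sketch of the conflict resolution loop above: when two
# chained partitions request the same rollback index location, the later
# one is bumped to the smallest unused value.
def _ExampleResolveIndexConflict():
  used = set()
  resolved = []
  for loc in [1, 1, 3]:      # requested rollback index locations
    while loc in used:       # the same probing as in GetAvbPartitionsArg
      loc += 1
    used.add(loc)
    resolved.append(loc)
  return resolved            # -> [1, 2, 3]
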
1575
1576def GetAvbChainedPartitionArg(partition, info_dict, key=None):
1577  """Constructs and returns the arg to build or verify a chained partition.
1578
1579  Args:
1580    partition: The partition name.
1581    info_dict: The info dict to look up the key info and rollback index
1582        location.
1583    key: The key to be used for building or verifying the partition. Defaults to
1584        the key listed in info_dict.
1585
1586  Returns:
1587    An AvbChainedPartitionArg object with rollback_index_location and
1588    pubkey_path that can be used to build or verify vbmeta image.
1589  """
1590  if key is None:
1591    key = info_dict["avb_" + partition + "_key_path"]
1592  key = ResolveAVBSigningPathArgs(key)
1593  pubkey_path = ExtractAvbPublicKey(info_dict["avb_avbtool"], key)
1594  rollback_index_location = info_dict[
1595      "avb_" + partition + "_rollback_index_location"]
1596  return AvbChainedPartitionArg(
1597      partition=partition,
1598      rollback_index_location=int(rollback_index_location),
1599      pubkey_path=pubkey_path)
1600
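# An illustrative sketch (the info_dict keys are the standard avb_* entries;
# the concrete values are hypothetical): the returned object serializes via
# to_string() into the name:rollback_index_location:pubkey form that
# avbtool's --chain_partition flag expects.
def _ExampleChainedPartitionArg(info_dict):
  arg = GetAvbChainedPartitionArg("vbmeta_system", info_dict)
  return [AVB_ARG_NAME_CHAIN_PARTITION, arg.to_string()]
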
1601
1602def BuildVBMeta(image_path, partitions, name, needed_partitions,
1603                resolve_rollback_index_location_conflict=False):
1604  """Creates a VBMeta image.
1605
1606  It generates the requested VBMeta image. The requested image could be for
1607  top-level or chained VBMeta image, which is determined based on the name.
1608
1609  Args:
1610    image_path: The output path for the new VBMeta image.
1611    partitions: A dict that's keyed by partition names with image paths as
1612        values. Only valid partition names are accepted: partitions listed
1613        in common.AVB_PARTITIONS and custom partitions listed in
1614        OPTIONS.info_dict.get("avb_custom_images_partition_list").
1615    name: Name of the VBMeta partition, e.g. 'vbmeta', 'vbmeta_system'.
1616    needed_partitions: Partitions whose descriptors should be included into the
1617        generated VBMeta image.
1618    resolve_rollback_index_location_conflict: If true, resolve conflicting avb
1619        rollback index locations by assigning the smallest unused value.
1620
1621  Raises:
1622    AssertionError: On invalid input args.
1623  """
1624  avbtool = OPTIONS.info_dict["avb_avbtool"]
1625  cmd = [avbtool, "make_vbmeta_image", "--output", image_path]
1626  AppendAVBSigningArgs(cmd, name)
1627
1628  custom_partitions = OPTIONS.info_dict.get(
1629      "avb_custom_images_partition_list", "").strip().split()
1630  custom_avb_partitions = ["vbmeta_" + part for part in OPTIONS.info_dict.get(
1631      "avb_custom_vbmeta_images_partition_list", "").strip().split()]
1632
1633  avb_partitions = {}
1634  for partition, path in sorted(partitions.items()):
1635    if partition not in needed_partitions:
1636      continue
1637    assert (partition in AVB_PARTITIONS or
1638            partition in AVB_VBMETA_PARTITIONS or
1639            partition in custom_avb_partitions or
1640            partition in custom_partitions), \
1641        'Unknown partition: {}'.format(partition)
1642    assert os.path.exists(path), \
1643        'Failed to find {} for {}'.format(path, partition)
1644    avb_partitions[partition] = path
1645  cmd.extend(GetAvbPartitionsArg(avb_partitions,
1646                                 resolve_rollback_index_location_conflict))
1647
1648  args = OPTIONS.info_dict.get("avb_{}_args".format(name))
1649  if args and args.strip():
1650    split_args = shlex.split(args)
1651    for index, arg in enumerate(split_args[:-1]):
1652      # Check that the image file exists. Some images might be defined
1653      # as a path relative to source tree, which may not be available at the
1654      # same location when running this script (we have the input target_files
1655      # zip only). For such cases, we additionally scan other locations (e.g.
1656      # IMAGES/, RADIO/, etc) before bailing out.
1657      if arg == AVB_ARG_NAME_INCLUDE_DESC_FROM_IMG:
1658        chained_image = split_args[index + 1]
1659        if os.path.exists(chained_image):
1660          continue
1661        found = False
1662        for dir_name in ['IMAGES', 'RADIO', 'PREBUILT_IMAGES']:
1663          alt_path = os.path.join(
1664              OPTIONS.input_tmp, dir_name, os.path.basename(chained_image))
1665          if os.path.exists(alt_path):
1666            split_args[index + 1] = alt_path
1667            found = True
1668            break
1669        assert found, 'Failed to find {}'.format(chained_image)
1670
1671    split_args = ResolveAVBSigningPathArgs(split_args)
1672    cmd.extend(split_args)
1673
1674  RunAndCheckOutput(cmd)
1675
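# An illustrative top-level call (the image paths are hypothetical): builds
# a vbmeta.img whose descriptors cover the boot and system partitions.
def _ExampleBuildVBMeta():
  BuildVBMeta("/tmp/vbmeta.img",
              {"boot": "/tmp/boot.img", "system": "/tmp/system.img"},
              "vbmeta", ["boot", "system"])
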
1676
1677def _MakeRamdisk(sourcedir, fs_config_file=None,
1678                 dev_node_file=None,
1679                 ramdisk_format=RamdiskFormat.GZ):
1680  ramdisk_img = tempfile.NamedTemporaryFile()
1681
1682  cmd = ["mkbootfs"]
1683
1684  if fs_config_file and os.access(fs_config_file, os.F_OK):
1685    cmd.extend(["-f", fs_config_file])
1686
1687  if dev_node_file and os.access(dev_node_file, os.F_OK):
1688    cmd.extend(["-n", dev_node_file])
1689
1690  cmd.append(os.path.join(sourcedir, "RAMDISK"))
1691
1692  p1 = Run(cmd, stdout=subprocess.PIPE)
1693  if ramdisk_format == RamdiskFormat.LZ4:
1694    p2 = Run(["lz4", "-l", "-12", "--favor-decSpeed"], stdin=p1.stdout,
1695             stdout=ramdisk_img.file.fileno())
1696  elif ramdisk_format == RamdiskFormat.GZ:
1697    p2 = Run(["gzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
1698  else:
1699    raise ValueError("Only lz4 and gzip ramdisk formats are supported.")
1700
1701  p2.wait()
1702  p1.wait()
1703  assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
1704  assert p2.returncode == 0, "compression of %s ramdisk failed" % (sourcedir,)
1705
1706  return ramdisk_img
1707
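# An illustrative call (the staging directory is hypothetical): packs
# sourcedir/RAMDISK with mkbootfs and pipes the result through lz4.
def _ExampleMakeRamdisk():
  img = _MakeRamdisk("/tmp/boot_staging",
                     ramdisk_format=RamdiskFormat.LZ4)
  return img.name  # path of the NamedTemporaryFile holding the ramdisk
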
1708
1709def _BuildBootableImage(image_name, sourcedir, fs_config_file,
1710                        dev_node_file=None, info_dict=None,
1711                        has_ramdisk=False, two_step_image=False):
1712  """Build a bootable image from the specified sourcedir.
1713
1714  Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
1715  'sourcedir'), and turn them into a boot image. 'two_step_image' indicates if
1716  we are building a two-step special image (i.e. building a recovery image to
1717  be loaded into /boot in two-step OTAs).
1718
1719  Return the image data, or None if sourcedir does not appear to contain files
1720  for building the requested image.
1721  """
1722
1723  if info_dict is None:
1724    info_dict = OPTIONS.info_dict
1725
1726  # "boot" or "recovery", without extension.
1727  partition_name = os.path.basename(sourcedir).lower()
1728
1729  kernel = None
1730  if partition_name == "recovery":
1731    if info_dict.get("exclude_kernel_from_recovery_image") == "true":
1732      logger.info("Excluded kernel binary from recovery image.")
1733    else:
1734      kernel = "kernel"
1735  elif partition_name == "init_boot":
1736    pass
1737  else:
1738    kernel = image_name.replace("boot", "kernel")
1739    kernel = kernel.replace(".img", "")
1740  if kernel and not os.access(os.path.join(sourcedir, kernel), os.F_OK):
1741    return None
1742
1743  kernel_path = os.path.join(sourcedir, kernel) if kernel else None
1744
1745  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
1746    return None
1747
1748  img = tempfile.NamedTemporaryFile()
1749
1750  if has_ramdisk:
1751    ramdisk_format = GetRamdiskFormat(info_dict)
1752    ramdisk_img = _MakeRamdisk(sourcedir, fs_config_file, dev_node_file,
1753                               ramdisk_format=ramdisk_format)
1754
1755  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
1756  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
1757
1758  cmd = [mkbootimg]
1759  if kernel_path is not None:
1760    cmd.extend(["--kernel", kernel_path])
1761
1762  fn = os.path.join(sourcedir, "second")
1763  if os.access(fn, os.F_OK):
1764    cmd.append("--second")
1765    cmd.append(fn)
1766
1767  fn = os.path.join(sourcedir, "dtb")
1768  if os.access(fn, os.F_OK):
1769    cmd.append("--dtb")
1770    cmd.append(fn)
1771
1772  fn = os.path.join(sourcedir, "cmdline")
1773  if os.access(fn, os.F_OK):
1774    cmd.append("--cmdline")
1775    cmd.append(open(fn).read().rstrip("\n"))
1776
1777  fn = os.path.join(sourcedir, "base")
1778  if os.access(fn, os.F_OK):
1779    cmd.append("--base")
1780    cmd.append(open(fn).read().rstrip("\n"))
1781
1782  fn = os.path.join(sourcedir, "pagesize")
1783  if os.access(fn, os.F_OK):
1784    cmd.append("--pagesize")
1785    cmd.append(open(fn).read().rstrip("\n"))
1786
1787  if partition_name == "recovery":
1788    args = info_dict.get("recovery_mkbootimg_args")
1789    if not args:
1790      # Fall back to "mkbootimg_args" for recovery image
1791      # in case "recovery_mkbootimg_args" is not set.
1792      args = info_dict.get("mkbootimg_args")
1793  elif partition_name == "init_boot":
1794    args = info_dict.get("mkbootimg_init_args")
1795  else:
1796    args = info_dict.get("mkbootimg_args")
1797  if args and args.strip():
1798    cmd.extend(shlex.split(args))
1799
1800  args = info_dict.get("mkbootimg_version_args")
1801  if args and args.strip():
1802    cmd.extend(shlex.split(args))
1803
1804  if has_ramdisk:
1805    cmd.extend(["--ramdisk", ramdisk_img.name])
1806
1807  cmd.extend(["--output", img.name])
1808
1809  if partition_name == "recovery":
1810    if info_dict.get("include_recovery_dtbo") == "true":
1811      fn = os.path.join(sourcedir, "recovery_dtbo")
1812      cmd.extend(["--recovery_dtbo", fn])
1813    if info_dict.get("include_recovery_acpio") == "true":
1814      fn = os.path.join(sourcedir, "recovery_acpio")
1815      cmd.extend(["--recovery_acpio", fn])
1816
1817  RunAndCheckOutput(cmd)
1818
1819  # AVB: if enabled, calculate and add hash to boot.img or recovery.img.
1820  if info_dict.get("avb_enable") == "true":
1821    avbtool = info_dict["avb_avbtool"]
1822    if partition_name == "recovery":
1823      part_size = info_dict["recovery_size"]
1824    else:
1825      part_size = info_dict[image_name.replace(".img", "_size")]
1826    cmd = [avbtool, "add_hash_footer", "--image", img.name,
1827           "--partition_size", str(part_size), "--partition_name",
1828           partition_name]
1829    salt = None
1830    if kernel_path is not None:
1831      with open(kernel_path, "rb") as fp:
1832        salt = sha256(fp.read()).hexdigest()
1833    AppendAVBSigningArgs(cmd, partition_name, salt)
1834    args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args")
1835    if args and args.strip():
1836      split_args = ResolveAVBSigningPathArgs(shlex.split(args))
1837      cmd.extend(split_args)
1838    RunAndCheckOutput(cmd)
1839
1840  img.seek(0, os.SEEK_SET)
1841  data = img.read()
1842
1843  if has_ramdisk:
1844    ramdisk_img.close()
1845  img.close()
1846
1847  return data
1848
1849
1850def _SignBootableImage(image_path, prebuilt_name, partition_name,
1851                       info_dict=None):
1852  """Performs AVB signing for a prebuilt boot.img.
1853
1854  Args:
1855    image_path: The full path of the image, e.g., /path/to/boot.img.
1856    prebuilt_name: The prebuilt image name, e.g., boot.img, boot-5.4-gz.img,
1857        boot-5.10.img, recovery.img or init_boot.img.
1858    partition_name: The partition name, e.g., 'boot', 'init_boot' or 'recovery'.
1859    info_dict: The information dict read from misc_info.txt.
1860  """
1861  if info_dict is None:
1862    info_dict = OPTIONS.info_dict
1863
1864  # AVB: if enabled, calculate and add hash to boot.img or recovery.img.
1865  if info_dict.get("avb_enable") == "true":
1866    avbtool = info_dict["avb_avbtool"]
1867    if partition_name == "recovery":
1868      part_size = info_dict["recovery_size"]
1869    else:
1870      part_size = info_dict[prebuilt_name.replace(".img", "_size")]
1871
1872    cmd = [avbtool, "add_hash_footer", "--image", image_path,
1873           "--partition_size", str(part_size), "--partition_name",
1874           partition_name]
1875    # Use sha256 of the kernel (or ramdisk) as salt for reproducible builds.
    salt = None
1876    with tempfile.TemporaryDirectory() as tmpdir:
1877      RunAndCheckOutput(["unpack_bootimg", "--boot_img", image_path, "--out", tmpdir])
1878      for filename in ["kernel", "ramdisk", "vendor_ramdisk00"]:
1879        path = os.path.join(tmpdir, filename)
1880        if os.path.exists(path) and os.path.getsize(path):
1881          print("Using {} as salt for avb footer of {}".format(
1882              filename, partition_name))
1883          with open(path, "rb") as fp:
1884            salt = sha256(fp.read()).hexdigest()
1885            break
1886    AppendAVBSigningArgs(cmd, partition_name, salt)
1887    args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args")
1888    if args and args.strip():
1889      split_args = ResolveAVBSigningPathArgs(shlex.split(args))
1890      cmd.extend(split_args)
1891    RunAndCheckOutput(cmd)
1892
1893
1894def HasRamdisk(partition_name, info_dict=None):
1895  """Returns True if a bootable image should have a ramdisk, False otherwise.
1896
1897  Args:
1898    partition_name: The partition name, e.g., 'boot', 'init_boot' or 'recovery'.
1899    info_dict: The information dict read from misc_info.txt.
1900  """
1901  if info_dict is None:
1902    info_dict = OPTIONS.info_dict
1903
1904  if partition_name != "boot":
1905    return True  # init_boot.img or recovery.img has a ramdisk.
1906
1907  if info_dict.get("recovery_as_boot") == "true":
1908    return True  # the recovery-as-boot boot.img has a RECOVERY ramdisk.
1909
1910  if info_dict.get("gki_boot_image_without_ramdisk") == "true":
1911    return False  # A GKI boot.img has no ramdisk since Android-13.
1912
1913  if info_dict.get("init_boot") == "true":
1914    # The ramdisk is moved to the init_boot.img, so there is NO
1915    # ramdisk in the boot.img or boot-<kernel version>.img.
1916    return False
1917
1918  return True
1919
1920
1921def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
1922                     info_dict=None, two_step_image=False,
1923                     dev_nodes=False):
1924  """Return a File object with the desired bootable image.
1925
1926  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
1927  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
1928  the source files in 'unpack_dir'/'tree_subdir'."""
1929
1930  if info_dict is None:
1931    info_dict = OPTIONS.info_dict
1932
1933  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
1934  if os.path.exists(prebuilt_path):
1935    logger.info("using prebuilt %s from BOOTABLE_IMAGES...", prebuilt_name)
1936    return File.FromLocalFile(name, prebuilt_path)
1937
1938  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
1939  if os.path.exists(prebuilt_path):
1940    logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
1941    return File.FromLocalFile(name, prebuilt_path)
1942
1943  partition_name = tree_subdir.lower()
1944  prebuilt_path = os.path.join(unpack_dir, "PREBUILT_IMAGES", prebuilt_name)
1945  if os.path.exists(prebuilt_path):
1946    logger.info("Re-signing prebuilt %s from PREBUILT_IMAGES...", prebuilt_name)
1947    signed_img = MakeTempFile()
1948    shutil.copy(prebuilt_path, signed_img)
1949    _SignBootableImage(signed_img, prebuilt_name, partition_name, info_dict)
1950    return File.FromLocalFile(name, signed_img)
1951
1952  logger.info("building image from target_files %s...", tree_subdir)
1953
1954  has_ramdisk = HasRamdisk(partition_name, info_dict)
1955
1956  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
1957  data = _BuildBootableImage(prebuilt_name, os.path.join(unpack_dir, tree_subdir),
1958                             os.path.join(unpack_dir, fs_config),
1959                             os.path.join(unpack_dir, 'META/ramdisk_node_list')
1960                             if dev_nodes else None,
1961                             info_dict, has_ramdisk, two_step_image)
1962  if data:
1963    return File(name, data)
1964  return None
1965
1966
1967def _BuildVendorBootImage(sourcedir, fs_config_file, partition_name, info_dict=None):
1968  """Build a vendor boot image from the specified sourcedir.
1969
1970  Take a ramdisk, dtb, and vendor_cmdline from the input (in 'sourcedir'), and
1971  turn them into a vendor boot image.
1972
1973  Return the image data, or None if sourcedir does not appear to contain files
1974  for building the requested image.
1975  """
1976
1977  if info_dict is None:
1978    info_dict = OPTIONS.info_dict
1979
1980  img = tempfile.NamedTemporaryFile()
1981
1982  ramdisk_format = GetRamdiskFormat(info_dict)
1983  ramdisk_img = _MakeRamdisk(sourcedir, fs_config_file=fs_config_file, ramdisk_format=ramdisk_format)
1984
1985  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
1986  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
1987
1988  cmd = [mkbootimg]
1989
1990  fn = os.path.join(sourcedir, "dtb")
1991  if os.access(fn, os.F_OK):
1992    has_vendor_kernel_boot = (info_dict.get(
1993        "vendor_kernel_boot", "").lower() == "true")
1994
1995    # Pack dtb into vendor_kernel_boot if building vendor_kernel_boot.
1996    # Otherwise pack dtb into vendor_boot.
1997    if not has_vendor_kernel_boot or partition_name == "vendor_kernel_boot":
1998      cmd.append("--dtb")
1999      cmd.append(fn)
2000
2001  fn = os.path.join(sourcedir, "vendor_cmdline")
2002  if os.access(fn, os.F_OK):
2003    cmd.append("--vendor_cmdline")
2004    cmd.append(open(fn).read().rstrip("\n"))
2005
2006  fn = os.path.join(sourcedir, "base")
2007  if os.access(fn, os.F_OK):
2008    cmd.append("--base")
2009    cmd.append(open(fn).read().rstrip("\n"))
2010
2011  fn = os.path.join(sourcedir, "pagesize")
2012  if os.access(fn, os.F_OK):
2013    cmd.append("--pagesize")
2014    cmd.append(open(fn).read().rstrip("\n"))
2015
2016  args = info_dict.get("mkbootimg_args")
2017  if args and args.strip():
2018    cmd.extend(shlex.split(args))
2019
2020  args = info_dict.get("mkbootimg_version_args")
2021  if args and args.strip():
2022    cmd.extend(shlex.split(args))
2023
2024  cmd.extend(["--vendor_ramdisk", ramdisk_img.name])
2025  cmd.extend(["--vendor_boot", img.name])
2026
2027  fn = os.path.join(sourcedir, "vendor_bootconfig")
2028  if os.access(fn, os.F_OK):
2029    cmd.append("--vendor_bootconfig")
2030    cmd.append(fn)
2031
2032  ramdisk_fragment_imgs = []
2033  fn = os.path.join(sourcedir, "vendor_ramdisk_fragments")
2034  if os.access(fn, os.F_OK):
2035    ramdisk_fragments = shlex.split(open(fn).read().rstrip("\n"))
2036    for ramdisk_fragment in ramdisk_fragments:
2037      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS",
2038                        ramdisk_fragment, "mkbootimg_args")
2039      cmd.extend(shlex.split(open(fn).read().rstrip("\n")))
2040      fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS",
2041                        ramdisk_fragment, "prebuilt_ramdisk")
2042      # Use prebuilt image if found, else create ramdisk from supplied files.
2043      if os.access(fn, os.F_OK):
2044        ramdisk_fragment_pathname = fn
2045      else:
2046        ramdisk_fragment_root = os.path.join(
2047            sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment)
2048        ramdisk_fragment_img = _MakeRamdisk(ramdisk_fragment_root,
2049                                            ramdisk_format=ramdisk_format)
2050        ramdisk_fragment_imgs.append(ramdisk_fragment_img)
2051        ramdisk_fragment_pathname = ramdisk_fragment_img.name
2052      cmd.extend(["--vendor_ramdisk_fragment", ramdisk_fragment_pathname])
2053
2054  RunAndCheckOutput(cmd)
2055
2056  # AVB: if enabled, calculate and add hash.
2057  if info_dict.get("avb_enable") == "true":
2058    avbtool = info_dict["avb_avbtool"]
2059    part_size = info_dict[f'{partition_name}_size']
2060    cmd = [avbtool, "add_hash_footer", "--image", img.name,
2061           "--partition_size", str(part_size), "--partition_name", partition_name]
2062    AppendAVBSigningArgs(cmd, partition_name)
2063    args = info_dict.get(f'avb_{partition_name}_add_hash_footer_args')
2064    if args and args.strip():
2065      split_args = ResolveAVBSigningPathArgs(shlex.split(args))
2066      cmd.extend(split_args)
2067    RunAndCheckOutput(cmd)
2068
2069  img.seek(0, os.SEEK_SET)
2070  data = img.read()
2071
2072  for f in ramdisk_fragment_imgs:
2073    f.close()
2074  ramdisk_img.close()
2075  img.close()
2076
2077  return data
2078
2079
2080def GetVendorBootImage(name, prebuilt_name, unpack_dir, tree_subdir,
2081                       info_dict=None):
2082  """Return a File object with the desired vendor boot image.
2083
2084  Look for it under 'unpack_dir'/IMAGES, otherwise construct it from
2085  the source files in 'unpack_dir'/'tree_subdir'."""
2086
2087  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
2088  if os.path.exists(prebuilt_path):
2089    logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
2090    return File.FromLocalFile(name, prebuilt_path)
2091
2092  logger.info("building image from target_files %s...", tree_subdir)
2093
2094  if info_dict is None:
2095    info_dict = OPTIONS.info_dict
2096
2097  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
2098  data = _BuildVendorBootImage(
2099      os.path.join(unpack_dir, tree_subdir), os.path.join(unpack_dir, fs_config), "vendor_boot", info_dict)
2100  if data:
2101    return File(name, data)
2102  return None
2103
2104
2105def GetVendorKernelBootImage(name, prebuilt_name, unpack_dir, tree_subdir,
2106                             info_dict=None):
2107  """Return a File object with the desired vendor kernel boot image.
2108
2109  Look for it under 'unpack_dir'/IMAGES, otherwise construct it from
2110  the source files in 'unpack_dir'/'tree_subdir'."""
2111
2112  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
2113  if os.path.exists(prebuilt_path):
2114    logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
2115    return File.FromLocalFile(name, prebuilt_path)
2116
2117  logger.info("building image from target_files %s...", tree_subdir)
2118
2119  if info_dict is None:
2120    info_dict = OPTIONS.info_dict
2121
2122  data = _BuildVendorBootImage(
2123      os.path.join(unpack_dir, tree_subdir), None, "vendor_kernel_boot", info_dict)
2124  if data:
2125    return File(name, data)
2126  return None
2127
2128
2129def Gunzip(in_filename, out_filename):
2130  """Gunzips the given gzip compressed file to a given output file."""
2131  with gzip.open(in_filename, "rb") as in_file, \
2132          open(out_filename, "wb") as out_file:
2133    shutil.copyfileobj(in_file, out_file)
2134
2135
2136def UnzipSingleFile(input_zip: zipfile.ZipFile, info: zipfile.ZipInfo, dirname: str):
2137  # According to https://stackoverflow.com/questions/434641/how-do-i-set-permissions-attributes-on-a-file-in-a-zip-file-using-pythons-zip/6297838#6297838
2138  # the higher bits of |external_attr| hold the unix file permissions and type
2139  unix_filetype = info.external_attr >> 16
2140  file_perm = unix_filetype & 0o777
2141
2142  def CheckMask(a, mask):
2143    return (a & mask) == mask
2144
2145  def IsSymlink(a):
2146    return CheckMask(a, stat.S_IFLNK)
2147
2148  def IsDir(a):
2149    return CheckMask(a, stat.S_IFDIR)
2150  # python3.11 zipfile implementation doesn't handle symlink correctly
2151  if not IsSymlink(unix_filetype):
2152    target = input_zip.extract(info, dirname)
2153    # We want to ensure that the file is at least read/writable by owner and readable by all users
2154    if IsDir(unix_filetype):
2155      os.chmod(target, file_perm | 0o755)
2156    else:
2157      os.chmod(target, file_perm | 0o644)
2158    return target
2159  if dirname is None:
2160    dirname = os.getcwd()
2161  target = os.path.join(dirname, info.filename)
2162  os.makedirs(os.path.dirname(target), exist_ok=True)
2163  if os.path.exists(target):
2164    os.unlink(target)
2165  os.symlink(input_zip.read(info).decode(), target)
2166  return target
2167
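# A self-contained sketch of the external_attr decoding used above: the
# high 16 bits carry the Unix mode, so a regular rw-r--r-- file stored by
# a Unix zip tool decodes as follows.
def _ExampleExternalAttr():
  mode = stat.S_IFREG | 0o644
  external_attr = mode << 16
  unix_filetype = external_attr >> 16
  is_symlink = (unix_filetype & stat.S_IFLNK) == stat.S_IFLNK
  return is_symlink, unix_filetype & 0o777  # -> (False, 0o644)
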
2168
2169def UnzipToDir(filename, dirname, patterns=None):
2170  """Unzips the archive to the given directory.
2171
2172  Args:
2173    filename: The name of the zip file to unzip.
2174    dirname: Where the unzipped files will land.
2175    patterns: Files to unzip from the archive. If omitted, will unzip the entire
2176        archive. Non-matching patterns will be filtered out. If there's no match
2177        after the filtering, no file will be unzipped.
2178  """
2179  with zipfile.ZipFile(filename, allowZip64=True, mode="r") as input_zip:
2180    # Filter out non-matching patterns. unzip will complain otherwise.
2181    entries = input_zip.infolist()
2182    # b/283033491
2183    # Per https://en.wikipedia.org/wiki/ZIP_(file_format)#Central_directory_file_header
2184    # In zip64 mode, central directory record's header_offset field might be
2185    # set to 0xFFFFFFFF if header offset is > 2^32. In this case, the extra
2186    # fields will contain an 8 byte little endian integer at offset 20
2187    # to indicate the actual local header offset.
2188    # As of python3.11, python does not handle zip64 central directories
2189    # correctly, so we will manually do the parsing here.
2190
2191    # The ZIP64 central directory extra field has two required fields: a
2192    # 2-byte header ID and a 2-byte size field, 4 bytes in total. They are
2193    # followed by up to three 8-byte fields and then a 4-byte disk number
2194    # field. The disk number field is not required to be present, but when
2195    # it is, the total size of the extra field is divisible by 8 (because
2196    # 2+2+4+8*n is always a multiple of 8).
2197    # Most fields are optional, but when present they must appear in the
2198    # order defined by the zip64 spec. Since the local header offset is the
2199    # second-to-last field, it occupies either the last 8 bytes of the extra
2200    # field, or bytes [-12:-4] when the disk number field is present.
2201    for entry in entries:
2202      if entry.header_offset == 0xFFFFFFFF:
2203        if len(entry.extra) % 8 == 0:
2204          entry.header_offset = int.from_bytes(entry.extra[-12:-4], "little")
2205        else:
2206          entry.header_offset = int.from_bytes(entry.extra[-8:], "little")
2207    if patterns is not None:
2208      filtered = [info for info in entries if any(
2209          [fnmatch.fnmatch(info.filename, p) for p in patterns])]
2210
2211      # There aren't any matching files. Don't unzip anything.
2212      if not filtered:
2213        return
2214      for info in filtered:
2215        UnzipSingleFile(input_zip, info, dirname)
2216    else:
2217      for info in entries:
2218        UnzipSingleFile(input_zip, info, dirname)
2219
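# A self-contained sketch of the zip64 extra-field parsing in UnzipToDir:
# a fabricated extra field whose last 8 bytes hold the real local header
# offset (12 bytes total, so no disk-number field is present).
def _ExampleZip64HeaderOffset():
  header_offset = 0x1_0000_0000  # > 2^32, which forces zip64 encoding
  extra = b"\x01\x00\x08\x00" + header_offset.to_bytes(8, "little")
  assert len(extra) % 8 != 0     # 12 bytes -> no disk number field
  return int.from_bytes(extra[-8:], "little")  # -> 4294967296
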
2220
2221def UnzipTemp(filename, patterns=None):
2222  """Unzips the given archive into a temporary directory and returns the name.
2223
2224  Args:
2225    filename: If filename is of the form "foo.zip+bar.zip", unzip foo.zip into
2226    a temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.
2227
2228    patterns: Files to unzip from the archive. If omitted, will unzip the entire
2229    archive.
2230
2231  Returns:
2232    The name of the temporary directory.
2233  """
2234
2235  tmp = MakeTempDir(prefix="targetfiles-")
2236  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
2237  if m:
2238    UnzipToDir(m.group(1), tmp, patterns)
2239    UnzipToDir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"), patterns)
2240    filename = m.group(1)
2241  else:
2242    UnzipToDir(filename, tmp, patterns)
2243
2244  return tmp
2245
2246
2247def GetUserImage(which, tmpdir, input_zip,
2248                 info_dict=None,
2249                 allow_shared_blocks=None,
2250                 reset_file_map=False):
2251  """Returns an Image object suitable for passing to BlockImageDiff.
2252
2253  This function loads the specified image from the given path. If the specified
2254  image is sparse, it also performs additional processing for OTA purpose. For
2255  example, it always adds block 0 to clobbered blocks list. It also detects
2256  files that cannot be reconstructed from the block list, for which we should
2257  avoid applying imgdiff.
2258
2259  Args:
2260    which: The partition name.
2261    tmpdir: The directory that contains the prebuilt image and block map file.
2262    input_zip: The target-files ZIP archive.
2263    info_dict: The dict to be looked up for relevant info.
2264    allow_shared_blocks: If image is sparse, whether having shared blocks is
2265        allowed. If none, it is looked up from info_dict.
2266    reset_file_map: If true and image is sparse, reset file map before returning
2267        the image.
2268  Returns:
2269    An Image object. If it is a sparse image and reset_file_map is False, the
2270    image will have file_map info loaded.
2271  """
2272  if info_dict is None:
2273    info_dict = LoadInfoDict(input_zip)
2274
2275  is_sparse = IsSparseImage(os.path.join(tmpdir, "IMAGES", which + ".img"))
2276
2277  # When target uses 'BOARD_EXT4_SHARE_DUP_BLOCKS := true', images may contain
2278  # shared blocks (i.e. some blocks will show up in multiple files' block
2279  # list). We can only allocate such shared blocks to the first "owner", and
2280  # disable imgdiff for all later occurrences.
2281  if allow_shared_blocks is None:
2282    allow_shared_blocks = info_dict.get("ext4_share_dup_blocks") == "true"
2283
2284  if is_sparse:
2285    img = GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks)
2286    if reset_file_map:
2287      img.ResetFileMap()
2288    return img
2289  return GetNonSparseImage(which, tmpdir)
2290
2291
2292def GetNonSparseImage(which, tmpdir):
2293  """Returns an Image object suitable for passing to BlockImageDiff.
2294
2295  This function loads the specified non-sparse image from the given path.
2296
2297  Args:
2298    which: The partition name.
2299    tmpdir: The directory that contains the prebuilt image and block map file.
2300  Returns:
2301    An Image object.
2302  """
2303  path = os.path.join(tmpdir, "IMAGES", which + ".img")
2304  mappath = os.path.join(tmpdir, "IMAGES", which + ".map")
2305
2306  # The image and map files must have been created prior to calling
2307  # ota_from_target_files.py (since LMP).
2308  assert os.path.exists(path) and os.path.exists(mappath)
2309
2310  return images.FileImage(path)
2311
2312
2313def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks):
2314  """Returns a SparseImage object suitable for passing to BlockImageDiff.
2315
2316  This function loads the specified sparse image from the given path, and
2317  performs additional processing for OTA purpose. For example, it always adds
2318  block 0 to clobbered blocks list. It also detects files that cannot be
2319  reconstructed from the block list, for which we should avoid applying imgdiff.
2320
2321  Args:
2322    which: The partition name, e.g. "system", "vendor".
2323    tmpdir: The directory that contains the prebuilt image and block map file.
2324    input_zip: The target-files ZIP archive.
2325    allow_shared_blocks: Whether having shared blocks is allowed.
2326  Returns:
2327    A SparseImage object, with file_map info loaded.
2328  """
2329  path = os.path.join(tmpdir, "IMAGES", which + ".img")
2330  mappath = os.path.join(tmpdir, "IMAGES", which + ".map")
2331
2332  # The image and map files must have been created prior to calling
2333  # ota_from_target_files.py (since LMP).
2334  assert os.path.exists(path) and os.path.exists(mappath)
2335
2336  # In ext4 filesystems, block 0 might be changed even being mounted R/O. We add
2337  # it to clobbered_blocks so that it will be written to the target
2338  # unconditionally. Note that they are still part of care_map. (Bug: 20939131)
2339  clobbered_blocks = "0"
2340
2341  image = sparse_img.SparseImage(
2342      path, mappath, clobbered_blocks, allow_shared_blocks=allow_shared_blocks)
2343
2344  # block.map may contain fewer blocks, because mke2fs may skip allocating blocks
2345  # if they contain all zeros. We can't reconstruct such a file from its block
2346  # list. Tag such entries accordingly. (Bug: 65213616)
2347  for entry in image.file_map:
2348    # Skip artificial names, such as "__ZERO", "__NONZERO-1".
2349    if not entry.startswith('/'):
2350      continue
2351
2352    # "/system/framework/am.jar" => "SYSTEM/framework/am.jar". Note that the
2353    # filename listed in system.map may contain an additional leading slash
2354    # (i.e. "//system/framework/am.jar"). Using lstrip to get consistent
2355    # results.
2356    # And handle another special case, where files not under /system
2357    # (e.g. "/sbin/charger") are packed under ROOT/ in a target_files.zip.
2358    arcname = entry.lstrip('/')
2359    if which == 'system' and not arcname.startswith('system'):
2360      arcname = 'ROOT/' + arcname
2361    else:
2362      arcname = arcname.replace(which, which.upper(), 1)
2363
2364    assert arcname in input_zip.namelist(), \
2365        "Failed to find the ZIP entry for {}".format(entry)
2366
2367    info = input_zip.getinfo(arcname)
2368    ranges = image.file_map[entry]
2369
2370    # If a RangeSet has been tagged as using shared blocks while loading the
2371    # image, check the original block list to determine its completeness. Note
2372    # that the 'incomplete' flag would be tagged to the original RangeSet only.
2373    if ranges.extra.get('uses_shared_blocks'):
2374      ranges = ranges.extra['uses_shared_blocks']
2375
2376    if RoundUpTo4K(info.file_size) > ranges.size() * 4096:
2377      ranges.extra['incomplete'] = True
2378
2379  return image
2380
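# A sketch of the completeness check above: a file whose 4K-rounded size
# exceeds what its block list covers cannot be rebuilt from blocks alone.
# (RoundUpTo4K is assumed to round up to a 4096-byte boundary.)
def _ExampleIncompleteCheck(file_size, num_blocks):
  rounded = (file_size + 4095) // 4096 * 4096  # same idea as RoundUpTo4K
  return rounded > num_blocks * 4096           # True -> tag 'incomplete'
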
2381
2382def GetKeyPasswords(keylist):
2383  """Given a list of keys, prompt the user to enter passwords for
2384  those which require them.  Return a {key: password} dict.  password
2385  will be None if the key has no password."""
2386
2387  no_passwords = []
2388  need_passwords = []
2389  key_passwords = {}
2390  devnull = open("/dev/null", "w+b")
2391
2392  # sorted() can't compare strings to None, so convert Nones to strings
2393  for k in sorted(keylist, key=lambda x: x if x is not None else ""):
2394    # We don't need a password for things that aren't really keys.
2395    if k in SPECIAL_CERT_STRINGS or k is None:
2396      no_passwords.append(k)
2397      continue
2398
2399    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
2400             "-inform", "DER", "-nocrypt"],
2401            stdin=devnull.fileno(),
2402            stdout=devnull.fileno(),
2403            stderr=subprocess.STDOUT)
2404    p.communicate()
2405    if p.returncode == 0:
2406      # Definitely an unencrypted key.
2407      no_passwords.append(k)
2408    else:
2409      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
2410               "-inform", "DER", "-passin", "pass:"],
2411              stdin=devnull.fileno(),
2412              stdout=devnull.fileno(),
2413              stderr=subprocess.PIPE)
2414      _, stderr = p.communicate()
2415      if p.returncode == 0:
2416        # Encrypted key with empty string as password.
2417        key_passwords[k] = ''
2418      elif stderr.startswith('Error decrypting key'):
2419        # Definitely encrypted key.
2420        # It would have said "Error reading key" if it didn't parse correctly.
2421        need_passwords.append(k)
2422      else:
2423        # Potentially, a type of key that openssl doesn't understand.
2424        # We'll let the routines in signapk.jar handle it.
2425        no_passwords.append(k)
2426  devnull.close()
2427
2428  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
2429  key_passwords.update(dict.fromkeys(no_passwords))
2430  return key_passwords
2431
2432
2433def GetMinSdkVersion(apk_name):
2434  """Gets the minSdkVersion declared in the APK.
2435
2436  It calls OPTIONS.aapt2_path to query the embedded minSdkVersion from the given
2437  APK file. This can be both a decimal number (API Level) or a codename.
2438
2439  Args:
2440    apk_name: The APK filename.
2441
2442  Returns:
2443    The parsed SDK version string.
2444
2445  Raises:
2446    ExternalError: On failing to obtain the min SDK version.
2447  """
2448  proc = Run(
2449      [OPTIONS.aapt2_path, "dump", "badging", apk_name], stdout=subprocess.PIPE,
2450      stderr=subprocess.PIPE)
2451  stdoutdata, stderrdata = proc.communicate()
2452  if proc.returncode != 0:
2453    raise ExternalError(
2454        "Failed to obtain minSdkVersion for {}: aapt2 return code {}:\n{}\n{}".format(
2455            apk_name, proc.returncode, stdoutdata, stderrdata))
2456
2457  is_split_apk = False
2458  for line in stdoutdata.split("\n"):
2459    # See b/353837347: split APKs do not have an sdk version defined,
2460    # so we default to 21, since split APKs are only supported since
2461    # SDK 21.
2462    if re.search(r"split=[\"'].*[\"']", line):
2463      is_split_apk = True
2464    # Due to ag/24161708, looking for lines such as minSdkVersion:'23',minSdkVersion:'M'
2465    # or sdkVersion:'23', sdkVersion:'M'.
2466    m = re.match(r'(?:minSdkVersion|sdkVersion):\'([^\']*)\'', line)
2467    if m:
2468      return m.group(1)
2469  if is_split_apk:
2470    logger.info("%s is a split APK, it does not have minimum SDK version"
2471                " defined. Defaulting to 21 because split APK isn't supported"
2472                " before that.", apk_name)
2473    return 21
2474  raise ExternalError("No minSdkVersion returned by aapt2 for apk: {}".format(apk_name))
2475
2476
2477def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
2478  """Returns the minSdkVersion declared in the APK as a number (API Level).
2479
2480  If minSdkVersion is set to a codename, it is translated to a number using the
2481  provided map.
2482
2483  Args:
2484    apk_name: The APK filename.
2485
2486  Returns:
2487    The parsed SDK version number.
2488
2489  Raises:
2490    ExternalError: On failing to get the min SDK version number.
2491  """
2492  version = GetMinSdkVersion(apk_name)
2493  try:
2494    return int(version)
2495  except ValueError:
2496    # Not a decimal number.
2497    #
2498    # It could be either a straight codename, e.g.
2499    #     UpsideDownCake
2500    #
2501    # Or a codename with API fingerprint SHA, e.g.
2502    #     UpsideDownCake.e7d3947f14eb9dc4fec25ff6c5f8563e
2503    #
2504    # Extract the codename and try and map it to a version number.
2505    split = version.split(".")
2506    codename = split[0]
2507    if codename in codename_to_api_level_map:
2508      return codename_to_api_level_map[codename]
2509    raise ExternalError(
2510        "Unknown codename: '{}' from minSdkVersion: '{}'. Known codenames: {}".format(
2511            codename, version, codename_to_api_level_map))
2512
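# An illustrative call with a hypothetical codename map, translating a
# codename minSdkVersion (with or without an API fingerprint suffix) into
# a numeric API level.
def _ExampleMinSdkVersionInt(apk_path):
  codename_map = {"UpsideDownCake": 34}  # normally built from SDK info
  return GetMinSdkVersionInt(apk_path, codename_map)
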
2513
2514def SignFile(input_name, output_name, key, password, min_api_level=None,
2515             codename_to_api_level_map=None, whole_file=False,
2516             extra_signapk_args=None):
2517  """Sign the input_name zip/jar/apk, producing output_name.  Use the
2518  given key and password (the latter may be None if the key does not
2519  have a password).
2520
2521  If whole_file is true, use the "-w" option to SignApk to embed a
2522  signature that covers the whole file in the archive comment of the
2523  zip file.
2524
2525  min_api_level is the API Level (int) of the oldest platform this file may end
2526  up on. If not specified for an APK, the API Level is obtained by interpreting
2527  the minSdkVersion attribute of the APK's AndroidManifest.xml.
2528
2529  codename_to_api_level_map is needed to translate the codename which may be
2530  encountered as the APK's minSdkVersion.
2531
2532  Caller may optionally specify extra args to be passed to SignApk, which
2533  defaults to OPTIONS.extra_signapk_args if omitted.
2534  """
2535  if codename_to_api_level_map is None:
2536    codename_to_api_level_map = {}
2537  if extra_signapk_args is None:
2538    extra_signapk_args = OPTIONS.extra_signapk_args
2539
2540  java_library_path = os.path.join(
2541      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)
2542
2543  cmd = ([OPTIONS.java_path] + OPTIONS.java_args +
2544         ["-Djava.library.path=" + java_library_path,
2545          "-jar", os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)] +
2546         extra_signapk_args)
2547  if whole_file:
2548    cmd.append("-w")
2549
2550  min_sdk_version = min_api_level
2551  if min_sdk_version is None:
2552    if not whole_file:
2553      min_sdk_version = GetMinSdkVersionInt(
2554          input_name, codename_to_api_level_map)
2555  if min_sdk_version is not None:
2556    cmd.extend(["--min-sdk-version", str(min_sdk_version)])
2557
2558  cmd.extend([key + OPTIONS.public_key_suffix,
2559              key + OPTIONS.private_key_suffix,
2560              input_name, output_name])
2561
2562  proc = Run(cmd, stdin=subprocess.PIPE)
2563  if password is not None:
2564    password += "\n"
2565  stdoutdata, _ = proc.communicate(password)
2566  if proc.returncode != 0:
2567    raise ExternalError(
2568        "Failed to run {}: return code {}:\n{}".format(cmd,
2569                                                       proc.returncode, stdoutdata))
2570
2571
2572def CheckSize(data, target, info_dict):
2573  """Checks the data string passed against the max size limit.
2574
2575  For non-AVB images, raise exception if the data is too big. Print a warning
2576  if the data is nearing the maximum size.
2577
2578  For AVB images, the actual image size should be identical to the limit.
2579
2580  Args:
2581    data: A string that contains all the data for the partition.
2582    target: The partition name. The ".img" suffix is optional.
2583    info_dict: The dict to be looked up for relevant info.
2584  """
2585  if target.endswith(".img"):
2586    target = target[:-4]
2587  mount_point = "/" + target
2588
2589  fs_type = None
2590  limit = None
2591  if info_dict["fstab"]:
2592    if mount_point == "/userdata":
2593      mount_point = "/data"
2594    p = info_dict["fstab"][mount_point]
2595    fs_type = p.fs_type
2596    device = p.device
2597    if "/" in device:
2598      device = device[device.rfind("/")+1:]
2599    limit = info_dict.get(device + "_size", 0)
2600    if isinstance(limit, str):
2601      limit = int(limit, 0)
2602  if not fs_type or not limit:
2603    return
2604
2605  size = len(data)
2606  # target could be 'userdata' or 'cache'. They should follow the non-AVB image
2607  # path.
2608  if info_dict.get("avb_enable") == "true" and target in AVB_PARTITIONS:
2609    if size != limit:
2610      raise ExternalError(
2611          "Mismatching image size for %s: expected %d actual %d" % (
2612              target, limit, size))
2613  else:
2614    pct = float(size) * 100.0 / limit
2615    msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
2616    if pct >= 99.0:
2617      raise ExternalError(msg)
2618
2619    if pct >= 95.0:
2620      logger.warning("\n  WARNING: %s\n", msg)
2621    else:
2622      logger.info("  %s", msg)
2623
2624
2625def ReadApkCerts(tf_zip):
2626  """Parses the APK certs info from a given target-files zip.
2627
2628  Given a target-files ZipFile, parses the META/apkcerts.txt entry and returns a
2629  tuple with the following elements: (1) a dictionary that maps packages to
2630  certs (based on the "certificate" and "private_key" attributes in the file);
2631  (2) a string representing the extension of compressed APKs in the target files
2632  (e.g. ".gz", ".bro").
2633
2634  Args:
2635    tf_zip: The input target_files ZipFile (already open).
2636
2637  Returns:
2638    (certmap, ext): certmap is a dictionary that maps packages to certs; ext is
2639        the extension string of compressed APKs (e.g. ".gz"), or None if there's
2640        no compressed APKs.
2641  """
2642  certmap = {}
2643  compressed_extension = None
2644
2645  # META/apkcerts.txt contains the info for _all_ the packages known at build
2646  # time. Filter out the ones that are not installed.
2647  installed_files = set()
2648  for name in tf_zip.namelist():
2649    basename = os.path.basename(name)
2650    if basename:
2651      installed_files.add(basename)
2652
2653  for line in tf_zip.read('META/apkcerts.txt').decode().split('\n'):
2654    line = line.strip()
2655    if not line:
2656      continue
2657    m = re.match(
2658        r'^name="(?P<NAME>.*)"\s+certificate="(?P<CERT>.*)"\s+'
2659        r'private_key="(?P<PRIVKEY>.*?)"(\s+compressed="(?P<COMPRESSED>.*?)")?'
2660        r'(\s+partition="(?P<PARTITION>.*?)")?$',
2661        line)
2662    if not m:
2663      continue
2664
2665    matches = m.groupdict()
2666    cert = matches["CERT"]
2667    privkey = matches["PRIVKEY"]
2668    name = matches["NAME"]
2669    this_compressed_extension = matches["COMPRESSED"]
2670
2671    public_key_suffix_len = len(OPTIONS.public_key_suffix)
2672    private_key_suffix_len = len(OPTIONS.private_key_suffix)
2673    if cert in SPECIAL_CERT_STRINGS and not privkey:
2674      certmap[name] = cert
2675    elif (cert.endswith(OPTIONS.public_key_suffix) and
2676          privkey.endswith(OPTIONS.private_key_suffix) and
2677          cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
2678      certmap[name] = cert[:-public_key_suffix_len]
2679    else:
2680      raise ValueError("Failed to parse line from apkcerts.txt:\n" + line)
2681
2682    if not this_compressed_extension:
2683      continue
2684
2685    # Only count the installed files.
2686    filename = name + '.' + this_compressed_extension
2687    if filename not in installed_files:
2688      continue
2689
2690    # Make sure that all the values in the compression map have the same
2691    # extension. We don't support multiple compression methods in the same
2692    # system image.
2693    if compressed_extension:
2694      if this_compressed_extension != compressed_extension:
2695        raise ValueError(
2696            "Multiple compressed extensions: {} vs {}".format(
2697                compressed_extension, this_compressed_extension))
2698    else:
2699      compressed_extension = this_compressed_extension
2700
2701  return (certmap,
2702          ("." + compressed_extension) if compressed_extension else None)
2703
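# A hypothetical META/apkcerts.txt line accepted by the regex above (a
# single line in the real file); it maps "Foo.apk" to the key stem
# "certs/platform", since the .x509.pem/.pk8 pair shares that stem:
#   name="Foo.apk" certificate="certs/platform.x509.pem"
#       private_key="certs/platform.pk8" partition="system"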
2704
2705COMMON_DOCSTRING = """
2706Global options
2707
2708  -p  (--path) <dir>
2709      Prepend <dir>/bin to the list of places to search for binaries run by this
2710      script, and expect to find jars in <dir>/framework.
2711
2712  -s  (--device_specific) <file>
2713      Path to the Python module containing device-specific releasetools code.
2714
2715  -x  (--extra) <key=value>
2716      Add a key/value pair to the 'extras' dict, which device-specific extension
2717      code may look at.
2718
2719  -v  (--verbose)
2720      Show command lines being executed.
2721
2722  -h  (--help)
2723      Display this usage message and exit.
2724
2725  --logfile <file>
2726      Put verbose logs to the specified file (regardless of --verbose).
2727"""
2728
2729
2730def Usage(docstring):
2731  print(docstring.rstrip("\n"))
2732  print(COMMON_DOCSTRING)
2733
2734
2735def ParseOptions(argv,
2736                 docstring,
2737                 extra_opts="", extra_long_opts=(),
2738                 extra_option_handler: Iterable[OptionHandler] = None):
2739  """Parse the options in argv and return any arguments that aren't
2740  flags.  docstring is the calling module's docstring, to be displayed
2741  for errors and -h.  extra_opts and extra_long_opts are for flags
2742  defined by the caller, which are processed by passing them to
2743  extra_option_handler."""
2744  extra_long_opts = list(extra_long_opts)
2745  if extra_option_handler is None:
    extra_option_handler = []  # unknown options then raise ValueError below
2746  if not isinstance(extra_option_handler, Iterable):
    extra_option_handler = [extra_option_handler]
2747
2748  for handler in extra_option_handler:
2749    if isinstance(handler, OptionHandler):
2750      extra_long_opts.extend(handler.extra_long_opts)
2751
2752  try:
2753    opts, args = getopt.getopt(
2754        argv, "hvp:s:x:" + extra_opts,
2755        ["help", "verbose", "path=", "signapk_path=",
2756         "signapk_shared_library_path=", "extra_signapk_args=", "aapt2_path=",
2757         "java_path=", "java_args=", "android_jar_path=", "public_key_suffix=",
2758         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
2759         "verity_signer_path=", "verity_signer_args=", "device_specific=",
2760         "extra=", "logfile="] + list(extra_long_opts))
2761  except getopt.GetoptError as err:
2762    Usage(docstring)
2763    print("**", str(err), "**")
2764    sys.exit(2)
2765
2766  for o, a in opts:
2767    if o in ("-h", "--help"):
2768      Usage(docstring)
2769      sys.exit()
2770    elif o in ("-v", "--verbose"):
2771      OPTIONS.verbose = True
2772    elif o in ("-p", "--path"):
2773      OPTIONS.search_path = a
2774    elif o in ("--signapk_path",):
2775      OPTIONS.signapk_path = a
2776    elif o in ("--signapk_shared_library_path",):
2777      OPTIONS.signapk_shared_library_path = a
2778    elif o in ("--extra_signapk_args",):
2779      OPTIONS.extra_signapk_args = shlex.split(a)
2780    elif o in ("--aapt2_path",):
2781      OPTIONS.aapt2_path = a
2782    elif o in ("--java_path",):
2783      OPTIONS.java_path = a
2784    elif o in ("--java_args",):
2785      OPTIONS.java_args = shlex.split(a)
2786    elif o in ("--android_jar_path",):
2787      OPTIONS.android_jar_path = a
2788    elif o in ("--public_key_suffix",):
2789      OPTIONS.public_key_suffix = a
2790    elif o in ("--private_key_suffix",):
2791      OPTIONS.private_key_suffix = a
2792    elif o in ("--boot_signer_path",):
2793      raise ValueError(
2794          "--boot_signer_path is no longer supported, please switch to AVB")
2795    elif o in ("--boot_signer_args",):
2796      raise ValueError(
2797          "--boot_signer_args is no longer supported, please switch to AVB")
2798    elif o in ("--verity_signer_path",):
2799      raise ValueError(
2800          "--verity_signer_path is no longer supported, please switch to AVB")
2801    elif o in ("--verity_signer_args",):
2802      raise ValueError(
2803          "--verity_signer_args is no longer supported, please switch to AVB")
2804    elif o in ("-s", "--device_specific"):
2805      OPTIONS.device_specific = a
2806    elif o in ("-x", "--extra"):
2807      key, value = a.split("=", 1)
2808      OPTIONS.extras[key] = value
2809    elif o in ("--logfile",):
2810      OPTIONS.logfile = a
2811    else:
2812      if extra_option_handler is None:
2813        raise ValueError("unknown option \"%s\"" % (o,))
2814      success = False
2815      for handler in extra_option_handler:
2816        if isinstance(handler, OptionHandler):
2817          if handler.handler(o, a):
2818            success = True
2819            break
2820        elif handler(o, a):
2821          success = True
2822          break
2823      if not success:
2824        raise ValueError("unknown option \"%s\"" % (o,))
2825
2827  if OPTIONS.search_path:
2828    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
2829                          os.pathsep + os.environ["PATH"])
2830
2831  return args
2832
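# A sketch of wiring a custom flag through ParseOptions with the
# OptionHandler dataclass; the flag name and the callback are hypothetical.
def _ExampleParseOptions(argv, docstring):
  def HandleExtra(o, a):
    if o == "--example_flag":
      OPTIONS.extras["example_flag"] = a
      return True
    return False
  handler = OptionHandler(extra_long_opts=["example_flag="],
                          handler=HandleExtra)
  return ParseOptions(argv, docstring, extra_option_handler=[handler])
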
2833
2834def MakeTempFile(prefix='tmp', suffix=''):
2835  """Make a temp file and add it to the list of things to be deleted
2836  when Cleanup() is called.  Return the filename."""
2837  fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
2838  os.close(fd)
2839  OPTIONS.tempfiles.append(fn)
2840  return fn
2841
2842
2843def MakeTempDir(prefix='tmp', suffix=''):
2844  """Makes a temporary dir that will be cleaned up with a call to Cleanup().
2845
2846  Returns:
2847    The absolute pathname of the new directory.
2848  """
2849  dir_name = tempfile.mkdtemp(suffix=suffix, prefix=prefix)
2850  OPTIONS.tempfiles.append(dir_name)
2851  return dir_name
2852
2853
2854def Cleanup():
2855  for i in OPTIONS.tempfiles:
2856    if not os.path.exists(i):
2857      continue
2858    if os.path.isdir(i):
2859      shutil.rmtree(i, ignore_errors=True)
2860    else:
2861      os.remove(i)
2862  del OPTIONS.tempfiles[:]
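
# Illustrative calling pattern (assumed, mirroring the releasetools entry
# points): temp files and dirs accumulate in OPTIONS.tempfiles and are all
# removed in a single pass.
#
#   try:
#     img = MakeTempFile(prefix='boot-', suffix='.img')
#     staging = MakeTempDir()
#     ...
#   finally:
#     Cleanup()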
2863
2864
2865class PasswordManager(object):
2866  def __init__(self):
2867    self.editor = os.getenv("EDITOR")
2868    self.pwfile = os.getenv("ANDROID_PW_FILE")
2869
2870  def GetPasswords(self, items):
2871    """Get passwords corresponding to each string in 'items',
2872    returning a dict.  (The dict may have keys in addition to the
2873    values in 'items'.)
2874
2875    Uses the passwords in $ANDROID_PW_FILE if available, letting the
2876    user edit that file to add more needed passwords.  If no editor is
2877    available, or $ANDROID_PW_FILE isn't defined, prompts the user
2878    interactively in the ordinary way.
2879    """
2880
2881    current = self.ReadFile()
2882
2883    first = True
2884    while True:
2885      missing = []
2886      for i in items:
2887        if i not in current or not current[i]:
2888          missing.append(i)
2889      # Are all the passwords already in the file?
2890      if not missing:
2891        return current
2892
2893      for i in missing:
2894        current[i] = ""
2895
2896      if not first:
2897        print("key file %s still missing some passwords." % (self.pwfile,))
2898        answer = input("try to edit again? [y]> ").strip()
2901        if answer and answer[0] not in 'yY':
2902          raise RuntimeError("key passwords unavailable")
2903      first = False
2904
2905      current = self.UpdateAndReadFile(current)
2906
2907  def PromptResult(self, current):  # pylint: disable=no-self-use
2908    """Prompt the user to enter a value (password) for each key in
2909    'current' whose value is false.  Returns a new dict with all the
2910    values.
2911    """
2912    result = {}
2913    for k, v in sorted(current.items()):
2914      if v:
2915        result[k] = v
2916      else:
2917        while True:
2918          result[k] = getpass.getpass(
2919              "Enter password for %s key> " % k).strip()
2920          if result[k]:
2921            break
2922    return result
2923
2924  def UpdateAndReadFile(self, current):
2925    if not self.editor or not self.pwfile:
2926      return self.PromptResult(current)
2927
2928    f = open(self.pwfile, "w")
2929    os.chmod(self.pwfile, 0o600)
2930    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
2931    f.write("# (Additional spaces are harmless.)\n\n")
2932
2933    first_line = None
2934    sorted_list = sorted([(not v, k, v) for (k, v) in current.items()])
2935    for i, (_, k, v) in enumerate(sorted_list):
2936      f.write("[[[  %s  ]]] %s\n" % (v, k))
2937      if not v and first_line is None:
2938        # position cursor on first line with no password.
2939        first_line = i + 4
2940    f.close()
2941
2942    RunAndCheckOutput([self.editor, "+%d" % (first_line,), self.pwfile])
2943
2944    return self.ReadFile()
2945
2946  def ReadFile(self):
2947    result = {}
2948    if self.pwfile is None:
2949      return result
2950    try:
2951      f = open(self.pwfile, "r")
2952      for line in f:
2953        line = line.strip()
2954        if not line or line[0] == '#':
2955          continue
2956        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
2957        if not m:
2958          logger.warning("Failed to parse password file: %s", line)
2959        else:
2960          result[m.group(2)] = m.group(1)
2961      f.close()
2962    except IOError as e:
2963      if e.errno != errno.ENOENT:
2964        logger.exception("Error reading password file:")
2965    return result
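
# Illustrative $ANDROID_PW_FILE contents, as written by UpdateAndReadFile()
# and parsed by ReadFile(); the key name is hypothetical, and extra spaces
# inside the brackets are harmless:
#
#   # Enter key passwords between the [[[ ]]] brackets.
#   # (Additional spaces are harmless.)
#
#   [[[  hunter2  ]]] build/target/product/security/releasekey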
2966
2967
2968def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
2969             compress_type=None):
2970
2971  # http://b/18015246
2972  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
2973  # for files larger than 2GiB. We can work around this by adjusting their
2974  # limit. Note that `zipfile.writestr()` will not work for strings larger than
2975  # 2GiB. The Python interpreter sometimes rejects strings that large (though
2976  # it isn't clear to me exactly what circumstances cause this).
2977  # `zipfile.write()` must be used directly to work around this.
2978  #
2979  # This mess can be avoided if we port to python3.
2980  saved_zip64_limit = zipfile.ZIP64_LIMIT
2981  zipfile.ZIP64_LIMIT = (1 << 32) - 1
2982
2983  if compress_type is None:
2984    compress_type = zip_file.compression
2985  if arcname is None:
2986    arcname = filename
2987
2988  saved_stat = os.stat(filename)
2989
2990  try:
2991    # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
2992    # file to be zipped and reset it when we're done.
2993    os.chmod(filename, perms)
2994
2995    # Use a fixed timestamp so the output is repeatable.
2996    # Note: Use of fromtimestamp rather than utcfromtimestamp here is
2997    # intentional. zip stores datetimes in local time without a time zone
2998    # attached, so we need "epoch" but in the local time zone to get 2009/01/01
2999    # in the zip archive.
3000    local_epoch = datetime.datetime.fromtimestamp(0)
3001    timestamp = (datetime.datetime(2009, 1, 1) - local_epoch).total_seconds()
3002    os.utime(filename, (timestamp, timestamp))
3003
3004    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
3005  finally:
3006    os.chmod(filename, saved_stat.st_mode)
3007    os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
3008    zipfile.ZIP64_LIMIT = saved_zip64_limit
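
# Illustrative call (paths hypothetical): the entry is stored with the given
# permissions and the fixed 2009-01-01 timestamp, so repeated builds produce
# byte-identical archives.
#
#   ZipWrite(output_zip, "out/boot.img", arcname="IMAGES/boot.img",
#            perms=0o644, compress_type=zipfile.ZIP_STORED)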
3009
3010
3011def ZipWriteStr(zip_file: zipfile.ZipFile, zinfo_or_arcname, data, perms=None,
3012                compress_type=None):
3013  """Wraps zipfile.writestr() to work around the zip64 limit.
3014
3015  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
3016  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
3017  when calling crc32(bytes).
3018
3019  But it still works fine to write a shorter string into a large zip file.
3020  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
3021  when we know the string won't be too long.
3022  """
3023
3024  saved_zip64_limit = zipfile.ZIP64_LIMIT
3025  zipfile.ZIP64_LIMIT = (1 << 32) - 1
3026
3027  if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
3028    zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
3029    zinfo.compress_type = zip_file.compression
3030    if perms is None:
3031      perms = 0o100644
3032  else:
3033    zinfo = zinfo_or_arcname
3034    # Python 2 and 3 behave differently when calling ZipFile.writestr() with
3035    # zinfo.external_attr being 0. Python 3 uses `0o600 << 16` as the value for
3036    # such a case (since
3037    # https://github.com/python/cpython/commit/18ee29d0b870caddc0806916ca2c823254f1a1f9),
3038    # which seems to make more sense. Otherwise the entry will have 0o000 as the
3039    # permission bits. We follow the logic in Python 3 to get consistent
3040    # behavior between using the two versions.
3041    if not zinfo.external_attr:
3042      zinfo.external_attr = 0o600 << 16
3043
3044  # If compress_type is given, it overrides the value in zinfo.
3045  if compress_type is not None:
3046    zinfo.compress_type = compress_type
3047
3048  # If perms is given, it takes priority over the value in zinfo.
3049  if perms is not None:
3050    # If perms doesn't set the file type, mark it as a regular file.
3051    if perms & 0o770000 == 0:
3052      perms |= 0o100000
3053    zinfo.external_attr = perms << 16
3054
3055  # Use a fixed timestamp so the output is repeatable.
3056  zinfo.date_time = (2009, 1, 1, 0, 0, 0)
3057
3058  zip_file.writestr(zinfo, data)
3059  zipfile.ZIP64_LIMIT = saved_zip64_limit
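
# Illustrative calls: passing a bare arcname with perms=0o644 stores
# external_attr = 0o100644 << 16 (the regular-file bit is added); passing a
# ZipInfo keeps its fields except for the fixed 2009-01-01 timestamp.
#
#   ZipWriteStr(output_zip, "META/misc_info.txt", b"...", perms=0o644)
#   ZipWriteStr(output_zip, zipfile.ZipInfo("META/foo.txt"), b"...",
#               compress_type=zipfile.ZIP_STORED)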
3060
3061def ZipExclude(input_zip, output_zip, entries, force=False):
3062  """Copies input_zip to output_zip, excluding the given entries.
3063
3064  Args:
3065    input_zip: The input ZIP file; the result is written to output_zip.
3066    entries: The name of the entry, or the list of names, to be excluded.
3067  """
3068  if isinstance(entries, str):
3069    entries = [entries]
3070  # If list is empty, nothing to do
3071  if not entries:
3072    shutil.copy(input_zip, output_zip)
3073    return
3074
3075  with zipfile.ZipFile(input_zip, 'r') as zin:
3076    if not force and len(set(zin.namelist()).intersection(entries)) == 0:
3077      raise ExternalError(
3078          "Failed to delete zip entries, name not matched: %s" % entries)
3079
3080    fd, new_zipfile = tempfile.mkstemp(dir=os.path.dirname(input_zip))
3081    os.close(fd)
3082    cmd = ["zip2zip", "-i", input_zip, "-o", new_zipfile]
3083    for entry in entries:
3084      cmd.append("-x")
3085      cmd.append(entry)
3086    RunAndCheckOutput(cmd)
3087  os.replace(new_zipfile, output_zip)
3088
3089
3090def ZipDelete(zip_filename, entries, force=False):
3091  """Deletes entries from a ZIP file.
3092
3093  Args:
3094    zip_filename: The name of the ZIP file.
3095    entries: The name of the entry, or the list of names to be deleted.
3096  """
3097  if isinstance(entries, str):
3098    entries = [entries]
3099  # If list is empty, nothing to do
3100  if not entries:
3101    return
3102
3103  ZipExclude(zip_filename, zip_filename, entries, force)
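
# Illustrative call (entry names hypothetical): deletes the two image
# entries in place, going through ZipExclude() and the zip2zip tool.
#
#   ZipDelete("target_files.zip",
#             ["IMAGES/system.img", "IMAGES/vendor.img"])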
3104
3105
3106def ZipClose(zip_file):
3107  # http://b/18015246
3108  # zipfile also refers to ZIP64_LIMIT during close() when it writes out the
3109  # central directory.
3110  saved_zip64_limit = zipfile.ZIP64_LIMIT
3111  zipfile.ZIP64_LIMIT = (1 << 32) - 1
3112
3113  zip_file.close()
3114
3115  zipfile.ZIP64_LIMIT = saved_zip64_limit
3116
3117
3118class DeviceSpecificParams(object):
3119  module = None
3120
3121  def __init__(self, **kwargs):
3122    """Keyword arguments to the constructor become attributes of this
3123    object, which is passed to all functions in the device-specific
3124    module."""
3125    for k, v in kwargs.items():
3126      setattr(self, k, v)
3127    self.extras = OPTIONS.extras
3128
3129    if self.module is None:
3130      path = OPTIONS.device_specific
3131      if not path:
3132        return
3133      try:
3134        if os.path.isdir(path):
3135          info = imp.find_module("releasetools", [path])
3136        else:
3137          d, f = os.path.split(path)
3138          b, x = os.path.splitext(f)
3139          if x == ".py":
3140            f = b
3141          info = imp.find_module(f, [d])
3142        logger.info("loaded device-specific extensions from %s", path)
3143        self.module = imp.load_module("device_specific", *info)
3144      except ImportError:
3145        logger.info("unable to load device-specific module; assuming none")
3146
3147  def _DoCall(self, function_name, *args, **kwargs):
3148    """Call the named function in the device-specific module, passing
3149    the given args and kwargs.  The first argument to the call will be
3150    the DeviceSpecificParams object itself.  If there is no module, or the
3151    module does not define the function, return the value of the
3152    'default' kwarg (which itself defaults to None)."""
3153    if self.module is None or not hasattr(self.module, function_name):
3154      return kwargs.get("default")
3155    return getattr(self.module, function_name)(*((self,) + args), **kwargs)
3156
3157  def FullOTA_Assertions(self):
3158    """Called after emitting the block of assertions at the top of a
3159    full OTA package.  Implementations can add whatever additional
3160    assertions they like."""
3161    return self._DoCall("FullOTA_Assertions")
3162
3163  def FullOTA_InstallBegin(self):
3164    """Called at the start of full OTA installation."""
3165    return self._DoCall("FullOTA_InstallBegin")
3166
3167  def FullOTA_GetBlockDifferences(self):
3168    """Called during full OTA installation and verification.
3169    Implementations should return a list of BlockDifference objects
3170    describing the update on each additional partition.
3171    """
3172    return self._DoCall("FullOTA_GetBlockDifferences")
3173
3174  def FullOTA_InstallEnd(self):
3175    """Called at the end of full OTA installation; typically this is
3176    used to install the image for the device's baseband processor."""
3177    return self._DoCall("FullOTA_InstallEnd")
3178
3179  def IncrementalOTA_Assertions(self):
3180    """Called after emitting the block of assertions at the top of an
3181    incremental OTA package.  Implementations can add whatever
3182    additional assertions they like."""
3183    return self._DoCall("IncrementalOTA_Assertions")
3184
3185  def IncrementalOTA_VerifyBegin(self):
3186    """Called at the start of the verification phase of incremental
3187    OTA installation; additional checks can be placed here to abort
3188    the script before any changes are made."""
3189    return self._DoCall("IncrementalOTA_VerifyBegin")
3190
3191  def IncrementalOTA_VerifyEnd(self):
3192    """Called at the end of the verification phase of incremental OTA
3193    installation; additional checks can be placed here to abort the
3194    script before any changes are made."""
3195    return self._DoCall("IncrementalOTA_VerifyEnd")
3196
3197  def IncrementalOTA_InstallBegin(self):
3198    """Called at the start of incremental OTA installation (after
3199    verification is complete)."""
3200    return self._DoCall("IncrementalOTA_InstallBegin")
3201
3202  def IncrementalOTA_GetBlockDifferences(self):
3203    """Called during incremental OTA installation and verification.
3204    Implementations should return a list of BlockDifference objects
3205    describing the update on each additional partition.
3206    """
3207    return self._DoCall("IncrementalOTA_GetBlockDifferences")
3208
3209  def IncrementalOTA_InstallEnd(self):
3210    """Called at the end of incremental OTA installation; typically
3211    this is used to install the image for the device's baseband
3212    processor."""
3213    return self._DoCall("IncrementalOTA_InstallEnd")
3214
3215  def VerifyOTA_Assertions(self):
3216    return self._DoCall("VerifyOTA_Assertions")
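
# Illustrative device-specific module (loaded via -s/--device_specific): a
# releasetools.py in the given directory defines plain functions named after
# the hooks above, each taking the DeviceSpecificParams object. The body
# below is hypothetical.
#
#   def FullOTA_InstallEnd(info):
#     # Attributes such as info.script come from the constructor kwargs;
#     # info.extras holds the repeated `-x key=value` command line flags.
#     info.script.Print("Installing vendor baseband...")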
3217
3218
3219class File(object):
3220  def __init__(self, name, data, compress_size=None):
3221    self.name = name
3222    self.data = data
3223    self.size = len(data)
3224    self.compress_size = compress_size or self.size
3225    self.sha1 = sha1(data).hexdigest()
3226
3227  @classmethod
3228  def FromLocalFile(cls, name, diskname):
3229    with open(diskname, "rb") as f:
3230      data = f.read()
3231    return File(name, data)
3233
3234  def WriteToTemp(self):
3235    t = tempfile.NamedTemporaryFile()
3236    t.write(self.data)
3237    t.flush()
3238    return t
3239
3240  def WriteToDir(self, d):
3241    output_path = os.path.join(d, self.name)
3242    os.makedirs(os.path.dirname(output_path), exist_ok=True)
3243    with open(output_path, "wb") as fp:
3244      fp.write(self.data)
3245
3246  def AddToZip(self, z, compression=None):
3247    ZipWriteStr(z, self.name, self.data, compress_type=compression)
3248
3249
3250DIFF_PROGRAM_BY_EXT = {
3251    ".gz": "imgdiff",
3252    ".zip": ["imgdiff", "-z"],
3253    ".jar": ["imgdiff", "-z"],
3254    ".apk": ["imgdiff", "-z"],
3255    ".img": "imgdiff",
3256}
3257
3258
3259class Difference(object):
3260  def __init__(self, tf, sf, diff_program=None):
3261    self.tf = tf
3262    self.sf = sf
3263    self.patch = None
3264    self.diff_program = diff_program
3265
3266  def ComputePatch(self):
3267    """Compute the patch (as a string of data) needed to turn sf into
3268    tf.  Returns the same tuple as GetPatch()."""
3269
3270    tf = self.tf
3271    sf = self.sf
3272
3273    if self.diff_program:
3274      diff_program = self.diff_program
3275    else:
3276      ext = os.path.splitext(tf.name)[1]
3277      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")
3278
3279    ttemp = tf.WriteToTemp()
3280    stemp = sf.WriteToTemp()
3281
3282
3284    try:
3285      ptemp = tempfile.NamedTemporaryFile()
3286      if isinstance(diff_program, list):
3287        cmd = copy.copy(diff_program)
3288      else:
3289        cmd = [diff_program]
3290      cmd.append(stemp.name)
3291      cmd.append(ttemp.name)
3292      cmd.append(ptemp.name)
3293      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
3294      err = []
3295
3296      def run():
3297        _, e = p.communicate()
3298        if e:
3299          err.append(e)
3300      th = threading.Thread(target=run)
3301      th.start()
3302      th.join(timeout=300)   # 5 mins
3303      if th.is_alive():
3304        logger.warning("diff command timed out")
3305        p.terminate()
3306        th.join(5)
3307        if th.is_alive():
3308          p.kill()
3309          th.join()
3310
3311      if p.returncode != 0:
3312        logger.warning("Failure running %s:\n%s\n", cmd, "".join(err))
3313        self.patch = None
3314        return None, None, None
3315      diff = ptemp.read()
3316    finally:
3317      ptemp.close()
3318      stemp.close()
3319      ttemp.close()
3320
3321    self.patch = diff
3322    return self.tf, self.sf, self.patch
3323
3324  def GetPatch(self):
3325    """Returns a tuple of (target_file, source_file, patch_data).
3326
3327    patch_data may be None if ComputePatch hasn't been called, or if
3328    computing the patch failed.
3329    """
3330    return self.tf, self.sf, self.patch
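
# Illustrative use of Difference (the file objects are hypothetical): the
# diff tool is chosen from the target's extension via DIFF_PROGRAM_BY_EXT,
# defaulting to bsdiff, and the external command gets 300 seconds to finish.
#
#   d = Difference(tgt_file, src_file)
#   tf, sf, patch = d.ComputePatch()
#   if patch is None:
#     pass  # the diff program failed or timed out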
3331
3332
3333def ComputeDifferences(diffs):
3334  """Call ComputePatch on all the Difference objects in 'diffs'."""
3335  logger.info("%d diffs to compute", len(diffs))
3336
3337  # Do the largest files first, to try and reduce the long-pole effect.
3338  by_size = [(i.tf.size, i) for i in diffs]
3339  by_size.sort(reverse=True)
3340  by_size = [i[1] for i in by_size]
3341
3342  lock = threading.Lock()
3343  diff_iter = iter(by_size)   # accessed under lock
3344
3345  def worker():
3346    try:
3347      lock.acquire()
3348      for d in diff_iter:
3349        lock.release()
3350        start = time.time()
3351        d.ComputePatch()
3352        dur = time.time() - start
3353        lock.acquire()
3354
3355        tf, sf, patch = d.GetPatch()
3356        if sf.name == tf.name:
3357          name = tf.name
3358        else:
3359          name = "%s (%s)" % (tf.name, sf.name)
3360        if patch is None:
3361          logger.error("patching failed! %40s", name)
3362        else:
3363          logger.info(
3364              "%8.2f sec %8d / %8d bytes (%6.2f%%) %s", dur, len(patch),
3365              tf.size, 100.0 * len(patch) / tf.size, name)
3366      lock.release()
3367    except Exception:
3368      logger.exception("Failed to compute diff from worker")
3369      raise
3370
3371  # start worker threads; wait for them all to finish.
3372  threads = [threading.Thread(target=worker)
3373             for _ in range(OPTIONS.worker_threads)]
3374  for th in threads:
3375    th.start()
3376  while threads:
3377    threads.pop().join()
3378
3379
3380class BlockDifference(object):
3381  def __init__(self, partition, tgt, src=None, check_first_block=False,
3382               version=None, disable_imgdiff=False):
3383    self.tgt = tgt
3384    self.src = src
3385    self.partition = partition
3386    self.check_first_block = check_first_block
3387    self.disable_imgdiff = disable_imgdiff
3388
3389    if version is None:
3390      version = max(
3391          int(i) for i in
3392          OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
3393    assert version >= 3
3394    self.version = version
3395
3396    b = BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
3397                       version=self.version,
3398                       disable_imgdiff=self.disable_imgdiff)
3399    self.path = os.path.join(MakeTempDir(), partition)
3400    b.Compute(self.path)
3401    self._required_cache = b.max_stashed_size
3402    self.touched_src_ranges = b.touched_src_ranges
3403    self.touched_src_sha1 = b.touched_src_sha1
3404
3405    # On devices with dynamic partitions, for new partitions,
3406    # src is None but OPTIONS.source_info_dict is not.
3407    if OPTIONS.source_info_dict is None:
3408      is_dynamic_build = OPTIONS.info_dict.get(
3409          "use_dynamic_partitions") == "true"
3410      is_dynamic_source = False
3411    else:
3412      is_dynamic_build = OPTIONS.source_info_dict.get(
3413          "use_dynamic_partitions") == "true"
3414      is_dynamic_source = partition in shlex.split(
3415          OPTIONS.source_info_dict.get("dynamic_partition_list", "").strip())
3416
3417    is_dynamic_target = partition in shlex.split(
3418        OPTIONS.info_dict.get("dynamic_partition_list", "").strip())
3419
3420    # For dynamic partitions builds, check partition list in both source
3421    # and target build because new partitions may be added, and existing
3422    # partitions may be removed.
3423    is_dynamic = is_dynamic_build and (is_dynamic_source or is_dynamic_target)
3424
3425    if is_dynamic:
3426      self.device = 'map_partition("%s")' % partition
3427    else:
3428      if OPTIONS.source_info_dict is None:
3429        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
3430                                              OPTIONS.info_dict)
3431      else:
3432        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
3433                                              OPTIONS.source_info_dict)
3434      self.device = device_expr
3435
3436  @property
3437  def required_cache(self):
3438    return self._required_cache
3439
3440  def WriteScript(self, script, output_zip, progress=None,
3441                  write_verify_script=False):
3442    if not self.src:
3443      # write the output unconditionally
3444      script.Print("Patching %s image unconditionally..." % (self.partition,))
3445    else:
3446      script.Print("Patching %s image after verification." % (self.partition,))
3447
3448    if progress:
3449      script.ShowProgress(progress, 0)
3450    self._WriteUpdate(script, output_zip)
3451
3452    if write_verify_script:
3453      self.WritePostInstallVerifyScript(script)
3454
3455  def WriteStrictVerifyScript(self, script):
3456    """Verify all the blocks in the care_map, including clobbered blocks.
3457
3458    This differs from the WriteVerifyScript() function: a) it prints different
3459    error messages; b) it doesn't allow half-way updated images to pass the
3460    verification."""
3461
3462    partition = self.partition
3463    script.Print("Verifying %s..." % (partition,))
3464    ranges = self.tgt.care_map
3465    ranges_str = ranges.to_string_raw()
3466    script.AppendExtra(
3467        'range_sha1(%s, "%s") == "%s" && ui_print("    Verified.") || '
3468        'ui_print("%s has unexpected contents.");' % (
3469            self.device, ranges_str,
3470            self.tgt.TotalSha1(include_clobbered_blocks=True),
3471            self.partition))
3472    script.AppendExtra("")
3473
3474  def WriteVerifyScript(self, script, touched_blocks_only=False):
3475    partition = self.partition
3476
3477    # full OTA
3478    if not self.src:
3479      script.Print("Image %s will be patched unconditionally." % (partition,))
3480
3481    # incremental OTA
3482    else:
3483      if touched_blocks_only:
3484        ranges = self.touched_src_ranges
3485        expected_sha1 = self.touched_src_sha1
3486      else:
3487        ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
3488        expected_sha1 = self.src.TotalSha1()
3489
3490      # No blocks to be checked, skipping.
3491      if not ranges:
3492        return
3493
3494      ranges_str = ranges.to_string_raw()
3495      script.AppendExtra(
3496          'if (range_sha1(%s, "%s") == "%s" || block_image_verify(%s, '
3497          'package_extract_file("%s.transfer.list"), "%s.new.dat", '
3498          '"%s.patch.dat")) then' % (
3499              self.device, ranges_str, expected_sha1,
3500              self.device, partition, partition, partition))
3501      script.Print('Verified %s image...' % (partition,))
3502      script.AppendExtra('else')
3503
3504      if self.version >= 4:
3505
3506        # Bug: 21124327
3507        # When generating incrementals for the system and vendor partitions in
3508        # version 4 or newer, explicitly check the first block (which contains
3509        # the superblock) of the partition to see if it's what we expect. If
3510        # this check fails, give an explicit log message about the partition
3511        # having been remounted R/W (the most likely explanation).
3512        if self.check_first_block:
3513          script.AppendExtra('check_first_block(%s);' % (self.device,))
3514
3515        # If version >= 4, try block recovery before abort update
3516        if partition == "system":
3517          code = ErrorCode.SYSTEM_RECOVER_FAILURE
3518        else:
3519          code = ErrorCode.VENDOR_RECOVER_FAILURE
3520        script.AppendExtra((
3521            'ifelse (block_image_recover({device}, "{ranges}") && '
3522            'block_image_verify({device}, '
3523            'package_extract_file("{partition}.transfer.list"), '
3524            '"{partition}.new.dat", "{partition}.patch.dat"), '
3525            'ui_print("{partition} recovered successfully."), '
3526            'abort("E{code}: {partition} partition fails to recover"));\n'
3527            'endif;').format(device=self.device, ranges=ranges_str,
3528                             partition=partition, code=code))
3529
3530      # Abort the OTA update. Note that the incremental OTA cannot be applied
3531      # even if it may match the checksum of the target partition.
3532      # a) If version < 3, operations like move and erase will make changes
3533      #    unconditionally and damage the partition.
3534      # b) If version >= 3, it won't even reach here.
3535      else:
3536        if partition == "system":
3537          code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
3538        else:
3539          code = ErrorCode.VENDOR_VERIFICATION_FAILURE
3540        script.AppendExtra((
3541            'abort("E%d: %s partition has unexpected contents");\n'
3542            'endif;') % (code, partition))
3543
3544  def WritePostInstallVerifyScript(self, script):
3545    partition = self.partition
3546    script.Print('Verifying the updated %s image...' % (partition,))
3547    # Unlike pre-install verification, clobbered_blocks should not be ignored.
3548    ranges = self.tgt.care_map
3549    ranges_str = ranges.to_string_raw()
3550    script.AppendExtra(
3551        'if range_sha1(%s, "%s") == "%s" then' % (
3552            self.device, ranges_str,
3553            self.tgt.TotalSha1(include_clobbered_blocks=True)))
3554
3555    # Bug: 20881595
3556    # Verify that extended blocks are really zeroed out.
3557    if self.tgt.extended:
3558      ranges_str = self.tgt.extended.to_string_raw()
3559      script.AppendExtra(
3560          'if range_sha1(%s, "%s") == "%s" then' % (
3561              self.device, ranges_str,
3562              self._HashZeroBlocks(self.tgt.extended.size())))
3563      script.Print('Verified the updated %s image.' % (partition,))
3564      if partition == "system":
3565        code = ErrorCode.SYSTEM_NONZERO_CONTENTS
3566      else:
3567        code = ErrorCode.VENDOR_NONZERO_CONTENTS
3568      script.AppendExtra(
3569          'else\n'
3570          '  abort("E%d: %s partition has unexpected non-zero contents after '
3571          'OTA update");\n'
3572          'endif;' % (code, partition))
3573    else:
3574      script.Print('Verified the updated %s image.' % (partition,))
3575
3576    if partition == "system":
3577      code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
3578    else:
3579      code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS
3580
3581    script.AppendExtra(
3582        'else\n'
3583        '  abort("E%d: %s partition has unexpected contents after OTA '
3584        'update");\n'
3585        'endif;' % (code, partition))
3586
3587  def _WriteUpdate(self, script, output_zip):
3588    ZipWrite(output_zip,
3589             '{}.transfer.list'.format(self.path),
3590             '{}.transfer.list'.format(self.partition))
3591
3592    # For full OTA, compress the new.dat with brotli with quality 6 to reduce
3593    # its size. Quality 9 almost triples the compression time but doesn't
3594    # reduce the size much further. For a typical 1.8G system.new.dat:
3595    #                       zip  | brotli(quality 6)  | brotli(quality 9)
3596    #   compressed_size:    942M | 869M (~8% reduced) | 854M
3597    #   compression_time:   75s  | 265s               | 719s
3598    #   decompression_time: 15s  | 25s                | 25s
3599
3600    if not self.src:
3601      brotli_cmd = ['brotli', '--quality=6',
3602                    '--output={}.new.dat.br'.format(self.path),
3603                    '{}.new.dat'.format(self.path)]
3604      print("Compressing {}.new.dat with brotli".format(self.partition))
3605      RunAndCheckOutput(brotli_cmd)
3606
3607      new_data_name = '{}.new.dat.br'.format(self.partition)
3608      ZipWrite(output_zip,
3609               '{}.new.dat.br'.format(self.path),
3610               new_data_name,
3611               compress_type=zipfile.ZIP_STORED)
3612    else:
3613      new_data_name = '{}.new.dat'.format(self.partition)
3614      ZipWrite(output_zip, '{}.new.dat'.format(self.path), new_data_name)
3615
3616    ZipWrite(output_zip,
3617             '{}.patch.dat'.format(self.path),
3618             '{}.patch.dat'.format(self.partition),
3619             compress_type=zipfile.ZIP_STORED)
3620
3621    if self.partition == "system":
3622      code = ErrorCode.SYSTEM_UPDATE_FAILURE
3623    else:
3624      code = ErrorCode.VENDOR_UPDATE_FAILURE
3625
3626    call = ('block_image_update({device}, '
3627            'package_extract_file("{partition}.transfer.list"), '
3628            '"{new_data_name}", "{partition}.patch.dat") ||\n'
3629            '  abort("E{code}: Failed to update {partition} image.");'.format(
3630                device=self.device, partition=self.partition,
3631                new_data_name=new_data_name, code=code))
3632    script.AppendExtra(script.WordWrap(call))
3633
3634  def _HashBlocks(self, source, ranges):  # pylint: disable=no-self-use
3635    data = source.ReadRangeSet(ranges)
3636    ctx = sha1()
3637
3638    for p in data:
3639      ctx.update(p)
3640
3641    return ctx.hexdigest()
3642
3643  def _HashZeroBlocks(self, num_blocks):  # pylint: disable=no-self-use
3644    """Return the hash value for all zero blocks."""
3645    zero_block = b'\x00' * 4096  # bytes, as required by sha1().update()
3646    ctx = sha1()
3647    for _ in range(num_blocks):
3648      ctx.update(zero_block)
3649
3650    return ctx.hexdigest()
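
# Illustrative edify fragment emitted by _WriteUpdate() for a full OTA of a
# dynamic "system" partition (the numeric error code comes from ErrorCode):
#
#   block_image_update(map_partition("system"),
#       package_extract_file("system.transfer.list"),
#       "system.new.dat.br", "system.patch.dat") ||
#     abort("E<code>: Failed to update system image.");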
3651
3652
3653# Expose these two classes to support vendor-specific scripts
3654DataImage = images.DataImage
3655EmptyImage = images.EmptyImage
3656
3657
3658# map recovery.fstab's fs_types to mount/format "partition types"
3659PARTITION_TYPES = {
3660    "ext4": "EMMC",
3661    "emmc": "EMMC",
3662    "f2fs": "EMMC",
3663    "squashfs": "EMMC",
3664    "erofs": "EMMC"
3665}
3666
3667
3668def GetTypeAndDevice(mount_point, info, check_no_slot=True):
3669  """
3670  Use GetTypeAndDeviceExpr whenever possible. This function is kept for
3671  backwards compatibility. It aborts if the fstab entry has the slotselect
3672  option (unless check_no_slot is explicitly set to False).
3673  """
3674  fstab = info["fstab"]
3675  if fstab:
3676    if check_no_slot:
3677      assert not fstab[mount_point].slotselect, \
3678          "Use GetTypeAndDeviceExpr instead"
3679    return (PARTITION_TYPES[fstab[mount_point].fs_type],
3680            fstab[mount_point].device)
3681  raise KeyError
3682
3683
3684def GetTypeAndDeviceExpr(mount_point, info):
3685  """
3686  Returns the partition type of the partition, and an edify expression that
3687  evaluates to the device path at runtime.
3688  """
3689  fstab = info["fstab"]
3690  if fstab:
3691    p = fstab[mount_point]
3692    device_expr = '"%s"' % fstab[mount_point].device
3693    if p.slotselect:
3694      device_expr = 'add_slot_suffix(%s)' % device_expr
3695    return (PARTITION_TYPES[fstab[mount_point].fs_type], device_expr)
3696  raise KeyError
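
# Illustrative result, assuming a hypothetical fstab entry for /vendor with
# fs_type "ext4", device "/dev/block/by-name/vendor", and the slotselect
# option set:
#
#   GetTypeAndDeviceExpr("/vendor", info)
#   # -> ("EMMC", 'add_slot_suffix("/dev/block/by-name/vendor")')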
3697
3698
3699def GetEntryForDevice(fstab, device):
3700  """
3701  Returns:
3702    The first entry in fstab whose device is the given value.
3703  """
3704  if not fstab:
3705    return None
3706  for mount_point in fstab:
3707    if fstab[mount_point].device == device:
3708      return fstab[mount_point]
3709  return None
3710
3711
3712def ParseCertificate(data):
3713  """Parses a PEM-encoded certificate and converts it to DER encoding.
3714
3715  This gives the same result as `openssl x509 -in <filename> -outform DER`.
3716
3717  Returns:
3718    The decoded certificate bytes.
3719  """
3720  cert_buffer = []
3721  save = False
3722  for line in data.split("\n"):
3723    if "--END CERTIFICATE--" in line:
3724      break
3725    if save:
3726      cert_buffer.append(line)
3727    if "--BEGIN CERTIFICATE--" in line:
3728      save = True
3729  cert = base64.b64decode("".join(cert_buffer))
3730  return cert
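
# Illustrative usage (cert_path is hypothetical); the result matches
# `openssl x509 -in <cert_path> -outform DER`:
#
#   with open(cert_path) as f:
#     der_bytes = ParseCertificate(f.read())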
3731
3732
3733def ExtractPublicKey(cert):
3734  """Extracts the public key (PEM-encoded) from the given certificate file.
3735
3736  Args:
3737    cert: The certificate filename.
3738
3739  Returns:
3740    The public key string.
3741
3742  Raises:
3743    AssertionError: On non-zero return from 'openssl'.
3744  """
3745  # The behavior with '-out' is different between openssl 1.1 and openssl 1.0.
3746  # While openssl 1.1 writes the key into the given filename followed by '-out',
3747  # openssl 1.0 (both of 1.0.1 and 1.0.2) doesn't. So we collect the output from
3748  # stdout instead.
3749  cmd = ['openssl', 'x509', '-pubkey', '-noout', '-in', cert]
3750  proc = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
3751  pubkey, stderrdata = proc.communicate()
3752  assert proc.returncode == 0, \
3753      'Failed to dump public key from certificate: %s\n%s' % (cert, stderrdata)
3754  return pubkey
3755
3756
3757def ExtractAvbPublicKey(avbtool, key):
3758  """Extracts the AVB public key from the given public or private key.
3759
3760  Args:
3761    avbtool: The AVB tool to use.
3762    key: The input key file, which should be PEM-encoded public or private key.
3763
3764  Returns:
3765    The path to the extracted AVB public key file.
3766  """
3767  output = MakeTempFile(prefix='avb-', suffix='.avbpubkey')
3768  RunAndCheckOutput(
3769      [avbtool, 'extract_public_key', "--key", key, "--output", output])
3770  return output
3771
3772
3773def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
3774                      info_dict=None):
3775  """Generates the recovery-from-boot patch and writes the script to output.
3776
3777  Most of the space in the boot and recovery images is just the kernel, which is
3778  identical for the two, so the resulting patch should be efficient. Add it to
3779  the output zip, along with a shell script that is run from init.rc on first
3780  boot to actually do the patching and install the new recovery image.
3781
3782  Args:
3783    input_dir: The top-level input directory of the target-files.zip.
3784    output_sink: The callback function that writes the result.
3785    recovery_img: File object for the recovery image.
3786    boot_img: File object for the boot image.
3787    info_dict: A dict returned by common.LoadInfoDict() on the input
3788        target_files. Will use OPTIONS.info_dict if None has been given.
3789  """
3790  if info_dict is None:
3791    info_dict = OPTIONS.info_dict
3792
3793  full_recovery_image = info_dict.get("full_recovery_image") == "true"
3794  board_uses_vendorimage = info_dict.get("board_uses_vendorimage") == "true"
3795
3796  if board_uses_vendorimage:
3797    # In this case, the output sink is rooted at VENDOR
3798    recovery_img_path = "etc/recovery.img"
3799    recovery_resource_dat_path = "VENDOR/etc/recovery-resource.dat"
3800    sh_dir = "bin"
3801  else:
3802    # In this case the output sink is rooted at SYSTEM
3803    recovery_img_path = "vendor/etc/recovery.img"
3804    recovery_resource_dat_path = "SYSTEM/vendor/etc/recovery-resource.dat"
3805    sh_dir = "vendor/bin"
3806
3807  if full_recovery_image:
3808    output_sink(recovery_img_path, recovery_img.data)
3809
3810  else:
3811    include_recovery_dtbo = info_dict.get("include_recovery_dtbo") == "true"
3812    include_recovery_acpio = info_dict.get("include_recovery_acpio") == "true"
3813    path = os.path.join(input_dir, recovery_resource_dat_path)
3814    # Use bsdiff to handle mismatching entries (Bug: 72731506)
3815    if include_recovery_dtbo or include_recovery_acpio:
3816      diff_program = ["bsdiff"]
3817      bonus_args = ""
3818      assert not os.path.exists(path)
3819    else:
3820      diff_program = ["imgdiff"]
3821      if os.path.exists(path):
3822        diff_program.append("-b")
3823        diff_program.append(path)
3824        bonus_args = "--bonus /vendor/etc/recovery-resource.dat"
3825      else:
3826        bonus_args = ""
3827
3828    d = Difference(recovery_img, boot_img, diff_program=diff_program)
3829    _, _, patch = d.ComputePatch()
3830    output_sink("recovery-from-boot.p", patch)
3831
3832  try:
3833    # The following GetTypeAndDevice()s need to use the path in the target
3834    # info_dict instead of source_info_dict.
3835    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict,
3836                                              check_no_slot=False)
3837    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict,
3838                                                      check_no_slot=False)
3839  except KeyError:
3840    return
3841
3842  if full_recovery_image:
3843
3844    # Note that we use /vendor to refer to the recovery resources. This will
3845    # work for a separate vendor partition mounted at /vendor or a
3846    # /system/vendor subdirectory on the system partition, for which init will
3847    # create a symlink from /vendor to /system/vendor.
3848
3849    sh = """#!/vendor/bin/sh
3850if ! applypatch --check %(type)s:%(device)s:%(size)d:%(sha1)s; then
3851  applypatch \\
3852          --flash /vendor/etc/recovery.img \\
3853          --target %(type)s:%(device)s:%(size)d:%(sha1)s && \\
3854      log -t recovery "Installing new recovery image: succeeded" || \\
3855      log -t recovery "Installing new recovery image: failed"
3856else
3857  log -t recovery "Recovery image already installed"
3858fi
3859""" % {'type': recovery_type,
3860       'device': recovery_device,
3861       'sha1': recovery_img.sha1,
3862       'size': recovery_img.size}
3863  else:
3864    sh = """#!/vendor/bin/sh
3865if ! applypatch --check %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
3866  applypatch %(bonus_args)s \\
3867          --patch /vendor/recovery-from-boot.p \\
3868          --source %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s \\
3869          --target %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s && \\
3870      log -t recovery "Installing new recovery image: succeeded" || \\
3871      log -t recovery "Installing new recovery image: failed"
3872else
3873  log -t recovery "Recovery image already installed"
3874fi
3875""" % {'boot_size': boot_img.size,
3876       'boot_sha1': boot_img.sha1,
3877       'recovery_size': recovery_img.size,
3878       'recovery_sha1': recovery_img.sha1,
3879       'boot_type': boot_type,
3880       'boot_device': boot_device + '$(getprop ro.boot.slot_suffix)',
3881       'recovery_type': recovery_type,
3882       'recovery_device': recovery_device + '$(getprop ro.boot.slot_suffix)',
3883       'bonus_args': bonus_args}
3884
3885  # The install script location moved from /system/etc to /system/bin in the L
3886  # release. In the R release it is in VENDOR/bin or SYSTEM/vendor/bin.
3887  sh_location = os.path.join(sh_dir, "install-recovery.sh")
3888
3889  logger.info("putting script in %s", sh_location)
3890
3891  output_sink(sh_location, sh.encode())
3892
3893
3894class DynamicPartitionUpdate(object):
3895  def __init__(self, src_group=None, tgt_group=None, progress=None,
3896               block_difference=None):
3897    self.src_group = src_group
3898    self.tgt_group = tgt_group
3899    self.progress = progress
3900    self.block_difference = block_difference
3901
3902  @property
3903  def src_size(self):
3904    if not self.block_difference:
3905      return 0
3906    return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.src)
3907
3908  @property
3909  def tgt_size(self):
3910    if not self.block_difference:
3911      return 0
3912    return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.tgt)
3913
3914  @staticmethod
3915  def _GetSparseImageSize(img):
3916    if not img:
3917      return 0
3918    return img.blocksize * img.total_blocks
3919
3920
3921class DynamicGroupUpdate(object):
3922  def __init__(self, src_size=None, tgt_size=None):
3923    # None: group does not exist. 0: no size limits.
3924    self.src_size = src_size
3925    self.tgt_size = tgt_size
3926
3927
3928class DynamicPartitionsDifference(object):
3929  def __init__(self, info_dict, block_diffs, progress_dict=None,
3930               source_info_dict=None):
3931    if progress_dict is None:
3932      progress_dict = {}
3933
3934    self._remove_all_before_apply = False
3935    if source_info_dict is None:
3936      self._remove_all_before_apply = True
3937      source_info_dict = {}
3938
3939    block_diff_dict = collections.OrderedDict(
3940        [(e.partition, e) for e in block_diffs])
3941
3942    assert len(block_diff_dict) == len(block_diffs), \
3943        "Duplicated BlockDifference object for {}".format(
3944            [partition for partition, count in
3945             collections.Counter(e.partition for e in block_diffs).items()
3946             if count > 1])
3947
3948    self._partition_updates = collections.OrderedDict()
3949
3950    for p, block_diff in block_diff_dict.items():
3951      self._partition_updates[p] = DynamicPartitionUpdate()
3952      self._partition_updates[p].block_difference = block_diff
3953
3954    for p, progress in progress_dict.items():
3955      if p in self._partition_updates:
3956        self._partition_updates[p].progress = progress
3957
3958    tgt_groups = shlex.split(info_dict.get(
3959        "super_partition_groups", "").strip())
3960    src_groups = shlex.split(source_info_dict.get(
3961        "super_partition_groups", "").strip())
3962
3963    for g in tgt_groups:
3964      for p in shlex.split(info_dict.get(
3965              "super_%s_partition_list" % g, "").strip()):
3966        assert p in self._partition_updates, \
3967            "{} is in target super_{}_partition_list but no BlockDifference " \
3968            "object is provided.".format(p, g)
3969        self._partition_updates[p].tgt_group = g
3970
3971    for g in src_groups:
3972      for p in shlex.split(source_info_dict.get(
3973              "super_%s_partition_list" % g, "").strip()):
3974        assert p in self._partition_updates, \
3975            "{} is in source super_{}_partition_list but no BlockDifference " \
3976            "object is provided.".format(p, g)
3977        self._partition_updates[p].src_group = g
3978
3979    target_dynamic_partitions = set(shlex.split(info_dict.get(
3980        "dynamic_partition_list", "").strip()))
3981    block_diffs_with_target = set(p for p, u in self._partition_updates.items()
3982                                  if u.tgt_size)
3983    assert block_diffs_with_target == target_dynamic_partitions, \
3984        "Target Dynamic partitions: {}, BlockDifference with target: {}".format(
3985            list(target_dynamic_partitions), list(block_diffs_with_target))
3986
3987    source_dynamic_partitions = set(shlex.split(source_info_dict.get(
3988        "dynamic_partition_list", "").strip()))
3989    block_diffs_with_source = set(p for p, u in self._partition_updates.items()
3990                                  if u.src_size)
3991    assert block_diffs_with_source == source_dynamic_partitions, \
3992        "Source Dynamic partitions: {}, BlockDifference with source: {}".format(
3993            list(source_dynamic_partitions), list(block_diffs_with_source))
3994
3995    if self._partition_updates:
3996      logger.info("Updating dynamic partitions %s",
3997                  self._partition_updates.keys())
3998
3999    self._group_updates = collections.OrderedDict()
4000
4001    for g in tgt_groups:
4002      self._group_updates[g] = DynamicGroupUpdate()
4003      self._group_updates[g].tgt_size = int(info_dict.get(
4004          "super_%s_group_size" % g, "0").strip())
4005
4006    for g in src_groups:
4007      if g not in self._group_updates:
4008        self._group_updates[g] = DynamicGroupUpdate()
4009      self._group_updates[g].src_size = int(source_info_dict.get(
4010          "super_%s_group_size" % g, "0").strip())
4011
4012    self._Compute()
4013
4014  def WriteScript(self, script, output_zip, write_verify_script=False):
4015    script.Comment('--- Start patching dynamic partitions ---')
4016    for p, u in self._partition_updates.items():
4017      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
4018        script.Comment('Patch partition %s' % p)
4019        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
4020                                       write_verify_script=False)
4021
4022    op_list_path = MakeTempFile()
4023    with open(op_list_path, 'w') as f:
4024      for line in self._op_list:
4025        f.write('{}\n'.format(line))
4026
4027    ZipWrite(output_zip, op_list_path, "dynamic_partitions_op_list")
4028
4029    script.Comment('Update dynamic partition metadata')
4030    script.AppendExtra('assert(update_dynamic_partitions('
4031                       'package_extract_file("dynamic_partitions_op_list")));')
4032
4033    if write_verify_script:
4034      for p, u in self._partition_updates.items():
4035        if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
4036          u.block_difference.WritePostInstallVerifyScript(script)
4037          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors
4038
4039    for p, u in self._partition_updates.items():
4040      if u.tgt_size and u.src_size <= u.tgt_size:
4041        script.Comment('Patch partition %s' % p)
4042        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
4043                                       write_verify_script=write_verify_script)
4044        if write_verify_script:
4045          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors
4046
4047    script.Comment('--- End patching dynamic partitions ---')
4048
4049  def _Compute(self):
4050    self._op_list = list()
4051
4052    def append(line):
4053      self._op_list.append(line)
4054
4055    def comment(line):
4056      self._op_list.append("# %s" % line)
4057
4058    if self._remove_all_before_apply:
4059      comment('Remove all existing dynamic partitions and groups before '
4060              'applying full OTA')
4061      append('remove_all_groups')
4062
4063    for p, u in self._partition_updates.items():
4064      if u.src_group and not u.tgt_group:
4065        append('remove %s' % p)
4066
4067    for p, u in self._partition_updates.items():
4068      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
4069        comment('Move partition %s from %s to default' % (p, u.src_group))
4070        append('move %s default' % p)
4071
4072    for p, u in self._partition_updates.items():
4073      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
4074        comment('Shrink partition %s from %d to %d' %
4075                (p, u.src_size, u.tgt_size))
4076        append('resize %s %s' % (p, u.tgt_size))
4077
4078    for g, u in self._group_updates.items():
4079      if u.src_size is not None and u.tgt_size is None:
4080        append('remove_group %s' % g)
4081      if (u.src_size is not None and u.tgt_size is not None and
4082              u.src_size > u.tgt_size):
4083        comment('Shrink group %s from %d to %d' % (g, u.src_size, u.tgt_size))
4084        append('resize_group %s %d' % (g, u.tgt_size))
4085
4086    for g, u in self._group_updates.items():
4087      if u.src_size is None and u.tgt_size is not None:
4088        comment('Add group %s with maximum size %d' % (g, u.tgt_size))
4089        append('add_group %s %d' % (g, u.tgt_size))
4090      if (u.src_size is not None and u.tgt_size is not None and
4091              u.src_size < u.tgt_size):
4092        comment('Grow group %s from %d to %d' % (g, u.src_size, u.tgt_size))
4093        append('resize_group %s %d' % (g, u.tgt_size))
4094
4095    for p, u in self._partition_updates.items():
4096      if u.tgt_group and not u.src_group:
4097        comment('Add partition %s to group %s' % (p, u.tgt_group))
4098        append('add %s %s' % (p, u.tgt_group))
4099
4100    for p, u in self._partition_updates.items():
4101      if u.tgt_size and u.src_size < u.tgt_size:
4102        comment('Grow partition %s from %d to %d' %
4103                (p, u.src_size, u.tgt_size))
4104        append('resize %s %d' % (p, u.tgt_size))
4105
4106    for p, u in self._partition_updates.items():
4107      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
4108        comment('Move partition %s from default to %s' %
4109                (p, u.tgt_group))
4110        append('move %s %s' % (p, u.tgt_group))
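
# Illustrative dynamic_partitions_op_list produced by _Compute() for a full
# OTA (names and byte sizes are hypothetical; per-operation comment lines
# are elided):
#
#   remove_all_groups
#   add_group group_foo 4294967296
#   add system group_foo
#   resize system 3229614080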
4111
4112
4113def GetBootImageBuildProp(boot_img, ramdisk_format=RamdiskFormat.LZ4):
4114  """
4115  Gets build.prop from the ramdisk within the boot image.
4116
4117  Args:
4118    boot_img: The boot image file. The ramdisk must be compressed in lz4 or gzip format.
4119
4120  Returns:
4121    The path to an extracted file holding the boot image properties, or None on failure.
4122  """
4123  tmp_dir = MakeTempDir('boot_', suffix='.img')
4124  try:
4125    RunAndCheckOutput(['unpack_bootimg', '--boot_img',
4126                      boot_img, '--out', tmp_dir])
4127    ramdisk = os.path.join(tmp_dir, 'ramdisk')
4128    if not os.path.isfile(ramdisk):
4129      logger.warning('Unable to get boot image build props: no ramdisk in boot')
4130      return None
4131    uncompressed_ramdisk = os.path.join(tmp_dir, 'uncompressed_ramdisk')
4132    if ramdisk_format == RamdiskFormat.LZ4:
4133      RunAndCheckOutput(['lz4', '-d', ramdisk, uncompressed_ramdisk])
4134    elif ramdisk_format == RamdiskFormat.GZ:
4135      with open(ramdisk, 'rb') as input_stream:
4136        with open(uncompressed_ramdisk, 'wb') as output_stream:
4137          p2 = Run(['gzip', '-d'], stdin=input_stream.fileno(),
4138                   stdout=output_stream.fileno())
4139          p2.wait()
4140    else:
4141      logger.error('Only lz4 and gzip ramdisk formats are supported.')
4142      return None
4143
4144    abs_uncompressed_ramdisk = os.path.abspath(uncompressed_ramdisk)
4145    extracted_ramdisk = MakeTempDir('extracted_ramdisk')
4146    # Use "toybox cpio" instead of "cpio" because the latter invokes cpio from
4147    # the host environment.
4148    RunAndCheckOutput(['toybox', 'cpio', '-F', abs_uncompressed_ramdisk, '-i'],
4149                      cwd=extracted_ramdisk)
4150
4151    for search_path in RAMDISK_BUILD_PROP_REL_PATHS:
4152      prop_file = os.path.join(extracted_ramdisk, search_path)
4153      if os.path.isfile(prop_file):
4154        return prop_file
4155    logger.warning('Unable to get boot image build props: no %s in ramdisk',
4156                   ' or '.join(RAMDISK_BUILD_PROP_REL_PATHS))
4157
4158    return None
4159
4160  except ExternalError as e:
4161    logger.warning('Unable to get boot image build props: %s', e)
4162    return None
4163
4164
4165def GetBootImageTimestamp(boot_img):
4166  """
4167  Gets the timestamp from the ramdisk within the boot image.
4168
4169  Args:
4170    boot_img: The boot image file. The ramdisk must be compressed in lz4 format.
4171
4172  Returns:
4173    An integer that corresponds to the timestamp of the boot image, or None
4174    if the file has an unknown format. Raises an exception if an unexpected
4175    error has occurred.
4176  """
4177  prop_file = GetBootImageBuildProp(boot_img)
4178  if not prop_file:
4179    return None
4180
4181  props = PartitionBuildProps.FromBuildPropFile('boot', prop_file)
4182  if props is None:
4183    return None
4184
4185  try:
4186    timestamp = props.GetProp('ro.bootimage.build.date.utc')
4187    if timestamp:
4188      return int(timestamp)
4189    logger.warning(
4190        'Unable to get boot image timestamp: ro.bootimage.build.date.utc is undefined')
4191    return None
4192
4193  except ExternalError as e:
4194    logger.warning('Unable to get boot image timestamp: %s', e)
4195    return None
4196
4197
4198def IsSparseImage(filepath):
4199  if not os.path.exists(filepath):
4200    return False
4201  with open(filepath, 'rb') as fp:
4202    # Magic for android sparse image format
4203    # https://source.android.com/devices/bootloader/images
4204    return fp.read(4) == b'\x3A\xFF\x26\xED'
4205
4206
4207def UnsparseImage(filepath, target_path=None):
4208  if not IsSparseImage(filepath):
4209    return
4210  if target_path is None:
4211    tmp_img = MakeTempFile(suffix=".img")
4212    RunAndCheckOutput(["simg2img", filepath, tmp_img])
4213    os.rename(tmp_img, filepath)
4214  else:
4215    RunAndCheckOutput(["simg2img", filepath, target_path])
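
# Illustrative call (paths hypothetical): converts an Android sparse image
# to a raw image, in place when no target_path is given.
#
#   if IsSparseImage("system.img"):
#     UnsparseImage("system.img", target_path="system.raw.img")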
4216
4217
4218def ParseUpdateEngineConfig(path: str):
4219  """Parses the update_engine config stored in the file at `path`.
4220  Args:
4221    path: Path to the update_engine_config.txt file in target_files.
4222
4223  Returns:
4224    A tuple of (major, minor) version numbers, e.g. (2, 8).
4225  """
4226  with open(path, "r") as fp:
4227    # update_engine_config.txt is only supposed to contain two lines,
4228    # PAYLOAD_MAJOR_VERSION and PAYLOAD_MINOR_VERSION. 1024 should be more than
4229    # sufficient. If the length is more than that, something is wrong.
4230    data = fp.read(1024)
4231    major = re.search(r"PAYLOAD_MAJOR_VERSION=(\d+)", data)
4232    if not major:
4233      raise ValueError(
4234          f"{path} is an invalid update_engine config, missing PAYLOAD_MAJOR_VERSION {data}")
4235    minor = re.search(r"PAYLOAD_MINOR_VERSION=(\d+)", data)
4236    if not minor:
4237      raise ValueError(
4238          f"{path} is an invalid update_engine config, missing PAYLOAD_MINOR_VERSION {data}")
4239    return (int(major.group(1)), int(minor.group(1)))
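
# Illustrative update_engine_config.txt accepted by this function:
#
#   PAYLOAD_MAJOR_VERSION=2
#   PAYLOAD_MINOR_VERSION=8
#
# which parses to (2, 8).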
4240