1# Copyright (C) 2008 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#      http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15from __future__ import print_function
16
17import base64
18import collections
19import copy
20import datetime
21import errno
22import fnmatch
23import getopt
24import getpass
25import gzip
26import imp
27import json
28import logging
29import logging.config
30import os
31import platform
32import re
33import shlex
34import shutil
35import subprocess
36import sys
37import tempfile
38import threading
39import time
40import zipfile
41from hashlib import sha1, sha256
42
43import images
44import sparse_img
45from blockimgdiff import BlockImageDiff
46
47logger = logging.getLogger(__name__)
48
49
50class Options(object):
51
52  def __init__(self):
53    # Set up search path, in order to find framework/ and lib64/. At the time of
54    # running this function, user-supplied search path (`--path`) hasn't been
55    # available. So the value set here is the default, which might be overridden
56    # by commandline flag later.
57    exec_path = os.path.realpath(sys.argv[0])
58    if exec_path.endswith('.py'):
59      script_name = os.path.basename(exec_path)
60      # logger hasn't been initialized yet at this point. Use print to output
61      # warnings.
62      print(
63          'Warning: releasetools script should be invoked as hermetic Python '
64          'executable -- build and run `{}` directly.'.format(
65              script_name[:-3]),
66          file=sys.stderr)
67    self.search_path = os.path.dirname(os.path.dirname(exec_path))
68
69    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
70    self.signapk_shared_library_path = "lib64"   # Relative to search_path
71    self.extra_signapk_args = []
72    self.java_path = "java"  # Use the one on the path by default.
73    self.java_args = ["-Xmx2048m"]  # The default JVM args.
74    self.android_jar_path = None
75    self.public_key_suffix = ".x509.pem"
76    self.private_key_suffix = ".pk8"
77    # use otatools built boot_signer by default
78    self.boot_signer_path = "boot_signer"
79    self.boot_signer_args = []
80    self.verity_signer_path = None
81    self.verity_signer_args = []
82    self.aftl_tool_path = None
83    self.aftl_server = None
84    self.aftl_key_path = None
85    self.aftl_manufacturer_key_path = None
86    self.aftl_signer_helper = None
87    self.verbose = False
88    self.tempfiles = []
89    self.device_specific = None
90    self.extras = {}
91    self.info_dict = None
92    self.source_info_dict = None
93    self.target_info_dict = None
94    self.worker_threads = None
95    # Stash size cannot exceed cache_size * threshold.
96    self.cache_size = None
97    self.stash_threshold = 0.8
98    self.logfile = None
99    self.host_tools = {}
100
101
102OPTIONS = Options()
103
104# The block size that's used across the releasetools scripts.
105BLOCK_SIZE = 4096
106
107# Values for "certificate" in apkcerts that mean special things.
108SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
109
110# The partitions allowed to be signed by AVB (Android Verified Boot 2.0). Note
111# that system_other is not in the list because we don't want to include its
112# descriptor into vbmeta.img.
113AVB_PARTITIONS = ('boot', 'dtbo', 'odm', 'product', 'recovery', 'system',
114                  'system_ext', 'vendor', 'vendor_boot', 'vendor_dlkm',
115                  'odm_dlkm')
116
117# Chained VBMeta partitions.
118AVB_VBMETA_PARTITIONS = ('vbmeta_system', 'vbmeta_vendor')
119
120# Partitions that should have their care_map added to META/care_map.pb
121PARTITIONS_WITH_CARE_MAP = (
122    'system',
123    'vendor',
124    'product',
125    'system_ext',
126    'odm',
127    'vendor_dlkm',
128    'odm_dlkm',
129)
130
131
132class ErrorCode(object):
133  """Define error_codes for failures that happen during the actual
134  update package installation.
135
136  Error codes 0-999 are reserved for failures before the package
137  installation (i.e. low battery, package verification failure).
138  Detailed code in 'bootable/recovery/error_code.h' """
139
140  SYSTEM_VERIFICATION_FAILURE = 1000
141  SYSTEM_UPDATE_FAILURE = 1001
142  SYSTEM_UNEXPECTED_CONTENTS = 1002
143  SYSTEM_NONZERO_CONTENTS = 1003
144  SYSTEM_RECOVER_FAILURE = 1004
145  VENDOR_VERIFICATION_FAILURE = 2000
146  VENDOR_UPDATE_FAILURE = 2001
147  VENDOR_UNEXPECTED_CONTENTS = 2002
148  VENDOR_NONZERO_CONTENTS = 2003
149  VENDOR_RECOVER_FAILURE = 2004
150  OEM_PROP_MISMATCH = 3000
151  FINGERPRINT_MISMATCH = 3001
152  THUMBPRINT_MISMATCH = 3002
153  OLDER_BUILD = 3003
154  DEVICE_MISMATCH = 3004
155  BAD_PATCH_FILE = 3005
156  INSUFFICIENT_CACHE_SPACE = 3006
157  TUNE_PARTITION_FAILURE = 3007
158  APPLY_PATCH_FAILURE = 3008
159
160
161class ExternalError(RuntimeError):
162  pass
163
164
165def InitLogging():
166  DEFAULT_LOGGING_CONFIG = {
167      'version': 1,
168      'disable_existing_loggers': False,
169      'formatters': {
170          'standard': {
171              'format':
172                  '%(asctime)s - %(filename)s - %(levelname)-8s: %(message)s',
173              'datefmt': '%Y-%m-%d %H:%M:%S',
174          },
175      },
176      'handlers': {
177          'default': {
178              'class': 'logging.StreamHandler',
179              'formatter': 'standard',
180              'level': 'WARNING',
181          },
182      },
183      'loggers': {
184          '': {
185              'handlers': ['default'],
186              'propagate': True,
187              'level': 'INFO',
188          }
189      }
190  }
191  env_config = os.getenv('LOGGING_CONFIG')
192  if env_config:
193    with open(env_config) as f:
194      config = json.load(f)
195  else:
196    config = DEFAULT_LOGGING_CONFIG
197
198    # Increase the logging level for verbose mode.
199    if OPTIONS.verbose:
200      config = copy.deepcopy(config)
201      config['handlers']['default']['level'] = 'INFO'
202
203    if OPTIONS.logfile:
204      config = copy.deepcopy(config)
205      config['handlers']['logfile'] = {
206          'class': 'logging.FileHandler',
207          'formatter': 'standard',
208          'level': 'INFO',
209          'mode': 'w',
210          'filename': OPTIONS.logfile,
211      }
212      config['loggers']['']['handlers'].append('logfile')
213
214  logging.config.dictConfig(config)
215
216
217def SetHostToolLocation(tool_name, location):
218  OPTIONS.host_tools[tool_name] = location
219
220
221def Run(args, verbose=None, **kwargs):
222  """Creates and returns a subprocess.Popen object.
223
224  Args:
225    args: The command represented as a list of strings.
226    verbose: Whether the commands should be shown. Default to the global
227        verbosity if unspecified.
228    kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
229        stdin, etc. stdout and stderr will default to subprocess.PIPE and
230        subprocess.STDOUT respectively unless caller specifies any of them.
231        universal_newlines will default to True, as most of the users in
232        releasetools expect string output.
233
234  Returns:
235    A subprocess.Popen object.
236  """
237  if 'stdout' not in kwargs and 'stderr' not in kwargs:
238    kwargs['stdout'] = subprocess.PIPE
239    kwargs['stderr'] = subprocess.STDOUT
240  if 'universal_newlines' not in kwargs:
241    kwargs['universal_newlines'] = True
242
243  # If explicitly set host tool location before, use that location to avoid
244  # PATH violation. Make a copy of args in case client relies on the content
245  # of args later.
246  if args and args[0] in OPTIONS.host_tools:
247    args = args[:]
248    args[0] = OPTIONS.host_tools[args[0]]
249
250  # Don't log any if caller explicitly says so.
251  if verbose:
252    logger.info("  Running: \"%s\"", " ".join(args))
253  return subprocess.Popen(args, **kwargs)
254
255
256def RunAndWait(args, verbose=None, **kwargs):
257  """Runs the given command waiting for it to complete.
258
259  Args:
260    args: The command represented as a list of strings.
261    verbose: Whether the commands should be shown. Default to the global
262        verbosity if unspecified.
263    kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
264        stdin, etc. stdout and stderr will default to subprocess.PIPE and
265        subprocess.STDOUT respectively unless caller specifies any of them.
266
267  Raises:
268    ExternalError: On non-zero exit from the command.
269  """
270  proc = Run(args, verbose=verbose, **kwargs)
271  proc.wait()
272
273  if proc.returncode != 0:
274    raise ExternalError(
275        "Failed to run command '{}' (exit code {})".format(
276            args, proc.returncode))
277
278
279def RunAndCheckOutput(args, verbose=None, **kwargs):
280  """Runs the given command and returns the output.
281
282  Args:
283    args: The command represented as a list of strings.
284    verbose: Whether the commands should be shown. Default to the global
285        verbosity if unspecified.
286    kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
287        stdin, etc. stdout and stderr will default to subprocess.PIPE and
288        subprocess.STDOUT respectively unless caller specifies any of them.
289
290  Returns:
291    The output string.
292
293  Raises:
294    ExternalError: On non-zero exit from the command.
295  """
296  proc = Run(args, verbose=verbose, **kwargs)
297  output, _ = proc.communicate()
298  if output is None:
299    output = ""
300  # Don't log any if caller explicitly says so.
301  if verbose:
302    logger.info("%s", output.rstrip())
303  if proc.returncode != 0:
304    raise ExternalError(
305        "Failed to run command '{}' (exit code {}):\n{}".format(
306            args, proc.returncode, output))
307  return output
308
309
310def RoundUpTo4K(value):
311  rounded_up = value + 4095
312  return rounded_up - (rounded_up % 4096)
313
314
315def CloseInheritedPipes():
316  """ Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds
317  before doing other work."""
318  if platform.system() != "Darwin":
319    return
320  for d in range(3, 1025):
321    try:
322      stat = os.fstat(d)
323      if stat is not None:
324        pipebit = stat[0] & 0x1000
325        if pipebit != 0:
326          os.close(d)
327    except OSError:
328      pass
329
330
331class BuildInfo(object):
332  """A class that holds the information for a given build.
333
334  This class wraps up the property querying for a given source or target build.
335  It abstracts away the logic of handling OEM-specific properties, and caches
336  the commonly used properties such as fingerprint.
337
338  There are two types of info dicts: a) build-time info dict, which is generated
339  at build time (i.e. included in a target_files zip); b) OEM info dict that is
340  specified at package generation time (via command line argument
341  '--oem_settings'). If a build doesn't use OEM-specific properties (i.e. not
342  having "oem_fingerprint_properties" in build-time info dict), all the queries
343  would be answered based on build-time info dict only. Otherwise if using
344  OEM-specific properties, some of them will be calculated from two info dicts.
345
346  Users can query properties similarly as using a dict() (e.g. info['fstab']),
347  or to query build properties via GetBuildProp() or GetPartitionBuildProp().
348
349  Attributes:
350    info_dict: The build-time info dict.
351    is_ab: Whether it's a build that uses A/B OTA.
352    oem_dicts: A list of OEM dicts.
353    oem_props: A list of OEM properties that should be read from OEM dicts; None
354        if the build doesn't use any OEM-specific property.
355    fingerprint: The fingerprint of the build, which would be calculated based
356        on OEM properties if applicable.
357    device: The device name, which could come from OEM dicts if applicable.
358  """
359
360  _RO_PRODUCT_RESOLVE_PROPS = ["ro.product.brand", "ro.product.device",
361                               "ro.product.manufacturer", "ro.product.model",
362                               "ro.product.name"]
363  _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_CURRENT = [
364      "product", "odm", "vendor", "system_ext", "system"]
365  _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_ANDROID_10 = [
366      "product", "product_services", "odm", "vendor", "system"]
367  _RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_LEGACY = []
368
369  def __init__(self, info_dict, oem_dicts=None):
370    """Initializes a BuildInfo instance with the given dicts.
371
372    Note that it only wraps up the given dicts, without making copies.
373
374    Arguments:
375      info_dict: The build-time info dict.
376      oem_dicts: A list of OEM dicts (which is parsed from --oem_settings). Note
377          that it always uses the first dict to calculate the fingerprint or the
378          device name. The rest would be used for asserting OEM properties only
379          (e.g. one package can be installed on one of these devices).
380
381    Raises:
382      ValueError: On invalid inputs.
383    """
384    self.info_dict = info_dict
385    self.oem_dicts = oem_dicts
386
387    self._is_ab = info_dict.get("ab_update") == "true"
388
389    # Skip _oem_props if oem_dicts is None to use BuildInfo in
390    # sign_target_files_apks
391    if self.oem_dicts:
392      self._oem_props = info_dict.get("oem_fingerprint_properties")
393    else:
394      self._oem_props = None
395
396    def check_fingerprint(fingerprint):
397      if (" " in fingerprint or any(ord(ch) > 127 for ch in fingerprint)):
398        raise ValueError(
399            'Invalid build fingerprint: "{}". See the requirement in Android CDD '
400            "3.2.2. Build Parameters.".format(fingerprint))
401
402    self._partition_fingerprints = {}
403    for partition in PARTITIONS_WITH_CARE_MAP:
404      try:
405        fingerprint = self.CalculatePartitionFingerprint(partition)
406        check_fingerprint(fingerprint)
407        self._partition_fingerprints[partition] = fingerprint
408      except ExternalError:
409        continue
410    if "system" in self._partition_fingerprints:
411      # system_other is not included in PARTITIONS_WITH_CARE_MAP, but does
412      # need a fingerprint when creating the image.
413      self._partition_fingerprints[
414          "system_other"] = self._partition_fingerprints["system"]
415
416    # These two should be computed only after setting self._oem_props.
417    self._device = self.GetOemProperty("ro.product.device")
418    self._fingerprint = self.CalculateFingerprint()
419    check_fingerprint(self._fingerprint)
420
421  @property
422  def is_ab(self):
423    return self._is_ab
424
425  @property
426  def device(self):
427    return self._device
428
429  @property
430  def fingerprint(self):
431    return self._fingerprint
432
433  @property
434  def oem_props(self):
435    return self._oem_props
436
437  def __getitem__(self, key):
438    return self.info_dict[key]
439
440  def __setitem__(self, key, value):
441    self.info_dict[key] = value
442
443  def get(self, key, default=None):
444    return self.info_dict.get(key, default)
445
446  def items(self):
447    return self.info_dict.items()
448
449  def _GetRawBuildProp(self, prop, partition):
450    prop_file = '{}.build.prop'.format(
451        partition) if partition else 'build.prop'
452    partition_props = self.info_dict.get(prop_file)
453    if not partition_props:
454      return None
455    return partition_props.GetProp(prop)
456
457  def GetPartitionBuildProp(self, prop, partition):
458    """Returns the inquired build property for the provided partition."""
459    # If provided a partition for this property, only look within that
460    # partition's build.prop.
461    if prop in BuildInfo._RO_PRODUCT_RESOLVE_PROPS:
462      prop = prop.replace("ro.product", "ro.product.{}".format(partition))
463    else:
464      prop = prop.replace("ro.", "ro.{}.".format(partition))
465
466    prop_val = self._GetRawBuildProp(prop, partition)
467    if prop_val is not None:
468      return prop_val
469    raise ExternalError("couldn't find %s in %s.build.prop" %
470                        (prop, partition))
471
472  def GetBuildProp(self, prop):
473    """Returns the inquired build property from the standard build.prop file."""
474    if prop in BuildInfo._RO_PRODUCT_RESOLVE_PROPS:
475      return self._ResolveRoProductBuildProp(prop)
476
477    prop_val = self._GetRawBuildProp(prop, None)
478    if prop_val is not None:
479      return prop_val
480
481    raise ExternalError("couldn't find %s in build.prop" % (prop,))
482
483  def _ResolveRoProductBuildProp(self, prop):
484    """Resolves the inquired ro.product.* build property"""
485    prop_val = self._GetRawBuildProp(prop, None)
486    if prop_val:
487      return prop_val
488
489    default_source_order = self._GetRoProductPropsDefaultSourceOrder()
490    source_order_val = self._GetRawBuildProp(
491        "ro.product.property_source_order", None)
492    if source_order_val:
493      source_order = source_order_val.split(",")
494    else:
495      source_order = default_source_order
496
497    # Check that all sources in ro.product.property_source_order are valid
498    if any([x not in default_source_order for x in source_order]):
499      raise ExternalError(
500          "Invalid ro.product.property_source_order '{}'".format(source_order))
501
502    for source_partition in source_order:
503      source_prop = prop.replace(
504          "ro.product", "ro.product.{}".format(source_partition), 1)
505      prop_val = self._GetRawBuildProp(source_prop, source_partition)
506      if prop_val:
507        return prop_val
508
509    raise ExternalError("couldn't resolve {}".format(prop))
510
511  def _GetRoProductPropsDefaultSourceOrder(self):
512    # NOTE: refer to CDDs and android.os.Build.VERSION for the definition and
513    # values of these properties for each Android release.
514    android_codename = self._GetRawBuildProp("ro.build.version.codename", None)
515    if android_codename == "REL":
516      android_version = self._GetRawBuildProp("ro.build.version.release", None)
517      if android_version == "10":
518        return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_ANDROID_10
519      # NOTE: float() conversion of android_version will have rounding error.
520      # We are checking for "9" or less, and using "< 10" is well outside of
521      # possible floating point rounding.
522      try:
523        android_version_val = float(android_version)
524      except ValueError:
525        android_version_val = 0
526      if android_version_val < 10:
527        return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_LEGACY
528    return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_CURRENT
529
530  def GetOemProperty(self, key):
531    if self.oem_props is not None and key in self.oem_props:
532      return self.oem_dicts[0][key]
533    return self.GetBuildProp(key)
534
535  def GetPartitionFingerprint(self, partition):
536    return self._partition_fingerprints.get(partition, None)
537
538  def CalculatePartitionFingerprint(self, partition):
539    try:
540      return self.GetPartitionBuildProp("ro.build.fingerprint", partition)
541    except ExternalError:
542      return "{}/{}/{}:{}/{}/{}:{}/{}".format(
543          self.GetPartitionBuildProp("ro.product.brand", partition),
544          self.GetPartitionBuildProp("ro.product.name", partition),
545          self.GetPartitionBuildProp("ro.product.device", partition),
546          self.GetPartitionBuildProp("ro.build.version.release", partition),
547          self.GetPartitionBuildProp("ro.build.id", partition),
548          self.GetPartitionBuildProp(
549              "ro.build.version.incremental", partition),
550          self.GetPartitionBuildProp("ro.build.type", partition),
551          self.GetPartitionBuildProp("ro.build.tags", partition))
552
553  def CalculateFingerprint(self):
554    if self.oem_props is None:
555      try:
556        return self.GetBuildProp("ro.build.fingerprint")
557      except ExternalError:
558        return "{}/{}/{}:{}/{}/{}:{}/{}".format(
559            self.GetBuildProp("ro.product.brand"),
560            self.GetBuildProp("ro.product.name"),
561            self.GetBuildProp("ro.product.device"),
562            self.GetBuildProp("ro.build.version.release"),
563            self.GetBuildProp("ro.build.id"),
564            self.GetBuildProp("ro.build.version.incremental"),
565            self.GetBuildProp("ro.build.type"),
566            self.GetBuildProp("ro.build.tags"))
567    return "%s/%s/%s:%s" % (
568        self.GetOemProperty("ro.product.brand"),
569        self.GetOemProperty("ro.product.name"),
570        self.GetOemProperty("ro.product.device"),
571        self.GetBuildProp("ro.build.thumbprint"))
572
573  def WriteMountOemScript(self, script):
574    assert self.oem_props is not None
575    recovery_mount_options = self.info_dict.get("recovery_mount_options")
576    script.Mount("/oem", recovery_mount_options)
577
578  def WriteDeviceAssertions(self, script, oem_no_mount):
579    # Read the property directly if not using OEM properties.
580    if not self.oem_props:
581      script.AssertDevice(self.device)
582      return
583
584    # Otherwise assert OEM properties.
585    if not self.oem_dicts:
586      raise ExternalError(
587          "No OEM file provided to answer expected assertions")
588
589    for prop in self.oem_props.split():
590      values = []
591      for oem_dict in self.oem_dicts:
592        if prop in oem_dict:
593          values.append(oem_dict[prop])
594      if not values:
595        raise ExternalError(
596            "The OEM file is missing the property %s" % (prop,))
597      script.AssertOemProperty(prop, values, oem_no_mount)
598
599
600def ReadFromInputFile(input_file, fn):
601  """Reads the contents of fn from input zipfile or directory."""
602  if isinstance(input_file, zipfile.ZipFile):
603    return input_file.read(fn).decode()
604  else:
605    path = os.path.join(input_file, *fn.split("/"))
606    try:
607      with open(path) as f:
608        return f.read()
609    except IOError as e:
610      if e.errno == errno.ENOENT:
611        raise KeyError(fn)
612
613
614def LoadInfoDict(input_file, repacking=False):
615  """Loads the key/value pairs from the given input target_files.
616
617  It reads `META/misc_info.txt` file in the target_files input, does validation
618  checks and returns the parsed key/value pairs for to the given build. It's
619  usually called early when working on input target_files files, e.g. when
620  generating OTAs, or signing builds. Note that the function may be called
621  against an old target_files file (i.e. from past dessert releases). So the
622  property parsing needs to be backward compatible.
623
624  In a `META/misc_info.txt`, a few properties are stored as links to the files
625  in the PRODUCT_OUT directory. It works fine with the build system. However,
626  they are no longer available when (re)generating images from target_files zip.
627  When `repacking` is True, redirect these properties to the actual files in the
628  unzipped directory.
629
630  Args:
631    input_file: The input target_files file, which could be an open
632        zipfile.ZipFile instance, or a str for the dir that contains the files
633        unzipped from a target_files file.
634    repacking: Whether it's trying repack an target_files file after loading the
635        info dict (default: False). If so, it will rewrite a few loaded
636        properties (e.g. selinux_fc, root_dir) to point to the actual files in
637        target_files file. When doing repacking, `input_file` must be a dir.
638
639  Returns:
640    A dict that contains the parsed key/value pairs.
641
642  Raises:
643    AssertionError: On invalid input arguments.
644    ValueError: On malformed input values.
645  """
646  if repacking:
647    assert isinstance(input_file, str), \
648        "input_file must be a path str when doing repacking"
649
650  def read_helper(fn):
651    return ReadFromInputFile(input_file, fn)
652
653  try:
654    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
655  except KeyError:
656    raise ValueError("Failed to find META/misc_info.txt in input target-files")
657
658  if "recovery_api_version" not in d:
659    raise ValueError("Failed to find 'recovery_api_version'")
660  if "fstab_version" not in d:
661    raise ValueError("Failed to find 'fstab_version'")
662
663  if repacking:
664    # "selinux_fc" properties should point to the file_contexts files
665    # (file_contexts.bin) under META/.
666    for key in d:
667      if key.endswith("selinux_fc"):
668        fc_basename = os.path.basename(d[key])
669        fc_config = os.path.join(input_file, "META", fc_basename)
670        assert os.path.exists(fc_config)
671
672        d[key] = fc_config
673
674    # Similarly we need to redirect "root_dir", and "root_fs_config".
675    d["root_dir"] = os.path.join(input_file, "ROOT")
676    d["root_fs_config"] = os.path.join(
677        input_file, "META", "root_filesystem_config.txt")
678
679    # Redirect {partition}_base_fs_file for each of the named partitions.
680    for part_name in ["system", "vendor", "system_ext", "product", "odm",
681                      "vendor_dlkm", "odm_dlkm"]:
682      key_name = part_name + "_base_fs_file"
683      if key_name not in d:
684        continue
685      basename = os.path.basename(d[key_name])
686      base_fs_file = os.path.join(input_file, "META", basename)
687      if os.path.exists(base_fs_file):
688        d[key_name] = base_fs_file
689      else:
690        logger.warning(
691            "Failed to find %s base fs file: %s", part_name, base_fs_file)
692        del d[key_name]
693
694  def makeint(key):
695    if key in d:
696      d[key] = int(d[key], 0)
697
698  makeint("recovery_api_version")
699  makeint("blocksize")
700  makeint("system_size")
701  makeint("vendor_size")
702  makeint("userdata_size")
703  makeint("cache_size")
704  makeint("recovery_size")
705  makeint("fstab_version")
706
707  boot_images = "boot.img"
708  if "boot_images" in d:
709    boot_images = d["boot_images"]
710  for b in boot_images.split():
711    makeint(b.replace(".img", "_size"))
712
713  # Load recovery fstab if applicable.
714  d["fstab"] = _FindAndLoadRecoveryFstab(d, input_file, read_helper)
715
716  # Tries to load the build props for all partitions with care_map, including
717  # system and vendor.
718  for partition in PARTITIONS_WITH_CARE_MAP:
719    partition_prop = "{}.build.prop".format(partition)
720    d[partition_prop] = PartitionBuildProps.FromInputFile(
721        input_file, partition)
722  d["build.prop"] = d["system.build.prop"]
723
724  # Set up the salt (based on fingerprint) that will be used when adding AVB
725  # hash / hashtree footers.
726  if d.get("avb_enable") == "true":
727    build_info = BuildInfo(d)
728    for partition in PARTITIONS_WITH_CARE_MAP:
729      fingerprint = build_info.GetPartitionFingerprint(partition)
730      if fingerprint:
731        d["avb_{}_salt".format(partition)] = sha256(fingerprint.encode()).hexdigest()
732
733  return d
734
735
736def LoadListFromFile(file_path):
737  with open(file_path) as f:
738    return f.read().splitlines()
739
740
741def LoadDictionaryFromFile(file_path):
742  lines = LoadListFromFile(file_path)
743  return LoadDictionaryFromLines(lines)
744
745
746def LoadDictionaryFromLines(lines):
747  d = {}
748  for line in lines:
749    line = line.strip()
750    if not line or line.startswith("#"):
751      continue
752    if "=" in line:
753      name, value = line.split("=", 1)
754      d[name] = value
755  return d
756
757
758class PartitionBuildProps(object):
759  """The class holds the build prop of a particular partition.
760
761  This class loads the build.prop and holds the build properties for a given
762  partition. It also partially recognizes the 'import' statement in the
763  build.prop; and calculates alternative values of some specific build
764  properties during runtime.
765
766  Attributes:
767    input_file: a zipped target-file or an unzipped target-file directory.
768    partition: name of the partition.
769    props_allow_override: a list of build properties to search for the
770        alternative values during runtime.
771    build_props: a dict of build properties for the given partition.
772    prop_overrides: a set of props that are overridden by import.
773    placeholder_values: A dict of runtime variables' values to replace the
774        placeholders in the build.prop file. We expect exactly one value for
775        each of the variables.
776  """
777
778  def __init__(self, input_file, name, placeholder_values=None):
779    self.input_file = input_file
780    self.partition = name
781    self.props_allow_override = [props.format(name) for props in [
782        'ro.product.{}.brand', 'ro.product.{}.name', 'ro.product.{}.device']]
783    self.build_props = {}
784    self.prop_overrides = set()
785    self.placeholder_values = {}
786    if placeholder_values:
787      self.placeholder_values = copy.deepcopy(placeholder_values)
788
789  @staticmethod
790  def FromDictionary(name, build_props):
791    """Constructs an instance from a build prop dictionary."""
792
793    props = PartitionBuildProps("unknown", name)
794    props.build_props = build_props.copy()
795    return props
796
797  @staticmethod
798  def FromInputFile(input_file, name, placeholder_values=None):
799    """Loads the build.prop file and builds the attributes."""
800    data = ''
801    for prop_file in ['{}/etc/build.prop'.format(name.upper()),
802                      '{}/build.prop'.format(name.upper())]:
803      try:
804        data = ReadFromInputFile(input_file, prop_file)
805        break
806      except KeyError:
807        logger.warning('Failed to read %s', prop_file)
808
809    props = PartitionBuildProps(input_file, name, placeholder_values)
810    props._LoadBuildProp(data)
811    return props
812
813  def _LoadBuildProp(self, data):
814    for line in data.split('\n'):
815      line = line.strip()
816      if not line or line.startswith("#"):
817        continue
818      if line.startswith("import"):
819        overrides = self._ImportParser(line)
820        duplicates = self.prop_overrides.intersection(overrides.keys())
821        if duplicates:
822          raise ValueError('prop {} is overridden multiple times'.format(
823              ','.join(duplicates)))
824        self.prop_overrides = self.prop_overrides.union(overrides.keys())
825        self.build_props.update(overrides)
826      elif "=" in line:
827        name, value = line.split("=", 1)
828        if name in self.prop_overrides:
829          raise ValueError('prop {} is set again after overridden by import '
830                           'statement'.format(name))
831        self.build_props[name] = value
832
833  def _ImportParser(self, line):
834    """Parses the build prop in a given import statement."""
835
836    tokens = line.split()
837    if tokens[0] != 'import' or (len(tokens) != 2 and len(tokens) != 3):
838      raise ValueError('Unrecognized import statement {}'.format(line))
839
840    if len(tokens) == 3:
841      logger.info("Import %s from %s, skip", tokens[2], tokens[1])
842      return {}
843
844    import_path = tokens[1]
845    if not re.match(r'^/{}/.*\.prop$'.format(self.partition), import_path):
846      raise ValueError('Unrecognized import path {}'.format(line))
847
848    # We only recognize a subset of import statement that the init process
849    # supports. And we can loose the restriction based on how the dynamic
850    # fingerprint is used in practice. The placeholder format should be
851    # ${placeholder}, and its value should be provided by the caller through
852    # the placeholder_values.
853    for prop, value in self.placeholder_values.items():
854      prop_place_holder = '${{{}}}'.format(prop)
855      if prop_place_holder in import_path:
856        import_path = import_path.replace(prop_place_holder, value)
857    if '$' in import_path:
858      logger.info('Unresolved place holder in import path %s', import_path)
859      return {}
860
861    import_path = import_path.replace('/{}'.format(self.partition),
862                                      self.partition.upper())
863    logger.info('Parsing build props override from %s', import_path)
864
865    lines = ReadFromInputFile(self.input_file, import_path).split('\n')
866    d = LoadDictionaryFromLines(lines)
867    return {key: val for key, val in d.items()
868            if key in self.props_allow_override}
869
870  def GetProp(self, prop):
871    return self.build_props.get(prop)
872
873
874def LoadRecoveryFSTab(read_helper, fstab_version, recovery_fstab_path,
875                      system_root_image=False):
876  class Partition(object):
877    def __init__(self, mount_point, fs_type, device, length, context, slotselect):
878      self.mount_point = mount_point
879      self.fs_type = fs_type
880      self.device = device
881      self.length = length
882      self.context = context
883      self.slotselect = slotselect
884
885  try:
886    data = read_helper(recovery_fstab_path)
887  except KeyError:
888    logger.warning("Failed to find %s", recovery_fstab_path)
889    data = ""
890
891  assert fstab_version == 2
892
893  d = {}
894  for line in data.split("\n"):
895    line = line.strip()
896    if not line or line.startswith("#"):
897      continue
898
899    # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
900    pieces = line.split()
901    if len(pieces) != 5:
902      raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
903
904    # Ignore entries that are managed by vold.
905    options = pieces[4]
906    if "voldmanaged=" in options:
907      continue
908
909    # It's a good line, parse it.
910    length = 0
911    slotselect = False
912    options = options.split(",")
913    for i in options:
914      if i.startswith("length="):
915        length = int(i[7:])
916      elif i == "slotselect":
917        slotselect = True
918      else:
919        # Ignore all unknown options in the unified fstab.
920        continue
921
922    mount_flags = pieces[3]
923    # Honor the SELinux context if present.
924    context = None
925    for i in mount_flags.split(","):
926      if i.startswith("context="):
927        context = i
928
929    mount_point = pieces[1]
930    d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
931                               device=pieces[0], length=length, context=context,
932                               slotselect=slotselect)
933
934  # / is used for the system mount point when the root directory is included in
935  # system. Other areas assume system is always at "/system" so point /system
936  # at /.
937  if system_root_image:
938    assert '/system' not in d and '/' in d
939    d["/system"] = d["/"]
940  return d
941
942
943def _FindAndLoadRecoveryFstab(info_dict, input_file, read_helper):
944  """Finds the path to recovery fstab and loads its contents."""
945  # recovery fstab is only meaningful when installing an update via recovery
946  # (i.e. non-A/B OTA). Skip loading fstab if device used A/B OTA.
947  if info_dict.get('ab_update') == 'true' and \
948     info_dict.get("allow_non_ab") != "true":
949    return None
950
951  # We changed recovery.fstab path in Q, from ../RAMDISK/etc/recovery.fstab to
952  # ../RAMDISK/system/etc/recovery.fstab. This function has to handle both
953  # cases, since it may load the info_dict from an old build (e.g. when
954  # generating incremental OTAs from that build).
955  system_root_image = info_dict.get('system_root_image') == 'true'
956  if info_dict.get('no_recovery') != 'true':
957    recovery_fstab_path = 'RECOVERY/RAMDISK/system/etc/recovery.fstab'
958    if isinstance(input_file, zipfile.ZipFile):
959      if recovery_fstab_path not in input_file.namelist():
960        recovery_fstab_path = 'RECOVERY/RAMDISK/etc/recovery.fstab'
961    else:
962      path = os.path.join(input_file, *recovery_fstab_path.split('/'))
963      if not os.path.exists(path):
964        recovery_fstab_path = 'RECOVERY/RAMDISK/etc/recovery.fstab'
965    return LoadRecoveryFSTab(
966        read_helper, info_dict['fstab_version'], recovery_fstab_path,
967        system_root_image)
968
969  if info_dict.get('recovery_as_boot') == 'true':
970    recovery_fstab_path = 'BOOT/RAMDISK/system/etc/recovery.fstab'
971    if isinstance(input_file, zipfile.ZipFile):
972      if recovery_fstab_path not in input_file.namelist():
973        recovery_fstab_path = 'BOOT/RAMDISK/etc/recovery.fstab'
974    else:
975      path = os.path.join(input_file, *recovery_fstab_path.split('/'))
976      if not os.path.exists(path):
977        recovery_fstab_path = 'BOOT/RAMDISK/etc/recovery.fstab'
978    return LoadRecoveryFSTab(
979        read_helper, info_dict['fstab_version'], recovery_fstab_path,
980        system_root_image)
981
982  return None
983
984
985def DumpInfoDict(d):
986  for k, v in sorted(d.items()):
987    logger.info("%-25s = (%s) %s", k, type(v).__name__, v)
988
989
990def MergeDynamicPartitionInfoDicts(framework_dict, vendor_dict):
991  """Merges dynamic partition info variables.
992
993  Args:
994    framework_dict: The dictionary of dynamic partition info variables from the
995      partial framework target files.
996    vendor_dict: The dictionary of dynamic partition info variables from the
997      partial vendor target files.
998
999  Returns:
1000    The merged dynamic partition info dictionary.
1001  """
1002  merged_dict = {}
1003  # Partition groups and group sizes are defined by the vendor dict because
1004  # these values may vary for each board that uses a shared system image.
1005  merged_dict["super_partition_groups"] = vendor_dict["super_partition_groups"]
1006  framework_dynamic_partition_list = framework_dict.get(
1007      "dynamic_partition_list", "")
1008  vendor_dynamic_partition_list = vendor_dict.get("dynamic_partition_list", "")
1009  merged_dict["dynamic_partition_list"] = ("%s %s" % (
1010      framework_dynamic_partition_list, vendor_dynamic_partition_list)).strip()
1011  for partition_group in merged_dict["super_partition_groups"].split(" "):
1012    # Set the partition group's size using the value from the vendor dict.
1013    key = "super_%s_group_size" % partition_group
1014    if key not in vendor_dict:
1015      raise ValueError("Vendor dict does not contain required key %s." % key)
1016    merged_dict[key] = vendor_dict[key]
1017
1018    # Set the partition group's partition list using a concatenation of the
1019    # framework and vendor partition lists.
1020    key = "super_%s_partition_list" % partition_group
1021    merged_dict[key] = (
1022        "%s %s" %
1023        (framework_dict.get(key, ""), vendor_dict.get(key, ""))).strip()
1024
1025  # Pick virtual ab related flags from vendor dict, if defined.
1026  if "virtual_ab" in vendor_dict.keys():
1027    merged_dict["virtual_ab"] = vendor_dict["virtual_ab"]
1028  if "virtual_ab_retrofit" in vendor_dict.keys():
1029    merged_dict["virtual_ab_retrofit"] = vendor_dict["virtual_ab_retrofit"]
1030  return merged_dict
1031
1032
1033def AppendAVBSigningArgs(cmd, partition):
1034  """Append signing arguments for avbtool."""
1035  # e.g., "--key path/to/signing_key --algorithm SHA256_RSA4096"
1036  key_path = OPTIONS.info_dict.get("avb_" + partition + "_key_path")
1037  if key_path and not os.path.exists(key_path) and OPTIONS.search_path:
1038    new_key_path = os.path.join(OPTIONS.search_path, key_path)
1039    if os.path.exists(new_key_path):
1040      key_path = new_key_path
1041  algorithm = OPTIONS.info_dict.get("avb_" + partition + "_algorithm")
1042  if key_path and algorithm:
1043    cmd.extend(["--key", key_path, "--algorithm", algorithm])
1044  avb_salt = OPTIONS.info_dict.get("avb_salt")
1045  # make_vbmeta_image doesn't like "--salt" (and it's not needed).
1046  if avb_salt and not partition.startswith("vbmeta"):
1047    cmd.extend(["--salt", avb_salt])
1048
1049
1050def GetAvbPartitionArg(partition, image, info_dict=None):
1051  """Returns the VBMeta arguments for partition.
1052
1053  It sets up the VBMeta argument by including the partition descriptor from the
1054  given 'image', or by configuring the partition as a chained partition.
1055
1056  Args:
1057    partition: The name of the partition (e.g. "system").
1058    image: The path to the partition image.
1059    info_dict: A dict returned by common.LoadInfoDict(). Will use
1060        OPTIONS.info_dict if None has been given.
1061
1062  Returns:
1063    A list of VBMeta arguments.
1064  """
1065  if info_dict is None:
1066    info_dict = OPTIONS.info_dict
1067
1068  # Check if chain partition is used.
1069  key_path = info_dict.get("avb_" + partition + "_key_path")
1070  if not key_path:
1071    return ["--include_descriptors_from_image", image]
1072
1073  # For a non-A/B device, we don't chain /recovery nor include its descriptor
1074  # into vbmeta.img. The recovery image will be configured on an independent
1075  # boot chain, to be verified with AVB_SLOT_VERIFY_FLAGS_NO_VBMETA_PARTITION.
1076  # See details at
1077  # https://android.googlesource.com/platform/external/avb/+/master/README.md#booting-into-recovery.
1078  if info_dict.get("ab_update") != "true" and partition == "recovery":
1079    return []
1080
1081  # Otherwise chain the partition into vbmeta.
1082  chained_partition_arg = GetAvbChainedPartitionArg(partition, info_dict)
1083  return ["--chain_partition", chained_partition_arg]
1084
1085
1086def GetAvbChainedPartitionArg(partition, info_dict, key=None):
1087  """Constructs and returns the arg to build or verify a chained partition.
1088
1089  Args:
1090    partition: The partition name.
1091    info_dict: The info dict to look up the key info and rollback index
1092        location.
1093    key: The key to be used for building or verifying the partition. Defaults to
1094        the key listed in info_dict.
1095
1096  Returns:
1097    A string of form "partition:rollback_index_location:key" that can be used to
1098    build or verify vbmeta image.
1099  """
1100  if key is None:
1101    key = info_dict["avb_" + partition + "_key_path"]
1102  if key and not os.path.exists(key) and OPTIONS.search_path:
1103    new_key_path = os.path.join(OPTIONS.search_path, key)
1104    if os.path.exists(new_key_path):
1105      key = new_key_path
1106  pubkey_path = ExtractAvbPublicKey(info_dict["avb_avbtool"], key)
1107  rollback_index_location = info_dict[
1108      "avb_" + partition + "_rollback_index_location"]
1109  return "{}:{}:{}".format(partition, rollback_index_location, pubkey_path)
1110
1111
1112def ConstructAftlMakeImageCommands(output_image):
1113  """Constructs the command to append the aftl image to vbmeta."""
1114
1115  # Ensure the other AFTL parameters are set as well.
1116  assert OPTIONS.aftl_tool_path is not None, 'No aftl tool provided.'
1117  assert OPTIONS.aftl_key_path is not None, 'No AFTL key provided.'
1118  assert OPTIONS.aftl_manufacturer_key_path is not None, \
1119      'No AFTL manufacturer key provided.'
1120
1121  vbmeta_image = MakeTempFile()
1122  os.rename(output_image, vbmeta_image)
1123  build_info = BuildInfo(OPTIONS.info_dict)
1124  version_incremental = build_info.GetBuildProp("ro.build.version.incremental")
1125  aftltool = OPTIONS.aftl_tool_path
1126  server_argument_list = [OPTIONS.aftl_server, OPTIONS.aftl_key_path]
1127  aftl_cmd = [aftltool, "make_icp_from_vbmeta",
1128              "--vbmeta_image_path", vbmeta_image,
1129              "--output", output_image,
1130              "--version_incremental", version_incremental,
1131              "--transparency_log_servers", ','.join(server_argument_list),
1132              "--manufacturer_key", OPTIONS.aftl_manufacturer_key_path,
1133              "--algorithm", "SHA256_RSA4096",
1134              "--padding", "4096"]
1135  if OPTIONS.aftl_signer_helper:
1136    aftl_cmd.extend(shlex.split(OPTIONS.aftl_signer_helper))
1137  return aftl_cmd
1138
1139
1140def AddAftlInclusionProof(output_image):
1141  """Appends the aftl inclusion proof to the vbmeta image."""
1142
1143  aftl_cmd = ConstructAftlMakeImageCommands(output_image)
1144  RunAndCheckOutput(aftl_cmd)
1145
1146  verify_cmd = ['aftltool', 'verify_image_icp', '--vbmeta_image_path',
1147                output_image, '--transparency_log_pub_keys',
1148                OPTIONS.aftl_key_path]
1149  RunAndCheckOutput(verify_cmd)
1150
1151
1152def BuildVBMeta(image_path, partitions, name, needed_partitions):
1153  """Creates a VBMeta image.
1154
1155  It generates the requested VBMeta image. The requested image could be for
1156  top-level or chained VBMeta image, which is determined based on the name.
1157
1158  Args:
1159    image_path: The output path for the new VBMeta image.
1160    partitions: A dict that's keyed by partition names with image paths as
1161        values. Only valid partition names are accepted, as partitions listed
1162        in common.AVB_PARTITIONS and custom partitions listed in
1163        OPTIONS.info_dict.get("avb_custom_images_partition_list")
1164    name: Name of the VBMeta partition, e.g. 'vbmeta', 'vbmeta_system'.
1165    needed_partitions: Partitions whose descriptors should be included into the
1166        generated VBMeta image.
1167
1168  Raises:
1169    AssertionError: On invalid input args.
1170  """
1171  avbtool = OPTIONS.info_dict["avb_avbtool"]
1172  cmd = [avbtool, "make_vbmeta_image", "--output", image_path]
1173  AppendAVBSigningArgs(cmd, name)
1174
1175  custom_partitions = OPTIONS.info_dict.get(
1176      "avb_custom_images_partition_list", "").strip().split()
1177
1178  for partition, path in partitions.items():
1179    if partition not in needed_partitions:
1180      continue
1181    assert (partition in AVB_PARTITIONS or
1182            partition in AVB_VBMETA_PARTITIONS or
1183            partition in custom_partitions), \
1184        'Unknown partition: {}'.format(partition)
1185    assert os.path.exists(path), \
1186        'Failed to find {} for {}'.format(path, partition)
1187    cmd.extend(GetAvbPartitionArg(partition, path))
1188
1189  args = OPTIONS.info_dict.get("avb_{}_args".format(name))
1190  if args and args.strip():
1191    split_args = shlex.split(args)
1192    for index, arg in enumerate(split_args[:-1]):
1193      # Check that the image file exists. Some images might be defined
1194      # as a path relative to source tree, which may not be available at the
1195      # same location when running this script (we have the input target_files
1196      # zip only). For such cases, we additionally scan other locations (e.g.
1197      # IMAGES/, RADIO/, etc) before bailing out.
1198      if arg == '--include_descriptors_from_image':
1199        chained_image = split_args[index + 1]
1200        if os.path.exists(chained_image):
1201          continue
1202        found = False
1203        for dir_name in ['IMAGES', 'RADIO', 'PREBUILT_IMAGES']:
1204          alt_path = os.path.join(
1205              OPTIONS.input_tmp, dir_name, os.path.basename(chained_image))
1206          if os.path.exists(alt_path):
1207            split_args[index + 1] = alt_path
1208            found = True
1209            break
1210        assert found, 'Failed to find {}'.format(chained_image)
1211    cmd.extend(split_args)
1212
1213  RunAndCheckOutput(cmd)
1214
1215  # Generate the AFTL inclusion proof.
1216  if OPTIONS.aftl_server is not None:
1217    AddAftlInclusionProof(image_path)
1218
1219
1220def _MakeRamdisk(sourcedir, fs_config_file=None, lz4_ramdisks=False):
1221  ramdisk_img = tempfile.NamedTemporaryFile()
1222
1223  if fs_config_file is not None and os.access(fs_config_file, os.F_OK):
1224    cmd = ["mkbootfs", "-f", fs_config_file,
1225           os.path.join(sourcedir, "RAMDISK")]
1226  else:
1227    cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
1228  p1 = Run(cmd, stdout=subprocess.PIPE)
1229  if lz4_ramdisks:
1230    p2 = Run(["lz4", "-l", "-12" , "--favor-decSpeed"], stdin=p1.stdout,
1231             stdout=ramdisk_img.file.fileno())
1232  else:
1233    p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
1234
1235  p2.wait()
1236  p1.wait()
1237  assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
1238  assert p2.returncode == 0, "compression of %s ramdisk failed" % (sourcedir,)
1239
1240  return ramdisk_img
1241
1242
1243def _BuildBootableImage(image_name, sourcedir, fs_config_file, info_dict=None,
1244                        has_ramdisk=False, two_step_image=False):
1245  """Build a bootable image from the specified sourcedir.
1246
1247  Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
1248  'sourcedir'), and turn them into a boot image. 'two_step_image' indicates if
1249  we are building a two-step special image (i.e. building a recovery image to
1250  be loaded into /boot in two-step OTAs).
1251
1252  Return the image data, or None if sourcedir does not appear to contains files
1253  for building the requested image.
1254  """
1255
1256  # "boot" or "recovery", without extension.
1257  partition_name = os.path.basename(sourcedir).lower()
1258
1259  if partition_name == "recovery":
1260    kernel = "kernel"
1261  else:
1262    kernel = image_name.replace("boot", "kernel")
1263    kernel = kernel.replace(".img", "")
1264  if not os.access(os.path.join(sourcedir, kernel), os.F_OK):
1265    return None
1266
1267  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
1268    return None
1269
1270  if info_dict is None:
1271    info_dict = OPTIONS.info_dict
1272
1273  img = tempfile.NamedTemporaryFile()
1274
1275  if has_ramdisk:
1276    use_lz4 = info_dict.get("lz4_ramdisks") == 'true'
1277    ramdisk_img = _MakeRamdisk(sourcedir, fs_config_file, lz4_ramdisks=use_lz4)
1278
1279  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
1280  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
1281
1282  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, kernel)]
1283
1284  fn = os.path.join(sourcedir, "second")
1285  if os.access(fn, os.F_OK):
1286    cmd.append("--second")
1287    cmd.append(fn)
1288
1289  fn = os.path.join(sourcedir, "dtb")
1290  if os.access(fn, os.F_OK):
1291    cmd.append("--dtb")
1292    cmd.append(fn)
1293
1294  fn = os.path.join(sourcedir, "cmdline")
1295  if os.access(fn, os.F_OK):
1296    cmd.append("--cmdline")
1297    cmd.append(open(fn).read().rstrip("\n"))
1298
1299  fn = os.path.join(sourcedir, "base")
1300  if os.access(fn, os.F_OK):
1301    cmd.append("--base")
1302    cmd.append(open(fn).read().rstrip("\n"))
1303
1304  fn = os.path.join(sourcedir, "pagesize")
1305  if os.access(fn, os.F_OK):
1306    cmd.append("--pagesize")
1307    cmd.append(open(fn).read().rstrip("\n"))
1308
1309  if partition_name == "recovery":
1310    args = info_dict.get("recovery_mkbootimg_args")
1311    if not args:
1312      # Fall back to "mkbootimg_args" for recovery image
1313      # in case "recovery_mkbootimg_args" is not set.
1314      args = info_dict.get("mkbootimg_args")
1315  else:
1316    args = info_dict.get("mkbootimg_args")
1317  if args and args.strip():
1318    cmd.extend(shlex.split(args))
1319
1320  args = info_dict.get("mkbootimg_version_args")
1321  if args and args.strip():
1322    cmd.extend(shlex.split(args))
1323
1324  if has_ramdisk:
1325    cmd.extend(["--ramdisk", ramdisk_img.name])
1326
1327  img_unsigned = None
1328  if info_dict.get("vboot"):
1329    img_unsigned = tempfile.NamedTemporaryFile()
1330    cmd.extend(["--output", img_unsigned.name])
1331  else:
1332    cmd.extend(["--output", img.name])
1333
1334  if partition_name == "recovery":
1335    if info_dict.get("include_recovery_dtbo") == "true":
1336      fn = os.path.join(sourcedir, "recovery_dtbo")
1337      cmd.extend(["--recovery_dtbo", fn])
1338    if info_dict.get("include_recovery_acpio") == "true":
1339      fn = os.path.join(sourcedir, "recovery_acpio")
1340      cmd.extend(["--recovery_acpio", fn])
1341
1342  RunAndCheckOutput(cmd)
1343
1344  if (info_dict.get("boot_signer") == "true" and
1345      info_dict.get("verity_key")):
1346    # Hard-code the path as "/boot" for two-step special recovery image (which
1347    # will be loaded into /boot during the two-step OTA).
1348    if two_step_image:
1349      path = "/boot"
1350    else:
1351      path = "/" + partition_name
1352    cmd = [OPTIONS.boot_signer_path]
1353    cmd.extend(OPTIONS.boot_signer_args)
1354    cmd.extend([path, img.name,
1355                info_dict["verity_key"] + ".pk8",
1356                info_dict["verity_key"] + ".x509.pem", img.name])
1357    RunAndCheckOutput(cmd)
1358
1359  # Sign the image if vboot is non-empty.
1360  elif info_dict.get("vboot"):
1361    path = "/" + partition_name
1362    img_keyblock = tempfile.NamedTemporaryFile()
1363    # We have switched from the prebuilt futility binary to using the tool
1364    # (futility-host) built from the source. Override the setting in the old
1365    # TF.zip.
1366    futility = info_dict["futility"]
1367    if futility.startswith("prebuilts/"):
1368      futility = "futility-host"
1369    cmd = [info_dict["vboot_signer_cmd"], futility,
1370           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
1371           info_dict["vboot_key"] + ".vbprivk",
1372           info_dict["vboot_subkey"] + ".vbprivk",
1373           img_keyblock.name,
1374           img.name]
1375    RunAndCheckOutput(cmd)
1376
1377    # Clean up the temp files.
1378    img_unsigned.close()
1379    img_keyblock.close()
1380
1381  # AVB: if enabled, calculate and add hash to boot.img or recovery.img.
1382  if info_dict.get("avb_enable") == "true":
1383    avbtool = info_dict["avb_avbtool"]
1384    if partition_name == "recovery":
1385      part_size = info_dict["recovery_size"]
1386    else:
1387      part_size = info_dict[image_name.replace(".img", "_size")]
1388    cmd = [avbtool, "add_hash_footer", "--image", img.name,
1389           "--partition_size", str(part_size), "--partition_name",
1390           partition_name]
1391    AppendAVBSigningArgs(cmd, partition_name)
1392    args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args")
1393    if args and args.strip():
1394      cmd.extend(shlex.split(args))
1395    RunAndCheckOutput(cmd)
1396
1397  img.seek(os.SEEK_SET, 0)
1398  data = img.read()
1399
1400  if has_ramdisk:
1401    ramdisk_img.close()
1402  img.close()
1403
1404  return data
1405
1406
1407def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
1408                     info_dict=None, two_step_image=False):
1409  """Return a File object with the desired bootable image.
1410
1411  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
1412  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
1413  the source files in 'unpack_dir'/'tree_subdir'."""
1414
1415  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
1416  if os.path.exists(prebuilt_path):
1417    logger.info("using prebuilt %s from BOOTABLE_IMAGES...", prebuilt_name)
1418    return File.FromLocalFile(name, prebuilt_path)
1419
1420  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
1421  if os.path.exists(prebuilt_path):
1422    logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
1423    return File.FromLocalFile(name, prebuilt_path)
1424
1425  logger.info("building image from target_files %s...", tree_subdir)
1426
1427  if info_dict is None:
1428    info_dict = OPTIONS.info_dict
1429
1430  # With system_root_image == "true", we don't pack ramdisk into the boot image.
1431  # Unless "recovery_as_boot" is specified, in which case we carry the ramdisk
1432  # for recovery.
1433  has_ramdisk = (info_dict.get("system_root_image") != "true" or
1434                 prebuilt_name != "boot.img" or
1435                 info_dict.get("recovery_as_boot") == "true")
1436
1437  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
1438  data = _BuildBootableImage(prebuilt_name, os.path.join(unpack_dir, tree_subdir),
1439                             os.path.join(unpack_dir, fs_config),
1440                             info_dict, has_ramdisk, two_step_image)
1441  if data:
1442    return File(name, data)
1443  return None
1444
1445
1446def _BuildVendorBootImage(sourcedir, info_dict=None):
1447  """Build a vendor boot image from the specified sourcedir.
1448
1449  Take a ramdisk, dtb, and vendor_cmdline from the input (in 'sourcedir'), and
1450  turn them into a vendor boot image.
1451
1452  Return the image data, or None if sourcedir does not appear to contains files
1453  for building the requested image.
1454  """
1455
1456  if info_dict is None:
1457    info_dict = OPTIONS.info_dict
1458
1459  img = tempfile.NamedTemporaryFile()
1460
1461  use_lz4 = info_dict.get("lz4_ramdisks") == 'true'
1462  ramdisk_img = _MakeRamdisk(sourcedir, lz4_ramdisks=use_lz4)
1463
1464  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
1465  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
1466
1467  cmd = [mkbootimg]
1468
1469  fn = os.path.join(sourcedir, "dtb")
1470  if os.access(fn, os.F_OK):
1471    cmd.append("--dtb")
1472    cmd.append(fn)
1473
1474  fn = os.path.join(sourcedir, "vendor_cmdline")
1475  if os.access(fn, os.F_OK):
1476    cmd.append("--vendor_cmdline")
1477    cmd.append(open(fn).read().rstrip("\n"))
1478
1479  fn = os.path.join(sourcedir, "base")
1480  if os.access(fn, os.F_OK):
1481    cmd.append("--base")
1482    cmd.append(open(fn).read().rstrip("\n"))
1483
1484  fn = os.path.join(sourcedir, "pagesize")
1485  if os.access(fn, os.F_OK):
1486    cmd.append("--pagesize")
1487    cmd.append(open(fn).read().rstrip("\n"))
1488
1489  args = info_dict.get("mkbootimg_args")
1490  if args and args.strip():
1491    cmd.extend(shlex.split(args))
1492
1493  args = info_dict.get("mkbootimg_version_args")
1494  if args and args.strip():
1495    cmd.extend(shlex.split(args))
1496
1497  cmd.extend(["--vendor_ramdisk", ramdisk_img.name])
1498  cmd.extend(["--vendor_boot", img.name])
1499
1500  RunAndCheckOutput(cmd)
1501
1502  # AVB: if enabled, calculate and add hash.
1503  if info_dict.get("avb_enable") == "true":
1504    avbtool = info_dict["avb_avbtool"]
1505    part_size = info_dict["vendor_boot_size"]
1506    cmd = [avbtool, "add_hash_footer", "--image", img.name,
1507           "--partition_size", str(part_size), "--partition_name", "vendor_boot"]
1508    AppendAVBSigningArgs(cmd, "vendor_boot")
1509    args = info_dict.get("avb_vendor_boot_add_hash_footer_args")
1510    if args and args.strip():
1511      cmd.extend(shlex.split(args))
1512    RunAndCheckOutput(cmd)
1513
1514  img.seek(os.SEEK_SET, 0)
1515  data = img.read()
1516
1517  ramdisk_img.close()
1518  img.close()
1519
1520  return data
1521
1522
1523def GetVendorBootImage(name, prebuilt_name, unpack_dir, tree_subdir,
1524                       info_dict=None):
1525  """Return a File object with the desired vendor boot image.
1526
1527  Look for it under 'unpack_dir'/IMAGES, otherwise construct it from
1528  the source files in 'unpack_dir'/'tree_subdir'."""
1529
1530  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
1531  if os.path.exists(prebuilt_path):
1532    logger.info("using prebuilt %s from IMAGES...", prebuilt_name)
1533    return File.FromLocalFile(name, prebuilt_path)
1534
1535  logger.info("building image from target_files %s...", tree_subdir)
1536
1537  if info_dict is None:
1538    info_dict = OPTIONS.info_dict
1539
1540  data = _BuildVendorBootImage(
1541      os.path.join(unpack_dir, tree_subdir), info_dict)
1542  if data:
1543    return File(name, data)
1544  return None
1545
1546
1547def Gunzip(in_filename, out_filename):
1548  """Gunzips the given gzip compressed file to a given output file."""
1549  with gzip.open(in_filename, "rb") as in_file, \
1550          open(out_filename, "wb") as out_file:
1551    shutil.copyfileobj(in_file, out_file)
1552
1553
1554def UnzipToDir(filename, dirname, patterns=None):
1555  """Unzips the archive to the given directory.
1556
1557  Args:
1558    filename: The name of the zip file to unzip.
1559    dirname: Where the unziped files will land.
1560    patterns: Files to unzip from the archive. If omitted, will unzip the entire
1561        archvie. Non-matching patterns will be filtered out. If there's no match
1562        after the filtering, no file will be unzipped.
1563  """
1564  cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
1565  if patterns is not None:
1566    # Filter out non-matching patterns. unzip will complain otherwise.
1567    with zipfile.ZipFile(filename) as input_zip:
1568      names = input_zip.namelist()
1569    filtered = [
1570        pattern for pattern in patterns if fnmatch.filter(names, pattern)]
1571
1572    # There isn't any matching files. Don't unzip anything.
1573    if not filtered:
1574      return
1575    cmd.extend(filtered)
1576
1577  RunAndCheckOutput(cmd)
1578
1579
1580def UnzipTemp(filename, pattern=None):
1581  """Unzips the given archive into a temporary directory and returns the name.
1582
1583  Args:
1584    filename: If filename is of the form "foo.zip+bar.zip", unzip foo.zip into
1585    a temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.
1586
1587    pattern: Files to unzip from the archive. If omitted, will unzip the entire
1588    archvie.
1589
1590  Returns:
1591    The name of the temporary directory.
1592  """
1593
1594  tmp = MakeTempDir(prefix="targetfiles-")
1595  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
1596  if m:
1597    UnzipToDir(m.group(1), tmp, pattern)
1598    UnzipToDir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"), pattern)
1599    filename = m.group(1)
1600  else:
1601    UnzipToDir(filename, tmp, pattern)
1602
1603  return tmp
1604
1605
1606def GetUserImage(which, tmpdir, input_zip,
1607                 info_dict=None,
1608                 allow_shared_blocks=None,
1609                 hashtree_info_generator=None,
1610                 reset_file_map=False):
1611  """Returns an Image object suitable for passing to BlockImageDiff.
1612
1613  This function loads the specified image from the given path. If the specified
1614  image is sparse, it also performs additional processing for OTA purpose. For
1615  example, it always adds block 0 to clobbered blocks list. It also detects
1616  files that cannot be reconstructed from the block list, for whom we should
1617  avoid applying imgdiff.
1618
1619  Args:
1620    which: The partition name.
1621    tmpdir: The directory that contains the prebuilt image and block map file.
1622    input_zip: The target-files ZIP archive.
1623    info_dict: The dict to be looked up for relevant info.
1624    allow_shared_blocks: If image is sparse, whether having shared blocks is
1625        allowed. If none, it is looked up from info_dict.
1626    hashtree_info_generator: If present and image is sparse, generates the
1627        hashtree_info for this sparse image.
1628    reset_file_map: If true and image is sparse, reset file map before returning
1629        the image.
1630  Returns:
1631    A Image object. If it is a sparse image and reset_file_map is False, the
1632    image will have file_map info loaded.
1633  """
1634  if info_dict is None:
1635    info_dict = LoadInfoDict(input_zip)
1636
1637  is_sparse = info_dict.get("extfs_sparse_flag")
1638
1639  # When target uses 'BOARD_EXT4_SHARE_DUP_BLOCKS := true', images may contain
1640  # shared blocks (i.e. some blocks will show up in multiple files' block
1641  # list). We can only allocate such shared blocks to the first "owner", and
1642  # disable imgdiff for all later occurrences.
1643  if allow_shared_blocks is None:
1644    allow_shared_blocks = info_dict.get("ext4_share_dup_blocks") == "true"
1645
1646  if is_sparse:
1647    img = GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks,
1648                         hashtree_info_generator)
1649    if reset_file_map:
1650      img.ResetFileMap()
1651    return img
1652  return GetNonSparseImage(which, tmpdir, hashtree_info_generator)
1653
1654
1655def GetNonSparseImage(which, tmpdir, hashtree_info_generator=None):
1656  """Returns a Image object suitable for passing to BlockImageDiff.
1657
1658  This function loads the specified non-sparse image from the given path.
1659
1660  Args:
1661    which: The partition name.
1662    tmpdir: The directory that contains the prebuilt image and block map file.
1663  Returns:
1664    A Image object.
1665  """
1666  path = os.path.join(tmpdir, "IMAGES", which + ".img")
1667  mappath = os.path.join(tmpdir, "IMAGES", which + ".map")
1668
1669  # The image and map files must have been created prior to calling
1670  # ota_from_target_files.py (since LMP).
1671  assert os.path.exists(path) and os.path.exists(mappath)
1672
1673  return images.FileImage(path, hashtree_info_generator=hashtree_info_generator)
1674
1675
1676def GetSparseImage(which, tmpdir, input_zip, allow_shared_blocks,
1677                   hashtree_info_generator=None):
1678  """Returns a SparseImage object suitable for passing to BlockImageDiff.
1679
1680  This function loads the specified sparse image from the given path, and
1681  performs additional processing for OTA purpose. For example, it always adds
1682  block 0 to clobbered blocks list. It also detects files that cannot be
1683  reconstructed from the block list, for whom we should avoid applying imgdiff.
1684
1685  Args:
1686    which: The partition name, e.g. "system", "vendor".
1687    tmpdir: The directory that contains the prebuilt image and block map file.
1688    input_zip: The target-files ZIP archive.
1689    allow_shared_blocks: Whether having shared blocks is allowed.
1690    hashtree_info_generator: If present, generates the hashtree_info for this
1691        sparse image.
1692  Returns:
1693    A SparseImage object, with file_map info loaded.
1694  """
1695  path = os.path.join(tmpdir, "IMAGES", which + ".img")
1696  mappath = os.path.join(tmpdir, "IMAGES", which + ".map")
1697
1698  # The image and map files must have been created prior to calling
1699  # ota_from_target_files.py (since LMP).
1700  assert os.path.exists(path) and os.path.exists(mappath)
1701
1702  # In ext4 filesystems, block 0 might be changed even being mounted R/O. We add
1703  # it to clobbered_blocks so that it will be written to the target
1704  # unconditionally. Note that they are still part of care_map. (Bug: 20939131)
1705  clobbered_blocks = "0"
1706
1707  image = sparse_img.SparseImage(
1708      path, mappath, clobbered_blocks, allow_shared_blocks=allow_shared_blocks,
1709      hashtree_info_generator=hashtree_info_generator)
1710
1711  # block.map may contain less blocks, because mke2fs may skip allocating blocks
1712  # if they contain all zeros. We can't reconstruct such a file from its block
1713  # list. Tag such entries accordingly. (Bug: 65213616)
1714  for entry in image.file_map:
1715    # Skip artificial names, such as "__ZERO", "__NONZERO-1".
1716    if not entry.startswith('/'):
1717      continue
1718
1719    # "/system/framework/am.jar" => "SYSTEM/framework/am.jar". Note that the
1720    # filename listed in system.map may contain an additional leading slash
1721    # (i.e. "//system/framework/am.jar"). Using lstrip to get consistent
1722    # results.
1723    arcname = entry.replace(which, which.upper(), 1).lstrip('/')
1724
1725    # Special handling another case, where files not under /system
1726    # (e.g. "/sbin/charger") are packed under ROOT/ in a target_files.zip.
1727    if which == 'system' and not arcname.startswith('SYSTEM'):
1728      arcname = 'ROOT/' + arcname
1729
1730    assert arcname in input_zip.namelist(), \
1731        "Failed to find the ZIP entry for {}".format(entry)
1732
1733    info = input_zip.getinfo(arcname)
1734    ranges = image.file_map[entry]
1735
1736    # If a RangeSet has been tagged as using shared blocks while loading the
1737    # image, check the original block list to determine its completeness. Note
1738    # that the 'incomplete' flag would be tagged to the original RangeSet only.
1739    if ranges.extra.get('uses_shared_blocks'):
1740      ranges = ranges.extra['uses_shared_blocks']
1741
1742    if RoundUpTo4K(info.file_size) > ranges.size() * 4096:
1743      ranges.extra['incomplete'] = True
1744
1745  return image
1746
1747
1748def GetKeyPasswords(keylist):
1749  """Given a list of keys, prompt the user to enter passwords for
1750  those which require them.  Return a {key: password} dict.  password
1751  will be None if the key has no password."""
1752
1753  no_passwords = []
1754  need_passwords = []
1755  key_passwords = {}
1756  devnull = open("/dev/null", "w+b")
1757  for k in sorted(keylist):
1758    # We don't need a password for things that aren't really keys.
1759    if k in SPECIAL_CERT_STRINGS:
1760      no_passwords.append(k)
1761      continue
1762
1763    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
1764             "-inform", "DER", "-nocrypt"],
1765            stdin=devnull.fileno(),
1766            stdout=devnull.fileno(),
1767            stderr=subprocess.STDOUT)
1768    p.communicate()
1769    if p.returncode == 0:
1770      # Definitely an unencrypted key.
1771      no_passwords.append(k)
1772    else:
1773      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
1774               "-inform", "DER", "-passin", "pass:"],
1775              stdin=devnull.fileno(),
1776              stdout=devnull.fileno(),
1777              stderr=subprocess.PIPE)
1778      _, stderr = p.communicate()
1779      if p.returncode == 0:
1780        # Encrypted key with empty string as password.
1781        key_passwords[k] = ''
1782      elif stderr.startswith('Error decrypting key'):
1783        # Definitely encrypted key.
1784        # It would have said "Error reading key" if it didn't parse correctly.
1785        need_passwords.append(k)
1786      else:
1787        # Potentially, a type of key that openssl doesn't understand.
1788        # We'll let the routines in signapk.jar handle it.
1789        no_passwords.append(k)
1790  devnull.close()
1791
1792  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
1793  key_passwords.update(dict.fromkeys(no_passwords))
1794  return key_passwords
1795
1796
1797def GetMinSdkVersion(apk_name):
1798  """Gets the minSdkVersion declared in the APK.
1799
1800  It calls 'aapt2' to query the embedded minSdkVersion from the given APK file.
1801  This can be both a decimal number (API Level) or a codename.
1802
1803  Args:
1804    apk_name: The APK filename.
1805
1806  Returns:
1807    The parsed SDK version string.
1808
1809  Raises:
1810    ExternalError: On failing to obtain the min SDK version.
1811  """
1812  proc = Run(
1813      ["aapt2", "dump", "badging", apk_name], stdout=subprocess.PIPE,
1814      stderr=subprocess.PIPE)
1815  stdoutdata, stderrdata = proc.communicate()
1816  if proc.returncode != 0:
1817    raise ExternalError(
1818        "Failed to obtain minSdkVersion: aapt2 return code {}:\n{}\n{}".format(
1819            proc.returncode, stdoutdata, stderrdata))
1820
1821  for line in stdoutdata.split("\n"):
1822    # Looking for lines such as sdkVersion:'23' or sdkVersion:'M'.
1823    m = re.match(r'sdkVersion:\'([^\']*)\'', line)
1824    if m:
1825      return m.group(1)
1826  raise ExternalError("No minSdkVersion returned by aapt2")
1827
1828
1829def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
1830  """Returns the minSdkVersion declared in the APK as a number (API Level).
1831
1832  If minSdkVersion is set to a codename, it is translated to a number using the
1833  provided map.
1834
1835  Args:
1836    apk_name: The APK filename.
1837
1838  Returns:
1839    The parsed SDK version number.
1840
1841  Raises:
1842    ExternalError: On failing to get the min SDK version number.
1843  """
1844  version = GetMinSdkVersion(apk_name)
1845  try:
1846    return int(version)
1847  except ValueError:
1848    # Not a decimal number. Codename?
1849    if version in codename_to_api_level_map:
1850      return codename_to_api_level_map[version]
1851    raise ExternalError(
1852        "Unknown minSdkVersion: '{}'. Known codenames: {}".format(
1853            version, codename_to_api_level_map))
1854
1855
1856def SignFile(input_name, output_name, key, password, min_api_level=None,
1857             codename_to_api_level_map=None, whole_file=False,
1858             extra_signapk_args=None):
1859  """Sign the input_name zip/jar/apk, producing output_name.  Use the
1860  given key and password (the latter may be None if the key does not
1861  have a password.
1862
1863  If whole_file is true, use the "-w" option to SignApk to embed a
1864  signature that covers the whole file in the archive comment of the
1865  zip file.
1866
1867  min_api_level is the API Level (int) of the oldest platform this file may end
1868  up on. If not specified for an APK, the API Level is obtained by interpreting
1869  the minSdkVersion attribute of the APK's AndroidManifest.xml.
1870
1871  codename_to_api_level_map is needed to translate the codename which may be
1872  encountered as the APK's minSdkVersion.
1873
1874  Caller may optionally specify extra args to be passed to SignApk, which
1875  defaults to OPTIONS.extra_signapk_args if omitted.
1876  """
1877  if codename_to_api_level_map is None:
1878    codename_to_api_level_map = {}
1879  if extra_signapk_args is None:
1880    extra_signapk_args = OPTIONS.extra_signapk_args
1881
1882  java_library_path = os.path.join(
1883      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)
1884
1885  cmd = ([OPTIONS.java_path] + OPTIONS.java_args +
1886         ["-Djava.library.path=" + java_library_path,
1887          "-jar", os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)] +
1888         extra_signapk_args)
1889  if whole_file:
1890    cmd.append("-w")
1891
1892  min_sdk_version = min_api_level
1893  if min_sdk_version is None:
1894    if not whole_file:
1895      min_sdk_version = GetMinSdkVersionInt(
1896          input_name, codename_to_api_level_map)
1897  if min_sdk_version is not None:
1898    cmd.extend(["--min-sdk-version", str(min_sdk_version)])
1899
1900  cmd.extend([key + OPTIONS.public_key_suffix,
1901              key + OPTIONS.private_key_suffix,
1902              input_name, output_name])
1903
1904  proc = Run(cmd, stdin=subprocess.PIPE)
1905  if password is not None:
1906    password += "\n"
1907  stdoutdata, _ = proc.communicate(password)
1908  if proc.returncode != 0:
1909    raise ExternalError(
1910        "Failed to run signapk.jar: return code {}:\n{}".format(
1911            proc.returncode, stdoutdata))
1912
1913
1914def CheckSize(data, target, info_dict):
1915  """Checks the data string passed against the max size limit.
1916
1917  For non-AVB images, raise exception if the data is too big. Print a warning
1918  if the data is nearing the maximum size.
1919
1920  For AVB images, the actual image size should be identical to the limit.
1921
1922  Args:
1923    data: A string that contains all the data for the partition.
1924    target: The partition name. The ".img" suffix is optional.
1925    info_dict: The dict to be looked up for relevant info.
1926  """
1927  if target.endswith(".img"):
1928    target = target[:-4]
1929  mount_point = "/" + target
1930
1931  fs_type = None
1932  limit = None
1933  if info_dict["fstab"]:
1934    if mount_point == "/userdata":
1935      mount_point = "/data"
1936    p = info_dict["fstab"][mount_point]
1937    fs_type = p.fs_type
1938    device = p.device
1939    if "/" in device:
1940      device = device[device.rfind("/")+1:]
1941    limit = info_dict.get(device + "_size")
1942  if not fs_type or not limit:
1943    return
1944
1945  size = len(data)
1946  # target could be 'userdata' or 'cache'. They should follow the non-AVB image
1947  # path.
1948  if info_dict.get("avb_enable") == "true" and target in AVB_PARTITIONS:
1949    if size != limit:
1950      raise ExternalError(
1951          "Mismatching image size for %s: expected %d actual %d" % (
1952              target, limit, size))
1953  else:
1954    pct = float(size) * 100.0 / limit
1955    msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
1956    if pct >= 99.0:
1957      raise ExternalError(msg)
1958
1959    if pct >= 95.0:
1960      logger.warning("\n  WARNING: %s\n", msg)
1961    else:
1962      logger.info("  %s", msg)
1963
1964
1965def ReadApkCerts(tf_zip):
1966  """Parses the APK certs info from a given target-files zip.
1967
1968  Given a target-files ZipFile, parses the META/apkcerts.txt entry and returns a
1969  tuple with the following elements: (1) a dictionary that maps packages to
1970  certs (based on the "certificate" and "private_key" attributes in the file;
1971  (2) a string representing the extension of compressed APKs in the target files
1972  (e.g ".gz", ".bro").
1973
1974  Args:
1975    tf_zip: The input target_files ZipFile (already open).
1976
1977  Returns:
1978    (certmap, ext): certmap is a dictionary that maps packages to certs; ext is
1979        the extension string of compressed APKs (e.g. ".gz"), or None if there's
1980        no compressed APKs.
1981  """
1982  certmap = {}
1983  compressed_extension = None
1984
1985  # META/apkcerts.txt contains the info for _all_ the packages known at build
1986  # time. Filter out the ones that are not installed.
1987  installed_files = set()
1988  for name in tf_zip.namelist():
1989    basename = os.path.basename(name)
1990    if basename:
1991      installed_files.add(basename)
1992
1993  for line in tf_zip.read('META/apkcerts.txt').decode().split('\n'):
1994    line = line.strip()
1995    if not line:
1996      continue
1997    m = re.match(
1998        r'^name="(?P<NAME>.*)"\s+certificate="(?P<CERT>.*)"\s+'
1999        r'private_key="(?P<PRIVKEY>.*?)"(\s+compressed="(?P<COMPRESSED>.*?)")?'
2000        r'(\s+partition="(?P<PARTITION>.*?)")?$',
2001        line)
2002    if not m:
2003      continue
2004
2005    matches = m.groupdict()
2006    cert = matches["CERT"]
2007    privkey = matches["PRIVKEY"]
2008    name = matches["NAME"]
2009    this_compressed_extension = matches["COMPRESSED"]
2010
2011    public_key_suffix_len = len(OPTIONS.public_key_suffix)
2012    private_key_suffix_len = len(OPTIONS.private_key_suffix)
2013    if cert in SPECIAL_CERT_STRINGS and not privkey:
2014      certmap[name] = cert
2015    elif (cert.endswith(OPTIONS.public_key_suffix) and
2016          privkey.endswith(OPTIONS.private_key_suffix) and
2017          cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
2018      certmap[name] = cert[:-public_key_suffix_len]
2019    else:
2020      raise ValueError("Failed to parse line from apkcerts.txt:\n" + line)
2021
2022    if not this_compressed_extension:
2023      continue
2024
2025    # Only count the installed files.
2026    filename = name + '.' + this_compressed_extension
2027    if filename not in installed_files:
2028      continue
2029
2030    # Make sure that all the values in the compression map have the same
2031    # extension. We don't support multiple compression methods in the same
2032    # system image.
2033    if compressed_extension:
2034      if this_compressed_extension != compressed_extension:
2035        raise ValueError(
2036            "Multiple compressed extensions: {} vs {}".format(
2037                compressed_extension, this_compressed_extension))
2038    else:
2039      compressed_extension = this_compressed_extension
2040
2041  return (certmap,
2042          ("." + compressed_extension) if compressed_extension else None)
2043
2044
2045COMMON_DOCSTRING = """
2046Global options
2047
2048  -p  (--path) <dir>
2049      Prepend <dir>/bin to the list of places to search for binaries run by this
2050      script, and expect to find jars in <dir>/framework.
2051
2052  -s  (--device_specific) <file>
2053      Path to the Python module containing device-specific releasetools code.
2054
2055  -x  (--extra) <key=value>
2056      Add a key/value pair to the 'extras' dict, which device-specific extension
2057      code may look at.
2058
2059  -v  (--verbose)
2060      Show command lines being executed.
2061
2062  -h  (--help)
2063      Display this usage message and exit.
2064
2065  --logfile <file>
2066      Put verbose logs to specified file (regardless of --verbose option.)
2067"""
2068
2069
2070def Usage(docstring):
2071  print(docstring.rstrip("\n"))
2072  print(COMMON_DOCSTRING)
2073
2074
2075def ParseOptions(argv,
2076                 docstring,
2077                 extra_opts="", extra_long_opts=(),
2078                 extra_option_handler=None):
2079  """Parse the options in argv and return any arguments that aren't
2080  flags.  docstring is the calling module's docstring, to be displayed
2081  for errors and -h.  extra_opts and extra_long_opts are for flags
2082  defined by the caller, which are processed by passing them to
2083  extra_option_handler."""
2084
2085  try:
2086    opts, args = getopt.getopt(
2087        argv, "hvp:s:x:" + extra_opts,
2088        ["help", "verbose", "path=", "signapk_path=",
2089         "signapk_shared_library_path=", "extra_signapk_args=",
2090         "java_path=", "java_args=", "android_jar_path=", "public_key_suffix=",
2091         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
2092         "verity_signer_path=", "verity_signer_args=", "device_specific=",
2093         "extra=", "logfile=", "aftl_tool_path=", "aftl_server=",
2094         "aftl_key_path=", "aftl_manufacturer_key_path=",
2095         "aftl_signer_helper="] + list(extra_long_opts))
2096  except getopt.GetoptError as err:
2097    Usage(docstring)
2098    print("**", str(err), "**")
2099    sys.exit(2)
2100
2101  for o, a in opts:
2102    if o in ("-h", "--help"):
2103      Usage(docstring)
2104      sys.exit()
2105    elif o in ("-v", "--verbose"):
2106      OPTIONS.verbose = True
2107    elif o in ("-p", "--path"):
2108      OPTIONS.search_path = a
2109    elif o in ("--signapk_path",):
2110      OPTIONS.signapk_path = a
2111    elif o in ("--signapk_shared_library_path",):
2112      OPTIONS.signapk_shared_library_path = a
2113    elif o in ("--extra_signapk_args",):
2114      OPTIONS.extra_signapk_args = shlex.split(a)
2115    elif o in ("--java_path",):
2116      OPTIONS.java_path = a
2117    elif o in ("--java_args",):
2118      OPTIONS.java_args = shlex.split(a)
2119    elif o in ("--android_jar_path",):
2120      OPTIONS.android_jar_path = a
2121    elif o in ("--public_key_suffix",):
2122      OPTIONS.public_key_suffix = a
2123    elif o in ("--private_key_suffix",):
2124      OPTIONS.private_key_suffix = a
2125    elif o in ("--boot_signer_path",):
2126      OPTIONS.boot_signer_path = a
2127    elif o in ("--boot_signer_args",):
2128      OPTIONS.boot_signer_args = shlex.split(a)
2129    elif o in ("--verity_signer_path",):
2130      OPTIONS.verity_signer_path = a
2131    elif o in ("--verity_signer_args",):
2132      OPTIONS.verity_signer_args = shlex.split(a)
2133    elif o in ("--aftl_tool_path",):
2134      OPTIONS.aftl_tool_path = a
2135    elif o in ("--aftl_server",):
2136      OPTIONS.aftl_server = a
2137    elif o in ("--aftl_key_path",):
2138      OPTIONS.aftl_key_path = a
2139    elif o in ("--aftl_manufacturer_key_path",):
2140      OPTIONS.aftl_manufacturer_key_path = a
2141    elif o in ("--aftl_signer_helper",):
2142      OPTIONS.aftl_signer_helper = a
2143    elif o in ("-s", "--device_specific"):
2144      OPTIONS.device_specific = a
2145    elif o in ("-x", "--extra"):
2146      key, value = a.split("=", 1)
2147      OPTIONS.extras[key] = value
2148    elif o in ("--logfile",):
2149      OPTIONS.logfile = a
2150    else:
2151      if extra_option_handler is None or not extra_option_handler(o, a):
2152        assert False, "unknown option \"%s\"" % (o,)
2153
2154  if OPTIONS.search_path:
2155    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
2156                          os.pathsep + os.environ["PATH"])
2157
2158  return args
2159
2160
2161def MakeTempFile(prefix='tmp', suffix=''):
2162  """Make a temp file and add it to the list of things to be deleted
2163  when Cleanup() is called.  Return the filename."""
2164  fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
2165  os.close(fd)
2166  OPTIONS.tempfiles.append(fn)
2167  return fn
2168
2169
2170def MakeTempDir(prefix='tmp', suffix=''):
2171  """Makes a temporary dir that will be cleaned up with a call to Cleanup().
2172
2173  Returns:
2174    The absolute pathname of the new directory.
2175  """
2176  dir_name = tempfile.mkdtemp(suffix=suffix, prefix=prefix)
2177  OPTIONS.tempfiles.append(dir_name)
2178  return dir_name
2179
2180
2181def Cleanup():
2182  for i in OPTIONS.tempfiles:
2183    if os.path.isdir(i):
2184      shutil.rmtree(i, ignore_errors=True)
2185    else:
2186      os.remove(i)
2187  del OPTIONS.tempfiles[:]
2188
2189
2190class PasswordManager(object):
2191  def __init__(self):
2192    self.editor = os.getenv("EDITOR")
2193    self.pwfile = os.getenv("ANDROID_PW_FILE")
2194
2195  def GetPasswords(self, items):
2196    """Get passwords corresponding to each string in 'items',
2197    returning a dict.  (The dict may have keys in addition to the
2198    values in 'items'.)
2199
2200    Uses the passwords in $ANDROID_PW_FILE if available, letting the
2201    user edit that file to add more needed passwords.  If no editor is
2202    available, or $ANDROID_PW_FILE isn't define, prompts the user
2203    interactively in the ordinary way.
2204    """
2205
2206    current = self.ReadFile()
2207
2208    first = True
2209    while True:
2210      missing = []
2211      for i in items:
2212        if i not in current or not current[i]:
2213          missing.append(i)
2214      # Are all the passwords already in the file?
2215      if not missing:
2216        return current
2217
2218      for i in missing:
2219        current[i] = ""
2220
2221      if not first:
2222        print("key file %s still missing some passwords." % (self.pwfile,))
2223        if sys.version_info[0] >= 3:
2224          raw_input = input  # pylint: disable=redefined-builtin
2225        answer = raw_input("try to edit again? [y]> ").strip()
2226        if answer and answer[0] not in 'yY':
2227          raise RuntimeError("key passwords unavailable")
2228      first = False
2229
2230      current = self.UpdateAndReadFile(current)
2231
2232  def PromptResult(self, current):  # pylint: disable=no-self-use
2233    """Prompt the user to enter a value (password) for each key in
2234    'current' whose value is fales.  Returns a new dict with all the
2235    values.
2236    """
2237    result = {}
2238    for k, v in sorted(current.items()):
2239      if v:
2240        result[k] = v
2241      else:
2242        while True:
2243          result[k] = getpass.getpass(
2244              "Enter password for %s key> " % k).strip()
2245          if result[k]:
2246            break
2247    return result
2248
2249  def UpdateAndReadFile(self, current):
2250    if not self.editor or not self.pwfile:
2251      return self.PromptResult(current)
2252
2253    f = open(self.pwfile, "w")
2254    os.chmod(self.pwfile, 0o600)
2255    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
2256    f.write("# (Additional spaces are harmless.)\n\n")
2257
2258    first_line = None
2259    sorted_list = sorted([(not v, k, v) for (k, v) in current.items()])
2260    for i, (_, k, v) in enumerate(sorted_list):
2261      f.write("[[[  %s  ]]] %s\n" % (v, k))
2262      if not v and first_line is None:
2263        # position cursor on first line with no password.
2264        first_line = i + 4
2265    f.close()
2266
2267    RunAndCheckOutput([self.editor, "+%d" % (first_line,), self.pwfile])
2268
2269    return self.ReadFile()
2270
2271  def ReadFile(self):
2272    result = {}
2273    if self.pwfile is None:
2274      return result
2275    try:
2276      f = open(self.pwfile, "r")
2277      for line in f:
2278        line = line.strip()
2279        if not line or line[0] == '#':
2280          continue
2281        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
2282        if not m:
2283          logger.warning("Failed to parse password file: %s", line)
2284        else:
2285          result[m.group(2)] = m.group(1)
2286      f.close()
2287    except IOError as e:
2288      if e.errno != errno.ENOENT:
2289        logger.exception("Error reading password file:")
2290    return result
2291
2292
2293def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
2294             compress_type=None):
2295
2296  # http://b/18015246
2297  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
2298  # for files larger than 2GiB. We can work around this by adjusting their
2299  # limit. Note that `zipfile.writestr()` will not work for strings larger than
2300  # 2GiB. The Python interpreter sometimes rejects strings that large (though
2301  # it isn't clear to me exactly what circumstances cause this).
2302  # `zipfile.write()` must be used directly to work around this.
2303  #
2304  # This mess can be avoided if we port to python3.
2305  saved_zip64_limit = zipfile.ZIP64_LIMIT
2306  zipfile.ZIP64_LIMIT = (1 << 32) - 1
2307
2308  if compress_type is None:
2309    compress_type = zip_file.compression
2310  if arcname is None:
2311    arcname = filename
2312
2313  saved_stat = os.stat(filename)
2314
2315  try:
2316    # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
2317    # file to be zipped and reset it when we're done.
2318    os.chmod(filename, perms)
2319
2320    # Use a fixed timestamp so the output is repeatable.
2321    # Note: Use of fromtimestamp rather than utcfromtimestamp here is
2322    # intentional. zip stores datetimes in local time without a time zone
2323    # attached, so we need "epoch" but in the local time zone to get 2009/01/01
2324    # in the zip archive.
2325    local_epoch = datetime.datetime.fromtimestamp(0)
2326    timestamp = (datetime.datetime(2009, 1, 1) - local_epoch).total_seconds()
2327    os.utime(filename, (timestamp, timestamp))
2328
2329    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
2330  finally:
2331    os.chmod(filename, saved_stat.st_mode)
2332    os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
2333    zipfile.ZIP64_LIMIT = saved_zip64_limit
2334
2335
2336def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
2337                compress_type=None):
2338  """Wrap zipfile.writestr() function to work around the zip64 limit.
2339
2340  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
2341  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
2342  when calling crc32(bytes).
2343
2344  But it still works fine to write a shorter string into a large zip file.
2345  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
2346  when we know the string won't be too long.
2347  """
2348
2349  saved_zip64_limit = zipfile.ZIP64_LIMIT
2350  zipfile.ZIP64_LIMIT = (1 << 32) - 1
2351
2352  if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
2353    zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
2354    zinfo.compress_type = zip_file.compression
2355    if perms is None:
2356      perms = 0o100644
2357  else:
2358    zinfo = zinfo_or_arcname
2359    # Python 2 and 3 behave differently when calling ZipFile.writestr() with
2360    # zinfo.external_attr being 0. Python 3 uses `0o600 << 16` as the value for
2361    # such a case (since
2362    # https://github.com/python/cpython/commit/18ee29d0b870caddc0806916ca2c823254f1a1f9),
2363    # which seems to make more sense. Otherwise the entry will have 0o000 as the
2364    # permission bits. We follow the logic in Python 3 to get consistent
2365    # behavior between using the two versions.
2366    if not zinfo.external_attr:
2367      zinfo.external_attr = 0o600 << 16
2368
2369  # If compress_type is given, it overrides the value in zinfo.
2370  if compress_type is not None:
2371    zinfo.compress_type = compress_type
2372
2373  # If perms is given, it has a priority.
2374  if perms is not None:
2375    # If perms doesn't set the file type, mark it as a regular file.
2376    if perms & 0o770000 == 0:
2377      perms |= 0o100000
2378    zinfo.external_attr = perms << 16
2379
2380  # Use a fixed timestamp so the output is repeatable.
2381  zinfo.date_time = (2009, 1, 1, 0, 0, 0)
2382
2383  zip_file.writestr(zinfo, data)
2384  zipfile.ZIP64_LIMIT = saved_zip64_limit
2385
2386
2387def ZipDelete(zip_filename, entries):
2388  """Deletes entries from a ZIP file.
2389
2390  Since deleting entries from a ZIP file is not supported, it shells out to
2391  'zip -d'.
2392
2393  Args:
2394    zip_filename: The name of the ZIP file.
2395    entries: The name of the entry, or the list of names to be deleted.
2396
2397  Raises:
2398    AssertionError: In case of non-zero return from 'zip'.
2399  """
2400  if isinstance(entries, str):
2401    entries = [entries]
2402  cmd = ["zip", "-d", zip_filename] + entries
2403  RunAndCheckOutput(cmd)
2404
2405
2406def ZipClose(zip_file):
2407  # http://b/18015246
2408  # zipfile also refers to ZIP64_LIMIT during close() when it writes out the
2409  # central directory.
2410  saved_zip64_limit = zipfile.ZIP64_LIMIT
2411  zipfile.ZIP64_LIMIT = (1 << 32) - 1
2412
2413  zip_file.close()
2414
2415  zipfile.ZIP64_LIMIT = saved_zip64_limit
2416
2417
2418class DeviceSpecificParams(object):
2419  module = None
2420
2421  def __init__(self, **kwargs):
2422    """Keyword arguments to the constructor become attributes of this
2423    object, which is passed to all functions in the device-specific
2424    module."""
2425    for k, v in kwargs.items():
2426      setattr(self, k, v)
2427    self.extras = OPTIONS.extras
2428
2429    if self.module is None:
2430      path = OPTIONS.device_specific
2431      if not path:
2432        return
2433      try:
2434        if os.path.isdir(path):
2435          info = imp.find_module("releasetools", [path])
2436        else:
2437          d, f = os.path.split(path)
2438          b, x = os.path.splitext(f)
2439          if x == ".py":
2440            f = b
2441          info = imp.find_module(f, [d])
2442        logger.info("loaded device-specific extensions from %s", path)
2443        self.module = imp.load_module("device_specific", *info)
2444      except ImportError:
2445        logger.info("unable to load device-specific module; assuming none")
2446
2447  def _DoCall(self, function_name, *args, **kwargs):
2448    """Call the named function in the device-specific module, passing
2449    the given args and kwargs.  The first argument to the call will be
2450    the DeviceSpecific object itself.  If there is no module, or the
2451    module does not define the function, return the value of the
2452    'default' kwarg (which itself defaults to None)."""
2453    if self.module is None or not hasattr(self.module, function_name):
2454      return kwargs.get("default")
2455    return getattr(self.module, function_name)(*((self,) + args), **kwargs)
2456
2457  def FullOTA_Assertions(self):
2458    """Called after emitting the block of assertions at the top of a
2459    full OTA package.  Implementations can add whatever additional
2460    assertions they like."""
2461    return self._DoCall("FullOTA_Assertions")
2462
2463  def FullOTA_InstallBegin(self):
2464    """Called at the start of full OTA installation."""
2465    return self._DoCall("FullOTA_InstallBegin")
2466
2467  def FullOTA_GetBlockDifferences(self):
2468    """Called during full OTA installation and verification.
2469    Implementation should return a list of BlockDifference objects describing
2470    the update on each additional partitions.
2471    """
2472    return self._DoCall("FullOTA_GetBlockDifferences")
2473
2474  def FullOTA_InstallEnd(self):
2475    """Called at the end of full OTA installation; typically this is
2476    used to install the image for the device's baseband processor."""
2477    return self._DoCall("FullOTA_InstallEnd")
2478
2479  def IncrementalOTA_Assertions(self):
2480    """Called after emitting the block of assertions at the top of an
2481    incremental OTA package.  Implementations can add whatever
2482    additional assertions they like."""
2483    return self._DoCall("IncrementalOTA_Assertions")
2484
2485  def IncrementalOTA_VerifyBegin(self):
2486    """Called at the start of the verification phase of incremental
2487    OTA installation; additional checks can be placed here to abort
2488    the script before any changes are made."""
2489    return self._DoCall("IncrementalOTA_VerifyBegin")
2490
2491  def IncrementalOTA_VerifyEnd(self):
2492    """Called at the end of the verification phase of incremental OTA
2493    installation; additional checks can be placed here to abort the
2494    script before any changes are made."""
2495    return self._DoCall("IncrementalOTA_VerifyEnd")
2496
2497  def IncrementalOTA_InstallBegin(self):
2498    """Called at the start of incremental OTA installation (after
2499    verification is complete)."""
2500    return self._DoCall("IncrementalOTA_InstallBegin")
2501
2502  def IncrementalOTA_GetBlockDifferences(self):
2503    """Called during incremental OTA installation and verification.
2504    Implementation should return a list of BlockDifference objects describing
2505    the update on each additional partitions.
2506    """
2507    return self._DoCall("IncrementalOTA_GetBlockDifferences")
2508
2509  def IncrementalOTA_InstallEnd(self):
2510    """Called at the end of incremental OTA installation; typically
2511    this is used to install the image for the device's baseband
2512    processor."""
2513    return self._DoCall("IncrementalOTA_InstallEnd")
2514
2515  def VerifyOTA_Assertions(self):
2516    return self._DoCall("VerifyOTA_Assertions")
2517
2518
2519class File(object):
2520  def __init__(self, name, data, compress_size=None):
2521    self.name = name
2522    self.data = data
2523    self.size = len(data)
2524    self.compress_size = compress_size or self.size
2525    self.sha1 = sha1(data).hexdigest()
2526
2527  @classmethod
2528  def FromLocalFile(cls, name, diskname):
2529    f = open(diskname, "rb")
2530    data = f.read()
2531    f.close()
2532    return File(name, data)
2533
2534  def WriteToTemp(self):
2535    t = tempfile.NamedTemporaryFile()
2536    t.write(self.data)
2537    t.flush()
2538    return t
2539
2540  def WriteToDir(self, d):
2541    with open(os.path.join(d, self.name), "wb") as fp:
2542      fp.write(self.data)
2543
2544  def AddToZip(self, z, compression=None):
2545    ZipWriteStr(z, self.name, self.data, compress_type=compression)
2546
2547
2548DIFF_PROGRAM_BY_EXT = {
2549    ".gz": "imgdiff",
2550    ".zip": ["imgdiff", "-z"],
2551    ".jar": ["imgdiff", "-z"],
2552    ".apk": ["imgdiff", "-z"],
2553    ".img": "imgdiff",
2554}
2555
2556
2557class Difference(object):
2558  def __init__(self, tf, sf, diff_program=None):
2559    self.tf = tf
2560    self.sf = sf
2561    self.patch = None
2562    self.diff_program = diff_program
2563
2564  def ComputePatch(self):
2565    """Compute the patch (as a string of data) needed to turn sf into
2566    tf.  Returns the same tuple as GetPatch()."""
2567
2568    tf = self.tf
2569    sf = self.sf
2570
2571    if self.diff_program:
2572      diff_program = self.diff_program
2573    else:
2574      ext = os.path.splitext(tf.name)[1]
2575      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")
2576
2577    ttemp = tf.WriteToTemp()
2578    stemp = sf.WriteToTemp()
2579
2580    ext = os.path.splitext(tf.name)[1]
2581
2582    try:
2583      ptemp = tempfile.NamedTemporaryFile()
2584      if isinstance(diff_program, list):
2585        cmd = copy.copy(diff_program)
2586      else:
2587        cmd = [diff_program]
2588      cmd.append(stemp.name)
2589      cmd.append(ttemp.name)
2590      cmd.append(ptemp.name)
2591      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
2592      err = []
2593
2594      def run():
2595        _, e = p.communicate()
2596        if e:
2597          err.append(e)
2598      th = threading.Thread(target=run)
2599      th.start()
2600      th.join(timeout=300)   # 5 mins
2601      if th.is_alive():
2602        logger.warning("diff command timed out")
2603        p.terminate()
2604        th.join(5)
2605        if th.is_alive():
2606          p.kill()
2607          th.join()
2608
2609      if p.returncode != 0:
2610        logger.warning("Failure running %s:\n%s\n", diff_program, "".join(err))
2611        self.patch = None
2612        return None, None, None
2613      diff = ptemp.read()
2614    finally:
2615      ptemp.close()
2616      stemp.close()
2617      ttemp.close()
2618
2619    self.patch = diff
2620    return self.tf, self.sf, self.patch
2621
2622  def GetPatch(self):
2623    """Returns a tuple of (target_file, source_file, patch_data).
2624
2625    patch_data may be None if ComputePatch hasn't been called, or if
2626    computing the patch failed.
2627    """
2628    return self.tf, self.sf, self.patch
2629
2630
2631def ComputeDifferences(diffs):
2632  """Call ComputePatch on all the Difference objects in 'diffs'."""
2633  logger.info("%d diffs to compute", len(diffs))
2634
2635  # Do the largest files first, to try and reduce the long-pole effect.
2636  by_size = [(i.tf.size, i) for i in diffs]
2637  by_size.sort(reverse=True)
2638  by_size = [i[1] for i in by_size]
2639
2640  lock = threading.Lock()
2641  diff_iter = iter(by_size)   # accessed under lock
2642
2643  def worker():
2644    try:
2645      lock.acquire()
2646      for d in diff_iter:
2647        lock.release()
2648        start = time.time()
2649        d.ComputePatch()
2650        dur = time.time() - start
2651        lock.acquire()
2652
2653        tf, sf, patch = d.GetPatch()
2654        if sf.name == tf.name:
2655          name = tf.name
2656        else:
2657          name = "%s (%s)" % (tf.name, sf.name)
2658        if patch is None:
2659          logger.error("patching failed! %40s", name)
2660        else:
2661          logger.info(
2662              "%8.2f sec %8d / %8d bytes (%6.2f%%) %s", dur, len(patch),
2663              tf.size, 100.0 * len(patch) / tf.size, name)
2664      lock.release()
2665    except Exception:
2666      logger.exception("Failed to compute diff from worker")
2667      raise
2668
2669  # start worker threads; wait for them all to finish.
2670  threads = [threading.Thread(target=worker)
2671             for i in range(OPTIONS.worker_threads)]
2672  for th in threads:
2673    th.start()
2674  while threads:
2675    threads.pop().join()
2676
2677
2678class BlockDifference(object):
2679  def __init__(self, partition, tgt, src=None, check_first_block=False,
2680               version=None, disable_imgdiff=False):
2681    self.tgt = tgt
2682    self.src = src
2683    self.partition = partition
2684    self.check_first_block = check_first_block
2685    self.disable_imgdiff = disable_imgdiff
2686
2687    if version is None:
2688      version = max(
2689          int(i) for i in
2690          OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
2691    assert version >= 3
2692    self.version = version
2693
2694    b = BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
2695                       version=self.version,
2696                       disable_imgdiff=self.disable_imgdiff)
2697    self.path = os.path.join(MakeTempDir(), partition)
2698    b.Compute(self.path)
2699    self._required_cache = b.max_stashed_size
2700    self.touched_src_ranges = b.touched_src_ranges
2701    self.touched_src_sha1 = b.touched_src_sha1
2702
2703    # On devices with dynamic partitions, for new partitions,
2704    # src is None but OPTIONS.source_info_dict is not.
2705    if OPTIONS.source_info_dict is None:
2706      is_dynamic_build = OPTIONS.info_dict.get(
2707          "use_dynamic_partitions") == "true"
2708      is_dynamic_source = False
2709    else:
2710      is_dynamic_build = OPTIONS.source_info_dict.get(
2711          "use_dynamic_partitions") == "true"
2712      is_dynamic_source = partition in shlex.split(
2713          OPTIONS.source_info_dict.get("dynamic_partition_list", "").strip())
2714
2715    is_dynamic_target = partition in shlex.split(
2716        OPTIONS.info_dict.get("dynamic_partition_list", "").strip())
2717
2718    # For dynamic partitions builds, check partition list in both source
2719    # and target build because new partitions may be added, and existing
2720    # partitions may be removed.
2721    is_dynamic = is_dynamic_build and (is_dynamic_source or is_dynamic_target)
2722
2723    if is_dynamic:
2724      self.device = 'map_partition("%s")' % partition
2725    else:
2726      if OPTIONS.source_info_dict is None:
2727        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
2728                                              OPTIONS.info_dict)
2729      else:
2730        _, device_expr = GetTypeAndDeviceExpr("/" + partition,
2731                                              OPTIONS.source_info_dict)
2732      self.device = device_expr
2733
2734  @property
2735  def required_cache(self):
2736    return self._required_cache
2737
2738  def WriteScript(self, script, output_zip, progress=None,
2739                  write_verify_script=False):
2740    if not self.src:
2741      # write the output unconditionally
2742      script.Print("Patching %s image unconditionally..." % (self.partition,))
2743    else:
2744      script.Print("Patching %s image after verification." % (self.partition,))
2745
2746    if progress:
2747      script.ShowProgress(progress, 0)
2748    self._WriteUpdate(script, output_zip)
2749
2750    if write_verify_script:
2751      self.WritePostInstallVerifyScript(script)
2752
2753  def WriteStrictVerifyScript(self, script):
2754    """Verify all the blocks in the care_map, including clobbered blocks.
2755
2756    This differs from the WriteVerifyScript() function: a) it prints different
2757    error messages; b) it doesn't allow half-way updated images to pass the
2758    verification."""
2759
2760    partition = self.partition
2761    script.Print("Verifying %s..." % (partition,))
2762    ranges = self.tgt.care_map
2763    ranges_str = ranges.to_string_raw()
2764    script.AppendExtra(
2765        'range_sha1(%s, "%s") == "%s" && ui_print("    Verified.") || '
2766        'ui_print("%s has unexpected contents.");' % (
2767            self.device, ranges_str,
2768            self.tgt.TotalSha1(include_clobbered_blocks=True),
2769            self.partition))
2770    script.AppendExtra("")
2771
2772  def WriteVerifyScript(self, script, touched_blocks_only=False):
2773    partition = self.partition
2774
2775    # full OTA
2776    if not self.src:
2777      script.Print("Image %s will be patched unconditionally." % (partition,))
2778
2779    # incremental OTA
2780    else:
2781      if touched_blocks_only:
2782        ranges = self.touched_src_ranges
2783        expected_sha1 = self.touched_src_sha1
2784      else:
2785        ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
2786        expected_sha1 = self.src.TotalSha1()
2787
2788      # No blocks to be checked, skipping.
2789      if not ranges:
2790        return
2791
2792      ranges_str = ranges.to_string_raw()
2793      script.AppendExtra(
2794          'if (range_sha1(%s, "%s") == "%s" || block_image_verify(%s, '
2795          'package_extract_file("%s.transfer.list"), "%s.new.dat", '
2796          '"%s.patch.dat")) then' % (
2797              self.device, ranges_str, expected_sha1,
2798              self.device, partition, partition, partition))
2799      script.Print('Verified %s image...' % (partition,))
2800      script.AppendExtra('else')
2801
2802      if self.version >= 4:
2803
2804        # Bug: 21124327
2805        # When generating incrementals for the system and vendor partitions in
2806        # version 4 or newer, explicitly check the first block (which contains
2807        # the superblock) of the partition to see if it's what we expect. If
2808        # this check fails, give an explicit log message about the partition
2809        # having been remounted R/W (the most likely explanation).
2810        if self.check_first_block:
2811          script.AppendExtra('check_first_block(%s);' % (self.device,))
2812
2813        # If version >= 4, try block recovery before abort update
2814        if partition == "system":
2815          code = ErrorCode.SYSTEM_RECOVER_FAILURE
2816        else:
2817          code = ErrorCode.VENDOR_RECOVER_FAILURE
2818        script.AppendExtra((
2819            'ifelse (block_image_recover({device}, "{ranges}") && '
2820            'block_image_verify({device}, '
2821            'package_extract_file("{partition}.transfer.list"), '
2822            '"{partition}.new.dat", "{partition}.patch.dat"), '
2823            'ui_print("{partition} recovered successfully."), '
2824            'abort("E{code}: {partition} partition fails to recover"));\n'
2825            'endif;').format(device=self.device, ranges=ranges_str,
2826                             partition=partition, code=code))
2827
2828      # Abort the OTA update. Note that the incremental OTA cannot be applied
2829      # even if it may match the checksum of the target partition.
2830      # a) If version < 3, operations like move and erase will make changes
2831      #    unconditionally and damage the partition.
2832      # b) If version >= 3, it won't even reach here.
2833      else:
2834        if partition == "system":
2835          code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
2836        else:
2837          code = ErrorCode.VENDOR_VERIFICATION_FAILURE
2838        script.AppendExtra((
2839            'abort("E%d: %s partition has unexpected contents");\n'
2840            'endif;') % (code, partition))
2841
2842  def WritePostInstallVerifyScript(self, script):
2843    partition = self.partition
2844    script.Print('Verifying the updated %s image...' % (partition,))
2845    # Unlike pre-install verification, clobbered_blocks should not be ignored.
2846    ranges = self.tgt.care_map
2847    ranges_str = ranges.to_string_raw()
2848    script.AppendExtra(
2849        'if range_sha1(%s, "%s") == "%s" then' % (
2850            self.device, ranges_str,
2851            self.tgt.TotalSha1(include_clobbered_blocks=True)))
2852
2853    # Bug: 20881595
2854    # Verify that extended blocks are really zeroed out.
2855    if self.tgt.extended:
2856      ranges_str = self.tgt.extended.to_string_raw()
2857      script.AppendExtra(
2858          'if range_sha1(%s, "%s") == "%s" then' % (
2859              self.device, ranges_str,
2860              self._HashZeroBlocks(self.tgt.extended.size())))
2861      script.Print('Verified the updated %s image.' % (partition,))
2862      if partition == "system":
2863        code = ErrorCode.SYSTEM_NONZERO_CONTENTS
2864      else:
2865        code = ErrorCode.VENDOR_NONZERO_CONTENTS
2866      script.AppendExtra(
2867          'else\n'
2868          '  abort("E%d: %s partition has unexpected non-zero contents after '
2869          'OTA update");\n'
2870          'endif;' % (code, partition))
2871    else:
2872      script.Print('Verified the updated %s image.' % (partition,))
2873
2874    if partition == "system":
2875      code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
2876    else:
2877      code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS
2878
2879    script.AppendExtra(
2880        'else\n'
2881        '  abort("E%d: %s partition has unexpected contents after OTA '
2882        'update");\n'
2883        'endif;' % (code, partition))
2884
2885  def _WriteUpdate(self, script, output_zip):
2886    ZipWrite(output_zip,
2887             '{}.transfer.list'.format(self.path),
2888             '{}.transfer.list'.format(self.partition))
2889
2890    # For full OTA, compress the new.dat with brotli with quality 6 to reduce
2891    # its size. Quailty 9 almost triples the compression time but doesn't
2892    # further reduce the size too much. For a typical 1.8G system.new.dat
2893    #                       zip  | brotli(quality 6)  | brotli(quality 9)
2894    #   compressed_size:    942M | 869M (~8% reduced) | 854M
2895    #   compression_time:   75s  | 265s               | 719s
2896    #   decompression_time: 15s  | 25s                | 25s
2897
2898    if not self.src:
2899      brotli_cmd = ['brotli', '--quality=6',
2900                    '--output={}.new.dat.br'.format(self.path),
2901                    '{}.new.dat'.format(self.path)]
2902      print("Compressing {}.new.dat with brotli".format(self.partition))
2903      RunAndCheckOutput(brotli_cmd)
2904
2905      new_data_name = '{}.new.dat.br'.format(self.partition)
2906      ZipWrite(output_zip,
2907               '{}.new.dat.br'.format(self.path),
2908               new_data_name,
2909               compress_type=zipfile.ZIP_STORED)
2910    else:
2911      new_data_name = '{}.new.dat'.format(self.partition)
2912      ZipWrite(output_zip, '{}.new.dat'.format(self.path), new_data_name)
2913
2914    ZipWrite(output_zip,
2915             '{}.patch.dat'.format(self.path),
2916             '{}.patch.dat'.format(self.partition),
2917             compress_type=zipfile.ZIP_STORED)
2918
2919    if self.partition == "system":
2920      code = ErrorCode.SYSTEM_UPDATE_FAILURE
2921    else:
2922      code = ErrorCode.VENDOR_UPDATE_FAILURE
2923
2924    call = ('block_image_update({device}, '
2925            'package_extract_file("{partition}.transfer.list"), '
2926            '"{new_data_name}", "{partition}.patch.dat") ||\n'
2927            '  abort("E{code}: Failed to update {partition} image.");'.format(
2928                device=self.device, partition=self.partition,
2929                new_data_name=new_data_name, code=code))
2930    script.AppendExtra(script.WordWrap(call))
2931
2932  def _HashBlocks(self, source, ranges):  # pylint: disable=no-self-use
2933    data = source.ReadRangeSet(ranges)
2934    ctx = sha1()
2935
2936    for p in data:
2937      ctx.update(p)
2938
2939    return ctx.hexdigest()
2940
2941  def _HashZeroBlocks(self, num_blocks):  # pylint: disable=no-self-use
2942    """Return the hash value for all zero blocks."""
2943    zero_block = '\x00' * 4096
2944    ctx = sha1()
2945    for _ in range(num_blocks):
2946      ctx.update(zero_block)
2947
2948    return ctx.hexdigest()
2949
2950
2951# Expose these two classes to support vendor-specific scripts
2952DataImage = images.DataImage
2953EmptyImage = images.EmptyImage
2954
2955
2956# map recovery.fstab's fs_types to mount/format "partition types"
2957PARTITION_TYPES = {
2958    "ext4": "EMMC",
2959    "emmc": "EMMC",
2960    "f2fs": "EMMC",
2961    "squashfs": "EMMC"
2962}
2963
2964
2965def GetTypeAndDevice(mount_point, info, check_no_slot=True):
2966  """
2967  Use GetTypeAndDeviceExpr whenever possible. This function is kept for
2968  backwards compatibility. It aborts if the fstab entry has slotselect option
2969  (unless check_no_slot is explicitly set to False).
2970  """
2971  fstab = info["fstab"]
2972  if fstab:
2973    if check_no_slot:
2974      assert not fstab[mount_point].slotselect, \
2975          "Use GetTypeAndDeviceExpr instead"
2976    return (PARTITION_TYPES[fstab[mount_point].fs_type],
2977            fstab[mount_point].device)
2978  raise KeyError
2979
2980
2981def GetTypeAndDeviceExpr(mount_point, info):
2982  """
2983  Return the filesystem of the partition, and an edify expression that evaluates
2984  to the device at runtime.
2985  """
2986  fstab = info["fstab"]
2987  if fstab:
2988    p = fstab[mount_point]
2989    device_expr = '"%s"' % fstab[mount_point].device
2990    if p.slotselect:
2991      device_expr = 'add_slot_suffix(%s)' % device_expr
2992    return (PARTITION_TYPES[fstab[mount_point].fs_type], device_expr)
2993  raise KeyError
2994
2995
2996def GetEntryForDevice(fstab, device):
2997  """
2998  Returns:
2999    The first entry in fstab whose device is the given value.
3000  """
3001  if not fstab:
3002    return None
3003  for mount_point in fstab:
3004    if fstab[mount_point].device == device:
3005      return fstab[mount_point]
3006  return None
3007
3008
3009def ParseCertificate(data):
3010  """Parses and converts a PEM-encoded certificate into DER-encoded.
3011
3012  This gives the same result as `openssl x509 -in <filename> -outform DER`.
3013
3014  Returns:
3015    The decoded certificate bytes.
3016  """
3017  cert_buffer = []
3018  save = False
3019  for line in data.split("\n"):
3020    if "--END CERTIFICATE--" in line:
3021      break
3022    if save:
3023      cert_buffer.append(line)
3024    if "--BEGIN CERTIFICATE--" in line:
3025      save = True
3026  cert = base64.b64decode("".join(cert_buffer))
3027  return cert
3028
3029
3030def ExtractPublicKey(cert):
3031  """Extracts the public key (PEM-encoded) from the given certificate file.
3032
3033  Args:
3034    cert: The certificate filename.
3035
3036  Returns:
3037    The public key string.
3038
3039  Raises:
3040    AssertionError: On non-zero return from 'openssl'.
3041  """
3042  # The behavior with '-out' is different between openssl 1.1 and openssl 1.0.
3043  # While openssl 1.1 writes the key into the given filename followed by '-out',
3044  # openssl 1.0 (both of 1.0.1 and 1.0.2) doesn't. So we collect the output from
3045  # stdout instead.
3046  cmd = ['openssl', 'x509', '-pubkey', '-noout', '-in', cert]
3047  proc = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
3048  pubkey, stderrdata = proc.communicate()
3049  assert proc.returncode == 0, \
3050      'Failed to dump public key from certificate: %s\n%s' % (cert, stderrdata)
3051  return pubkey
3052
3053
3054def ExtractAvbPublicKey(avbtool, key):
3055  """Extracts the AVB public key from the given public or private key.
3056
3057  Args:
3058    avbtool: The AVB tool to use.
3059    key: The input key file, which should be PEM-encoded public or private key.
3060
3061  Returns:
3062    The path to the extracted AVB public key file.
3063  """
3064  output = MakeTempFile(prefix='avb-', suffix='.avbpubkey')
3065  RunAndCheckOutput(
3066      [avbtool, 'extract_public_key', "--key", key, "--output", output])
3067  return output
3068
3069
3070def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
3071                      info_dict=None):
3072  """Generates the recovery-from-boot patch and writes the script to output.
3073
3074  Most of the space in the boot and recovery images is just the kernel, which is
3075  identical for the two, so the resulting patch should be efficient. Add it to
3076  the output zip, along with a shell script that is run from init.rc on first
3077  boot to actually do the patching and install the new recovery image.
3078
3079  Args:
3080    input_dir: The top-level input directory of the target-files.zip.
3081    output_sink: The callback function that writes the result.
3082    recovery_img: File object for the recovery image.
3083    boot_img: File objects for the boot image.
3084    info_dict: A dict returned by common.LoadInfoDict() on the input
3085        target_files. Will use OPTIONS.info_dict if None has been given.
3086  """
3087  if info_dict is None:
3088    info_dict = OPTIONS.info_dict
3089
3090  full_recovery_image = info_dict.get("full_recovery_image") == "true"
3091  board_uses_vendorimage = info_dict.get("board_uses_vendorimage") == "true"
3092
3093  if board_uses_vendorimage:
3094    # In this case, the output sink is rooted at VENDOR
3095    recovery_img_path = "etc/recovery.img"
3096    recovery_resource_dat_path = "VENDOR/etc/recovery-resource.dat"
3097    sh_dir = "bin"
3098  else:
3099    # In this case the output sink is rooted at SYSTEM
3100    recovery_img_path = "vendor/etc/recovery.img"
3101    recovery_resource_dat_path = "SYSTEM/vendor/etc/recovery-resource.dat"
3102    sh_dir = "vendor/bin"
3103
3104  if full_recovery_image:
3105    output_sink(recovery_img_path, recovery_img.data)
3106
3107  else:
3108    system_root_image = info_dict.get("system_root_image") == "true"
3109    path = os.path.join(input_dir, recovery_resource_dat_path)
3110    # With system-root-image, boot and recovery images will have mismatching
3111    # entries (only recovery has the ramdisk entry) (Bug: 72731506). Use bsdiff
3112    # to handle such a case.
3113    if system_root_image:
3114      diff_program = ["bsdiff"]
3115      bonus_args = ""
3116      assert not os.path.exists(path)
3117    else:
3118      diff_program = ["imgdiff"]
3119      if os.path.exists(path):
3120        diff_program.append("-b")
3121        diff_program.append(path)
3122        bonus_args = "--bonus /vendor/etc/recovery-resource.dat"
3123      else:
3124        bonus_args = ""
3125
3126    d = Difference(recovery_img, boot_img, diff_program=diff_program)
3127    _, _, patch = d.ComputePatch()
3128    output_sink("recovery-from-boot.p", patch)
3129
3130  try:
3131    # The following GetTypeAndDevice()s need to use the path in the target
3132    # info_dict instead of source_info_dict.
3133    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict,
3134                                              check_no_slot=False)
3135    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict,
3136                                                      check_no_slot=False)
3137  except KeyError:
3138    return
3139
3140  if full_recovery_image:
3141
3142    # Note that we use /vendor to refer to the recovery resources. This will
3143    # work for a separate vendor partition mounted at /vendor or a
3144    # /system/vendor subdirectory on the system partition, for which init will
3145    # create a symlink from /vendor to /system/vendor.
3146
3147    sh = """#!/vendor/bin/sh
3148if ! applypatch --check %(type)s:%(device)s:%(size)d:%(sha1)s; then
3149  applypatch \\
3150          --flash /vendor/etc/recovery.img \\
3151          --target %(type)s:%(device)s:%(size)d:%(sha1)s && \\
3152      log -t recovery "Installing new recovery image: succeeded" || \\
3153      log -t recovery "Installing new recovery image: failed"
3154else
3155  log -t recovery "Recovery image already installed"
3156fi
3157""" % {'type': recovery_type,
3158       'device': recovery_device,
3159       'sha1': recovery_img.sha1,
3160       'size': recovery_img.size}
3161  else:
3162    sh = """#!/vendor/bin/sh
3163if ! applypatch --check %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
3164  applypatch %(bonus_args)s \\
3165          --patch /vendor/recovery-from-boot.p \\
3166          --source %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s \\
3167          --target %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s && \\
3168      log -t recovery "Installing new recovery image: succeeded" || \\
3169      log -t recovery "Installing new recovery image: failed"
3170else
3171  log -t recovery "Recovery image already installed"
3172fi
3173""" % {'boot_size': boot_img.size,
3174       'boot_sha1': boot_img.sha1,
3175       'recovery_size': recovery_img.size,
3176       'recovery_sha1': recovery_img.sha1,
3177       'boot_type': boot_type,
3178       'boot_device': boot_device + '$(getprop ro.boot.slot_suffix)',
3179       'recovery_type': recovery_type + '$(getprop ro.boot.slot_suffix)',
3180       'recovery_device': recovery_device,
3181       'bonus_args': bonus_args}
3182
3183  # The install script location moved from /system/etc to /system/bin in the L
3184  # release. In the R release it is in VENDOR/bin or SYSTEM/vendor/bin.
3185  sh_location = os.path.join(sh_dir, "install-recovery.sh")
3186
3187  logger.info("putting script in %s", sh_location)
3188
3189  output_sink(sh_location, sh.encode())
3190
3191
3192class DynamicPartitionUpdate(object):
3193  def __init__(self, src_group=None, tgt_group=None, progress=None,
3194               block_difference=None):
3195    self.src_group = src_group
3196    self.tgt_group = tgt_group
3197    self.progress = progress
3198    self.block_difference = block_difference
3199
3200  @property
3201  def src_size(self):
3202    if not self.block_difference:
3203      return 0
3204    return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.src)
3205
3206  @property
3207  def tgt_size(self):
3208    if not self.block_difference:
3209      return 0
3210    return DynamicPartitionUpdate._GetSparseImageSize(self.block_difference.tgt)
3211
3212  @staticmethod
3213  def _GetSparseImageSize(img):
3214    if not img:
3215      return 0
3216    return img.blocksize * img.total_blocks
3217
3218
3219class DynamicGroupUpdate(object):
3220  def __init__(self, src_size=None, tgt_size=None):
3221    # None: group does not exist. 0: no size limits.
3222    self.src_size = src_size
3223    self.tgt_size = tgt_size
3224
3225
3226class DynamicPartitionsDifference(object):
3227  def __init__(self, info_dict, block_diffs, progress_dict=None,
3228               source_info_dict=None):
3229    if progress_dict is None:
3230      progress_dict = {}
3231
3232    self._remove_all_before_apply = False
3233    if source_info_dict is None:
3234      self._remove_all_before_apply = True
3235      source_info_dict = {}
3236
3237    block_diff_dict = collections.OrderedDict(
3238        [(e.partition, e) for e in block_diffs])
3239
3240    assert len(block_diff_dict) == len(block_diffs), \
3241        "Duplicated BlockDifference object for {}".format(
3242            [partition for partition, count in
3243             collections.Counter(e.partition for e in block_diffs).items()
3244             if count > 1])
3245
3246    self._partition_updates = collections.OrderedDict()
3247
3248    for p, block_diff in block_diff_dict.items():
3249      self._partition_updates[p] = DynamicPartitionUpdate()
3250      self._partition_updates[p].block_difference = block_diff
3251
3252    for p, progress in progress_dict.items():
3253      if p in self._partition_updates:
3254        self._partition_updates[p].progress = progress
3255
3256    tgt_groups = shlex.split(info_dict.get(
3257        "super_partition_groups", "").strip())
3258    src_groups = shlex.split(source_info_dict.get(
3259        "super_partition_groups", "").strip())
3260
3261    for g in tgt_groups:
3262      for p in shlex.split(info_dict.get(
3263          "super_%s_partition_list" % g, "").strip()):
3264        assert p in self._partition_updates, \
3265            "{} is in target super_{}_partition_list but no BlockDifference " \
3266            "object is provided.".format(p, g)
3267        self._partition_updates[p].tgt_group = g
3268
3269    for g in src_groups:
3270      for p in shlex.split(source_info_dict.get(
3271          "super_%s_partition_list" % g, "").strip()):
3272        assert p in self._partition_updates, \
3273            "{} is in source super_{}_partition_list but no BlockDifference " \
3274            "object is provided.".format(p, g)
3275        self._partition_updates[p].src_group = g
3276
3277    target_dynamic_partitions = set(shlex.split(info_dict.get(
3278        "dynamic_partition_list", "").strip()))
3279    block_diffs_with_target = set(p for p, u in self._partition_updates.items()
3280                                  if u.tgt_size)
3281    assert block_diffs_with_target == target_dynamic_partitions, \
3282        "Target Dynamic partitions: {}, BlockDifference with target: {}".format(
3283            list(target_dynamic_partitions), list(block_diffs_with_target))
3284
3285    source_dynamic_partitions = set(shlex.split(source_info_dict.get(
3286        "dynamic_partition_list", "").strip()))
3287    block_diffs_with_source = set(p for p, u in self._partition_updates.items()
3288                                  if u.src_size)
3289    assert block_diffs_with_source == source_dynamic_partitions, \
3290        "Source Dynamic partitions: {}, BlockDifference with source: {}".format(
3291            list(source_dynamic_partitions), list(block_diffs_with_source))
3292
3293    if self._partition_updates:
3294      logger.info("Updating dynamic partitions %s",
3295                  self._partition_updates.keys())
3296
3297    self._group_updates = collections.OrderedDict()
3298
3299    for g in tgt_groups:
3300      self._group_updates[g] = DynamicGroupUpdate()
3301      self._group_updates[g].tgt_size = int(info_dict.get(
3302          "super_%s_group_size" % g, "0").strip())
3303
3304    for g in src_groups:
3305      if g not in self._group_updates:
3306        self._group_updates[g] = DynamicGroupUpdate()
3307      self._group_updates[g].src_size = int(source_info_dict.get(
3308          "super_%s_group_size" % g, "0").strip())
3309
3310    self._Compute()
3311
3312  def WriteScript(self, script, output_zip, write_verify_script=False):
3313    script.Comment('--- Start patching dynamic partitions ---')
3314    for p, u in self._partition_updates.items():
3315      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
3316        script.Comment('Patch partition %s' % p)
3317        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
3318                                       write_verify_script=False)
3319
3320    op_list_path = MakeTempFile()
3321    with open(op_list_path, 'w') as f:
3322      for line in self._op_list:
3323        f.write('{}\n'.format(line))
3324
3325    ZipWrite(output_zip, op_list_path, "dynamic_partitions_op_list")
3326
3327    script.Comment('Update dynamic partition metadata')
3328    script.AppendExtra('assert(update_dynamic_partitions('
3329                       'package_extract_file("dynamic_partitions_op_list")));')
3330
3331    if write_verify_script:
3332      for p, u in self._partition_updates.items():
3333        if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
3334          u.block_difference.WritePostInstallVerifyScript(script)
3335          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors
3336
3337    for p, u in self._partition_updates.items():
3338      if u.tgt_size and u.src_size <= u.tgt_size:
3339        script.Comment('Patch partition %s' % p)
3340        u.block_difference.WriteScript(script, output_zip, progress=u.progress,
3341                                       write_verify_script=write_verify_script)
3342        if write_verify_script:
3343          script.AppendExtra('unmap_partition("%s");' % p)  # ignore errors
3344
3345    script.Comment('--- End patching dynamic partitions ---')
3346
3347  def _Compute(self):
3348    self._op_list = list()
3349
3350    def append(line):
3351      self._op_list.append(line)
3352
3353    def comment(line):
3354      self._op_list.append("# %s" % line)
3355
3356    if self._remove_all_before_apply:
3357      comment('Remove all existing dynamic partitions and groups before '
3358              'applying full OTA')
3359      append('remove_all_groups')
3360
3361    for p, u in self._partition_updates.items():
3362      if u.src_group and not u.tgt_group:
3363        append('remove %s' % p)
3364
3365    for p, u in self._partition_updates.items():
3366      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
3367        comment('Move partition %s from %s to default' % (p, u.src_group))
3368        append('move %s default' % p)
3369
3370    for p, u in self._partition_updates.items():
3371      if u.src_size and u.tgt_size and u.src_size > u.tgt_size:
3372        comment('Shrink partition %s from %d to %d' %
3373                (p, u.src_size, u.tgt_size))
3374        append('resize %s %s' % (p, u.tgt_size))
3375
3376    for g, u in self._group_updates.items():
3377      if u.src_size is not None and u.tgt_size is None:
3378        append('remove_group %s' % g)
3379      if (u.src_size is not None and u.tgt_size is not None and
3380          u.src_size > u.tgt_size):
3381        comment('Shrink group %s from %d to %d' % (g, u.src_size, u.tgt_size))
3382        append('resize_group %s %d' % (g, u.tgt_size))
3383
3384    for g, u in self._group_updates.items():
3385      if u.src_size is None and u.tgt_size is not None:
3386        comment('Add group %s with maximum size %d' % (g, u.tgt_size))
3387        append('add_group %s %d' % (g, u.tgt_size))
3388      if (u.src_size is not None and u.tgt_size is not None and
3389          u.src_size < u.tgt_size):
3390        comment('Grow group %s from %d to %d' % (g, u.src_size, u.tgt_size))
3391        append('resize_group %s %d' % (g, u.tgt_size))
3392
3393    for p, u in self._partition_updates.items():
3394      if u.tgt_group and not u.src_group:
3395        comment('Add partition %s to group %s' % (p, u.tgt_group))
3396        append('add %s %s' % (p, u.tgt_group))
3397
3398    for p, u in self._partition_updates.items():
3399      if u.tgt_size and u.src_size < u.tgt_size:
3400        comment('Grow partition %s from %d to %d' %
3401                (p, u.src_size, u.tgt_size))
3402        append('resize %s %d' % (p, u.tgt_size))
3403
3404    for p, u in self._partition_updates.items():
3405      if u.src_group and u.tgt_group and u.src_group != u.tgt_group:
3406        comment('Move partition %s from default to %s' %
3407                (p, u.tgt_group))
3408        append('move %s %s' % (p, u.tgt_group))
3409