1#!/usr/bin/env python
2#
3# Copyright (C) 2008 The Android Open Source Project
4#
5# Licensed under the Apache License, Version 2.0 (the "License");
6# you may not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9#      http://www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an "AS IS" BASIS,
13# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
16
17"""
18Given a target-files zipfile, produces an OTA package that installs that build.
19An incremental OTA is produced if -i is given, otherwise a full OTA is produced.
20
21Usage:  ota_from_target_files [options] input_target_files output_ota_package
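
For example (illustrative file names; the second command generates an
incremental OTA from the given source build):

  ota_from_target_files target-files.zip full-ota.zip
  ota_from_target_files -i source-target-files.zip target-files.zip \
      incremental-ota.zip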
22
Common options that apply to both non-A/B and A/B OTAs
24
25  --downgrade
26      Intentionally generate an incremental OTA that updates from a newer build
27      to an older one (e.g. downgrading from P preview back to O MR1).
28      "ota-downgrade=yes" will be set in the package metadata file. A data wipe
29      will always be enforced when using this flag, so "ota-wipe=yes" will also
30      be included in the metadata file. The update-binary in the source build
      will be used in the OTA package, unless the --binary flag is specified.
      Please also check the comment for --override_timestamp below.
33
34  -i  (--incremental_from) <file>
35      Generate an incremental OTA using the given target-files zip as the
36      starting build.
37
38  -k  (--package_key) <key>
39      Key to use to sign the package (default is the value of
40      default_system_dev_certificate from the input target-files's
41      META/misc_info.txt, or "build/make/target/product/security/testkey" if
42      that value is not specified).
43
44      For incremental OTAs, the default value is based on the source
      target-files, not the target build.
46
47  --override_timestamp
48      Intentionally generate an incremental OTA that updates from a newer build
49      to an older one (based on timestamp comparison), by setting the downgrade
      flag in the package metadata. This differs from the --downgrade flag, as
      we don't enforce a data wipe with this flag, because we know for sure
      this is NOT an actual downgrade case, but rather two builds that happen
      to be cut in reverse order (e.g. from two branches). A legit use case is
      that we cut a new build C (after having A and B), but want to enforce an
      update path of A -> C -> B. Specifying --downgrade may not help, since
      that would enforce a data wipe for the C -> B update.
57
      We used to set a fake timestamp in the package metadata for this flow,
      but now we consolidate the two cases (i.e. an actual downgrade, or a
      downgrade based on timestamp) under the same "ota-downgrade=yes" flag,
      with the difference being whether "ota-wipe=yes" is set.
62
63  --wipe_user_data
64      Generate an OTA package that will wipe the user data partition when
65      installed.
66
67  --retrofit_dynamic_partitions
68      Generates an OTA package that updates a device to support dynamic
69      partitions (default False). This flag is implied when generating
70      an incremental OTA where the base build does not support dynamic
71      partitions but the target build does. For A/B, when this flag is set,
72      --skip_postinstall is implied.
73
74  --skip_compatibility_check
75      Skip checking compatibility of the input target files package.
76
77  --output_metadata_path
      Write a copy of the metadata to a separate file, so that users can read
      the post-build fingerprint without extracting the OTA package.
80
81  --force_non_ab
      This flag can only be set on an A/B device that also supports non-A/B
      updates. Implies --two_step.
      If set, generates a non-A/B update package.
      If not set, generates an A/B package for an A/B device and a non-A/B
      package for a non-A/B device.
87
88Non-A/B OTA specific options
89
90  -b  (--binary) <file>
91      Use the given binary as the update-binary in the output package, instead
92      of the binary in the build's target_files. Use for development only.
93
94  --block
      Generate a block-based OTA for a non-A/B device. Support for file-based
      OTAs has been deprecated since O. Block-based OTAs are used by default
      for all non-A/B devices. This flag is kept only to avoid breaking
      existing callers.
99
100  -e  (--extra_script) <file>
101      Insert the contents of file at the end of the update script.
102
103  --full_bootloader
104      Similar to --full_radio. When generating an incremental OTA, always
      include a full copy of the bootloader image.
106
107  --full_radio
      When generating an incremental OTA, always include a full copy of the
      radio image. This option is only meaningful when -i is specified, because
      a full radio is always included in a full OTA if applicable.
111
112  --log_diff <file>
113      Generate a log file that shows the differences in the source and target
114      builds for an incremental package. This option is only meaningful when -i
115      is specified.
116
117  -o  (--oem_settings) <main_file[,additional_files...]>
      Comma-separated list of files used to specify the expected OEM-specific
      properties on the OEM partition of the intended device. Multiple expected
      values can be used by providing multiple files. Only the first dict will
      be used to compute the fingerprint, while the rest will be used to assert
      OEM-specific properties.
123
124  --oem_no_mount
125      For devices with OEM-specific properties but without an OEM partition, do
126      not mount the OEM partition in the updater-script. This should be very
127      rarely used, since it's expected to have a dedicated OEM partition for
128      OEM-specific properties. Only meaningful when -o is specified.
129
130  --stash_threshold <float>
131      Specify the threshold that will be used to compute the maximum allowed
132      stash size (defaults to 0.8).
133
134  -t  (--worker_threads) <int>
135      Specify the number of worker-threads that will be used when generating
136      patches for incremental updates (defaults to 3).
137
138  --verify
139      Verify the checksums of the updated system and vendor (if any) partitions.
140      Non-A/B incremental OTAs only.
141
142  -2  (--two_step)
143      Generate a 'two-step' OTA package, where recovery is updated first, so
144      that any changes made to the system partition are done using the new
145      recovery (new kernel, etc.).
146
147A/B OTA specific options
148
149  --disable_fec_computation
      Disable the on-device FEC data computation for incremental updates.
151
152  --include_secondary
153      Additionally include the payload for secondary slot images (default:
154      False). Only meaningful when generating A/B OTAs.
155
156      By default, an A/B OTA package doesn't contain the images for the
157      secondary slot (e.g. system_other.img). Specifying this flag allows
158      generating a separate payload that will install secondary slot images.
159
160      Such a package needs to be applied in a two-stage manner, with a reboot
161      in-between. During the first stage, the updater applies the primary
162      payload only. Upon finishing, it reboots the device into the newly updated
163      slot. It then continues to install the secondary payload to the inactive
164      slot, but without switching the active slot at the end (needs the matching
165      support in update_engine, i.e. SWITCH_SLOT_ON_REBOOT flag).
166
      Due to the special install procedure, the secondary payload will always
      be generated as a full payload.
169
170  --payload_signer <signer>
171      Specify the signer when signing the payload and metadata for A/B OTAs.
172      By default (i.e. without this flag), it calls 'openssl pkeyutl' to sign
173      with the package private key. If the private key cannot be accessed
174      directly, a payload signer that knows how to do that should be specified.
175      The signer will be supplied with "-inkey <path_to_key>",
176      "-in <input_file>" and "-out <output_file>" parameters.
177
178  --payload_signer_args <args>
179      Specify the arguments needed for payload signer.
180
181  --payload_signer_maximum_signature_size <signature_size>
182      The maximum signature size (in bytes) that would be generated by the given
      payload signer. Only meaningful when a custom payload signer is specified
      via '--payload_signer'.
      If the signer uses an RSA key, this should be the number of bytes to
      represent the modulus. If it uses an EC key, this is the size of a
187      DER-encoded ECDSA signature.
188
189  --payload_signer_key_size <key_size>
      Deprecated. Use '--payload_signer_maximum_signature_size' instead.
191
192  --boot_variable_file <path>
193      A file that contains the possible values of ro.boot.* properties. It's
194      used to calculate the possible runtime fingerprints when some
195      ro.product.* properties are overridden by the 'import' statement.
196      The file expects one property per line, and each line has the following
197      format: 'prop_name=value1,value2'. e.g. 'ro.boot.product.sku=std,pro'
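
      For example, a file with the following lines (hypothetical property
      names and values) lists the possible values of two ro.boot.* properties:

        ro.boot.product.sku=std,pro
        ro.boot.product.device_name=device1,device2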
198
199  --skip_postinstall
200      Skip the postinstall hooks when generating an A/B OTA package (default:
201      False). Note that this discards ALL the hooks, including non-optional
202      ones. Should only be used if caller knows it's safe to do so (e.g. all the
203      postinstall work is to dexopt apps and a data wipe will happen immediately
204      after). Only meaningful when generating A/B OTAs.
205"""
206
207from __future__ import print_function
208
209import collections
210import copy
211import itertools
212import logging
213import multiprocessing
214import os.path
215import shlex
216import shutil
217import struct
218import sys
219import zipfile
220
221import check_target_files_vintf
222import common
223import edify_generator
224import target_files_diff
225import verity_utils
226
227
228if sys.hexversion < 0x02070000:
229  print("Python 2.7 or newer is required.", file=sys.stderr)
230  sys.exit(1)
231
232logger = logging.getLogger(__name__)
233
234OPTIONS = common.OPTIONS
235OPTIONS.package_key = None
236OPTIONS.incremental_source = None
237OPTIONS.verify = False
238OPTIONS.patch_threshold = 0.95
239OPTIONS.wipe_user_data = False
240OPTIONS.downgrade = False
241OPTIONS.extra_script = None
242OPTIONS.worker_threads = multiprocessing.cpu_count() // 2
243if OPTIONS.worker_threads == 0:
244  OPTIONS.worker_threads = 1
245OPTIONS.two_step = False
246OPTIONS.include_secondary = False
247OPTIONS.no_signing = False
248OPTIONS.block_based = True
249OPTIONS.updater_binary = None
250OPTIONS.oem_dicts = None
251OPTIONS.oem_source = None
252OPTIONS.oem_no_mount = False
253OPTIONS.full_radio = False
254OPTIONS.full_bootloader = False
255# Stash size cannot exceed cache_size * threshold.
256OPTIONS.cache_size = None
257OPTIONS.stash_threshold = 0.8
258OPTIONS.log_diff = None
259OPTIONS.payload_signer = None
260OPTIONS.payload_signer_args = []
261OPTIONS.payload_signer_maximum_signature_size = None
262OPTIONS.extracted_input = None
263OPTIONS.key_passwords = []
264OPTIONS.skip_postinstall = False
265OPTIONS.retrofit_dynamic_partitions = False
266OPTIONS.skip_compatibility_check = False
267OPTIONS.output_metadata_path = None
268OPTIONS.disable_fec_computation = False
269OPTIONS.force_non_ab = False
270OPTIONS.boot_variable_file = None
271
272
273METADATA_NAME = 'META-INF/com/android/metadata'
274POSTINSTALL_CONFIG = 'META/postinstall_config.txt'
275DYNAMIC_PARTITION_INFO = 'META/dynamic_partitions_info.txt'
276AB_PARTITIONS = 'META/ab_partitions.txt'
277UNZIP_PATTERN = ['IMAGES/*', 'META/*', 'OTA/*', 'RADIO/*']
# Files to be unzipped for target diffing purposes.
279TARGET_DIFFING_UNZIP_PATTERN = ['BOOT', 'RECOVERY', 'SYSTEM/*', 'VENDOR/*',
280                                'PRODUCT/*', 'SYSTEM_EXT/*', 'ODM/*',
281                                'VENDOR_DLKM/*', 'ODM_DLKM/*']
282RETROFIT_DAP_UNZIP_PATTERN = ['OTA/super_*.img', AB_PARTITIONS]
283
284# Images to be excluded from secondary payload. We essentially only keep
285# 'system_other' and bootloader partitions.
286SECONDARY_PAYLOAD_SKIPPED_IMAGES = [
287    'boot', 'dtbo', 'modem', 'odm', 'odm_dlkm', 'product', 'radio', 'recovery',
288    'system_ext', 'vbmeta', 'vbmeta_system', 'vbmeta_vendor', 'vendor',
289    'vendor_boot']
290
291
292class PayloadSigner(object):
  """A class that wraps the payload signing work.
294
295  When generating a Payload, hashes of the payload and metadata files will be
296  signed with the device key, either by calling an external payload signer or
297  by calling openssl with the package key. This class provides a unified
298  interface, so that callers can just call PayloadSigner.Sign().
299
300  If an external payload signer has been specified (OPTIONS.payload_signer), it
301  calls the signer with the provided args (OPTIONS.payload_signer_args). Note
302  that the signing key should be provided as part of the payload_signer_args.
  Otherwise, without an external signer, it uses the package key
  (OPTIONS.package_key) and calls openssl for the signing work.
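
  Example (an illustrative sketch; the external signer path, its args and the
  input file name are hypothetical). PayloadSigner.Sign() invokes the signer
  with '-in <input_file>' and '-out <output_file>' appended to these args:

    OPTIONS.payload_signer = '/path/to/custom_signer'
    OPTIONS.payload_signer_args = ['--keyname', 'payload_key']
    OPTIONS.payload_signer_maximum_signature_size = '256'
    signer = PayloadSigner()
    signed_file = signer.Sign(unsigned_hash_file)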
305  """
306
307  def __init__(self):
308    if OPTIONS.payload_signer is None:
309      # Prepare the payload signing key.
310      private_key = OPTIONS.package_key + OPTIONS.private_key_suffix
311      pw = OPTIONS.key_passwords[OPTIONS.package_key]
312
313      cmd = ["openssl", "pkcs8", "-in", private_key, "-inform", "DER"]
314      cmd.extend(["-passin", "pass:" + pw] if pw else ["-nocrypt"])
315      signing_key = common.MakeTempFile(prefix="key-", suffix=".key")
316      cmd.extend(["-out", signing_key])
317      common.RunAndCheckOutput(cmd, verbose=False)
318
319      self.signer = "openssl"
320      self.signer_args = ["pkeyutl", "-sign", "-inkey", signing_key,
321                          "-pkeyopt", "digest:sha256"]
322      self.maximum_signature_size = self._GetMaximumSignatureSizeInBytes(
323          signing_key)
324    else:
325      self.signer = OPTIONS.payload_signer
326      self.signer_args = OPTIONS.payload_signer_args
327      if OPTIONS.payload_signer_maximum_signature_size:
328        self.maximum_signature_size = int(
329            OPTIONS.payload_signer_maximum_signature_size)
330      else:
331        # The legacy config uses RSA2048 keys.
        logger.warning("The maximum signature size for the payload signer is"
                       " not set; defaulting to 256 bytes.")
334        self.maximum_signature_size = 256
335
336  @staticmethod
337  def _GetMaximumSignatureSizeInBytes(signing_key):
338    out_signature_size_file = common.MakeTempFile("signature_size")
339    cmd = ["delta_generator", "--out_maximum_signature_size_file={}".format(
340        out_signature_size_file), "--private_key={}".format(signing_key)]
341    common.RunAndCheckOutput(cmd)
342    with open(out_signature_size_file) as f:
343      signature_size = f.read().rstrip()
344    logger.info("%s outputs the maximum signature size: %s", cmd[0],
345                signature_size)
346    return int(signature_size)
347
348  def Sign(self, in_file):
349    """Signs the given input file. Returns the output filename."""
350    out_file = common.MakeTempFile(prefix="signed-", suffix=".bin")
351    cmd = [self.signer] + self.signer_args + ['-in', in_file, '-out', out_file]
352    common.RunAndCheckOutput(cmd)
353    return out_file
354
355
356class Payload(object):
357  """Manages the creation and the signing of an A/B OTA Payload."""
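
  # Typical flow (an illustrative sketch; the file names and the output_zip
  # handle below are placeholders):
  #
  #   payload = Payload()
  #   payload.Generate('target.zip')   # pass source_file= for an incremental
  #   payload.Sign(PayloadSigner())
  #   payload.WriteToZip(output_zip)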
358
359  PAYLOAD_BIN = 'payload.bin'
360  PAYLOAD_PROPERTIES_TXT = 'payload_properties.txt'
361  SECONDARY_PAYLOAD_BIN = 'secondary/payload.bin'
362  SECONDARY_PAYLOAD_PROPERTIES_TXT = 'secondary/payload_properties.txt'
363
364  def __init__(self, secondary=False):
365    """Initializes a Payload instance.
366
367    Args:
368      secondary: Whether it's generating a secondary payload (default: False).
369    """
370    self.payload_file = None
371    self.payload_properties = None
372    self.secondary = secondary
373
374  def _Run(self, cmd):  # pylint: disable=no-self-use
375    # Don't pipe (buffer) the output if verbose is set. Let
376    # brillo_update_payload write to stdout/stderr directly, so its progress can
377    # be monitored.
378    if OPTIONS.verbose:
379      common.RunAndCheckOutput(cmd, stdout=None, stderr=None)
380    else:
381      common.RunAndCheckOutput(cmd)
382
383  def Generate(self, target_file, source_file=None, additional_args=None):
384    """Generates a payload from the given target-files zip(s).
385
386    Args:
387      target_file: The filename of the target build target-files zip.
388      source_file: The filename of the source build target-files zip; or None if
389          generating a full OTA.
390      additional_args: A list of additional args that should be passed to
391          brillo_update_payload script; or None.
392    """
393    if additional_args is None:
394      additional_args = []
395
396    payload_file = common.MakeTempFile(prefix="payload-", suffix=".bin")
397    cmd = ["brillo_update_payload", "generate",
398           "--payload", payload_file,
399           "--target_image", target_file]
400    if source_file is not None:
401      cmd.extend(["--source_image", source_file])
402      if OPTIONS.disable_fec_computation:
403        cmd.extend(["--disable_fec_computation", "true"])
404    cmd.extend(additional_args)
405    self._Run(cmd)
406
407    self.payload_file = payload_file
408    self.payload_properties = None
409
410  def Sign(self, payload_signer):
411    """Generates and signs the hashes of the payload and metadata.
412
413    Args:
414      payload_signer: A PayloadSigner() instance that serves the signing work.
415
416    Raises:
417      AssertionError: On any failure when calling brillo_update_payload script.
418    """
419    assert isinstance(payload_signer, PayloadSigner)
420
421    # 1. Generate hashes of the payload and metadata files.
422    payload_sig_file = common.MakeTempFile(prefix="sig-", suffix=".bin")
423    metadata_sig_file = common.MakeTempFile(prefix="sig-", suffix=".bin")
424    cmd = ["brillo_update_payload", "hash",
425           "--unsigned_payload", self.payload_file,
426           "--signature_size", str(payload_signer.maximum_signature_size),
427           "--metadata_hash_file", metadata_sig_file,
428           "--payload_hash_file", payload_sig_file]
429    self._Run(cmd)
430
431    # 2. Sign the hashes.
432    signed_payload_sig_file = payload_signer.Sign(payload_sig_file)
433    signed_metadata_sig_file = payload_signer.Sign(metadata_sig_file)
434
435    # 3. Insert the signatures back into the payload file.
436    signed_payload_file = common.MakeTempFile(prefix="signed-payload-",
437                                              suffix=".bin")
438    cmd = ["brillo_update_payload", "sign",
439           "--unsigned_payload", self.payload_file,
440           "--payload", signed_payload_file,
441           "--signature_size", str(payload_signer.maximum_signature_size),
442           "--metadata_signature_file", signed_metadata_sig_file,
443           "--payload_signature_file", signed_payload_sig_file]
444    self._Run(cmd)
445
446    # 4. Dump the signed payload properties.
447    properties_file = common.MakeTempFile(prefix="payload-properties-",
448                                          suffix=".txt")
449    cmd = ["brillo_update_payload", "properties",
450           "--payload", signed_payload_file,
451           "--properties_file", properties_file]
452    self._Run(cmd)
453
454    if self.secondary:
455      with open(properties_file, "a") as f:
456        f.write("SWITCH_SLOT_ON_REBOOT=0\n")
457
458    if OPTIONS.wipe_user_data:
459      with open(properties_file, "a") as f:
460        f.write("POWERWASH=1\n")
461
462    self.payload_file = signed_payload_file
463    self.payload_properties = properties_file
464
465  def WriteToZip(self, output_zip):
466    """Writes the payload to the given zip.
467
468    Args:
469      output_zip: The output ZipFile instance.
470    """
471    assert self.payload_file is not None
472    assert self.payload_properties is not None
473
474    if self.secondary:
475      payload_arcname = Payload.SECONDARY_PAYLOAD_BIN
476      payload_properties_arcname = Payload.SECONDARY_PAYLOAD_PROPERTIES_TXT
477    else:
478      payload_arcname = Payload.PAYLOAD_BIN
479      payload_properties_arcname = Payload.PAYLOAD_PROPERTIES_TXT
480
    # Add the signed payload file and properties into the zip. In order to
    # support streaming, we pack them as ZIP_STORED, so that these entries can
    # be read directly with their offset and length pairs.
484    common.ZipWrite(output_zip, self.payload_file, arcname=payload_arcname,
485                    compress_type=zipfile.ZIP_STORED)
486    common.ZipWrite(output_zip, self.payload_properties,
487                    arcname=payload_properties_arcname,
488                    compress_type=zipfile.ZIP_STORED)
489
490
491def SignOutput(temp_zip_name, output_zip_name):
492  pw = OPTIONS.key_passwords[OPTIONS.package_key]
493
494  common.SignFile(temp_zip_name, output_zip_name, OPTIONS.package_key, pw,
495                  whole_file=True)
496
497
498def _LoadOemDicts(oem_source):
  """Returns the list of loaded OEM property dicts."""
500  if not oem_source:
501    return None
502
503  oem_dicts = []
504  for oem_file in oem_source:
505    with open(oem_file) as fp:
506      oem_dicts.append(common.LoadDictionaryFromLines(fp.readlines()))
507  return oem_dicts
508
509
510def _WriteRecoveryImageToBoot(script, output_zip):
  """Finds and writes the recovery image to /boot in a two-step OTA.

  In two-step OTAs, we write the recovery image to /boot as the first step, so
  that we can reboot into it and install a new recovery image to /recovery.
  A special "recovery-two-step.img" will be preferred, which encodes the
  correct path of "/boot". Otherwise the device may show a "device is corrupt"
  message when booting into /boot.
518
519  Fall back to using the regular recovery.img if the two-step recovery image
520  doesn't exist. Note that rebuilding the special image at this point may be
521  infeasible, because we don't have the desired boot signer and keys when
522  calling ota_from_target_files.py.
523  """
524
525  recovery_two_step_img_name = "recovery-two-step.img"
526  recovery_two_step_img_path = os.path.join(
527      OPTIONS.input_tmp, "OTA", recovery_two_step_img_name)
528  if os.path.exists(recovery_two_step_img_path):
529    common.ZipWrite(
530        output_zip,
531        recovery_two_step_img_path,
532        arcname=recovery_two_step_img_name)
533    logger.info(
534        "two-step package: using %s in stage 1/3", recovery_two_step_img_name)
535    script.WriteRawImage("/boot", recovery_two_step_img_name)
536  else:
537    logger.info("two-step package: using recovery.img in stage 1/3")
538    # The "recovery.img" entry has been written into package earlier.
539    script.WriteRawImage("/boot", "recovery.img")
540
541
542def HasRecoveryPatch(target_files_zip, info_dict):
543  board_uses_vendorimage = info_dict.get("board_uses_vendorimage") == "true"
544
545  if board_uses_vendorimage:
546    target_files_dir = "VENDOR"
547  else:
548    target_files_dir = "SYSTEM/vendor"
549
550  patch = "%s/recovery-from-boot.p" % target_files_dir
551  img = "%s/etc/recovery.img" % target_files_dir
552
553  namelist = target_files_zip.namelist()
554  return patch in namelist or img in namelist
555
556
557def HasPartition(target_files_zip, partition):
558  try:
559    target_files_zip.getinfo(partition.upper() + "/")
560    return True
561  except KeyError:
562    return False
563
564
565def HasTrebleEnabled(target_files, target_info):
566  def HasVendorPartition(target_files):
567    if os.path.isdir(target_files):
568      return os.path.isdir(os.path.join(target_files, "VENDOR"))
569    if zipfile.is_zipfile(target_files):
570      return HasPartition(zipfile.ZipFile(target_files), "vendor")
571    raise ValueError("Unknown target_files argument")
572
573  return (HasVendorPartition(target_files) and
574          target_info.GetBuildProp("ro.treble.enabled") == "true")
575
576
577def WriteFingerprintAssertion(script, target_info, source_info):
578  source_oem_props = source_info.oem_props
579  target_oem_props = target_info.oem_props
580
581  if source_oem_props is None and target_oem_props is None:
582    script.AssertSomeFingerprint(
583        source_info.fingerprint, target_info.fingerprint)
584  elif source_oem_props is not None and target_oem_props is not None:
585    script.AssertSomeThumbprint(
586        target_info.GetBuildProp("ro.build.thumbprint"),
587        source_info.GetBuildProp("ro.build.thumbprint"))
588  elif source_oem_props is None and target_oem_props is not None:
589    script.AssertFingerprintOrThumbprint(
590        source_info.fingerprint,
591        target_info.GetBuildProp("ro.build.thumbprint"))
592  else:
593    script.AssertFingerprintOrThumbprint(
594        target_info.fingerprint,
595        source_info.GetBuildProp("ro.build.thumbprint"))
596
597
598def CheckVintfIfTrebleEnabled(target_files, target_info):
599  """Checks compatibility info of the input target files.
600
  Metadata used for compatibility verification is retrieved from target_files.
602
603  Compatibility should only be checked for devices that have enabled
604  Treble support.
605
606  Args:
607    target_files: Path to zip file containing the source files to be included
608        for OTA. Can also be the path to extracted directory.
609    target_info: The BuildInfo instance that holds the target build info.
610  """
611
612  # Will only proceed if the target has enabled the Treble support (as well as
613  # having a /vendor partition).
614  if not HasTrebleEnabled(target_files, target_info):
615    return
616
  # Skip the compatibility check as a workaround for b/114240221. The
  # compatibility check will always fail on devices without qualified kernels.
619  if OPTIONS.skip_compatibility_check:
620    return
621
622  if not check_target_files_vintf.CheckVintf(target_files, target_info):
623    raise RuntimeError("VINTF compatibility check failed")
624
625
626def GetBlockDifferences(target_zip, source_zip, target_info, source_info,
627                        device_specific):
  """Returns an ordered dict of block differences with partition name as key."""
629
630  def GetIncrementalBlockDifferenceForPartition(name):
631    if not HasPartition(source_zip, name):
632      raise RuntimeError(
633          "can't generate incremental that adds {}".format(name))
634
635    partition_src = common.GetUserImage(name, OPTIONS.source_tmp, source_zip,
636                                        info_dict=source_info,
637                                        allow_shared_blocks=allow_shared_blocks)
638
639    hashtree_info_generator = verity_utils.CreateHashtreeInfoGenerator(
640        name, 4096, target_info)
641    partition_tgt = common.GetUserImage(name, OPTIONS.target_tmp, target_zip,
642                                        info_dict=target_info,
643                                        allow_shared_blocks=allow_shared_blocks,
644                                        hashtree_info_generator=hashtree_info_generator)
645
    # Check the first block of the source partition for remount R/W only if
    # the filesystem is ext4.
648    partition_source_info = source_info["fstab"]["/" + name]
649    check_first_block = partition_source_info.fs_type == "ext4"
    # Disable using imgdiff for squashfs. 'imgdiff -z' expects input files to
    # be in zip format. However with squashfs, a) all files are compressed with
    # LZ4; b) the blocks listed in the block map may not contain all the bytes
    # for a given file (because they're rounded to be 4K-aligned).
654    partition_target_info = target_info["fstab"]["/" + name]
655    disable_imgdiff = (partition_source_info.fs_type == "squashfs" or
656                       partition_target_info.fs_type == "squashfs")
657    return common.BlockDifference(name, partition_tgt, partition_src,
658                                  check_first_block,
659                                  version=blockimgdiff_version,
660                                  disable_imgdiff=disable_imgdiff)
661
662  if source_zip:
663    # See notes in common.GetUserImage()
664    allow_shared_blocks = (source_info.get('ext4_share_dup_blocks') == "true" or
665                           target_info.get('ext4_share_dup_blocks') == "true")
666    blockimgdiff_version = max(
667        int(i) for i in target_info.get(
668            "blockimgdiff_versions", "1").split(","))
669    assert blockimgdiff_version >= 3
670
671  block_diff_dict = collections.OrderedDict()
672  partition_names = ["system", "vendor", "product", "odm", "system_ext",
673                     "vendor_dlkm", "odm_dlkm"]
674  for partition in partition_names:
675    if not HasPartition(target_zip, partition):
676      continue
677    # Full OTA update.
678    if not source_zip:
679      tgt = common.GetUserImage(partition, OPTIONS.input_tmp, target_zip,
680                                info_dict=target_info,
681                                reset_file_map=True)
682      block_diff_dict[partition] = common.BlockDifference(partition, tgt,
683                                                          src=None)
684    # Incremental OTA update.
685    else:
686      block_diff_dict[partition] = GetIncrementalBlockDifferenceForPartition(
687          partition)
688  assert "system" in block_diff_dict
689
690  # Get the block diffs from the device specific script. If there is a
691  # duplicate block diff for a partition, ignore the diff in the generic script
692  # and use the one in the device specific script instead.
693  if source_zip:
694    device_specific_diffs = device_specific.IncrementalOTA_GetBlockDifferences()
695    function_name = "IncrementalOTA_GetBlockDifferences"
696  else:
697    device_specific_diffs = device_specific.FullOTA_GetBlockDifferences()
698    function_name = "FullOTA_GetBlockDifferences"
699
700  if device_specific_diffs:
701    assert all(isinstance(diff, common.BlockDifference)
702               for diff in device_specific_diffs), \
703        "{} is not returning a list of BlockDifference objects".format(
704            function_name)
705    for diff in device_specific_diffs:
706      if diff.partition in block_diff_dict:
707        logger.warning("Duplicate block difference found. Device specific block"
708                       " diff for partition '%s' overrides the one in generic"
709                       " script.", diff.partition)
710      block_diff_dict[diff.partition] = diff
711
712  return block_diff_dict
713
714
715def WriteFullOTAPackage(input_zip, output_file):
716  target_info = common.BuildInfo(OPTIONS.info_dict, OPTIONS.oem_dicts)
717
718  # We don't know what version it will be installed on top of. We expect the API
719  # just won't change very often. Similarly for fstab, it might have changed in
720  # the target build.
721  target_api_version = target_info["recovery_api_version"]
722  script = edify_generator.EdifyGenerator(target_api_version, target_info)
723
724  if target_info.oem_props and not OPTIONS.oem_no_mount:
725    target_info.WriteMountOemScript(script)
726
727  metadata = GetPackageMetadata(target_info)
728
729  if not OPTIONS.no_signing:
730    staging_file = common.MakeTempFile(suffix='.zip')
731  else:
732    staging_file = output_file
733
734  output_zip = zipfile.ZipFile(
735      staging_file, "w", compression=zipfile.ZIP_DEFLATED)
736
737  device_specific = common.DeviceSpecificParams(
738      input_zip=input_zip,
739      input_version=target_api_version,
740      output_zip=output_zip,
741      script=script,
742      input_tmp=OPTIONS.input_tmp,
743      metadata=metadata,
744      info_dict=OPTIONS.info_dict)
745
746  assert HasRecoveryPatch(input_zip, info_dict=OPTIONS.info_dict)
747
748  # Assertions (e.g. downgrade check, device properties check).
749  ts = target_info.GetBuildProp("ro.build.date.utc")
750  ts_text = target_info.GetBuildProp("ro.build.date")
751  script.AssertOlderBuild(ts, ts_text)
752
753  target_info.WriteDeviceAssertions(script, OPTIONS.oem_no_mount)
754  device_specific.FullOTA_Assertions()
755
756  block_diff_dict = GetBlockDifferences(target_zip=input_zip, source_zip=None,
757                                        target_info=target_info,
758                                        source_info=None,
759                                        device_specific=device_specific)
760
761  # Two-step package strategy (in chronological order, which is *not*
762  # the order in which the generated script has things):
763  #
764  # if stage is not "2/3" or "3/3":
765  #    write recovery image to boot partition
766  #    set stage to "2/3"
767  #    reboot to boot partition and restart recovery
768  # else if stage is "2/3":
769  #    write recovery image to recovery partition
770  #    set stage to "3/3"
771  #    reboot to recovery partition and restart recovery
772  # else:
773  #    (stage must be "3/3")
774  #    set stage to ""
775  #    do normal full package installation:
776  #       wipe and install system, boot image, etc.
777  #       set up system to update recovery partition on first boot
778  #    complete script normally
779  #    (allow recovery to mark itself finished and reboot)
780
781  recovery_img = common.GetBootableImage("recovery.img", "recovery.img",
782                                         OPTIONS.input_tmp, "RECOVERY")
783  if OPTIONS.two_step:
    assert target_info.get("multistage_support"), \
        "two-step packages not supported by this build"
786    fs = target_info["fstab"]["/misc"]
787    assert fs.fs_type.upper() == "EMMC", \
788        "two-step packages only supported on devices with EMMC /misc partitions"
789    bcb_dev = {"bcb_dev": fs.device}
790    common.ZipWriteStr(output_zip, "recovery.img", recovery_img.data)
791    script.AppendExtra("""
792if get_stage("%(bcb_dev)s") == "2/3" then
793""" % bcb_dev)
794
795    # Stage 2/3: Write recovery image to /recovery (currently running /boot).
796    script.Comment("Stage 2/3")
797    script.WriteRawImage("/recovery", "recovery.img")
798    script.AppendExtra("""
799set_stage("%(bcb_dev)s", "3/3");
800reboot_now("%(bcb_dev)s", "recovery");
801else if get_stage("%(bcb_dev)s") == "3/3" then
802""" % bcb_dev)
803
804    # Stage 3/3: Make changes.
805    script.Comment("Stage 3/3")
806
807  # Dump fingerprints
808  script.Print("Target: {}".format(target_info.fingerprint))
809
810  device_specific.FullOTA_InstallBegin()
811
812  # All other partitions as well as the data wipe use 10% of the progress, and
813  # the update of the system partition takes the remaining progress.
814  system_progress = 0.9 - (len(block_diff_dict) - 1) * 0.1
815  if OPTIONS.wipe_user_data:
816    system_progress -= 0.1
817  progress_dict = {partition: 0.1 for partition in block_diff_dict}
818  progress_dict["system"] = system_progress
819
820  if target_info.get('use_dynamic_partitions') == "true":
821    # Use empty source_info_dict to indicate that all partitions / groups must
822    # be re-added.
823    dynamic_partitions_diff = common.DynamicPartitionsDifference(
824        info_dict=OPTIONS.info_dict,
825        block_diffs=block_diff_dict.values(),
826        progress_dict=progress_dict)
827    dynamic_partitions_diff.WriteScript(script, output_zip,
828                                        write_verify_script=OPTIONS.verify)
829  else:
830    for block_diff in block_diff_dict.values():
831      block_diff.WriteScript(script, output_zip,
832                             progress=progress_dict.get(block_diff.partition),
833                             write_verify_script=OPTIONS.verify)
834
835  CheckVintfIfTrebleEnabled(OPTIONS.input_tmp, target_info)
836
837  boot_img = common.GetBootableImage(
838      "boot.img", "boot.img", OPTIONS.input_tmp, "BOOT")
839  common.CheckSize(boot_img.data, "boot.img", target_info)
840  common.ZipWriteStr(output_zip, "boot.img", boot_img.data)
841
842  script.WriteRawImage("/boot", "boot.img")
843
844  script.ShowProgress(0.1, 10)
845  device_specific.FullOTA_InstallEnd()
846
847  if OPTIONS.extra_script is not None:
848    script.AppendExtra(OPTIONS.extra_script)
849
850  script.UnmountAll()
851
852  if OPTIONS.wipe_user_data:
853    script.ShowProgress(0.1, 10)
854    script.FormatPartition("/data")
855
856  if OPTIONS.two_step:
857    script.AppendExtra("""
858set_stage("%(bcb_dev)s", "");
859""" % bcb_dev)
860    script.AppendExtra("else\n")
861
862    # Stage 1/3: Nothing to verify for full OTA. Write recovery image to /boot.
863    script.Comment("Stage 1/3")
864    _WriteRecoveryImageToBoot(script, output_zip)
865
866    script.AppendExtra("""
867set_stage("%(bcb_dev)s", "2/3");
868reboot_now("%(bcb_dev)s", "");
869endif;
870endif;
871""" % bcb_dev)
872
873  script.SetProgress(1)
874  script.AddToZip(input_zip, output_zip, input_path=OPTIONS.updater_binary)
875  metadata["ota-required-cache"] = str(script.required_cache)
876
877  # We haven't written the metadata entry, which will be done in
878  # FinalizeMetadata.
879  common.ZipClose(output_zip)
880
881  needed_property_files = (
882      NonAbOtaPropertyFiles(),
883  )
884  FinalizeMetadata(metadata, staging_file, output_file, needed_property_files)
885
886
887def WriteMetadata(metadata, output):
888  """Writes the metadata to the zip archive or a file.
889
890  Args:
891    metadata: The metadata dict for the package.
892    output: A ZipFile object or a string of the output file path.
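
  The metadata is written as sorted 'key=value' lines. For example
  (illustrative values only):

    ota-type=AB
    post-build=google/device/device:11/RP1A.200720.009/123:user/release-keys
    post-timestamp=1600000000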
893  """
894
895  value = "".join(["%s=%s\n" % kv for kv in sorted(metadata.items())])
896  if isinstance(output, zipfile.ZipFile):
897    common.ZipWriteStr(output, METADATA_NAME, value,
898                       compress_type=zipfile.ZIP_STORED)
899    return
900
901  with open(output, 'w') as f:
902    f.write(value)
903
904
905def HandleDowngradeMetadata(metadata, target_info, source_info):
906  # Only incremental OTAs are allowed to reach here.
907  assert OPTIONS.incremental_source is not None
908
909  post_timestamp = target_info.GetBuildProp("ro.build.date.utc")
910  pre_timestamp = source_info.GetBuildProp("ro.build.date.utc")
911  is_downgrade = int(post_timestamp) < int(pre_timestamp)
912
913  if OPTIONS.downgrade:
914    if not is_downgrade:
915      raise RuntimeError(
916          "--downgrade or --override_timestamp specified but no downgrade "
917          "detected: pre: %s, post: %s" % (pre_timestamp, post_timestamp))
918    metadata["ota-downgrade"] = "yes"
919  else:
920    if is_downgrade:
921      raise RuntimeError(
922          "Downgrade detected based on timestamp check: pre: %s, post: %s. "
923          "Need to specify --override_timestamp OR --downgrade to allow "
924          "building the incremental." % (pre_timestamp, post_timestamp))
925
926
927def GetPackageMetadata(target_info, source_info=None):
928  """Generates and returns the metadata dict.
929
930  It generates a dict() that contains the info to be written into an OTA
931  package (META-INF/com/android/metadata). It also handles the detection of
932  downgrade / data wipe based on the global options.
933
934  Args:
935    target_info: The BuildInfo instance that holds the target build info.
936    source_info: The BuildInfo instance that holds the source build info, or
937        None if generating full OTA.
938
939  Returns:
940    A dict to be written into package metadata entry.
941  """
942  assert isinstance(target_info, common.BuildInfo)
943  assert source_info is None or isinstance(source_info, common.BuildInfo)
944
945  separator = '|'
946
947  boot_variable_values = {}
948  if OPTIONS.boot_variable_file:
949    d = common.LoadDictionaryFromFile(OPTIONS.boot_variable_file)
950    for key, values in d.items():
951      boot_variable_values[key] = [val.strip() for val in values.split(',')]
952
953  post_build_devices, post_build_fingerprints = \
954      CalculateRuntimeDevicesAndFingerprints(target_info, boot_variable_values)
955  metadata = {
956      'post-build': separator.join(sorted(post_build_fingerprints)),
957      'post-build-incremental': target_info.GetBuildProp(
958          'ro.build.version.incremental'),
959      'post-sdk-level': target_info.GetBuildProp(
960          'ro.build.version.sdk'),
961      'post-security-patch-level': target_info.GetBuildProp(
962          'ro.build.version.security_patch'),
963  }
964
965  if target_info.is_ab and not OPTIONS.force_non_ab:
966    metadata['ota-type'] = 'AB'
967    metadata['ota-required-cache'] = '0'
968  else:
969    metadata['ota-type'] = 'BLOCK'
970
971  if OPTIONS.wipe_user_data:
972    metadata['ota-wipe'] = 'yes'
973
974  if OPTIONS.retrofit_dynamic_partitions:
975    metadata['ota-retrofit-dynamic-partitions'] = 'yes'
976
977  is_incremental = source_info is not None
978  if is_incremental:
979    pre_build_devices, pre_build_fingerprints = \
980        CalculateRuntimeDevicesAndFingerprints(source_info,
981                                               boot_variable_values)
982    metadata['pre-build'] = separator.join(sorted(pre_build_fingerprints))
983    metadata['pre-build-incremental'] = source_info.GetBuildProp(
984        'ro.build.version.incremental')
985    metadata['pre-device'] = separator.join(sorted(pre_build_devices))
986  else:
987    metadata['pre-device'] = separator.join(sorted(post_build_devices))
988
989  # Use the actual post-timestamp, even for a downgrade case.
990  metadata['post-timestamp'] = target_info.GetBuildProp('ro.build.date.utc')
991
992  # Detect downgrades and set up downgrade flags accordingly.
993  if is_incremental:
994    HandleDowngradeMetadata(metadata, target_info, source_info)
995
996  return metadata
997
998
999class PropertyFiles(object):
1000  """A class that computes the property-files string for an OTA package.
1001
1002  A property-files string is a comma-separated string that contains the
1003  offset/size info for an OTA package. The entries, which must be ZIP_STORED,
1004  can be fetched directly with the package URL along with the offset/size info.
1005  These strings can be used for streaming A/B OTAs, or allowing an updater to
  download the package metadata entry directly, without paying the cost of
  downloading the entire package.
1008
  Computing the final property-files string requires two passes, because
  signing the whole package (with signapk.jar) may reorder the ZIP entries,
  which in turn invalidates the previously computed ZIP entry offset/size
  values.
1013
1014  This class provides functions to be called for each pass. The general flow is
1015  as follows.
1016
1017    property_files = PropertyFiles()
1018    # The first pass, which writes placeholders before doing initial signing.
1019    property_files.Compute()
1020    SignOutput()
1021
1022    # The second pass, by replacing the placeholders with actual data.
1023    property_files.Finalize()
1024    SignOutput()
1025
1026  And the caller can additionally verify the final result.
1027
1028    property_files.Verify()
1029  """
1030
1031  def __init__(self):
1032    self.name = None
1033    self.required = ()
1034    self.optional = ()
1035
1036  def Compute(self, input_zip):
1037    """Computes and returns a property-files string with placeholders.
1038
1039    We reserve extra space for the offset and size of the metadata entry itself,
1040    although we don't know the final values until the package gets signed.
1041
1042    Args:
1043      input_zip: The input ZIP file.
1044
1045    Returns:
1046      A string with placeholders for the metadata offset/size info, e.g.
1047      "payload.bin:679:343,payload_properties.txt:378:45,metadata:        ".
1048    """
1049    return self.GetPropertyFilesString(input_zip, reserve_space=True)
1050
1051  class InsufficientSpaceException(Exception):
1052    pass
1053
1054  def Finalize(self, input_zip, reserved_length):
1055    """Finalizes a property-files string with actual METADATA offset/size info.
1056
1057    The input ZIP file has been signed, with the ZIP entries in the desired
1058    place (signapk.jar will possibly reorder the ZIP entries). Now we compute
1059    the ZIP entry offsets and construct the property-files string with actual
1060    data. Note that during this process, we must pad the property-files string
1061    to the reserved length, so that the METADATA entry size remains the same.
1062    Otherwise the entries' offsets and sizes may change again.
1063
1064    Args:
1065      input_zip: The input ZIP file.
1066      reserved_length: The reserved length of the property-files string during
1067          the call to Compute(). The final string must be no more than this
1068          size.
1069
1070    Returns:
1071      A property-files string including the metadata offset/size info, e.g.
1072      "payload.bin:679:343,payload_properties.txt:378:45,metadata:69:379  ".
1073
1074    Raises:
1075      InsufficientSpaceException: If the reserved length is insufficient to hold
1076          the final string.
1077    """
1078    result = self.GetPropertyFilesString(input_zip, reserve_space=False)
1079    if len(result) > reserved_length:
1080      raise self.InsufficientSpaceException(
1081          'Insufficient reserved space: reserved={}, actual={}'.format(
1082              reserved_length, len(result)))
1083
1084    result += ' ' * (reserved_length - len(result))
1085    return result
1086
1087  def Verify(self, input_zip, expected):
1088    """Verifies the input ZIP file contains the expected property-files string.
1089
1090    Args:
1091      input_zip: The input ZIP file.
1092      expected: The property-files string that's computed from Finalize().
1093
1094    Raises:
1095      AssertionError: On finding a mismatch.
1096    """
1097    actual = self.GetPropertyFilesString(input_zip)
1098    assert actual == expected, \
1099        "Mismatching streaming metadata: {} vs {}.".format(actual, expected)
1100
1101  def GetPropertyFilesString(self, zip_file, reserve_space=False):
    """Constructs the property-files string per request.

    Args:
      zip_file: The input ZIP file.
      reserve_space: Whether to reserve 15-byte placeholder space for the
          metadata entry's offset/size, instead of using the actual values.
1108
1109    Returns:
1110      A property-files string including the metadata offset/size info, e.g.
1111      "payload.bin:679:343,payload_properties.txt:378:45,metadata:     ".
1112    """
1113
1114    def ComputeEntryOffsetSize(name):
1115      """Computes the zip entry offset and size."""
1116      info = zip_file.getinfo(name)
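      # The entry's file data starts right after its local file header, which
      # is a fixed-size struct followed by the variable-length filename and
      # extra field.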
1117      offset = info.header_offset
1118      offset += zipfile.sizeFileHeader
1119      offset += len(info.extra) + len(info.filename)
1120      size = info.file_size
1121      return '%s:%d:%d' % (os.path.basename(name), offset, size)
1122
1123    tokens = []
1124    tokens.extend(self._GetPrecomputed(zip_file))
1125    for entry in self.required:
1126      tokens.append(ComputeEntryOffsetSize(entry))
1127    for entry in self.optional:
1128      if entry in zip_file.namelist():
1129        tokens.append(ComputeEntryOffsetSize(entry))
1130
    # 'META-INF/com/android/metadata' is required. We don't know its actual
    # offset and length (nor the final values for the other entries) yet, so we
    # reserve 15 bytes as a placeholder ('offset:length'): that allows up to 10
    # digits for the offset (i.e. ~9 GiB) and up to 4 digits for the length.
    # Note that all the reserved space serves the metadata entry only.
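    # For example, 'metadata:' + 15 spaces may later be finalized to
    # 'metadata:69:379', with the overall string padded with trailing spaces
    # back to its reserved length (see Finalize()).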
1137    if reserve_space:
1138      tokens.append('metadata:' + ' ' * 15)
1139    else:
1140      tokens.append(ComputeEntryOffsetSize(METADATA_NAME))
1141
1142    return ','.join(tokens)
1143
1144  def _GetPrecomputed(self, input_zip):
1145    """Computes the additional tokens to be included into the property-files.
1146
1147    This applies to tokens without actual ZIP entries, such as
    payload_metadata.bin. We want to expose the offset/size to updaters, so
1149    that they can download the payload metadata directly with the info.
1150
1151    Args:
1152      input_zip: The input zip file.
1153
1154    Returns:
1155      A list of strings (tokens) to be added to the property-files string.
1156    """
1157    # pylint: disable=no-self-use
1158    # pylint: disable=unused-argument
1159    return []
1160
1161
1162class StreamingPropertyFiles(PropertyFiles):
1163  """A subclass for computing the property-files for streaming A/B OTAs."""
1164
1165  def __init__(self):
1166    super(StreamingPropertyFiles, self).__init__()
1167    self.name = 'ota-streaming-property-files'
1168    self.required = (
1169        # payload.bin and payload_properties.txt must exist.
1170        'payload.bin',
1171        'payload_properties.txt',
1172    )
1173    self.optional = (
1174        # care_map is available only if dm-verity is enabled.
1175        'care_map.pb',
1176        'care_map.txt',
1177        # compatibility.zip is available only if target supports Treble.
1178        'compatibility.zip',
1179    )
1180
1181
1182class AbOtaPropertyFiles(StreamingPropertyFiles):
1183  """The property-files for A/B OTA that includes payload_metadata.bin info.
1184
1185  Since P, we expose one more token (aka property-file), in addition to the ones
1186  for streaming A/B OTA, for a virtual entry of 'payload_metadata.bin'.
1187  'payload_metadata.bin' is the header part of a payload ('payload.bin'), which
1188  doesn't exist as a separate ZIP entry, but can be used to verify if the
1189  payload can be applied on the given device.
1190
  For backward compatibility, we keep both the 'ota-streaming-property-files'
1192  and the newly added 'ota-property-files' in P. The new token will only be
1193  available in 'ota-property-files'.
1194  """
1195
1196  def __init__(self):
1197    super(AbOtaPropertyFiles, self).__init__()
1198    self.name = 'ota-property-files'
1199
1200  def _GetPrecomputed(self, input_zip):
1201    offset, size = self._GetPayloadMetadataOffsetAndSize(input_zip)
1202    return ['payload_metadata.bin:{}:{}'.format(offset, size)]
1203
1204  @staticmethod
1205  def _GetPayloadMetadataOffsetAndSize(input_zip):
1206    """Computes the offset and size of the payload metadata for a given package.
1207
1208    (From system/update_engine/update_metadata.proto)
1209    A delta update file contains all the deltas needed to update a system from
1210    one specific version to another specific version. The update format is
1211    represented by this struct pseudocode:
1212
1213    struct delta_update_file {
1214      char magic[4] = "CrAU";
1215      uint64 file_format_version;
1216      uint64 manifest_size;  // Size of protobuf DeltaArchiveManifest
1217
1218      // Only present if format_version > 1:
1219      uint32 metadata_signature_size;
1220
1221      // The Bzip2 compressed DeltaArchiveManifest
      char manifest[manifest_size];
1223
1224      // The signature of the metadata (from the beginning of the payload up to
1225      // this location, not including the signature itself). This is a
1226      // serialized Signatures message.
      char metadata_signature_message[metadata_signature_size];
1228
1229      // Data blobs for files, no specific format. The specific offset
1230      // and length of each data blob is recorded in the DeltaArchiveManifest.
1231      struct {
1232        char data[];
1233      } blobs[];
1234
1235      // These two are not signed:
1236      uint64 payload_signatures_message_size;
1237      char payload_signatures_message[];
1238    };
1239
    'payload_metadata.bin' contains all the bytes from the beginning of the
    payload up to the end of 'metadata_signature_message'.
1242    """
1243    payload_info = input_zip.getinfo('payload.bin')
1244    payload_offset = payload_info.header_offset
1245    payload_offset += zipfile.sizeFileHeader
1246    payload_offset += len(payload_info.extra) + len(payload_info.filename)
1247    payload_size = payload_info.file_size
1248
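    # Read the fixed-size part of the payload header: 4-byte magic, 8-byte
    # file format version, 8-byte manifest size and 4-byte metadata signature
    # size (24 bytes in total, big-endian).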
1249    with input_zip.open('payload.bin') as payload_fp:
1250      header_bin = payload_fp.read(24)
1251
1252    # network byte order (big-endian)
1253    header = struct.unpack("!IQQL", header_bin)
1254
1255    # 'CrAU'
1256    magic = header[0]
1257    assert magic == 0x43724155, "Invalid magic: {:x}".format(magic)
1258
1259    manifest_size = header[2]
1260    metadata_signature_size = header[3]
1261    metadata_total = 24 + manifest_size + metadata_signature_size
1262    assert metadata_total < payload_size
1263
1264    return (payload_offset, metadata_total)
1265
1266
1267class NonAbOtaPropertyFiles(PropertyFiles):
1268  """The property-files for non-A/B OTA.
1269
1270  For non-A/B OTA, the property-files string contains the info for METADATA
  entry, with which a system updater can fetch the package metadata prior
1272  to downloading the entire package.
1273  """
1274
1275  def __init__(self):
1276    super(NonAbOtaPropertyFiles, self).__init__()
1277    self.name = 'ota-property-files'
1278
1279
1280def FinalizeMetadata(metadata, input_file, output_file, needed_property_files):
1281  """Finalizes the metadata and signs an A/B OTA package.
1282
1283  In order to stream an A/B OTA package, we need 'ota-streaming-property-files'
1284  that contains the offsets and sizes for the ZIP entries. An example
1285  property-files string is as follows.
1286
1287    "payload.bin:679:343,payload_properties.txt:378:45,metadata:69:379"
1288
  The OTA server can pass down this string, in addition to the package URL, to
  the system update client. The client can then fetch individual ZIP entries
  (ZIP_STORED) directly at the given offsets of the package URL.
1292
1293  Args:
1294    metadata: The metadata dict for the package.
1295    input_file: The input ZIP filename that doesn't contain the package METADATA
1296        entry yet.
1297    output_file: The final output ZIP filename.
    needed_property_files: The list of PropertyFiles to be generated.
1299  """
1300
1301  def ComputeAllPropertyFiles(input_file, needed_property_files):
1302    # Write the current metadata entry with placeholders.
1303    with zipfile.ZipFile(input_file) as input_zip:
1304      for property_files in needed_property_files:
1305        metadata[property_files.name] = property_files.Compute(input_zip)
1306      namelist = input_zip.namelist()
1307
1308    if METADATA_NAME in namelist:
1309      common.ZipDelete(input_file, METADATA_NAME)
1310    output_zip = zipfile.ZipFile(input_file, 'a')
1311    WriteMetadata(metadata, output_zip)
1312    common.ZipClose(output_zip)
1313
1314    if OPTIONS.no_signing:
1315      return input_file
1316
1317    prelim_signing = common.MakeTempFile(suffix='.zip')
1318    SignOutput(input_file, prelim_signing)
1319    return prelim_signing
1320
1321  def FinalizeAllPropertyFiles(prelim_signing, needed_property_files):
1322    with zipfile.ZipFile(prelim_signing) as prelim_signing_zip:
1323      for property_files in needed_property_files:
1324        metadata[property_files.name] = property_files.Finalize(
1325            prelim_signing_zip, len(metadata[property_files.name]))
1326
  # SignOutput(), which in turn calls signapk.jar, will possibly reorder the
  # ZIP entries, as well as pad the entry headers. We do a preliminary signing
1329  # (with an incomplete metadata entry) to allow that to happen. Then compute
1330  # the ZIP entry offsets, write back the final metadata and do the final
1331  # signing.
1332  prelim_signing = ComputeAllPropertyFiles(input_file, needed_property_files)
1333  try:
1334    FinalizeAllPropertyFiles(prelim_signing, needed_property_files)
1335  except PropertyFiles.InsufficientSpaceException:
1336    # Even with the preliminary signing, the entry orders may change
1337    # dramatically, which leads to insufficiently reserved space during the
1338    # first call to ComputeAllPropertyFiles(). In that case, we redo all the
    # preliminary signing work, based on the already ordered ZIP entries, to
1340    # address the issue.
1341    prelim_signing = ComputeAllPropertyFiles(
1342        prelim_signing, needed_property_files)
1343    FinalizeAllPropertyFiles(prelim_signing, needed_property_files)
1344
1345  # Replace the METADATA entry.
1346  common.ZipDelete(prelim_signing, METADATA_NAME)
1347  output_zip = zipfile.ZipFile(prelim_signing, 'a')
1348  WriteMetadata(metadata, output_zip)
1349  common.ZipClose(output_zip)
1350
1351  # Re-sign the package after updating the metadata entry.
1352  if OPTIONS.no_signing:
1353    output_file = prelim_signing
1354  else:
1355    SignOutput(prelim_signing, output_file)
1356
1357  # Reopen the final signed zip to double check the streaming metadata.
1358  with zipfile.ZipFile(output_file) as output_zip:
1359    for property_files in needed_property_files:
1360      property_files.Verify(output_zip, metadata[property_files.name].strip())
1361
1362  # If requested, dump the metadata to a separate file.
1363  output_metadata_path = OPTIONS.output_metadata_path
1364  if output_metadata_path:
1365    WriteMetadata(metadata, output_metadata_path)
1366
1367
1368def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_file):
1369  target_info = common.BuildInfo(OPTIONS.target_info_dict, OPTIONS.oem_dicts)
1370  source_info = common.BuildInfo(OPTIONS.source_info_dict, OPTIONS.oem_dicts)
1371
1372  target_api_version = target_info["recovery_api_version"]
1373  source_api_version = source_info["recovery_api_version"]
1374  if source_api_version == 0:
1375    logger.warning(
1376        "Generating edify script for a source that can't install it.")
1377
1378  script = edify_generator.EdifyGenerator(
1379      source_api_version, target_info, fstab=source_info["fstab"])
1380
1381  if target_info.oem_props or source_info.oem_props:
1382    if not OPTIONS.oem_no_mount:
1383      source_info.WriteMountOemScript(script)
1384
1385  metadata = GetPackageMetadata(target_info, source_info)
1386
1387  if not OPTIONS.no_signing:
1388    staging_file = common.MakeTempFile(suffix='.zip')
1389  else:
1390    staging_file = output_file
1391
1392  output_zip = zipfile.ZipFile(
1393      staging_file, "w", compression=zipfile.ZIP_DEFLATED)
1394
1395  device_specific = common.DeviceSpecificParams(
1396      source_zip=source_zip,
1397      source_version=source_api_version,
1398      source_tmp=OPTIONS.source_tmp,
1399      target_zip=target_zip,
1400      target_version=target_api_version,
1401      target_tmp=OPTIONS.target_tmp,
1402      output_zip=output_zip,
1403      script=script,
1404      metadata=metadata,
1405      info_dict=source_info)
1406
1407  source_boot = common.GetBootableImage(
1408      "/tmp/boot.img", "boot.img", OPTIONS.source_tmp, "BOOT", source_info)
1409  target_boot = common.GetBootableImage(
1410      "/tmp/boot.img", "boot.img", OPTIONS.target_tmp, "BOOT", target_info)
1411  updating_boot = (not OPTIONS.two_step and
1412                   (source_boot.data != target_boot.data))
1413
1414  target_recovery = common.GetBootableImage(
1415      "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY")
1416
1417  block_diff_dict = GetBlockDifferences(target_zip=target_zip,
1418                                        source_zip=source_zip,
1419                                        target_info=target_info,
1420                                        source_info=source_info,
1421                                        device_specific=device_specific)
1422
1423  CheckVintfIfTrebleEnabled(OPTIONS.target_tmp, target_info)
1424
1425  # Assertions (e.g. device properties check).
1426  target_info.WriteDeviceAssertions(script, OPTIONS.oem_no_mount)
1427  device_specific.IncrementalOTA_Assertions()
1428
1429  # Two-step incremental package strategy (in chronological order,
1430  # which is *not* the order in which the generated script has
1431  # things):
1432  #
1433  # if stage is not "2/3" or "3/3":
1434  #    do verification on current system
1435  #    write recovery image to boot partition
1436  #    set stage to "2/3"
1437  #    reboot to boot partition and restart recovery
1438  # else if stage is "2/3":
1439  #    write recovery image to recovery partition
1440  #    set stage to "3/3"
1441  #    reboot to recovery partition and restart recovery
1442  # else:
1443  #    (stage must be "3/3")
1444  #    perform update:
1445  #       patch system files, etc.
1446  #       force full install of new boot image
1447  #       set up system to update recovery partition on first boot
1448  #    complete script normally
1449  #    (allow recovery to mark itself finished and reboot)
1450
1451  if OPTIONS.two_step:
1452    if not source_info.get("multistage_support"):
1453      assert False, "two-step packages not supported by this build"
1454    fs = source_info["fstab"]["/misc"]
1455    assert fs.fs_type.upper() == "EMMC", \
1456        "two-step packages only supported on devices with EMMC /misc partitions"
1457    bcb_dev = {"bcb_dev": fs.device}
1458    common.ZipWriteStr(output_zip, "recovery.img", target_recovery.data)
1459    script.AppendExtra("""
1460if get_stage("%(bcb_dev)s") == "2/3" then
1461""" % bcb_dev)
1462
1463    # Stage 2/3: Write recovery image to /recovery (currently running /boot).
1464    script.Comment("Stage 2/3")
1465    script.AppendExtra("sleep(20);\n")
1466    script.WriteRawImage("/recovery", "recovery.img")
1467    script.AppendExtra("""
1468set_stage("%(bcb_dev)s", "3/3");
1469reboot_now("%(bcb_dev)s", "recovery");
1470else if get_stage("%(bcb_dev)s") != "3/3" then
1471""" % bcb_dev)
1472
1473    # Stage 1/3: (a) Verify the current system.
1474    script.Comment("Stage 1/3")
1475
1476  # Dump fingerprints
1477  script.Print("Source: {}".format(source_info.fingerprint))
1478  script.Print("Target: {}".format(target_info.fingerprint))
1479
1480  script.Print("Verifying current system...")
1481
1482  device_specific.IncrementalOTA_VerifyBegin()
1483
1484  WriteFingerprintAssertion(script, target_info, source_info)
1485
1486  # Check the required cache size (i.e. stashed blocks).
1487  required_cache_sizes = [diff.required_cache for diff in
1488                          block_diff_dict.values()]
1489  if updating_boot:
1490    boot_type, boot_device_expr = common.GetTypeAndDeviceExpr("/boot",
1491                                                              source_info)
1492    d = common.Difference(target_boot, source_boot)
1493    _, _, d = d.ComputePatch()
1494    if d is None:
1495      include_full_boot = True
1496      common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
1497    else:
1498      include_full_boot = False
1499
1500      logger.info(
1501          "boot      target: %d  source: %d  diff: %d", target_boot.size,
1502          source_boot.size, len(d))
1503
1504      common.ZipWriteStr(output_zip, "boot.img.p", d)
1505
1506      target_expr = 'concat("{}:",{},":{}:{}")'.format(
1507          boot_type, boot_device_expr, target_boot.size, target_boot.sha1)
1508      source_expr = 'concat("{}:",{},":{}:{}")'.format(
1509          boot_type, boot_device_expr, source_boot.size, source_boot.sha1)
1510      script.PatchPartitionExprCheck(target_expr, source_expr)
1511
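      # Patching the boot image needs scratch space for the full target boot
      # image, so account for it in the cache free-space check below.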
1512      required_cache_sizes.append(target_boot.size)
1513
1514  if required_cache_sizes:
1515    script.CacheFreeSpaceCheck(max(required_cache_sizes))
1516
1517  # Verify the existing partitions.
1518  for diff in block_diff_dict.values():
1519    diff.WriteVerifyScript(script, touched_blocks_only=True)
1520
1521  device_specific.IncrementalOTA_VerifyEnd()
1522
1523  if OPTIONS.two_step:
1524    # Stage 1/3: (b) Write recovery image to /boot.
1525    _WriteRecoveryImageToBoot(script, output_zip)
1526
1527    script.AppendExtra("""
1528set_stage("%(bcb_dev)s", "2/3");
1529reboot_now("%(bcb_dev)s", "");
1530else
1531""" % bcb_dev)
1532
1533    # Stage 3/3: Make changes.
1534    script.Comment("Stage 3/3")
1535
1536  script.Comment("---- start making changes here ----")
1537
1538  device_specific.IncrementalOTA_InstallBegin()
1539
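  # Give each block diff a 0.1 share of the progress bar by default, and let
  # system take whatever remains.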
1540  progress_dict = {partition: 0.1 for partition in block_diff_dict}
1541  progress_dict["system"] = 1 - len(block_diff_dict) * 0.1
1542
1543  if OPTIONS.source_info_dict.get("use_dynamic_partitions") == "true":
1544    if OPTIONS.target_info_dict.get("use_dynamic_partitions") != "true":
1545      raise RuntimeError(
1546          "can't generate incremental that disables dynamic partitions")
1547    dynamic_partitions_diff = common.DynamicPartitionsDifference(
1548        info_dict=OPTIONS.target_info_dict,
1549        source_info_dict=OPTIONS.source_info_dict,
1550        block_diffs=block_diff_dict.values(),
1551        progress_dict=progress_dict)
1552    dynamic_partitions_diff.WriteScript(
1553        script, output_zip, write_verify_script=OPTIONS.verify)
1554  else:
1555    for block_diff in block_diff_dict.values():
1556      block_diff.WriteScript(script, output_zip,
1557                             progress=progress_dict.get(block_diff.partition),
1558                             write_verify_script=OPTIONS.verify)
1559
1560  if OPTIONS.two_step:
1561    common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
1562    script.WriteRawImage("/boot", "boot.img")
1563    logger.info("writing full boot image (forced by two-step mode)")
1564
1565  if not OPTIONS.two_step:
1566    if updating_boot:
1567      if include_full_boot:
1568        logger.info("boot image changed; including full.")
1569        script.Print("Installing boot image...")
1570        script.WriteRawImage("/boot", "boot.img")
1571      else:
1572        # Produce the boot image by applying a patch to the current
1573        # contents of the boot partition, and write it back to the
1574        # partition.
1575        logger.info("boot image changed; including patch.")
1576        script.Print("Patching boot image...")
1577        script.ShowProgress(0.1, 10)
1578        target_expr = 'concat("{}:",{},":{}:{}")'.format(
1579            boot_type, boot_device_expr, target_boot.size, target_boot.sha1)
1580        source_expr = 'concat("{}:",{},":{}:{}")'.format(
1581            boot_type, boot_device_expr, source_boot.size, source_boot.sha1)
1582        script.PatchPartitionExpr(target_expr, source_expr, '"boot.img.p"')
1583    else:
1584      logger.info("boot image unchanged; skipping.")
1585
1586  # Do device-specific installation (eg, write radio image).
1587  device_specific.IncrementalOTA_InstallEnd()
1588
1589  if OPTIONS.extra_script is not None:
1590    script.AppendExtra(OPTIONS.extra_script)
1591
1592  if OPTIONS.wipe_user_data:
1593    script.Print("Erasing user data...")
1594    script.FormatPartition("/data")
1595
1596  if OPTIONS.two_step:
1597    script.AppendExtra("""
1598set_stage("%(bcb_dev)s", "");
1599endif;
1600endif;
1601""" % bcb_dev)
1602
1603  script.SetProgress(1)
1604  # For downgrade OTAs, we prefer to use the update-binary in the source
1605  # build that is actually newer than the one in the target build.
1606  if OPTIONS.downgrade:
1607    script.AddToZip(source_zip, output_zip, input_path=OPTIONS.updater_binary)
1608  else:
1609    script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary)
1610  metadata["ota-required-cache"] = str(script.required_cache)
1611
1612  # We haven't written the metadata entry yet, which will be handled in
1613  # FinalizeMetadata().
1614  common.ZipClose(output_zip)
1615
1616  # Sign the generated zip package unless no_signing is specified.
1617  needed_property_files = (
1618      NonAbOtaPropertyFiles(),
1619  )
1620  FinalizeMetadata(metadata, staging_file, output_file, needed_property_files)
1621
1622
1623def GetTargetFilesZipForSecondaryImages(input_file, skip_postinstall=False):
1624  """Returns a target-files.zip file for generating secondary payload.
1625
  Although the original target-files.zip already contains the secondary slot
  images (i.e. IMAGES/system_other.img), we need to rename the files to the
  names without the _other suffix. Note that we cannot instead modify the
  names in META/ab_partitions.txt, because there are no matching partitions
  on the device.

  For the partitions that don't have secondary images, the images for the
  primary slot will be used. This is to ensure that we always have valid
  boot, vbmeta and bootloader images in the inactive slot.
1634
1635  Args:
1636    input_file: The input target-files.zip file.
1637    skip_postinstall: Whether to skip copying the postinstall config file.
1638
1639  Returns:
1640    The filename of the target-files.zip for generating secondary payload.
1641  """
1642
1643  def GetInfoForSecondaryImages(info_file):
1644    """Updates info file for secondary payload generation.
1645
1646    Scan each line in the info file, and remove the unwanted partitions from
1647    the dynamic partition list in the related properties. e.g.
1648    "super_google_dynamic_partitions_partition_list=system vendor product"
1649    will become "super_google_dynamic_partitions_partition_list=system".
1650
1651    Args:
1652      info_file: The input info file. e.g. misc_info.txt.
1653
1654    Returns:
1655      A string of the updated info content.
1656    """
1657
1658    output_list = []
1659    with open(info_file) as f:
1660      lines = f.read().splitlines()
1661
1662    # The suffix in partition_list variables that follows the name of the
1663    # partition group.
1664    LIST_SUFFIX = 'partition_list'
1665    for line in lines:
1666      if line.startswith('#') or '=' not in line:
1667        output_list.append(line)
1668        continue
1669      key, value = line.strip().split('=', 1)
1670      if key == 'dynamic_partition_list' or key.endswith(LIST_SUFFIX):
1671        partitions = value.split()
1672        partitions = [partition for partition in partitions if partition
1673                      not in SECONDARY_PAYLOAD_SKIPPED_IMAGES]
1674        output_list.append('{}={}'.format(key, ' '.join(partitions)))
      elif key in ['virtual_ab', 'virtual_ab_retrofit']:
        # Drop the virtual_ab flags from the secondary payload so that the
        # OTA client doesn't use snapshots for the secondary update.
        pass
1679      else:
1680        output_list.append(line)
1681    return '\n'.join(output_list)
1682
1683  target_file = common.MakeTempFile(prefix="targetfiles-", suffix=".zip")
1684  target_zip = zipfile.ZipFile(target_file, 'w', allowZip64=True)
1685
1686  with zipfile.ZipFile(input_file, 'r') as input_zip:
1687    infolist = input_zip.infolist()
1688
1689  input_tmp = common.UnzipTemp(input_file, UNZIP_PATTERN)
1690  for info in infolist:
1691    unzipped_file = os.path.join(input_tmp, *info.filename.split('/'))
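    # system_other.img carries the system image meant for the secondary
    # (inactive) slot, so write it out as system.img in the secondary payload.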
1692    if info.filename == 'IMAGES/system_other.img':
1693      common.ZipWrite(target_zip, unzipped_file, arcname='IMAGES/system.img')
1694
1695    # Primary images and friends need to be skipped explicitly.
1696    elif info.filename in ('IMAGES/system.img',
1697                           'IMAGES/system.map'):
1698      pass
1699
1700    # Copy images that are not in SECONDARY_PAYLOAD_SKIPPED_IMAGES.
1701    elif info.filename.startswith(('IMAGES/', 'RADIO/')):
1702      image_name = os.path.basename(info.filename)
1703      if image_name not in ['{}.img'.format(partition) for partition in
1704                            SECONDARY_PAYLOAD_SKIPPED_IMAGES]:
1705        common.ZipWrite(target_zip, unzipped_file, arcname=info.filename)
1706
1707    # Skip copying the postinstall config if requested.
1708    elif skip_postinstall and info.filename == POSTINSTALL_CONFIG:
1709      pass
1710
1711    elif info.filename.startswith('META/'):
1712      # Remove the unnecessary partitions for secondary images from the
1713      # ab_partitions file.
1714      if info.filename == AB_PARTITIONS:
1715        with open(unzipped_file) as f:
1716          partition_list = f.read().splitlines()
1717        partition_list = [partition for partition in partition_list if partition
1718                          and partition not in SECONDARY_PAYLOAD_SKIPPED_IMAGES]
1719        common.ZipWriteStr(target_zip, info.filename,
1720                           '\n'.join(partition_list))
1721      # Remove the unnecessary partitions from the dynamic partitions list.
1722      elif (info.filename == 'META/misc_info.txt' or
1723            info.filename == DYNAMIC_PARTITION_INFO):
1724        modified_info = GetInfoForSecondaryImages(unzipped_file)
1725        common.ZipWriteStr(target_zip, info.filename, modified_info)
1726      else:
1727        common.ZipWrite(target_zip, unzipped_file, arcname=info.filename)
1728
1729  common.ZipClose(target_zip)
1730
1731  return target_file
1732
1733
1734def GetTargetFilesZipWithoutPostinstallConfig(input_file):
1735  """Returns a target-files.zip that's not containing postinstall_config.txt.
1736
  This allows the brillo_update_payload script to skip writing all the
  postinstall hooks in the generated payload. The input target-files.zip file
  will be duplicated, with 'META/postinstall_config.txt' skipped. If
  input_file doesn't contain the postinstall_config.txt entry, the input file
  will be returned.
1741
1742  Args:
1743    input_file: The input target-files.zip filename.
1744
1745  Returns:
1746    The filename of target-files.zip that doesn't contain postinstall config.
1747  """
  # We should only make a copy if the postinstall_config entry exists.
1749  with zipfile.ZipFile(input_file, 'r') as input_zip:
1750    if POSTINSTALL_CONFIG not in input_zip.namelist():
1751      return input_file
1752
1753  target_file = common.MakeTempFile(prefix="targetfiles-", suffix=".zip")
1754  shutil.copyfile(input_file, target_file)
1755  common.ZipDelete(target_file, POSTINSTALL_CONFIG)
1756  return target_file
1757
1758
1759def GetTargetFilesZipForRetrofitDynamicPartitions(input_file,
1760                                                  super_block_devices,
1761                                                  dynamic_partition_list):
1762  """Returns a target-files.zip for retrofitting dynamic partitions.
1763
1764  This allows brillo_update_payload to generate an OTA based on the exact
1765  bits on the block devices. Postinstall is disabled.
1766
1767  Args:
1768    input_file: The input target-files.zip filename.
    super_block_devices: The list of super block devices.
    dynamic_partition_list: The list of dynamic partitions.
1771
1772  Returns:
1773    The filename of target-files.zip with *.img replaced with super_*.img for
1774    each block device in super_block_devices.
1775  """
1776  assert super_block_devices, "No super_block_devices are specified."
1777
1778  replace = {'OTA/super_{}.img'.format(dev): 'IMAGES/{}.img'.format(dev)
1779             for dev in super_block_devices}
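  # For example, with super_block_devices == ['super'], replace maps
  # 'OTA/super_super.img' to 'IMAGES/super.img'.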
1780
1781  target_file = common.MakeTempFile(prefix="targetfiles-", suffix=".zip")
1782  shutil.copyfile(input_file, target_file)
1783
1784  with zipfile.ZipFile(input_file) as input_zip:
1785    namelist = input_zip.namelist()
1786
1787  input_tmp = common.UnzipTemp(input_file, RETROFIT_DAP_UNZIP_PATTERN)
1788
  # Remove partitions from META/ab_partitions.txt that are in
  # dynamic_partition_list but not in super_block_devices, so that
  # brillo_update_payload won't generate updates for those logical partitions.
1792  ab_partitions_file = os.path.join(input_tmp, *AB_PARTITIONS.split('/'))
1793  with open(ab_partitions_file) as f:
1794    ab_partitions_lines = f.readlines()
1795    ab_partitions = [line.strip() for line in ab_partitions_lines]
1796  # Assert that all super_block_devices are in ab_partitions
1797  super_device_not_updated = [partition for partition in super_block_devices
1798                              if partition not in ab_partitions]
1799  assert not super_device_not_updated, \
1800      "{} is in super_block_devices but not in {}".format(
1801          super_device_not_updated, AB_PARTITIONS)
1802  # ab_partitions -= (dynamic_partition_list - super_block_devices)
1803  new_ab_partitions = common.MakeTempFile(
1804      prefix="ab_partitions", suffix=".txt")
1805  with open(new_ab_partitions, 'w') as f:
1806    for partition in ab_partitions:
1807      if (partition in dynamic_partition_list and
1808          partition not in super_block_devices):
1809        logger.info("Dropping %s from ab_partitions.txt", partition)
1810        continue
1811      f.write(partition + "\n")
1812  to_delete = [AB_PARTITIONS]
1813
1814  # Always skip postinstall for a retrofit update.
1815  to_delete += [POSTINSTALL_CONFIG]
1816
1817  # Delete dynamic_partitions_info.txt so that brillo_update_payload thinks this
1818  # is a regular update on devices without dynamic partitions support.
1819  to_delete += [DYNAMIC_PARTITION_INFO]
1820
1821  # Remove the existing partition images as well as the map files.
1822  to_delete += list(replace.values())
1823  to_delete += ['IMAGES/{}.map'.format(dev) for dev in super_block_devices]
1824
1825  common.ZipDelete(target_file, to_delete)
1826
1827  target_zip = zipfile.ZipFile(target_file, 'a', allowZip64=True)
1828
1829  # Write super_{foo}.img as {foo}.img.
1830  for src, dst in replace.items():
1831    assert src in namelist, \
1832        'Missing {} in {}; {} cannot be written'.format(src, input_file, dst)
1833    unzipped_file = os.path.join(input_tmp, *src.split('/'))
1834    common.ZipWrite(target_zip, unzipped_file, arcname=dst)
1835
1836  # Write new ab_partitions.txt file
1837  common.ZipWrite(target_zip, new_ab_partitions, arcname=AB_PARTITIONS)
1838
1839  common.ZipClose(target_zip)
1840
1841  return target_file
1842
1843
1844def GenerateAbOtaPackage(target_file, output_file, source_file=None):
1845  """Generates an Android OTA package that has A/B update payload."""
1846  # Stage the output zip package for package signing.
1847  if not OPTIONS.no_signing:
1848    staging_file = common.MakeTempFile(suffix='.zip')
1849  else:
1850    staging_file = output_file
1851  output_zip = zipfile.ZipFile(staging_file, "w",
1852                               compression=zipfile.ZIP_DEFLATED)
1853
1854  if source_file is not None:
1855    target_info = common.BuildInfo(OPTIONS.target_info_dict, OPTIONS.oem_dicts)
1856    source_info = common.BuildInfo(OPTIONS.source_info_dict, OPTIONS.oem_dicts)
1857  else:
1858    target_info = common.BuildInfo(OPTIONS.info_dict, OPTIONS.oem_dicts)
1859    source_info = None
1860
1861  # Metadata to comply with Android OTA package format.
1862  metadata = GetPackageMetadata(target_info, source_info)
1863
1864  if OPTIONS.retrofit_dynamic_partitions:
1865    target_file = GetTargetFilesZipForRetrofitDynamicPartitions(
1866        target_file, target_info.get("super_block_devices").strip().split(),
1867        target_info.get("dynamic_partition_list").strip().split())
1868  elif OPTIONS.skip_postinstall:
1869    target_file = GetTargetFilesZipWithoutPostinstallConfig(target_file)
1870
1871  # Generate payload.
1872  payload = Payload()
1873
1874  # Enforce a max timestamp this payload can be applied on top of.
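  # For a downgrade OTA the target build's timestamp is older than the
  # source build's, so we bound the check by the source (i.e. currently
  # installed) build's timestamp instead.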
1875  if OPTIONS.downgrade:
1876    max_timestamp = source_info.GetBuildProp("ro.build.date.utc")
1877  else:
1878    max_timestamp = metadata["post-timestamp"]
1879  additional_args = ["--max_timestamp", max_timestamp]
1880
1881  payload.Generate(target_file, source_file, additional_args)
1882
1883  # Sign the payload.
1884  payload_signer = PayloadSigner()
1885  payload.Sign(payload_signer)
1886
1887  # Write the payload into output zip.
1888  payload.WriteToZip(output_zip)
1889
1890  # Generate and include the secondary payload that installs secondary images
1891  # (e.g. system_other.img).
1892  if OPTIONS.include_secondary:
1893    # We always include a full payload for the secondary slot, even when
1894    # building an incremental OTA. See the comments for "--include_secondary".
1895    secondary_target_file = GetTargetFilesZipForSecondaryImages(
1896        target_file, OPTIONS.skip_postinstall)
1897    secondary_payload = Payload(secondary=True)
1898    secondary_payload.Generate(secondary_target_file,
1899                               additional_args=additional_args)
1900    secondary_payload.Sign(payload_signer)
1901    secondary_payload.WriteToZip(output_zip)
1902
  # If dm-verity is supported on the device, copy the contents of the care_map
  # file into the A/B OTA package.
1905  target_zip = zipfile.ZipFile(target_file, "r")
1906  if (target_info.get("verity") == "true" or
1907      target_info.get("avb_enable") == "true"):
1908    care_map_list = [x for x in ["care_map.pb", "care_map.txt"] if
1909                     "META/" + x in target_zip.namelist()]
1910
1911    # Adds care_map if either the protobuf format or the plain text one exists.
1912    if care_map_list:
1913      care_map_name = care_map_list[0]
1914      care_map_data = target_zip.read("META/" + care_map_name)
1915      # In order to support streaming, care_map needs to be packed as
1916      # ZIP_STORED.
1917      common.ZipWriteStr(output_zip, care_map_name, care_map_data,
1918                         compress_type=zipfile.ZIP_STORED)
1919    else:
1920      logger.warning("Cannot find care map file in target_file package")
1921
1922  common.ZipClose(target_zip)
1923
1924  CheckVintfIfTrebleEnabled(target_file, target_info)
1925
1926  # We haven't written the metadata entry yet, which will be handled in
1927  # FinalizeMetadata().
1928  common.ZipClose(output_zip)
1929
  # AbOtaPropertyFiles intends to replace StreamingPropertyFiles, as it covers
  # all the info of the latter. However, system updaters and OTA servers need
  # time to switch to the new flag. We keep both flags for the P timeframe, and
  # will remove StreamingPropertyFiles in a later release.
1934  needed_property_files = (
1935      AbOtaPropertyFiles(),
1936      StreamingPropertyFiles(),
1937  )
1938  FinalizeMetadata(metadata, staging_file, output_file, needed_property_files)
1939
1940
1941def GenerateNonAbOtaPackage(target_file, output_file, source_file=None):
1942  """Generates a non-A/B OTA package."""
1943  # Check the loaded info dicts first.
1944  if OPTIONS.info_dict.get("no_recovery") == "true":
1945    raise common.ExternalError(
1946        "--- target build has specified no recovery ---")
1947
  # Non-A/B OTAs rely on the /cache partition to store temporary files.
1949  cache_size = OPTIONS.info_dict.get("cache_size")
1950  if cache_size is None:
1951    logger.warning("--- can't determine the cache partition size ---")
1952  OPTIONS.cache_size = cache_size
1953
1954  if OPTIONS.extra_script is not None:
1955    with open(OPTIONS.extra_script) as fp:
1956      OPTIONS.extra_script = fp.read()
1957
1958  if OPTIONS.extracted_input is not None:
1959    OPTIONS.input_tmp = OPTIONS.extracted_input
1960  else:
1961    logger.info("unzipping target target-files...")
1962    OPTIONS.input_tmp = common.UnzipTemp(target_file, UNZIP_PATTERN)
1963  OPTIONS.target_tmp = OPTIONS.input_tmp
1964
1965  # If the caller explicitly specified the device-specific extensions path via
1966  # -s / --device_specific, use that. Otherwise, use META/releasetools.py if it
1967  # is present in the target target_files. Otherwise, take the path of the file
1968  # from 'tool_extensions' in the info dict and look for that in the local
1969  # filesystem, relative to the current directory.
1970  if OPTIONS.device_specific is None:
1971    from_input = os.path.join(OPTIONS.input_tmp, "META", "releasetools.py")
1972    if os.path.exists(from_input):
1973      logger.info("(using device-specific extensions from target_files)")
1974      OPTIONS.device_specific = from_input
1975    else:
1976      OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions")
1977
1978  if OPTIONS.device_specific is not None:
1979    OPTIONS.device_specific = os.path.abspath(OPTIONS.device_specific)
1980
1981  # Generate a full OTA.
1982  if source_file is None:
1983    with zipfile.ZipFile(target_file) as input_zip:
1984      WriteFullOTAPackage(
1985          input_zip,
1986          output_file)
1987
1988  # Generate an incremental OTA.
1989  else:
1990    logger.info("unzipping source target-files...")
1991    OPTIONS.source_tmp = common.UnzipTemp(
1992        OPTIONS.incremental_source, UNZIP_PATTERN)
1993    with zipfile.ZipFile(target_file) as input_zip, \
1994            zipfile.ZipFile(source_file) as source_zip:
1995      WriteBlockIncrementalOTAPackage(
1996          input_zip,
1997          source_zip,
1998          output_file)
1999
2000
2001def CalculateRuntimeDevicesAndFingerprints(build_info, boot_variable_values):
2002  """Returns a tuple of sets for runtime devices and fingerprints"""
2003
2004  device_names = {build_info.device}
2005  fingerprints = {build_info.fingerprint}
2006
2007  if not boot_variable_values:
2008    return device_names, fingerprints
2009
2010  # Calculate all possible combinations of the values for the boot variables.
2011  keys = boot_variable_values.keys()
2012  value_list = boot_variable_values.values()
2013  combinations = [dict(zip(keys, values))
2014                  for values in itertools.product(*value_list)]
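  # For example (hypothetical values), boot_variable_values of
  #   {"ro.boot.sku": ["sku_a", "sku_b"], "ro.boot.region": ["us"]}
  # expands to the two combinations
  #   {"ro.boot.sku": "sku_a", "ro.boot.region": "us"} and
  #   {"ro.boot.sku": "sku_b", "ro.boot.region": "us"}.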
2015  for placeholder_values in combinations:
2016    # Reload the info_dict as some build properties may change their values
2017    # based on the value of ro.boot* properties.
2018    info_dict = copy.deepcopy(build_info.info_dict)
2019    for partition in common.PARTITIONS_WITH_CARE_MAP:
2020      partition_prop_key = "{}.build.prop".format(partition)
2021      input_file = info_dict[partition_prop_key].input_file
2022      if isinstance(input_file, zipfile.ZipFile):
2023        with zipfile.ZipFile(input_file.filename) as input_zip:
2024          info_dict[partition_prop_key] = \
2025              common.PartitionBuildProps.FromInputFile(input_zip, partition,
2026                                                       placeholder_values)
2027      else:
2028        info_dict[partition_prop_key] = \
2029            common.PartitionBuildProps.FromInputFile(input_file, partition,
2030                                                     placeholder_values)
2031    info_dict["build.prop"] = info_dict["system.build.prop"]
2032
2033    new_build_info = common.BuildInfo(info_dict, build_info.oem_dicts)
2034    device_names.add(new_build_info.device)
2035    fingerprints.add(new_build_info.fingerprint)
2036  return device_names, fingerprints
2037
2038
2039def main(argv):
2040
2041  def option_handler(o, a):
2042    if o in ("-k", "--package_key"):
2043      OPTIONS.package_key = a
2044    elif o in ("-i", "--incremental_from"):
2045      OPTIONS.incremental_source = a
2046    elif o == "--full_radio":
2047      OPTIONS.full_radio = True
2048    elif o == "--full_bootloader":
2049      OPTIONS.full_bootloader = True
2050    elif o == "--wipe_user_data":
2051      OPTIONS.wipe_user_data = True
2052    elif o == "--downgrade":
2053      OPTIONS.downgrade = True
2054      OPTIONS.wipe_user_data = True
2055    elif o == "--override_timestamp":
2056      OPTIONS.downgrade = True
2057    elif o in ("-o", "--oem_settings"):
2058      OPTIONS.oem_source = a.split(',')
2059    elif o == "--oem_no_mount":
2060      OPTIONS.oem_no_mount = True
2061    elif o in ("-e", "--extra_script"):
2062      OPTIONS.extra_script = a
2063    elif o in ("-t", "--worker_threads"):
2064      if a.isdigit():
2065        OPTIONS.worker_threads = int(a)
2066      else:
2067        raise ValueError("Cannot parse value %r for option %r - only "
2068                         "integers are allowed." % (a, o))
2069    elif o in ("-2", "--two_step"):
2070      OPTIONS.two_step = True
2071    elif o == "--include_secondary":
2072      OPTIONS.include_secondary = True
2073    elif o == "--no_signing":
2074      OPTIONS.no_signing = True
2075    elif o == "--verify":
2076      OPTIONS.verify = True
2077    elif o == "--block":
2078      OPTIONS.block_based = True
2079    elif o in ("-b", "--binary"):
2080      OPTIONS.updater_binary = a
2081    elif o == "--stash_threshold":
2082      try:
2083        OPTIONS.stash_threshold = float(a)
2084      except ValueError:
2085        raise ValueError("Cannot parse value %r for option %r - expecting "
2086                         "a float" % (a, o))
2087    elif o == "--log_diff":
2088      OPTIONS.log_diff = a
2089    elif o == "--payload_signer":
2090      OPTIONS.payload_signer = a
2091    elif o == "--payload_signer_args":
2092      OPTIONS.payload_signer_args = shlex.split(a)
2093    elif o == "--payload_signer_maximum_signature_size":
2094      OPTIONS.payload_signer_maximum_signature_size = a
2095    elif o == "--payload_signer_key_size":
2096      # TODO(Xunchang) remove this option after cleaning up the callers.
2097      logger.warning("The option '--payload_signer_key_size' is deprecated."
2098                     " Use '--payload_signer_maximum_signature_size' instead.")
2099      OPTIONS.payload_signer_maximum_signature_size = a
2100    elif o == "--extracted_input_target_files":
2101      OPTIONS.extracted_input = a
2102    elif o == "--skip_postinstall":
2103      OPTIONS.skip_postinstall = True
2104    elif o == "--retrofit_dynamic_partitions":
2105      OPTIONS.retrofit_dynamic_partitions = True
2106    elif o == "--skip_compatibility_check":
2107      OPTIONS.skip_compatibility_check = True
2108    elif o == "--output_metadata_path":
2109      OPTIONS.output_metadata_path = a
2110    elif o == "--disable_fec_computation":
2111      OPTIONS.disable_fec_computation = True
2112    elif o == "--force_non_ab":
2113      OPTIONS.force_non_ab = True
2114    elif o == "--boot_variable_file":
2115      OPTIONS.boot_variable_file = a
2116    else:
2117      return False
2118    return True
2119
2120  args = common.ParseOptions(argv, __doc__,
2121                             extra_opts="b:k:i:d:e:t:2o:",
2122                             extra_long_opts=[
2123                                 "package_key=",
2124                                 "incremental_from=",
2125                                 "full_radio",
2126                                 "full_bootloader",
2127                                 "wipe_user_data",
2128                                 "downgrade",
2129                                 "override_timestamp",
2130                                 "extra_script=",
2131                                 "worker_threads=",
2132                                 "two_step",
2133                                 "include_secondary",
2134                                 "no_signing",
2135                                 "block",
2136                                 "binary=",
2137                                 "oem_settings=",
2138                                 "oem_no_mount",
2139                                 "verify",
2140                                 "stash_threshold=",
2141                                 "log_diff=",
2142                                 "payload_signer=",
2143                                 "payload_signer_args=",
2144                                 "payload_signer_maximum_signature_size=",
2145                                 "payload_signer_key_size=",
2146                                 "extracted_input_target_files=",
2147                                 "skip_postinstall",
2148                                 "retrofit_dynamic_partitions",
2149                                 "skip_compatibility_check",
2150                                 "output_metadata_path=",
2151                                 "disable_fec_computation",
2152                                 "force_non_ab",
2153                                 "boot_variable_file=",
2154                             ], extra_option_handler=option_handler)
2155
2156  if len(args) != 2:
2157    common.Usage(__doc__)
2158    sys.exit(1)
2159
2160  common.InitLogging()
2161
2162  if OPTIONS.downgrade:
    # We should only allow downgrading incrementals (as opposed to full OTAs).
    # Otherwise the device could be rolled back from an arbitrary build with
    # this full OTA package.
2166    if OPTIONS.incremental_source is None:
2167      raise ValueError("Cannot generate downgradable full OTAs")
2168
2169  # Load the build info dicts from the zip directly or the extracted input
2170  # directory. We don't need to unzip the entire target-files zips, because they
2171  # won't be needed for A/B OTAs (brillo_update_payload does that on its own).
2172  # When loading the info dicts, we don't need to provide the second parameter
2173  # to common.LoadInfoDict(). Specifying the second parameter allows replacing
2174  # some properties with their actual paths, such as 'selinux_fc',
2175  # 'ramdisk_dir', which won't be used during OTA generation.
2176  if OPTIONS.extracted_input is not None:
2177    OPTIONS.info_dict = common.LoadInfoDict(OPTIONS.extracted_input)
2178  else:
2179    with zipfile.ZipFile(args[0], 'r') as input_zip:
2180      OPTIONS.info_dict = common.LoadInfoDict(input_zip)
2181
2182  logger.info("--- target info ---")
2183  common.DumpInfoDict(OPTIONS.info_dict)
2184
2185  # Load the source build dict if applicable.
2186  if OPTIONS.incremental_source is not None:
2187    OPTIONS.target_info_dict = OPTIONS.info_dict
2188    with zipfile.ZipFile(OPTIONS.incremental_source, 'r') as source_zip:
2189      OPTIONS.source_info_dict = common.LoadInfoDict(source_zip)
2190
2191    logger.info("--- source info ---")
2192    common.DumpInfoDict(OPTIONS.source_info_dict)
2193
2194  # Load OEM dicts if provided.
2195  OPTIONS.oem_dicts = _LoadOemDicts(OPTIONS.oem_source)
2196
  # Assume retrofitting dynamic partitions when the base build does not set
  # use_dynamic_partitions but the target build does.
2199  if (OPTIONS.source_info_dict and
2200      OPTIONS.source_info_dict.get("use_dynamic_partitions") != "true" and
2201      OPTIONS.target_info_dict.get("use_dynamic_partitions") == "true"):
2202    if OPTIONS.target_info_dict.get("dynamic_partition_retrofit") != "true":
2203      raise common.ExternalError(
2204          "Expect to generate incremental OTA for retrofitting dynamic "
2205          "partitions, but dynamic_partition_retrofit is not set in target "
2206          "build.")
2207    logger.info("Implicitly generating retrofit incremental OTA.")
2208    OPTIONS.retrofit_dynamic_partitions = True
2209
2210  # Skip postinstall for retrofitting dynamic partitions.
2211  if OPTIONS.retrofit_dynamic_partitions:
2212    OPTIONS.skip_postinstall = True
2213
2214  ab_update = OPTIONS.info_dict.get("ab_update") == "true"
2215  allow_non_ab = OPTIONS.info_dict.get("allow_non_ab") == "true"
2216  if OPTIONS.force_non_ab:
    assert allow_non_ab, "--force_non_ab only allowed on devices that support non-A/B"
2218    assert ab_update, "--force_non_ab only allowed on A/B devices"
2219
2220  generate_ab = not OPTIONS.force_non_ab and ab_update
2221
  # Use the default key to sign the package if not specified with package_key.
  # Package keys are needed for A/B updates, so always define them if an
  # A/B update is being created.
2225  if not OPTIONS.no_signing or generate_ab:
2226    if OPTIONS.package_key is None:
2227      OPTIONS.package_key = OPTIONS.info_dict.get(
2228          "default_system_dev_certificate",
2229          "build/make/target/product/security/testkey")
2230    # Get signing keys
2231    OPTIONS.key_passwords = common.GetKeyPasswords([OPTIONS.package_key])
2232
2233  if generate_ab:
2234    GenerateAbOtaPackage(
2235        target_file=args[0],
2236        output_file=args[1],
2237        source_file=OPTIONS.incremental_source)
2238
2239  else:
2240    GenerateNonAbOtaPackage(
2241        target_file=args[0],
2242        output_file=args[1],
2243        source_file=OPTIONS.incremental_source)
2244
  # Post-OTA-generation work.
2246  if OPTIONS.incremental_source is not None and OPTIONS.log_diff:
2247    logger.info("Generating diff logs...")
2248    logger.info("Unzipping target-files for diffing...")
2249    target_dir = common.UnzipTemp(args[0], TARGET_DIFFING_UNZIP_PATTERN)
2250    source_dir = common.UnzipTemp(
2251        OPTIONS.incremental_source, TARGET_DIFFING_UNZIP_PATTERN)
2252
2253    with open(OPTIONS.log_diff, 'w') as out_file:
2254      target_files_diff.recursiveDiff(
2255          '', source_dir, target_dir, out_file)
2256
2257  logger.info("done.")
2258
2259
2260if __name__ == '__main__':
2261  try:
2262    common.CloseInheritedPipes()
2263    main(sys.argv[1:])
2264  except common.ExternalError:
2265    logger.exception("\n   ERROR:\n")
2266    sys.exit(1)
2267  finally:
2268    common.Cleanup()
2269