1 # Copyright (C) 2008 The Android Open Source Project
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
15 from __future__ import print_function
36 from hashlib import sha1 as sha1
  # NOTE(review): this looks like the body of a py2/py3 iteritems() compat
  # helper whose "def" line is not visible in this view — on Python 2 dicts
  # it returns the lazy iterator; confirm against the full file.
  if hasattr(obj, 'iteritems'):
    return obj.iteritems()
class Options(object):
  """Holder for the global command-line options shared by all the
  releasetools scripts (populated by ParseOptions)."""
  # Map sys.platform values to the host-tools output directory.
  platform_search_path = {
      "linux2": "out/host/linux-x86",
      "darwin": "out/host/darwin-x86",

  # NOTE(review): the self-assignments below read like the body of
  # __init__(self); the "def __init__" line is not visible in this view.
  self.search_path = platform_search_path.get(sys.platform, None)
  self.signapk_path = "framework/signapk.jar"  # Relative to search_path
  self.signapk_shared_library_path = "lib64"   # Relative to search_path
  self.extra_signapk_args = []
  self.java_path = "java"  # Use the one on the path by default.
  self.java_args = "-Xmx2048m"  # JVM Args
  self.public_key_suffix = ".x509.pem"
  self.private_key_suffix = ".pk8"
  # use otatools built boot_signer by default
  self.boot_signer_path = "boot_signer"
  self.boot_signer_args = []
  self.verity_signer_path = None
  self.verity_signer_args = []
  self.device_specific = None
  self.source_info_dict = None
  self.target_info_dict = None
  self.worker_threads = None
  # Stash size cannot exceed cache_size * threshold.
  self.cache_size = None
  self.stash_threshold = 0.8
# Values for "certificate" in apkcerts that mean special things: the APK is
# already signed ("PRESIGNED") or is signed outside this flow ("EXTERNAL").
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
class ErrorCode(object):
  """Define error_codes for failures that happen during the actual
  update package installation.

  Error codes 0-999 are reserved for failures before the package
  installation (i.e. low battery, package verification failure).
  Detailed code in 'bootable/recovery/error_code.h' """

  # 1xxx: system partition failures.
  SYSTEM_VERIFICATION_FAILURE = 1000
  SYSTEM_UPDATE_FAILURE = 1001
  SYSTEM_UNEXPECTED_CONTENTS = 1002
  SYSTEM_NONZERO_CONTENTS = 1003
  SYSTEM_RECOVER_FAILURE = 1004
  # 2xxx: vendor partition failures (mirror the 1xxx codes).
  VENDOR_VERIFICATION_FAILURE = 2000
  VENDOR_UPDATE_FAILURE = 2001
  VENDOR_UNEXPECTED_CONTENTS = 2002
  VENDOR_NONZERO_CONTENTS = 2003
  VENDOR_RECOVER_FAILURE = 2004
  # 3xxx: property mismatches and patch-application failures.
  OEM_PROP_MISMATCH = 3000
  FINGERPRINT_MISMATCH = 3001
  THUMBPRINT_MISMATCH = 3002
  # NOTE(review): 3003 is not used here — confirm against
  # bootable/recovery/error_code.h.
  DEVICE_MISMATCH = 3004
  BAD_PATCH_FILE = 3005
  INSUFFICIENT_CACHE_SPACE = 3006
  TUNE_PARTITION_FAILURE = 3007
  APPLY_PATCH_FAILURE = 3008
117 class ExternalError(RuntimeError):
def Run(args, **kwargs):
  """Create and return a subprocess.Popen object, printing the command
  line on the terminal if -v was specified."""
  # Only echo the command when -v/--verbose was given; the unconditional
  # print contradicted the documented contract above.
  if OPTIONS.verbose:
    print(" running: ", " ".join(args))
  return subprocess.Popen(args, **kwargs)
def CloseInheritedPipes():
  """ Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds
  before doing other work."""
  # NOTE(review): the early "return", the os.fstat() call and the
  # try/except around it are not visible in this view.
  # Only macOS leaks these descriptors; everyone else is unaffected.
  if platform.system() != "Darwin":
  # Scan the plausible inherited-fd range; 0-2 (stdio) are left alone.
  for d in range(3, 1025):
      # 0x1000 is the S_IFIFO bit of st_mode: fd 'd' is a pipe.
      pipebit = stat[0] & 0x1000
def LoadInfoDict(input_file, input_dir=None):
  """Read and parse the META/misc_info.txt key/value pairs from the
  input target files and return a dict.

  'input_file' may be an open zipfile.ZipFile or the path of an unzipped
  target-files directory. If 'input_dir' is not None we are repacking, and
  file-link properties are redirected into the unzipped directory.
  """

  def read_helper(fn):
    # Read 'fn' from either the zip or the extracted tree; raise KeyError
    # for a missing member in both cases so callers handle them uniformly.
    if isinstance(input_file, zipfile.ZipFile):
      return input_file.read(fn)
    else:
      path = os.path.join(input_file, *fn.split("/"))
      try:
        with open(path) as f:
          return f.read()
      except IOError as e:
        if e.errno == errno.ENOENT:
          raise KeyError(fn)

  try:
    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  except KeyError:
    # ok if misc_info.txt doesn't exist
    d = {}

  # backwards compatibility: These values used to be in their own
  # files. Look for them, in case we're processing an old
  # target_files zip.
  if "mkyaffs2_extra_flags" not in d:
    try:
      d["mkyaffs2_extra_flags"] = read_helper(
          "META/mkyaffs2-extra-flags.txt").strip()
    except KeyError:
      # ok if flags don't exist
      pass

  if "recovery_api_version" not in d:
    try:
      d["recovery_api_version"] = read_helper(
          "META/recovery-api-version.txt").strip()
    except KeyError:
      raise ValueError("can't find recovery API version in input target-files")

  if "tool_extensions" not in d:
    try:
      d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
    except KeyError:
      # ok if extensions don't exist
      pass

  if "fstab_version" not in d:
    d["fstab_version"] = "1"

  # A few properties are stored as links to the files in the out/ directory.
  # It works fine with the build system. However, they are no longer available
  # when (re)generating from target_files zip. If input_dir is not None, we
  # are doing repacking. Redirect those properties to the actual files in the
  # unzipped directory.
  if input_dir is not None:
    # We carry a copy of file_contexts.bin under META/. If not available,
    # search BOOT/RAMDISK/. Note that sometimes we may need a different file
    # to build images than the one running on device, such as when enabling
    # system_root_image. In that case, we must have the one for image
    # generation copied to META/.
    fc_basename = os.path.basename(d.get("selinux_fc", "file_contexts"))
    fc_config = os.path.join(input_dir, "META", fc_basename)
    if d.get("system_root_image") == "true":
      assert os.path.exists(fc_config)
    if not os.path.exists(fc_config):
      fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", fc_basename)
      if not os.path.exists(fc_config):
        fc_config = None

    if fc_config:
      d["selinux_fc"] = fc_config

    # Similarly we need to redirect "ramdisk_dir" and "ramdisk_fs_config".
    if d.get("system_root_image") == "true":
      d["ramdisk_dir"] = os.path.join(input_dir, "ROOT")
      d["ramdisk_fs_config"] = os.path.join(
          input_dir, "META", "root_filesystem_config.txt")

    # Redirect {system,vendor}_base_fs_file.
    if "system_base_fs_file" in d:
      basename = os.path.basename(d["system_base_fs_file"])
      system_base_fs_file = os.path.join(input_dir, "META", basename)
      if os.path.exists(system_base_fs_file):
        d["system_base_fs_file"] = system_base_fs_file
      else:
        # Was a py2 print statement — a SyntaxError under the module's
        # "from __future__ import print_function".
        print("Warning: failed to find system base fs file: %s" % (
            system_base_fs_file,))
        del d["system_base_fs_file"]

    if "vendor_base_fs_file" in d:
      basename = os.path.basename(d["vendor_base_fs_file"])
      vendor_base_fs_file = os.path.join(input_dir, "META", basename)
      if os.path.exists(vendor_base_fs_file):
        d["vendor_base_fs_file"] = vendor_base_fs_file
      else:
        print("Warning: failed to find vendor base fs file: %s" % (
            vendor_base_fs_file,))
        del d["vendor_base_fs_file"]

  if "device_type" not in d:
    d["device_type"] = "MMC"
  try:
    data = read_helper("META/imagesizes.txt")
    for line in data.split("\n"):
      if not line:
        continue
      name, value = line.split(" ", 1)
      if not value:
        continue
      if name == "blocksize":
        d[name] = value
      else:
        d[name + "_size"] = value
  except KeyError:
    # ok if imagesizes.txt doesn't exist
    pass

  def makeint(key):
    # Convert a size/version string to int in place (base auto-detected,
    # so "0x..." hex values work).
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("boot_size")
  makeint("fstab_version")

  if d.get("no_recovery", False) == "true":
    d["fstab"] = None
  else:
    # Fixed: the previous call passed three arguments to dict.get(), which
    # is a TypeError; device_type and system_root_image are separate
    # arguments of LoadRecoveryFSTab.
    d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
                                   d["device_type"],
                                   d.get("system_root_image", False))
  d["build.prop"] = LoadBuildProp(read_helper)
  return d
def LoadBuildProp(read_helper):
  """Parse SYSTEM/build.prop from the target files into a dict.

  Returns an empty dict (with a warning) when build.prop is missing."""
  try:
    data = read_helper("SYSTEM/build.prop")
  except KeyError:
    # Fixed: the old message formatted the *builtin* zip function into the
    # string ("%s" % zip), producing "<built-in function zip>".
    print("Warning: could not find SYSTEM/build.prop in input target-files")
    data = ""
  return LoadDictionaryFromLines(data.split("\n"))
def LoadDictionaryFromLines(lines):
  """Parse "name=value" lines into a {name: value} dict.

  Blank lines and lines starting with '#' are skipped; lines without an
  '=' are ignored; only the first '=' splits, so values may contain '='."""
  d = {}
  for line in lines:
    line = line.strip()
    if not line or line.startswith("#"):
      continue
    if "=" in line:
      name, value = line.split("=", 1)
      d[name] = value
  return d
305 def LoadRecoveryFSTab(read_helper, fstab_version, type, system_root_image=False):
306 class Partition(object):
307 def __init__(self, mount_point, fs_type, device, length, device2, context):
308 self.mount_point = mount_point
309 self.fs_type = fs_type
312 self.device2 = device2
313 self.context = context
316 data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
318 print("Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab")
321 if fstab_version == 1:
323 for line in data.split("\n"):
325 if not line or line.startswith("#"):
327 pieces = line.split()
328 if not 3 <= len(pieces) <= 4:
329 raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
332 if pieces[3].startswith("/"):
342 mount_point = pieces[0]
345 options = options.split(",")
347 if i.startswith("length="):
350 print("%s: unknown option \"%s\"" % (mount_point, i))
352 if not d.get(mount_point):
353 d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
354 device=pieces[2], length=length,
357 elif fstab_version == 2:
359 for line in data.split("\n"):
361 if not line or line.startswith("#"):
363 # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
364 pieces = line.split()
366 raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
368 # Ignore entries that are managed by vold
370 if "voldmanaged=" in options:
373 # It's a good line, parse it
375 options = options.split(",")
377 if i.startswith("length="):
380 # Ignore all unknown options in the unified fstab
383 mount_flags = pieces[3]
384 # Honor the SELinux context if present.
386 for i in mount_flags.split(","):
387 if i.startswith("context="):
390 mount_point = pieces[1]
391 if not d.get(mount_point):
392 d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
393 device=pieces[0], length=length,
394 device2=None, context=context)
397 raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))
399 # / is used for the system mount point when the root directory is included in
400 # system. Other areas assume system is always at "/system" so point /system
402 if system_root_image:
403 assert not d.has_key("/system") and d.has_key("/")
404 d["/system"] = d["/"]
  # NOTE(review): this looks like the body of DumpInfoDict(d); its "def"
  # line is not visible in this view. Prints each key with its value's type.
  for k, v in sorted(d.items()):
    print("%-25s = (%s) %s" % (k, type(v).__name__, v))
# NOTE(review): several lines of this function are not visible in this view
# (signature tail — a has_ramdisk parameter per the call sites — the nested
# make_ramdisk() "def" line, early "return None"s, p.communicate() calls and
# some cmd.append continuations). Code is reproduced as-is.
def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
  """Build a bootable image from the specified sourcedir.

  Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
  'sourcedir'), and turn them into a boot image. Return the image data, or
  None if sourcedir does not appear to contains files for building the
  requested image."""

    # --- body of the nested make_ramdisk(): cpio RAMDISK/ via mkbootfs and
    # gzip it into a NamedTemporaryFile, which is returned.
    ramdisk_img = tempfile.NamedTemporaryFile()
    if os.access(fs_config_file, os.F_OK):
      cmd = ["mkbootfs", "-f", fs_config_file,
             os.path.join(sourcedir, "RAMDISK")]
      cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
    p1 = Run(cmd, stdout=subprocess.PIPE)
    p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
    assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
    assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)

  # A boot image needs a kernel (and, when requested, a RAMDISK dir);
  # otherwise the function bails out.
  if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):
  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  img = tempfile.NamedTemporaryFile()
  # Optional product-specific signing configuration from the environment.
  bootimg_key = os.getenv("PRODUCT_PRIVATE_KEY", None)
  custom_boot_signer = os.getenv("PRODUCT_BOOT_SIGNER", None)

  ramdisk_img = make_ramdisk()

  """check if uboot is requested"""
  fn = os.path.join(sourcedir, "ubootargs")
  if os.access(fn, os.F_OK):
    # u-boot path: build the mkimage command from the ubootargs file, then
    # append kernel:ramdisk as the data argument.
    for argument in open(fn).read().rstrip("\n").split(" "):
    cmd.append(os.path.join(sourcedir, "kernel") + ":" + ramdisk_img.name)

    # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
    mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"

    cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]

    # Each optional file in sourcedir contributes a mkbootimg flag.
    fn = os.path.join(sourcedir, "second")
    if os.access(fn, os.F_OK):
      cmd.append("--second")

    fn = os.path.join(sourcedir, "cmdline")
    if os.access(fn, os.F_OK):
      cmd.append("--cmdline")
      cmd.append(open(fn).read().rstrip("\n"))

    fn = os.path.join(sourcedir, "base")
    if os.access(fn, os.F_OK):
      cmd.append(open(fn).read().rstrip("\n"))

    fn = os.path.join(sourcedir, "tagsaddr")
    if os.access(fn, os.F_OK):
      cmd.append("--tags-addr")
      cmd.append(open(fn).read().rstrip("\n"))

    fn = os.path.join(sourcedir, "tags_offset")
    if os.access(fn, os.F_OK):
      cmd.append("--tags_offset")
      cmd.append(open(fn).read().rstrip("\n"))

    fn = os.path.join(sourcedir, "ramdisk_offset")
    if os.access(fn, os.F_OK):
      cmd.append("--ramdisk_offset")
      cmd.append(open(fn).read().rstrip("\n"))

    fn = os.path.join(sourcedir, "dt")
    if os.access(fn, os.F_OK):

    fn = os.path.join(sourcedir, "pagesize")
    if os.access(fn, os.F_OK):
      kernel_pagesize = open(fn).read().rstrip("\n")
      cmd.append("--pagesize")
      cmd.append(kernel_pagesize)

  # Board-specific extra mkbootimg arguments from the info dict.
  args = info_dict.get("mkbootimg_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  args = info_dict.get("mkbootimg_version_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  cmd.extend(["--ramdisk", ramdisk_img.name])

  # When vboot signing is enabled, mkbootimg writes to a separate unsigned
  # temp file which the vboot signer later consumes.
  if info_dict.get("vboot", None):
    img_unsigned = tempfile.NamedTemporaryFile()
    cmd.extend(["--output", img_unsigned.name])
    cmd.extend(["--output", img.name])

  p = Run(cmd, stdout=subprocess.PIPE)
  assert p.returncode == 0, "mkbootimg of %s image failed" % (
      os.path.basename(sourcedir),)

  if custom_boot_signer and bootimg_key and os.path.exists(bootimg_key):
    print("Signing bootable image with custom boot signer...")
    img_secure = tempfile.NamedTemporaryFile()
    p = Run([custom_boot_signer, img.name, img_secure.name], stdout=subprocess.PIPE)
    assert p.returncode == 0, "signing of bootable image failed"
    shutil.copyfile(img_secure.name, img.name)
  elif bootimg_key and os.path.exists(bootimg_key) and kernel_pagesize > 0:
    # Manual sha256+RSA signing: hash the image, sign the digest, pad the
    # signature to one kernel page with dd, and append it to the image.
    print("Signing bootable image...")
    bootimg_key_passwords = {}
    bootimg_key_passwords.update(PasswordManager().GetPasswords(bootimg_key.split()))
    bootimg_key_password = bootimg_key_passwords[bootimg_key]
    if bootimg_key_password is not None:
      bootimg_key_password += "\n"
    img_sha256 = tempfile.NamedTemporaryFile()
    img_sig = tempfile.NamedTemporaryFile()
    img_sig_padded = tempfile.NamedTemporaryFile()
    img_secure = tempfile.NamedTemporaryFile()
    p = Run(["openssl", "dgst", "-sha256", "-binary", "-out", img_sha256.name, img.name],
            stdout=subprocess.PIPE)
    assert p.returncode == 0, "signing of bootable image failed"
    p = Run(["openssl", "rsautl", "-sign", "-in", img_sha256.name, "-inkey", bootimg_key, "-out",
             img_sig.name, "-passin", "stdin"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    p.communicate(bootimg_key_password)
    assert p.returncode == 0, "signing of bootable image failed"
    p = Run(["dd", "if=/dev/zero", "of=%s" % img_sig_padded.name, "bs=%s" % kernel_pagesize,
             "count=1"], stdout=subprocess.PIPE)
    assert p.returncode == 0, "signing of bootable image failed"
    p = Run(["dd", "if=%s" % img_sig.name, "of=%s" % img_sig_padded.name, "conv=notrunc"],
            stdout=subprocess.PIPE)
    assert p.returncode == 0, "signing of bootable image failed"
    p = Run(["cat", img.name, img_sig_padded.name], stdout=img_secure.file.fileno())
    assert p.returncode == 0, "signing of bootable image failed"
    shutil.copyfile(img_secure.name, img.name)
    img_sig_padded.close()

  if (info_dict.get("boot_signer", None) == "true" and
      info_dict.get("verity_key", None)):
    path = "/" + os.path.basename(sourcedir).lower()
    cmd = [OPTIONS.boot_signer_path]
    cmd.extend(OPTIONS.boot_signer_args)
    cmd.extend([path, img.name,
                info_dict["verity_key"] + ".pk8",
                info_dict["verity_key"] + ".x509.pem", img.name])
    p = Run(cmd, stdout=subprocess.PIPE)
    assert p.returncode == 0, "boot_signer of %s image failed" % path

  # Sign the image if vboot is non-empty.
  elif info_dict.get("vboot", None):
    path = "/" + os.path.basename(sourcedir).lower()
    img_keyblock = tempfile.NamedTemporaryFile()
    cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
           info_dict["vboot_key"] + ".vbprivk",
           info_dict["vboot_subkey"] + ".vbprivk",
    p = Run(cmd, stdout=subprocess.PIPE)
    assert p.returncode == 0, "vboot_signer of %s image failed" % path

  # Clean up the temp files.
  # NOTE(review): the seek arguments look swapped — the canonical form is
  # img.seek(0, os.SEEK_SET); this only works because os.SEEK_SET == 0.
  img.seek(os.SEEK_SET, 0)
def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
                     info_dict=None):
  """Return a File object with the desired bootable image.

  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
  the source files in 'unpack_dir'/'tree_subdir'.

  Returns None when the image cannot be built from sources."""

  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print("using prebuilt %s from BOOTABLE_IMAGES..." % prebuilt_name)
    return File.FromLocalFile(name, prebuilt_path)

  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print("using prebuilt %s from IMAGES..." % prebuilt_name)
    return File.FromLocalFile(name, prebuilt_path)

  print("building image from target_files %s..." % tree_subdir)

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # With system_root_image == "true", we don't pack ramdisk into the boot image.
  # Unless "recovery_as_boot" is specified, in which case we carry the ramdisk
  # in the boot image.
  has_ramdisk = (info_dict.get("system_root_image") != "true" or
                 prebuilt_name != "boot.img" or
                 info_dict.get("recovery_as_boot") == "true")

  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
  data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
                             os.path.join(unpack_dir, fs_config),
                             info_dict, has_ramdisk)
  # _BuildBootableImage returns None when sourcedir has nothing to build.
  if data:
    return File(name, data)
  return None
def UnzipTemp(filename, pattern=None):
  """Unzip the given archive into a temporary directory and return the name.

  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
  temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.

  If 'pattern' is given, only archive members matching it are extracted.

  Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
  main file), open for reading.
  """
  tmp = tempfile.mkdtemp(prefix="targetfiles-")
  OPTIONS.tempfiles.append(tmp)

  def unzip_to_dir(filename, dirname):
    # Removed a broken "rm -rf" call here: it concatenated dirname+filename
    # without a path separator and passed an unexpanded "targetfiles-*" glob
    # (no shell), so it could never remove what it intended. mkdtemp already
    # guarantees a fresh, empty directory.
    cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
    if pattern is not None:
      cmd.append(pattern)
    p = Run(cmd, stdout=subprocess.PIPE)
    # Wait for unzip to finish so returncode is populated.
    p.communicate()
    if p.returncode != 0:
      raise ExternalError("failed to unzip input target-files \"%s\"" %
                          (filename,))

  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
  if m:
    unzip_to_dir(m.group(1), tmp)
    unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
    filename = m.group(1)
  else:
    unzip_to_dir(filename, tmp)

  return tmp, zipfile.ZipFile(filename, "r")
def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them. Return a {key: password} dict. password
  will be None if the key has no password."""

  # NOTE(review): the initialization of no_passwords/need_passwords/
  # key_passwords, the "continue" statements, the p.communicate() after the
  # first Run (without which p.returncode is None), the "else:" before the
  # final no_passwords.append, devnull.close() and the "return key_passwords"
  # are not visible in this view.
  devnull = open("/dev/null", "w+b")
  for k in sorted(keylist):
    # We don't need a password for things that aren't really keys.
    if k in SPECIAL_CERT_STRINGS:
      no_passwords.append(k)

    # First probe: an unencrypted PKCS#8 key parses with -nocrypt.
    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-nocrypt"],
            stdin=devnull.fileno(),
            stdout=devnull.fileno(),
            stderr=subprocess.STDOUT)
    if p.returncode == 0:
      # Definitely an unencrypted key.
      no_passwords.append(k)

      # Second probe: try decrypting with an empty password.
      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
               "-inform", "DER", "-passin", "pass:"],
              stdin=devnull.fileno(),
              stdout=devnull.fileno(),
              stderr=subprocess.PIPE)
      _, stderr = p.communicate()
      if p.returncode == 0:
        # Encrypted key with empty string as password.
        key_passwords[k] = ''
      elif stderr.startswith(b'Error decrypting key'):
        # Definitely encrypted key.
        # It would have said "Error reading key" if it didn't parse correctly.
        need_passwords.append(k)
        # Potentially, a type of key that openssl doesn't understand.
        # We'll let the routines in signapk.jar handle it.
        no_passwords.append(k)

  # Prompt (or read $ANDROID_PW_FILE) only for the keys that need it.
  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
  key_passwords.update(dict.fromkeys(no_passwords, None))
def GetMinSdkVersion(apk_name):
  """Get the minSdkVersion declared in the APK. This can be both a decimal number
  (API Level) or a codename.

  Raises ExternalError when aapt fails or no sdkVersion line is found."""

  # NOTE(review): the "if p.returncode != 0:" check, the "% p.returncode"
  # continuation and the "if m: return m.group(1)" lines are not visible in
  # this view.
  p = Run(["aapt", "dump", "badging", apk_name], stdout=subprocess.PIPE)
  output, err = p.communicate()
    raise ExternalError("Failed to obtain minSdkVersion: aapt return code %s"

  for line in output.split("\n"):
    # Looking for lines such as sdkVersion:'23' or sdkVersion:'M'
    m = re.match(r'sdkVersion:\'([^\']*)\'', line)
  raise ExternalError("No minSdkVersion returned by aapt")
def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
  """Get the minSdkVersion declared in the APK as a number (API Level). If
  minSdkVersion is set to a codename, it is translated to a number using the
  provided map.

  Raises ExternalError when the codename is not in the map."""

  version = GetMinSdkVersion(apk_name)
  try:
    # The common case: minSdkVersion is a plain decimal API level.
    return int(version)
  except ValueError:
    # Not a decimal number. Codename?
    if version in codename_to_api_level_map:
      return codename_to_api_level_map[version]
    else:
      raise ExternalError("Unknown minSdkVersion: '%s'. Known codenames: %s"
                          % (version, codename_to_api_level_map))
def SignFile(input_name, output_name, key, password, min_api_level=None,
             codename_to_api_level_map=dict(),
             whole_file=False):
  """Sign the input_name zip/jar/apk, producing output_name. Use the
  given key and password (the latter may be None if the key does not
  have a password).

  If whole_file is true, use the "-w" option to SignApk to embed a
  signature that covers the whole file in the archive comment of the
  zip file.

  min_api_level is the API Level (int) of the oldest platform this file may end
  up on. If not specified for an APK, the API Level is obtained by interpreting
  the minSdkVersion attribute of the APK's AndroidManifest.xml.

  codename_to_api_level_map is needed to translate the codename which may be
  encountered as the APK's minSdkVersion.
  """
  java_library_path = os.path.join(
      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)

  cmd = [OPTIONS.java_path, OPTIONS.java_args,
         "-Djava.library.path=" + java_library_path,
         "-jar",
         os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)]
  cmd.extend(OPTIONS.extra_signapk_args)
  if whole_file:
    cmd.append("-w")

  min_sdk_version = min_api_level
  if min_sdk_version is None:
    if not whole_file:
      min_sdk_version = GetMinSdkVersionInt(
          input_name, codename_to_api_level_map)
  if min_sdk_version is not None:
    cmd.extend(["--min-sdk-version", str(min_sdk_version)])

  cmd.extend([key + OPTIONS.public_key_suffix,
              key + OPTIONS.private_key_suffix,
              input_name, output_name])

  p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  if password is not None:
    password += "\n"
  # communicate() must run unconditionally: with it nested under the
  # password check, password-less keys never closed stdin and
  # p.returncode stayed None.
  p.communicate(password)
  if p.returncode != 0:
    raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))
def CheckSize(data, target, info_dict):
  """Check the data string passed against the max size limit, if
  any, for the given target. Raise exception if the data is too big.
  Print a warning if the data is nearing the maximum size."""

  # NOTE(review): the ".img" suffix strip, fs_type/device assignments from
  # the fstab Partition, the early "return", "size = len(data)" and the
  # percentage thresholds around raise/WARNING are not visible in this view.
  if target.endswith(".img"):
  mount_point = "/" + target

  if info_dict["fstab"]:
    # Both legacy /userdata* mount points map onto /data for lookup.
    if mount_point == "/userdata_extra":
      mount_point = "/data"
    if mount_point == "/userdata":
      mount_point = "/data"
    p = info_dict["fstab"][mount_point]
      # Strip the directory part so "<device>_size" keys match.
      device = device[device.rfind("/")+1:]
    limit = info_dict.get(device + "_size", None)
  if not fs_type or not limit:

  if fs_type == "yaffs2":
    # image size should be increased by 1/64th to account for the
    # spare area (64 bytes per 2k page)
    limit = limit / 2048 * (2048+64)
  pct = float(size) * 100.0 / limit
  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
    raise ExternalError(msg)
    print("  WARNING: ", msg)
  elif OPTIONS.verbose:
def ReadApkCerts(tf_zip):
  """Given a target_files ZipFile, parse the META/apkcerts.txt file
  and return a {package: cert} dict."""
  # NOTE(review): the certmap initialization, line.strip()/empty-line skip,
  # the "if m:" guard, the special-cert assignment, the "else:" before the
  # raise, and the final "return certmap" are not visible in this view.
  for line in tf_zip.read("META/apkcerts.txt").split("\n"):
    m = re.match(r'^name="(.*)"\s+certificate="(.*)"\s+'
                 r'private_key="(.*)"$', line)
    name, cert, privkey = m.groups()
    public_key_suffix_len = len(OPTIONS.public_key_suffix)
    private_key_suffix_len = len(OPTIONS.private_key_suffix)
    # "PRESIGNED"/"EXTERNAL" entries carry no private key.
    if cert in SPECIAL_CERT_STRINGS and not privkey:
    elif (cert.endswith(OPTIONS.public_key_suffix) and
          privkey.endswith(OPTIONS.private_key_suffix) and
          cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
      # Store the shared key basename (suffixes stripped).
      certmap[name] = cert[:-public_key_suffix_len]
      raise ValueError("failed to parse line from apkcerts.txt:\n" + line)
# Help text shared by all releasetools scripts; Usage() prints it after the
# calling module's own docstring. NOTE(review): the option-header lines
# ("-p (--path) <dir>" etc.) and the closing triple-quote are not visible
# in this view.
COMMON_DOCSTRING = """
      Prepend <dir>/bin to the list of places to search for binaries
      run by this script, and expect to find jars in <dir>/framework.

  -s  (--device_specific) <file>
      Path to the python module containing device-specific

  -x  (--extra) <key=value>
      Add a key/value pair to the 'extras' dict, which device-specific
      extension code may look at.

      Show command lines being executed.

      Display this usage message and exit.
def Usage(docstring):
  """Print the calling module's docstring, then the shared option help."""
  message = docstring.rstrip("\n")
  print(message)
  print(COMMON_DOCSTRING)
def ParseOptions(argv,
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler=None):
  """Parse the options in argv and return any arguments that aren't
  flags. docstring is the calling module's docstring, to be displayed
  for errors and -h. extra_opts and extra_long_opts are for flags
  defined by the caller, which are processed by passing them to
  extra_option_handler."""

  # NOTE(review): the "docstring" parameter named above, the "try:" around
  # getopt, the "for o, a in opts:" loop header, the Usage()/sys.exit calls,
  # the "else:" before the extra_option_handler branch and the final
  # "return args" are not visible in this view.
  opts, args = getopt.getopt(
      argv, "hvp:s:x:" + extra_opts,
      ["help", "verbose", "path=", "signapk_path=",
       "signapk_shared_library_path=", "extra_signapk_args=",
       "java_path=", "java_args=", "public_key_suffix=",
       "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
       "verity_signer_path=", "verity_signer_args=", "device_specific=",
      list(extra_long_opts))
  except getopt.GetoptError as err:
    print("**", str(err), "**")

    if o in ("-h", "--help"):
    elif o in ("-v", "--verbose"):
      OPTIONS.verbose = True
    elif o in ("-p", "--path"):
      OPTIONS.search_path = a
    elif o in ("--signapk_path",):
      OPTIONS.signapk_path = a
    elif o in ("--signapk_shared_library_path",):
      OPTIONS.signapk_shared_library_path = a
    elif o in ("--extra_signapk_args",):
      OPTIONS.extra_signapk_args = shlex.split(a)
    elif o in ("--java_path",):
      OPTIONS.java_path = a
    elif o in ("--java_args",):
      OPTIONS.java_args = a
    elif o in ("--public_key_suffix",):
      OPTIONS.public_key_suffix = a
    elif o in ("--private_key_suffix",):
      OPTIONS.private_key_suffix = a
    elif o in ("--boot_signer_path",):
      OPTIONS.boot_signer_path = a
    elif o in ("--boot_signer_args",):
      OPTIONS.boot_signer_args = shlex.split(a)
    elif o in ("--verity_signer_path",):
      OPTIONS.verity_signer_path = a
    elif o in ("--verity_signer_args",):
      OPTIONS.verity_signer_args = shlex.split(a)
    elif o in ("-s", "--device_specific"):
      OPTIONS.device_specific = a
    elif o in ("-x", "--extra"):
      key, value = a.split("=", 1)
      OPTIONS.extras[key] = value
      if extra_option_handler is None or not extra_option_handler(o, a):
        assert False, "unknown option \"%s\"" % (o,)

  if OPTIONS.search_path:
    # Make the host tools under search_path win over anything on $PATH.
    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                          os.pathsep + os.environ["PATH"])
def MakeTempFile(prefix=None, suffix=None):
  """Make a temp file and add it to the list of things to be deleted
  when Cleanup() is called. Return the filename."""
  fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  # mkstemp returns an open OS-level fd; close it so it isn't leaked —
  # callers only need the path.
  os.close(fd)
  OPTIONS.tempfiles.append(fn)
  return fn
  # NOTE(review): this looks like the body of Cleanup(); its "def" line and
  # the shutil.rmtree/os.remove calls are not visible in this view.
  for i in OPTIONS.tempfiles:
    if os.path.isdir(i):
class PasswordManager(object):
  """Collect key passwords, either interactively or by letting the user
  edit the file named by $ANDROID_PW_FILE in $EDITOR."""
  # NOTE(review): the "def __init__(self):" and "def ReadFile(self):" lines,
  # the result/first_line initializations, loop headers and several returns
  # are not visible in this view.
  self.editor = os.getenv("EDITOR", None)
  self.pwfile = os.getenv("ANDROID_PW_FILE", None)

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict. (The dict may have keys in addition to the
    values in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords. If no editor is
    available, or $ANDROID_PW_FILE isn't define, prompts the user
    interactively in the ordinary way.
    """
    current = self.ReadFile()
      if i not in current or not current[i]:
      # Are all the passwords already in the file?
      print("key file %s still missing some passwords." % self.pwfile)
      # NOTE(review): raw_input is Python 2-only (py3: input()).
      answer = raw_input("try to edit again? [y]> ").strip()
      if answer and answer[0] not in 'yY':
        raise RuntimeError("key passwords unavailable")
      current = self.UpdateAndReadFile(current)

  def PromptResult(self, current): # pylint: disable=no-self-use
    """Prompt the user to enter a value (password) for each key in
    'current' whose value is false. Returns a new dict with all the
    passwords."""
    for k, v in sorted(iteritems(current)):
        result[k] = getpass.getpass(
            "Enter password for %s key> " % k).strip()

  def UpdateAndReadFile(self, current):
    # Without an editor or a password file, fall back to prompting.
    if not self.editor or not self.pwfile:
      return self.PromptResult(current)

    f = open(self.pwfile, "w")
    # Keep the password file private to the owner.
    os.chmod(self.pwfile, 0o600)
    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
    f.write("# (Additional spaces are harmless.)\n\n")

    # Entries without a password sort first so the cursor lands on them.
    sorted_list = sorted((not v, k, v) for (k, v) in current.items())
    for i, (_, k, v) in enumerate(sorted_list):
      f.write("[[[  %s  ]]] %s\n" % (v, k))
      if not v and first_line is None:
        # position cursor on first line with no password.

    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
    _, _ = p.communicate()

    return self.ReadFile()

    # --- body of ReadFile(): parse "[[[ password ]]] key" lines back into
    # a dict; best-effort, tolerating a missing file.
    if self.pwfile is None:
    f = open(self.pwfile, "r")
      if not line or line[0] == '#':
      m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
        print("failed to parse password file: ", line)
        result[m.group(2)] = m.group(1)
    except IOError as e:
      if e.errno != errno.ENOENT:
        print("error reading password file: ", str(e))
def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
             compress_type=None):
  """Write 'filename' into 'zip_file' with fixed perms and timestamp.

  Python 2.7's zipfile implementation wrongly thinks that zip64 is required
  for files larger than 2GiB. We can work around this by adjusting their
  limit. Note that `zipfile.writestr()` will not work for strings larger than
  2GiB. The Python interpreter sometimes rejects strings that large (though
  it isn't clear to me exactly what circumstances cause this).
  `zipfile.write()` must be used directly to work around this.

  This mess can be avoided if we port to python3.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if compress_type is None:
    compress_type = zip_file.compression
  if arcname is None:
    arcname = filename

  saved_stat = os.stat(filename)

  try:
    # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
    # file to be zipped and reset it when we're done.
    os.chmod(filename, perms)

    # Use a fixed timestamp so the output is repeatable.
    epoch = datetime.datetime.fromtimestamp(0)
    timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
    os.utime(filename, (timestamp, timestamp))

    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
  finally:
    # Restore the input file's metadata and the module-level limit even if
    # the write raised.
    os.chmod(filename, saved_stat.st_mode)
    os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
    zipfile.ZIP64_LIMIT = saved_zip64_limit
def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
                compress_type=None):
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
    zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
    zinfo.compress_type = zip_file.compression
    if perms is None:
      # When the caller supplies only an archive name, default to a
      # regular file with 0644 permissions.
      perms = 0o100644
  else:
    # Caller passed a ZipInfo; honor it as-is (modulo the overrides below).
    zinfo = zinfo_or_arcname

  # If compress_type is given, it overrides the value in zinfo.
  if compress_type is not None:
    zinfo.compress_type = compress_type

  # If perms is given, it has a priority.
  if perms is not None:
    # If perms doesn't set the file type, mark it as a regular file.
    if perms & 0o770000 == 0:
      perms |= 0o100000
    zinfo.external_attr = perms << 16

  # Use a fixed timestamp so the output is repeatable.
  zinfo.date_time = (2009, 1, 1, 0, 0, 0)

  try:
    zip_file.writestr(zinfo, data)
  finally:
    # Restore the module-level limit even if writestr raised.
    zipfile.ZIP64_LIMIT = saved_zip64_limit
def ZipClose(zip_file):
  """Close `zip_file`, applying the same zip64-limit workaround.

  zipfile also refers to ZIP64_LIMIT during close() when it writes out the
  central directory, so the limit must be raised around the close and then
  restored.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  try:
    zip_file.close()
  finally:
    # Always restore, even if close() fails mid-central-directory.
    zipfile.ZIP64_LIMIT = saved_zip64_limit
class DeviceSpecificParams(object):
  """Holder for device-specific OTA extension hooks.

  Each hook method forwards to an identically-named function in an
  optionally-loaded device-specific Python module (OPTIONS.device_specific).
  """
  # Class-level default so a "module" keyword argument simply shadows it.
  module = None

  def __init__(self, **kwargs):
    """Keyword arguments to the constructor become attributes of this
    object, which is passed to all functions in the device-specific
    module."""
    for k, v in iteritems(kwargs):
      setattr(self, k, v)
    self.extras = OPTIONS.extras

    if self.module is None:
      path = OPTIONS.device_specific
      if not path:
        # No device-specific extensions configured; all hooks will return
        # their defaults via _DoCall().
        return
      try:
        if os.path.isdir(path):
          # A directory: load its releasetools.py.
          info = imp.find_module("releasetools", [path])
        else:
          # A file: strip a trailing ".py" and import from its directory.
          d, f = os.path.split(path)
          b, x = os.path.splitext(f)
          if x == ".py":
            f = b
          info = imp.find_module(f, [d])
        print("loaded device-specific extensions from", path)
        self.module = imp.load_module("device_specific", *info)
      except ImportError:
        # Best-effort: a missing/broken module just means no extensions.
        print("unable to load device-specific module; assuming none")

  def _DoCall(self, function_name, *args, **kwargs):
    """Call the named function in the device-specific module, passing
    the given args and kwargs. The first argument to the call will be
    the DeviceSpecific object itself. If there is no module, or the
    module does not define the function, return the value of the
    'default' kwarg (which itself defaults to None)."""
    if self.module is None or not hasattr(self.module, function_name):
      return kwargs.get("default", None)
    return getattr(self.module, function_name)(*((self,) + args), **kwargs)

  def FullOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of a
    full OTA package. Implementations can add whatever additional
    assertions they like."""
    return self._DoCall("FullOTA_Assertions")

  def FullOTA_InstallBegin(self):
    """Called at the start of full OTA installation."""
    return self._DoCall("FullOTA_InstallBegin")

  def FullOTA_InstallEnd(self):
    """Called at the end of full OTA installation; typically this is
    used to install the image for the device's baseband processor."""
    return self._DoCall("FullOTA_InstallEnd")

  def FullOTA_PostValidate(self):
    """Called after installing and validating /system; typically this is
    used to resize the system partition after a block based installation."""
    return self._DoCall("FullOTA_PostValidate")

  def IncrementalOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of an
    incremental OTA package. Implementations can add whatever
    additional assertions they like."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Called at the start of the verification phase of incremental
    OTA installation; additional checks can be placed here to abort
    the script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Called at the end of the verification phase of incremental OTA
    installation; additional checks can be placed here to abort the
    script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Called at the start of incremental OTA installation (after
    verification is complete)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_InstallEnd(self):
    """Called at the end of incremental OTA installation; typically
    this is used to install the image for the device's baseband
    processor."""
    return self._DoCall("IncrementalOTA_InstallEnd")

  def VerifyOTA_Assertions(self):
    # Hook for extra assertions in verify-only packages.
    return self._DoCall("VerifyOTA_Assertions")
1296 def __init__(self, name, data):
1299 self.size = len(data)
1300 self.sha1 = sha1(data).hexdigest()
1303 def FromLocalFile(cls, name, diskname):
1304 f = open(diskname, "rb")
1307 return File(name, data)
1309 def WriteToTemp(self):
1310 t = tempfile.NamedTemporaryFile()
  def AddToZip(self, z, compression=None):
    """Write this file into the open output zip `z` as entry self.name,
    via ZipWriteStr() so the zip64-limit and fixed-timestamp handling
    apply; `compression` optionally overrides the archive's default."""
    ZipWriteStr(z, self.name, self.data, compress_type=compression)
# Map a target file's extension to the program used to compute its binary
# patch in Difference.ComputePatch().  Zip-based archives diff best with
# "imgdiff -z"; extensions not listed here fall back to "bsdiff".
# NOTE(review): the listing appears elided around this literal; upstream
# also maps ".gz" and ".img" to "imgdiff" — restored here.
DIFF_PROGRAM_BY_EXT = {
    ".gz":  "imgdiff",
    ".zip": ["imgdiff", "-z"],
    ".jar": ["imgdiff", "-z"],
    ".apk": ["imgdiff", "-z"],
    ".img": "imgdiff",
}
class Difference(object):
  """Computes a binary patch that turns a source File into a target File.

  The diff program is chosen from DIFF_PROGRAM_BY_EXT by the target's
  extension unless one is passed explicitly.
  """

  def __init__(self, tf, sf, diff_program=None):
    self.tf = tf                      # target File
    self.sf = sf                      # source File
    self.patch = None                 # filled in by ComputePatch()
    self.diff_program = diff_program  # optional explicit diff command

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf. Returns the same tuple as GetPatch()."""

    tf = self.tf
    sf = self.sf

    if self.diff_program:
      diff_program = self.diff_program
    else:
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    ext = os.path.splitext(tf.name)[1]

    try:
      ptemp = tempfile.NamedTemporaryFile()
      if isinstance(diff_program, list):
        cmd = copy.copy(diff_program)
      else:
        cmd = [diff_program]
      cmd.append(stemp.name)
      cmd.append(ttemp.name)
      cmd.append(ptemp.name)
      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      err = []

      def run():
        # Drain the child in a thread so we can enforce a timeout below.
        _, e = p.communicate()
        if e:
          err.append(e)

      th = threading.Thread(target=run)
      th.start()
      th.join(timeout=300)   # 5 mins
      if th.is_alive():
        print("WARNING: diff command timed out")
        p.terminate()
        th.join(5)
        if th.is_alive():
          # terminate() wasn't enough; escalate.
          p.kill()
          th.join()

      if err or p.returncode != 0:
        print("WARNING: failure running %s:\n%s\n" % (
            diff_program, "".join(err)))
        self.patch = None
        return None, None, None
      diff = ptemp.read()
    finally:
      # Always release the temp files, even on failure/timeouts.
      ptemp.close()
      stemp.close()
      ttemp.close()

    self.patch = diff
    return self.tf, self.sf, self.patch

  def GetPatch(self):
    """Return a tuple (target_file, source_file, patch_data).
    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed."""
    return self.tf, self.sf, self.patch
def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'."""
  print(len(diffs), "diffs to compute")

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)   # accessed under lock

  def worker():
    try:
      lock.acquire()
      for d in diff_iter:
        # Release while computing so other workers can pull items.
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          print("patching failed! %s" % name)
        else:
          print("%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name))
      lock.release()
    except Exception as e:
      print(e)
      raise

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()
class BlockDifference(object):
  """Generates and emits edify script for a block-based partition update.

  Runs blockimgdiff.BlockImageDiff over (tgt, src) at construction time,
  leaving the transfer-list / new-data / patch-data files in a temp dir,
  and provides Write*Script() helpers that emit the corresponding verify,
  update and post-install-check edify fragments.
  """

  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None, disable_imgdiff=False):
    self.tgt = tgt
    self.src = src
    self.partition = partition
    self.check_first_block = check_first_block
    self.disable_imgdiff = disable_imgdiff

    if version is None:
      # Pick the highest blockimgdiff version the target declares support
      # for; default to 1 when no info_dict is available.
      version = 1
      if OPTIONS.info_dict:
        version = max(
            int(i) for i in
            OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    self.version = version

    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                                    version=self.version,
                                    disable_imgdiff=self.disable_imgdiff)
    tmpdir = tempfile.mkdtemp()
    # Registered for cleanup by the caller's temp-file machinery.
    OPTIONS.tempfiles.append(tmpdir)
    self.path = os.path.join(tmpdir, partition)
    b.Compute(self.path)
    self._required_cache = b.max_stashed_size
    self.touched_src_ranges = b.touched_src_ranges
    self.touched_src_sha1 = b.touched_src_sha1

    if src is None:
      _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
    else:
      # Incremental: resolve the device path from the source build's info.
      _, self.device = GetTypeAndDevice("/" + partition,
                                        OPTIONS.source_info_dict)

  @property
  def required_cache(self):
    # Maximum number of bytes the updater must stash at once for this diff.
    return self._required_cache

  def WriteScript(self, script, output_zip, progress=None):
    if not self.src:
      # write the output unconditionally
      script.Print("Patching %s image unconditionally..." % (self.partition,))
    else:
      script.Print("Patching %s image after verification." % (self.partition,))

    if progress:
      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)
    if OPTIONS.verify:
      self._WritePostInstallVerifyScript(script)

  def WriteStrictVerifyScript(self, script):
    """Verify all the blocks in the care_map, including clobbered blocks.

    This differs from the WriteVerifyScript() function: a) it prints different
    error messages; b) it doesn't allow half-way updated images to pass the
    verification."""

    partition = self.partition
    script.Print("Verifying %s..." % (partition,))
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('range_sha1("%s", "%s") == "%s" && '
                       'ui_print("    Verified.") || '
                       'ui_print("\\"%s\\" has unexpected contents.");' % (
                           self.device, ranges_str,
                           self.tgt.TotalSha1(include_clobbered_blocks=True),
                           self.device))
    script.AppendExtra("")

  def WriteVerifyScript(self, script, touched_blocks_only=False):
    partition = self.partition

    # full OTA
    if not self.src:
      script.Print("Image %s will be patched unconditionally." % (partition,))

    # incremental OTA
    else:
      if touched_blocks_only and self.version >= 3:
        ranges = self.touched_src_ranges
        expected_sha1 = self.touched_src_sha1
      else:
        ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
        expected_sha1 = self.src.TotalSha1()

      # No blocks to be checked, skipping.
      if not ranges:
        return

      ranges_str = ranges.to_string_raw()
      if self.version >= 4:
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                                self.device, ranges_str, expected_sha1,
                                self.device, partition, partition, partition))
      elif self.version == 3:
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                                self.device, ranges_str, expected_sha1,
                                self.device, partition, partition, partition))
      else:
        script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
            self.device, ranges_str, self.src.TotalSha1()))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      if self.version >= 4:

        # When generating incrementals for the system and vendor partitions in
        # version 4 or newer, explicitly check the first block (which contains
        # the superblock) of the partition to see if it's what we expect. If
        # this check fails, give an explicit log message about the partition
        # having been remounted R/W (the most likely explanation).
        if self.check_first_block:
          script.AppendExtra('check_first_block("%s");' % (self.device,))

        # If version >= 4, try block recovery before abort update
        if partition == "system":
          code = ErrorCode.SYSTEM_RECOVER_FAILURE
        else:
          code = ErrorCode.VENDOR_RECOVER_FAILURE
        script.AppendExtra((
            'ifelse (block_image_recover("{device}", "{ranges}") && '
            'block_image_verify("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat"), '
            'ui_print("{partition} recovered successfully."), '
            'abort("E{code}: {partition} partition fails to recover"));\n'
            'endif;').format(device=self.device, ranges=ranges_str,
                             partition=partition, code=code))

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      #    unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
      else:
        if partition == "system":
          code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
        else:
          code = ErrorCode.VENDOR_VERIFICATION_FAILURE
        script.AppendExtra((
            'abort("E%d: %s partition has unexpected contents");\n'
            'endif;') % (code, partition))

  def _WritePostInstallVerifyScript(self, script):
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
        self.device, ranges_str,
        self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
          self.device, ranges_str,
          self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      if partition == "system":
        code = ErrorCode.SYSTEM_NONZERO_CONTENTS
      else:
        code = ErrorCode.VENDOR_NONZERO_CONTENTS
      script.AppendExtra(
          'else\n'
          '  abort("E%d: %s partition has unexpected non-zero contents after '
          'OTA update");\n'
          'endif;' % (code, partition))
    else:
      script.Print('Verified the updated %s image.' % (partition,))

    if partition == "system":
      code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
    else:
      code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS

    script.AppendExtra(
        'else\n'
        '  abort("E%d: %s partition has unexpected contents after OTA '
        'update");\n'
        'endif;' % (code, partition))

  def _WriteUpdate(self, script, output_zip):
    # Ship the three blockimgdiff outputs; patch data is already compressed,
    # so store it uncompressed in the zip.
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))
    ZipWrite(output_zip,
             '{}.new.dat'.format(self.path),
             '{}.new.dat'.format(self.partition))
    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    if self.partition == "system":
      code = ErrorCode.SYSTEM_UPDATE_FAILURE
    else:
      code = ErrorCode.VENDOR_UPDATE_FAILURE

    call = ('block_image_update("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat") ||\n'
            '  abort("E{code}: Failed to update {partition} image.");'.format(
                device=self.device, partition=self.partition, code=code))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
    # Hash the concatenation of the given ranges read from `source`.
    data = source.ReadRangeSet(ranges)
    ctx = sha1()

    for p in data:
      ctx.update(p)

    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
    """Return the hash value for all zero blocks."""
    zero_block = '\x00' * 4096
    ctx = sha1()

    for _ in range(num_blocks):
      ctx.update(zero_block)

    return ctx.hexdigest()
# Re-export so callers can refer to common.DataImage directly.
DataImage = blockimgdiff.DataImage

# map recovery.fstab's fs_types to mount/format "partition types"
# NOTE(review): the PARTITION_TYPES mapping itself appears elided from this
# listing; GetTypeAndDevice() below depends on it.
def GetTypeAndDevice(mount_point, info):
  """Return (partition type, block device) for `mount_point`.

  Looks the mount point up in info["fstab"] and maps its fs_type through
  PARTITION_TYPES.

  Raises:
    KeyError: if the fstab is missing/empty or has no entry for
        `mount_point`.  NOTE(review): upstream callers appear to treat
        KeyError as "no recovery info for this device" — confirm.
  """
  fstab = info["fstab"]
  if fstab:
    return (PARTITION_TYPES[fstab[mount_point].fs_type],
            fstab[mount_point].device)
  else:
    # An empty/None fstab means there is no partition info at all; signal
    # it the same way as a missing entry.
    raise KeyError
def ParseCertificate(data):
  """Parse a PEM-format certificate and return its DER contents as bytes.

  Collects the base64 payload between the BEGIN/END CERTIFICATE markers
  and decodes it.  Runs under both Python 2 and 3: text lines are encoded
  to bytes before joining.
  """
  from codecs import decode
  cert = []
  save = False
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
      break
    if save:
      # Encode text lines so the pieces join as bytes under Python 3.
      l = line.encode() if hasattr(line, 'encode') else line
      cert.append(l)
    if "--BEGIN CERTIFICATE--" in line:
      # Start collecting on the NEXT line (the marker itself is skipped).
      save = True
  cert = decode(b"".join(cert), 'base64')
  return cert
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
                      info_dict=None):
  """Generate a binary patch that creates the recovery image starting
  with the boot image. (Most of the space in these images is just the
  kernel, which is identical for the two, so the resulting patch
  should be efficient.) Add it to the output zip, along with a shell
  script that is run from init.rc on first boot to actually do the
  patching and install the new recovery image.

  recovery_img and boot_img should be File objects for the
  corresponding images. info should be the dictionary returned by
  common.LoadInfoDict() on the input target_files.
  """
  if info_dict is None:
    info_dict = OPTIONS.info_dict

  full_recovery_image = info_dict.get("full_recovery_image", None) == "true"
  system_root_image = info_dict.get("system_root_image", None) == "true"

  if full_recovery_image:
    # Ship the whole recovery image; no patch needed.
    output_sink("etc/recovery.img", recovery_img.data)

  else:
    diff_program = ["imgdiff"]
    path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
    if os.path.exists(path):
      # Shared bonus data shrinks the patch; the installed script must
      # pass the same -b argument to applypatch.
      diff_program.append("-b")
      diff_program.append(path)
      bonus_args = "-b /system/etc/recovery-resource.dat"
    else:
      bonus_args = ""

    d = Difference(recovery_img, boot_img, diff_program=diff_program)
    _, _, patch = d.ComputePatch()
    output_sink("recovery-from-boot.p", patch)

  try:
    # The following GetTypeAndDevice()s need to use the path in the target
    # info_dict instead of source_info_dict.
    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
  except KeyError:
    # No fstab entries for boot/recovery: nothing to install.
    return

  if full_recovery_image:
    sh = """#!/system/bin/sh
if [ -f /system/etc/recovery-transform.sh ]; then
  exec sh /system/etc/recovery-transform.sh %(recovery_size)d %(recovery_sha1)s %(boot_size)d %(boot_sha1)s
fi

if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size,
       # BUG FIX(review): the recovery-transform.sh line above also
       # interpolates these four keys; without them the "%" formatting
       # raises KeyError whenever full_recovery_image is set.
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1}
  else:
    sh = """#!/system/bin/sh
if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device,
       'recovery_type': recovery_type,
       'recovery_device': recovery_device,
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin
  # in the L release. Parse init.*.rc files to find out where the
  # target-files expects it to be, and put it there.
  sh_location = "etc/install-recovery.sh"
  found = False
  if system_root_image:
    init_rc_dir = os.path.join(input_dir, "ROOT")
  else:
    init_rc_dir = os.path.join(input_dir, "BOOT", "RAMDISK")
  init_rc_files = os.listdir(init_rc_dir)
  for init_rc_file in init_rc_files:
    if (not init_rc_file.startswith('init.') or
        not init_rc_file.endswith('.rc')):
      continue

    with open(os.path.join(init_rc_dir, init_rc_file)) as f:
      for line in f:
        m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
        if m:
          sh_location = m.group(1)
          found = True
          break

    if found:
      break

  print("putting script in", sh_location)

  output_sink(sh_location, sh)