1 # Copyright (C) 2008 The Android Open Source Project
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
from __future__ import print_function

import datetime
import errno
import getopt
import getpass
import imp
import os
import platform
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import zipfile

from hashlib import sha1 as sha1
def iteritems(obj):
  """Iterate over (key, value) pairs on either Python 2 or Python 3.

  Uses the dict's own iteritems() when available (Python 2, no copy);
  otherwise falls back to items() (Python 3).
  """
  if hasattr(obj, 'iteritems'):
    return obj.iteritems()
  return obj.items()
class Options(object):
  """Global option holder shared by all releasetools scripts.

  A single module-level instance (OPTIONS) is mutated by ParseOptions()
  and read throughout this file.
  """

  def __init__(self):
    platform_search_path = {
        "linux2": "out/host/linux-x86",
        "darwin": "out/host/darwin-x86",
    }

    self.search_path = platform_search_path.get(sys.platform, None)
    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
    self.signapk_shared_library_path = "lib64"   # Relative to search_path
    self.extra_signapk_args = []
    self.java_path = "java"  # Use the one on the path by default.
    self.java_args = "-Xmx2048m"  # JVM Args
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"
    # use otatools built boot_signer by default
    self.boot_signer_path = "boot_signer"
    self.boot_signer_args = []
    self.verity_signer_path = None
    self.verity_signer_args = []
    self.verbose = False
    self.tempfiles = []
    self.device_specific = None
    self.extras = {}
    self.info_dict = None
    self.source_info_dict = None
    self.target_info_dict = None
    self.worker_threads = None
    # Stash size cannot exceed cache_size * threshold.
    self.cache_size = None
    self.stash_threshold = 0.8
# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
class ErrorCode(object):
  """Define error_codes for failures that happen during the actual
  update package installation.

  Error codes 0-999 are reserved for failures before the package
  installation (i.e. low battery, package verification failure).
  Detailed code in 'bootable/recovery/error_code.h' """
  SYSTEM_VERIFICATION_FAILURE = 1000
  SYSTEM_UPDATE_FAILURE = 1001
  SYSTEM_UNEXPECTED_CONTENTS = 1002
  SYSTEM_NONZERO_CONTENTS = 1003
  SYSTEM_RECOVER_FAILURE = 1004
  VENDOR_VERIFICATION_FAILURE = 2000
  VENDOR_UPDATE_FAILURE = 2001
  VENDOR_UNEXPECTED_CONTENTS = 2002
  VENDOR_NONZERO_CONTENTS = 2003
  VENDOR_RECOVER_FAILURE = 2004
  OEM_PROP_MISMATCH = 3000
  FINGERPRINT_MISMATCH = 3001
  THUMBPRINT_MISMATCH = 3002
  # 3003 restored: the value between 3002 and 3004 was dropped from this
  # listing; it matches bootable/recovery/error_code.h.
  OLDER_BUILD = 3003
  DEVICE_MISMATCH = 3004
  BAD_PATCH_FILE = 3005
  INSUFFICIENT_CACHE_SPACE = 3006
  TUNE_PARTITION_FAILURE = 3007
  APPLY_PATCH_FAILURE = 3008
117 class ExternalError(RuntimeError):
def Run(args, **kwargs):
  """Create and return a subprocess.Popen object, printing the command
  line on the terminal if -v was specified."""
  if OPTIONS.verbose:
    print("  running: ", " ".join(args))
  return subprocess.Popen(args, **kwargs)
def CloseInheritedPipes():
  """ Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds
  before doing other work."""
  if platform.system() != "Darwin":
    return
  for d in range(3, 1025):
    try:
      stat = os.fstat(d)
      if stat is not None:
        # S_IFIFO bit: only close descriptors that are actually pipes.
        pipebit = stat[0] & 0x1000
        if pipebit != 0:
          os.close(d)
    except OSError:
      # Descriptor not open; ignore.
      pass
def LoadInfoDict(input_file, input_dir=None):
  """Read and parse the META/misc_info.txt key/value pairs from the
  input target files and return a dict.

  'input_file' is either an open zipfile.ZipFile of the target-files
  package or the path of an unpacked target-files directory.  If
  'input_dir' is given, we are repacking and link-valued properties are
  redirected to files in the unzipped directory.
  """

  def read_helper(fn):
    if isinstance(input_file, zipfile.ZipFile):
      return input_file.read(fn)
    else:
      path = os.path.join(input_file, *fn.split("/"))
      try:
        with open(path) as f:
          return f.read()
      except IOError as e:
        # Missing entries surface as KeyError, matching ZipFile.read().
        if e.errno == errno.ENOENT:
          raise KeyError(fn)

  d = {}
  try:
    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  except KeyError:
    # ok if misc_info.txt doesn't exist
    pass

  # backwards compatibility: These values used to be in their own
  # files. Look for them, in case we're processing an old
  # target_files zip.

  if "mkyaffs2_extra_flags" not in d:
    try:
      d["mkyaffs2_extra_flags"] = read_helper(
          "META/mkyaffs2-extra-flags.txt").strip()
    except KeyError:
      # ok if flags don't exist
      pass

  if "recovery_api_version" not in d:
    try:
      d["recovery_api_version"] = read_helper(
          "META/recovery-api-version.txt").strip()
    except KeyError:
      raise ValueError("can't find recovery API version in input target-files")

  if "tool_extensions" not in d:
    try:
      d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
    except KeyError:
      # ok if extensions don't exist
      pass

  if "fstab_version" not in d:
    d["fstab_version"] = "1"

  # A few properties are stored as links to the files in the out/ directory.
  # It works fine with the build system. However, they are no longer available
  # when (re)generating from target_files zip. If input_dir is not None, we
  # are doing repacking. Redirect those properties to the actual files in the
  # unzipped directory.
  if input_dir is not None:
    # We carry a copy of file_contexts.bin under META/. If not available,
    # search BOOT/RAMDISK/. Note that sometimes we may need a different file
    # to build images than the one running on device, such as when enabling
    # system_root_image. In that case, we must have the one for image
    # generation copied to META/.
    fc_basename = os.path.basename(d.get("selinux_fc", "file_contexts"))
    fc_config = os.path.join(input_dir, "META", fc_basename)
    if d.get("system_root_image") == "true":
      assert os.path.exists(fc_config)
    if not os.path.exists(fc_config):
      fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", fc_basename)
      if not os.path.exists(fc_config):
        fc_config = None

    if fc_config:
      d["selinux_fc"] = fc_config

    # Similarly we need to redirect "ramdisk_dir" and "ramdisk_fs_config".
    if d.get("system_root_image") == "true":
      d["ramdisk_dir"] = os.path.join(input_dir, "ROOT")
      d["ramdisk_fs_config"] = os.path.join(
          input_dir, "META", "root_filesystem_config.txt")

    # Redirect {system,vendor}_base_fs_file.
    if "system_base_fs_file" in d:
      basename = os.path.basename(d["system_base_fs_file"])
      system_base_fs_file = os.path.join(input_dir, "META", basename)
      if os.path.exists(system_base_fs_file):
        d["system_base_fs_file"] = system_base_fs_file
      else:
        print("Warning: failed to find system base fs file: %s" % (
            system_base_fs_file,))
        del d["system_base_fs_file"]

    if "vendor_base_fs_file" in d:
      basename = os.path.basename(d["vendor_base_fs_file"])
      vendor_base_fs_file = os.path.join(input_dir, "META", basename)
      if os.path.exists(vendor_base_fs_file):
        d["vendor_base_fs_file"] = vendor_base_fs_file
      else:
        print("Warning: failed to find vendor base fs file: %s" % (
            vendor_base_fs_file,))
        del d["vendor_base_fs_file"]

  if "device_type" not in d:
    d["device_type"] = "MMC"

  try:
    data = read_helper("META/imagesizes.txt")
    for line in data.split("\n"):
      if not line:
        continue
      name, value = line.split(" ", 1)
      if not value:
        continue
      if name == "blocksize":
        d[name] = value
      else:
        d[name + "_size"] = value
  except KeyError:
    # ok if imagesizes.txt doesn't exist
    pass

  def makeint(key):
    # int(x, 0) honors 0x... hex and 0... octal prefixes.
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("boot_size")
  makeint("fstab_version")

  if d.get("no_recovery", False) == "true":
    d["fstab"] = None
  else:
    # BUG FIX: this was d.get("system_root_image", d["device_type"], False),
    # a TypeError (dict.get takes at most two arguments) that also dropped
    # the device-type argument LoadRecoveryFSTab expects.
    d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
                                   d["device_type"],
                                   d.get("system_root_image") == "true")
  d["build.prop"] = LoadBuildProp(read_helper)
  return d
def LoadBuildProp(read_helper):
  """Parse SYSTEM/build.prop via 'read_helper' into a dict.

  Returns an empty dict (with a warning) if the file is absent.
  """
  try:
    data = read_helper("SYSTEM/build.prop")
  except KeyError:
    # BUG FIX: the old message did '%s" % zip', formatting the *builtin*
    # zip type into the warning; there is no zip variable in scope here.
    print("Warning: could not find SYSTEM/build.prop")
    data = ""
  return LoadDictionaryFromLines(data.split("\n"))
def LoadDictionaryFromLines(lines):
  """Parse "name=value" lines into a dict.

  Blank lines and lines starting with '#' are skipped; only the first '='
  splits, so values may themselves contain '='.
  """
  d = {}
  for line in lines:
    line = line.strip()
    if not line or line.startswith("#"):
      continue
    if "=" in line:
      name, value = line.split("=", 1)
      d[name] = value
  return d
def LoadRecoveryFSTab(read_helper, fstab_version, type, system_root_image=False):
  # NOTE: 'type' (the device type, e.g. "MMC") shadows the builtin; the name
  # is kept so keyword callers keep working.
  """Parse RECOVERY/RAMDISK/etc/recovery.fstab into {mount_point: Partition}."""

  class Partition(object):
    def __init__(self, mount_point, fs_type, device, length, device2, context):
      self.mount_point = mount_point
      self.fs_type = fs_type
      self.device = device
      self.length = length
      self.device2 = device2
      self.context = context

  try:
    data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
  except KeyError:
    print("Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab")
    data = ""

  if fstab_version == 1:
    d = {}
    for line in data.split("\n"):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      pieces = line.split()
      if not 3 <= len(pieces) <= 4:
        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
      options = None
      if len(pieces) >= 4:
        if pieces[3].startswith("/"):
          device2 = pieces[3]
          if len(pieces) >= 5:
            options = pieces[4]
        else:
          device2 = None
          options = pieces[3]
      else:
        device2 = None

      mount_point = pieces[0]
      length = 0
      if options:
        options = options.split(",")
        for i in options:
          if i.startswith("length="):
            length = int(i[7:])
          else:
            print("%s: unknown option \"%s\"" % (mount_point, i))

      if not d.get(mount_point):
        d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
                                   device=pieces[2], length=length,
                                   device2=device2, context=None)

  elif fstab_version == 2:
    d = {}
    for line in data.split("\n"):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
      pieces = line.split()
      if len(pieces) != 5:
        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))

      # Ignore entries that are managed by vold
      options = pieces[4]
      if "voldmanaged=" in options:
        continue

      # It's a good line, parse it
      length = 0
      options = options.split(",")
      for i in options:
        if i.startswith("length="):
          length = int(i[7:])
        else:
          # Ignore all unknown options in the unified fstab
          continue

      mount_flags = pieces[3]
      # Honor the SELinux context if present.
      context = None
      for i in mount_flags.split(","):
        if i.startswith("context="):
          context = i

      mount_point = pieces[1]
      if not d.get(mount_point):
        d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
                                   device=pieces[0], length=length,
                                   device2=None, context=context)
  else:
    raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))

  # / is used for the system mount point when the root directory is included in
  # system. Other areas assume system is always at "/system" so point /system
  # at /.
  if system_root_image:
    # Py3-safe: dict.has_key() was removed; use 'in'.
    assert "/system" not in d and "/" in d
    d["/system"] = d["/"]
  return d
def DumpInfoDict(d):
  """Pretty-print the info dict 'd' (key, value type, value), sorted by key."""
  for k, v in sorted(d.items()):
    print("%-25s = (%s) %s" % (k, type(v).__name__, v))
def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
                        has_ramdisk=False):
  """Build a bootable image from the specified sourcedir.

  Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
  'sourcedir'), and turn them into a boot image. Return the image data, or
  None if sourcedir does not appear to contains files for building the
  requested image.
  """

  def make_ramdisk():
    # Pack sourcedir/RAMDISK into a minigzip'd cpio archive.
    ramdisk_img = tempfile.NamedTemporaryFile()

    if os.access(fs_config_file, os.F_OK):
      cmd = ["mkbootfs", "-f", fs_config_file,
             os.path.join(sourcedir, "RAMDISK")]
    else:
      cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
    p1 = Run(cmd, stdout=subprocess.PIPE)
    p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())

    p2.wait()
    p1.wait()
    assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
    assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)

    return ramdisk_img

  if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):
    return None

  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
    return None

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  img = tempfile.NamedTemporaryFile()
  bootimg_key = os.getenv("PRODUCT_PRIVATE_KEY", None)
  # BUG FIX: initialize before the uboot branch; previously only assigned on
  # the mkbootimg path, so the signing check below could hit a NameError.
  kernel_pagesize = 0
  img_unsigned = None

  if has_ramdisk:
    ramdisk_img = make_ramdisk()

  # check if uboot is requested
  fn = os.path.join(sourcedir, "ubootargs")
  if os.access(fn, os.F_OK):
    cmd = ["mkimage"]
    for argument in open(fn).read().rstrip("\n").split(" "):
      cmd.append(argument)
    cmd.append("-d")
    cmd.append(os.path.join(sourcedir, "kernel") + ":" + ramdisk_img.name)
    cmd.append(img.name)
  else:
    # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
    mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"

    cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]

    fn = os.path.join(sourcedir, "second")
    if os.access(fn, os.F_OK):
      cmd.append("--second")
      cmd.append(fn)

    fn = os.path.join(sourcedir, "cmdline")
    if os.access(fn, os.F_OK):
      cmd.append("--cmdline")
      cmd.append(open(fn).read().rstrip("\n"))

    fn = os.path.join(sourcedir, "base")
    if os.access(fn, os.F_OK):
      cmd.append("--base")
      cmd.append(open(fn).read().rstrip("\n"))

    fn = os.path.join(sourcedir, "tagsaddr")
    if os.access(fn, os.F_OK):
      cmd.append("--tags-addr")
      cmd.append(open(fn).read().rstrip("\n"))

    fn = os.path.join(sourcedir, "tags_offset")
    if os.access(fn, os.F_OK):
      cmd.append("--tags_offset")
      cmd.append(open(fn).read().rstrip("\n"))

    fn = os.path.join(sourcedir, "ramdisk_offset")
    if os.access(fn, os.F_OK):
      cmd.append("--ramdisk_offset")
      cmd.append(open(fn).read().rstrip("\n"))

    fn = os.path.join(sourcedir, "dt")
    if os.access(fn, os.F_OK):
      cmd.append("--dt")
      cmd.append(fn)

    fn = os.path.join(sourcedir, "pagesize")
    if os.access(fn, os.F_OK):
      kernel_pagesize = open(fn).read().rstrip("\n")
      cmd.append("--pagesize")
      cmd.append(kernel_pagesize)

    args = info_dict.get("mkbootimg_args", None)
    if args and args.strip():
      cmd.extend(shlex.split(args))

    args = info_dict.get("mkbootimg_version_args", None)
    if args and args.strip():
      cmd.extend(shlex.split(args))

    if has_ramdisk:
      cmd.extend(["--ramdisk", ramdisk_img.name])

    if info_dict.get("vboot", None):
      img_unsigned = tempfile.NamedTemporaryFile()
      cmd.extend(["--output", img_unsigned.name])
    else:
      cmd.extend(["--output", img.name])

  p = Run(cmd, stdout=subprocess.PIPE)
  p.communicate()
  assert p.returncode == 0, "mkbootimg of %s image failed" % (
      os.path.basename(sourcedir),)

  # NOTE(review): kernel_pagesize is a *string* when read from the pagesize
  # file; 'kernel_pagesize > 0' relies on Python 2 str/int ordering — confirm
  # before porting to Python 3.
  if bootimg_key and os.path.exists(bootimg_key) and kernel_pagesize > 0:
    # Fixed: was a Python 2 print statement, invalid under print_function.
    print("Signing bootable image...")
    bootimg_key_passwords = {}
    bootimg_key_passwords.update(PasswordManager().GetPasswords(bootimg_key.split()))
    bootimg_key_password = bootimg_key_passwords[bootimg_key]
    if bootimg_key_password is not None:
      bootimg_key_password += "\n"
    img_sha256 = tempfile.NamedTemporaryFile()
    img_sig = tempfile.NamedTemporaryFile()
    img_sig_padded = tempfile.NamedTemporaryFile()
    img_secure = tempfile.NamedTemporaryFile()
    p = Run(["openssl", "dgst", "-sha256", "-binary", "-out", img_sha256.name, img.name],
            stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "signing of bootable image failed"
    p = Run(["openssl", "rsautl", "-sign", "-in", img_sha256.name, "-inkey", bootimg_key, "-out",
             img_sig.name, "-passin", "stdin"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    p.communicate(bootimg_key_password)
    assert p.returncode == 0, "signing of bootable image failed"
    # Pad the signature to a full page, then append it to the image.
    p = Run(["dd", "if=/dev/zero", "of=%s" % img_sig_padded.name, "bs=%s" % kernel_pagesize,
             "count=1"], stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "signing of bootable image failed"
    p = Run(["dd", "if=%s" % img_sig.name, "of=%s" % img_sig_padded.name, "conv=notrunc"],
            stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "signing of bootable image failed"
    p = Run(["cat", img.name, img_sig_padded.name], stdout=img_secure.file.fileno())
    p.communicate()
    assert p.returncode == 0, "signing of bootable image failed"
    shutil.copyfile(img_secure.name, img.name)
    img_sha256.close()
    img_sig.close()
    img_sig_padded.close()
    img_secure.close()

  # Sign the image if boot_signer / verity is requested.
  if (info_dict.get("boot_signer", None) == "true" and
      info_dict.get("verity_key", None)):
    path = "/" + os.path.basename(sourcedir).lower()
    cmd = [OPTIONS.boot_signer_path]
    cmd.extend(OPTIONS.boot_signer_args)
    cmd.extend([path, img.name,
                info_dict["verity_key"] + ".pk8",
                info_dict["verity_key"] + ".x509.pem", img.name])
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "boot_signer of %s image failed" % path

  # Sign the image if vboot is non-empty.
  elif info_dict.get("vboot", None):
    path = "/" + os.path.basename(sourcedir).lower()
    img_keyblock = tempfile.NamedTemporaryFile()
    cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
           info_dict["vboot_key"] + ".vbprivk",
           info_dict["vboot_subkey"] + ".vbprivk",
           img_keyblock.name, img.name]
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "vboot_signer of %s image failed" % path

    # Clean up the temp files.
    img_unsigned.close()
    img_keyblock.close()

  # BUG FIX: was img.seek(os.SEEK_SET, 0) — arguments swapped; it only
  # worked by accident because os.SEEK_SET == 0.
  img.seek(0, os.SEEK_SET)
  data = img.read()

  if has_ramdisk:
    ramdisk_img.close()
  img.close()

  return data
def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
                     info_dict=None):
  """Return a File object with the desired bootable image.

  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
  the source files in 'unpack_dir'/'tree_subdir'."""

  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print("using prebuilt %s from BOOTABLE_IMAGES..." % prebuilt_name)
    return File.FromLocalFile(name, prebuilt_path)

  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print("using prebuilt %s from IMAGES..." % prebuilt_name)
    return File.FromLocalFile(name, prebuilt_path)

  print("building image from target_files %s..." % tree_subdir)

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # With system_root_image == "true", we don't pack ramdisk into the boot image.
  # Unless "recovery_as_boot" is specified, in which case we carry the ramdisk
  # for recovery.
  has_ramdisk = (info_dict.get("system_root_image") != "true" or
                 prebuilt_name != "boot.img" or
                 info_dict.get("recovery_as_boot") == "true")

  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
  data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
                             os.path.join(unpack_dir, fs_config),
                             info_dict, has_ramdisk)
  if data:
    return File(name, data)
  return None
def UnzipTemp(filename, pattern=None):
  """Unzip the given archive into a temporary directory and return the name.

  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
  temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.

  Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
  main file), open for reading.
  """
  tmp = tempfile.mkdtemp(prefix="targetfiles-")
  OPTIONS.tempfiles.append(tmp)

  def unzip_to_dir(filename, dirname):
    # BUG FIX: removed the old 'rm -rf' of 'dirname + filename' plus an
    # unexpanded "targetfiles-*" glob — the concatenated path was nonsense
    # and the glob never expands without a shell; mkdtemp() already gives
    # us a fresh empty directory, and 'unzip -o' overwrites as needed.
    cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
    if pattern is not None:
      cmd.extend(pattern)
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    if p.returncode != 0:
      raise ExternalError("failed to unzip input target-files \"%s\"" %
                          (filename,))

  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
  if m:
    unzip_to_dir(m.group(1), tmp)
    unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
    filename = m.group(1)
  else:
    unzip_to_dir(filename, tmp)

  return tmp, zipfile.ZipFile(filename, "r")
def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them. Return a {key: password} dict. password
  will be None if the key has no password."""

  no_passwords = []
  need_passwords = []
  key_passwords = {}
  devnull = open("/dev/null", "w+b")
  for k in sorted(keylist):
    # We don't need a password for things that aren't really keys.
    if k in SPECIAL_CERT_STRINGS:
      no_passwords.append(k)
      continue

    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-nocrypt"],
            stdin=devnull.fileno(),
            stdout=devnull.fileno(),
            stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode == 0:
      # Definitely an unencrypted key.
      no_passwords.append(k)
      continue

    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-passin", "pass:"],
            stdin=devnull.fileno(),
            stdout=devnull.fileno(),
            stderr=subprocess.PIPE)
    _, stderr = p.communicate()
    if p.returncode == 0:
      # Encrypted key with empty string as password.
      key_passwords[k] = ''
    elif stderr.startswith(b'Error decrypting key'):
      # Definitely encrypted key.
      # It would have said "Error reading key" if it didn't parse correctly.
      need_passwords.append(k)
    else:
      # Potentially, a type of key that openssl doesn't understand.
      # We'll let the routines in signapk.jar handle it.
      no_passwords.append(k)
  devnull.close()

  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
  key_passwords.update(dict.fromkeys(no_passwords, None))
  return key_passwords
def GetMinSdkVersion(apk_name):
  """Get the minSdkVersion delared in the APK. This can be both a decimal number
  (API Level) or a codename.

  Raises ExternalError if aapt fails or reports no sdkVersion.
  """
  p = Run(["aapt", "dump", "badging", apk_name], stdout=subprocess.PIPE)
  output, err = p.communicate()
  if err:
    raise ExternalError("Failed to obtain minSdkVersion: aapt return code %s"
                        % (p.returncode,))

  for line in output.split("\n"):
    # Looking for lines such as sdkVersion:'23' or sdkVersion:'M'
    m = re.match(r'sdkVersion:\'([^\']*)\'', line)
    if m:
      return m.group(1)
  raise ExternalError("No minSdkVersion returned by aapt")
def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
  """Get the minSdkVersion declared in the APK as a number (API Level). If
  minSdkVersion is set to a codename, it is translated to a number using the
  provided map.

  Raises ExternalError for an unrecognized codename.
  """
  version = GetMinSdkVersion(apk_name)
  try:
    return int(version)
  except ValueError:
    # Not a decimal number. Codename?
    if version in codename_to_api_level_map:
      return codename_to_api_level_map[version]
    else:
      raise ExternalError("Unknown minSdkVersion: '%s'. Known codenames: %s"
                          % (version, codename_to_api_level_map))
def SignFile(input_name, output_name, key, password, min_api_level=None,
             codename_to_api_level_map=dict(),
             whole_file=False):
  """Sign the input_name zip/jar/apk, producing output_name. Use the
  given key and password (the latter may be None if the key does not
  have a password).

  If whole_file is true, use the "-w" option to SignApk to embed a
  signature that covers the whole file in the archive comment of the
  zip file.

  min_api_level is the API Level (int) of the oldest platform this file may end
  up on. If not specified for an APK, the API Level is obtained by interpreting
  the minSdkVersion attribute of the APK's AndroidManifest.xml.

  codename_to_api_level_map is needed to translate the codename which may be
  encountered as the APK's minSdkVersion.
  """
  java_library_path = os.path.join(
      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)

  cmd = [OPTIONS.java_path, OPTIONS.java_args,
         "-Djava.library.path=" + java_library_path,
         "-jar",
         os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)]
  cmd.extend(OPTIONS.extra_signapk_args)
  if whole_file:
    cmd.append("-w")

  min_sdk_version = min_api_level
  if min_sdk_version is None:
    if not whole_file:
      min_sdk_version = GetMinSdkVersionInt(
          input_name, codename_to_api_level_map)
  if min_sdk_version is not None:
    cmd.extend(["--min-sdk-version", str(min_sdk_version)])

  cmd.extend([key + OPTIONS.public_key_suffix,
              key + OPTIONS.private_key_suffix,
              input_name, output_name])

  p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  if password is not None:
    password += "\n"
  p.communicate(password)
  if p.returncode != 0:
    raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))
def CheckSize(data, target, info_dict):
  """Check the data string passed against the max size limit, if
  any, for the given target. Raise exception if the data is too big.
  Print a warning if the data is nearing the maximum size."""

  if target.endswith(".img"):
    target = target[:-4]
  mount_point = "/" + target

  fs_type = None
  limit = None
  if info_dict["fstab"]:
    # userdata[_extra] images are mounted at /data.
    if mount_point == "/userdata_extra":
      mount_point = "/data"
    if mount_point == "/userdata":
      mount_point = "/data"
    p = info_dict["fstab"][mount_point]
    fs_type = p.fs_type
    device = p.device
    if "/" in device:
      device = device[device.rfind("/")+1:]
    limit = info_dict.get(device + "_size", None)
  if not fs_type or not limit:
    return

  size = len(data)
  if fs_type == "yaffs2":
    # image size should be increased by 1/64th to account for the
    # spare area (64 bytes per 2k page)
    # Py3-safe: use floor division (identical for py2 ints).
    limit = limit // 2048 * (2048+64)
  pct = float(size) * 100.0 / limit
  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
  if pct >= 99.0:
    raise ExternalError(msg)
  elif pct >= 95.0:
    print()
    print("  WARNING: ", msg)
    print()
  elif OPTIONS.verbose:
    print("  ", msg)
def ReadApkCerts(tf_zip):
  """Given a target_files ZipFile, parse the META/apkcerts.txt file
  and return a {package: cert} dict."""
  certmap = {}
  for line in tf_zip.read("META/apkcerts.txt").split("\n"):
    line = line.strip()
    if not line:
      continue
    m = re.match(r'^name="(.*)"\s+certificate="(.*)"\s+'
                 r'private_key="(.*)"$', line)
    if m:
      name, cert, privkey = m.groups()
      public_key_suffix_len = len(OPTIONS.public_key_suffix)
      private_key_suffix_len = len(OPTIONS.private_key_suffix)
      if cert in SPECIAL_CERT_STRINGS and not privkey:
        certmap[name] = cert
      elif (cert.endswith(OPTIONS.public_key_suffix) and
            privkey.endswith(OPTIONS.private_key_suffix) and
            cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
        # Store the key base name (cert and private key must share it).
        certmap[name] = cert[:-public_key_suffix_len]
      else:
        raise ValueError("failed to parse line from apkcerts.txt:\n" + line)
  return certmap
# Help text for the options shared by every releasetools script; appended to
# each tool's own usage message by Usage().
COMMON_DOCSTRING = """
  -p  (--path)  <dir>
      Prepend <dir>/bin to the list of places to search for binaries
      run by this script, and expect to find jars in <dir>/framework.

  -s  (--device_specific) <file>
      Path to the python module containing device-specific
      releasetools code.

  -x  (--extra)  <key=value>
      Add a key/value pair to the 'extras' dict, which device-specific
      extension code may look at.

  -v  (--verbose)
      Show command lines being executed.

  -h  (--help)
      Display this usage message and exit.
"""
def Usage(docstring):
  """Print the calling module's docstring followed by the common options."""
  for text in (docstring.rstrip("\n"), COMMON_DOCSTRING):
    print(text)
def ParseOptions(argv,
                 docstring,
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler=None):
  """Parse the options in argv and return any arguments that aren't
  flags. docstring is the calling module's docstring, to be displayed
  for errors and -h. extra_opts and extra_long_opts are for flags
  defined by the caller, which are processed by passing them to
  extra_option_handler."""

  try:
    opts, args = getopt.getopt(
        argv, "hvp:s:x:" + extra_opts,
        ["help", "verbose", "path=", "signapk_path=",
         "signapk_shared_library_path=", "extra_signapk_args=",
         "java_path=", "java_args=", "public_key_suffix=",
         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
         "verity_signer_path=", "verity_signer_args=", "device_specific=",
         "extra="] +
        list(extra_long_opts))
  except getopt.GetoptError as err:
    Usage(docstring)
    print("**", str(err), "**")
    sys.exit(2)

  for o, a in opts:
    if o in ("-h", "--help"):
      Usage(docstring)
      sys.exit()
    elif o in ("-v", "--verbose"):
      OPTIONS.verbose = True
    elif o in ("-p", "--path"):
      OPTIONS.search_path = a
    elif o in ("--signapk_path",):
      OPTIONS.signapk_path = a
    elif o in ("--signapk_shared_library_path",):
      OPTIONS.signapk_shared_library_path = a
    elif o in ("--extra_signapk_args",):
      OPTIONS.extra_signapk_args = shlex.split(a)
    elif o in ("--java_path",):
      OPTIONS.java_path = a
    elif o in ("--java_args",):
      OPTIONS.java_args = a
    elif o in ("--public_key_suffix",):
      OPTIONS.public_key_suffix = a
    elif o in ("--private_key_suffix",):
      OPTIONS.private_key_suffix = a
    elif o in ("--boot_signer_path",):
      OPTIONS.boot_signer_path = a
    elif o in ("--boot_signer_args",):
      OPTIONS.boot_signer_args = shlex.split(a)
    elif o in ("--verity_signer_path",):
      OPTIONS.verity_signer_path = a
    elif o in ("--verity_signer_args",):
      OPTIONS.verity_signer_args = shlex.split(a)
    elif o in ("-s", "--device_specific"):
      OPTIONS.device_specific = a
    elif o in ("-x", "--extra"):
      key, value = a.split("=", 1)
      OPTIONS.extras[key] = value
    else:
      if extra_option_handler is None or not extra_option_handler(o, a):
        assert False, "unknown option \"%s\"" % (o,)

  if OPTIONS.search_path:
    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                          os.pathsep + os.environ["PATH"])

  return args
def MakeTempFile(prefix=None, suffix=None):
  """Make a temp file and add it to the list of things to be deleted
  when Cleanup() is called. Return the filename."""
  fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  # Close the descriptor mkstemp opened; callers reopen by name. Without
  # this the descriptor leaks.
  os.close(fd)
  OPTIONS.tempfiles.append(fn)
  return fn
993 for i in OPTIONS.tempfiles:
class PasswordManager(object):
  """Collect passwords for signing keys, via $ANDROID_PW_FILE or prompts."""

  def __init__(self):
    self.editor = os.getenv("EDITOR", None)
    self.pwfile = os.getenv("ANDROID_PW_FILE", None)

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict. (The dict may have keys in addition to the
    values in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords. If no editor is
    available, or $ANDROID_PW_FILE isn't define, prompts the user
    interactively in the ordinary way.
    """
    current = self.ReadFile()

    first = True
    while True:
      missing = []
      for i in items:
        if i not in current or not current[i]:
          missing.append(i)
      # Are all the passwords already in the file?
      if not missing:
        return current

      for i in missing:
        current[i] = ""

      if not first:
        print("key file %s still missing some passwords." % self.pwfile)
        answer = raw_input("try to edit again? [y]> ").strip()
        if answer and answer[0] not in 'yY':
          raise RuntimeError("key passwords unavailable")
      first = False

      current = self.UpdateAndReadFile(current)

  def PromptResult(self, current):  # pylint: disable=no-self-use
    """Prompt the user to enter a value (password) for each key in
    'current' whose value is fales. Returns a new dict with all the
    values filled in."""
    result = {}
    for k, v in sorted(iteritems(current)):
      if v:
        result[k] = v
      else:
        while True:
          result[k] = getpass.getpass(
              "Enter password for %s key> " % k).strip()
          if result[k]:
            break
    return result

  def UpdateAndReadFile(self, current):
    if not self.editor or not self.pwfile:
      return self.PromptResult(current)

    f = open(self.pwfile, "w")
    os.chmod(self.pwfile, 0o600)
    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
    f.write("# (Additional spaces are harmless.)\n\n")

    first_line = None
    # Sort so that keys still missing a password come first.
    sorted_list = sorted((not v, k, v) for (k, v) in current.items())
    for i, (_, k, v) in enumerate(sorted_list):
      f.write("[[[ %s ]]] %s\n" % (v, k))
      if not v and first_line is None:
        # position cursor on first line with no password.
        first_line = i + 4
    f.close()

    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
    _, _ = p.communicate()

    return self.ReadFile()

  def ReadFile(self):
    """Return {key: password} parsed from the password file, or {} if the
    file is unset/absent."""
    result = {}
    if self.pwfile is None:
      return result
    try:
      f = open(self.pwfile, "r")
      for line in f:
        line = line.strip()
        if not line or line[0] == '#':
          continue
        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
        if not m:
          print("failed to parse password file: ", line)
        else:
          result[m.group(2)] = m.group(1)
      f.close()
    except IOError as e:
      if e.errno != errno.ENOENT:
        print("error reading password file: ", str(e))
    return result
def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
             compress_type=None):
  """Add 'filename' to the open ZipFile 'zip_file' with fixed perms and a
  repeatable timestamp.

  Python 2.7's zipfile implementation wrongly thinks that zip64 is required
  for files larger than 2GiB. We can work around this by adjusting their
  limit. Note that `zipfile.writestr()` will not work for strings larger than
  2GiB. The Python interpreter sometimes rejects strings that large (though
  it isn't clear to me exactly what circumstances cause this).
  `zipfile.write()` must be used directly to work around this.

  This mess can be avoided if we port to python3.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if compress_type is None:
    compress_type = zip_file.compression
  if arcname is None:
    arcname = filename

  saved_stat = os.stat(filename)

  try:
    # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
    # file to be zipped and reset it when we're done.
    os.chmod(filename, perms)

    # Use a fixed timestamp so the output is repeatable.
    epoch = datetime.datetime.fromtimestamp(0)
    timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
    os.utime(filename, (timestamp, timestamp))

    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
  finally:
    # Restore the original mode/times and the zip64 limit even on failure.
    os.chmod(filename, saved_stat.st_mode)
    os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
    zipfile.ZIP64_LIMIT = saved_zip64_limit
def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
                compress_type=None):
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
    zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
    zinfo.compress_type = zip_file.compression
    if perms is None:
      # Default to a regular file, rw-r--r--.
      perms = 0o100644
  else:
    zinfo = zinfo_or_arcname

  # If compress_type is given, it overrides the value in zinfo.
  if compress_type is not None:
    zinfo.compress_type = compress_type

  # If perms is given, it has a priority.
  if perms is not None:
    # If perms doesn't set the file type, mark it as a regular file.
    if perms & 0o770000 == 0:
      perms |= 0o100000
    zinfo.external_attr = perms << 16

  # Use a fixed timestamp so the output is repeatable.
  zinfo.date_time = (2009, 1, 1, 0, 0, 0)

  zip_file.writestr(zinfo, data)
  zipfile.ZIP64_LIMIT = saved_zip64_limit
def ZipClose(zip_file):
  """Close `zip_file` with the zip64 limit raised.

  zipfile also refers to ZIP64_LIMIT during close() when it writes out the
  central directory, so the same workaround used by ZipWrite()/ZipWriteStr()
  is applied around the close.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  # Without this close the central directory is never written and the
  # resulting archive is unreadable. (The call was missing from the
  # visible listing.)
  zip_file.close()

  zipfile.ZIP64_LIMIT = saved_zip64_limit
class DeviceSpecificParams(object):
  """Loader/proxy for the optional device-specific "releasetools" module.

  Keyword arguments to the constructor become attributes; the hook methods
  below forward to the loaded module via _DoCall(), and silently no-op when
  no module is available.

  NOTE(review): this listing elides several interior lines; the evident
  gaps are flagged inline below.
  """

  def __init__(self, **kwargs):
    """Keyword arguments to the constructor become attributes of this
    object, which is passed to all functions in the device-specific
    module."""
    for k, v in iteritems(kwargs):
      # [loop body elided in listing — presumably setattr(self, k, v);
      # confirm against upstream]
    self.extras = OPTIONS.extras

    if self.module is None:
      path = OPTIONS.device_specific
      # [an early-return guard for a falsy path appears to be elided here]
      if os.path.isdir(path):
        # A directory: import the "releasetools" module found inside it.
        info = imp.find_module("releasetools", [path])
      # [else-branch header elided] — a file path: split directory/name
      # and import the module by its basename.
      d, f = os.path.split(path)
      b, x = os.path.splitext(f)
      # [".py" extension-stripping lines elided]
      info = imp.find_module(f, [d])
      print("loaded device-specific extensions from", path)
      self.module = imp.load_module("device_specific", *info)
      # [except-handler header elided; this print is the failure path]
      print("unable to load device-specific module; assuming none")

  def _DoCall(self, function_name, *args, **kwargs):
    """Call the named function in the device-specific module, passing
    the given args and kwargs. The first argument to the call will be
    the DeviceSpecific object itself. If there is no module, or the
    module does not define the function, return the value of the
    'default' kwarg (which itself defaults to None)."""
    if self.module is None or not hasattr(self.module, function_name):
      return kwargs.get("default", None)
    return getattr(self.module, function_name)(*((self,) + args), **kwargs)

  def FullOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of a
    full OTA package. Implementations can add whatever additional
    assertions they like."""
    return self._DoCall("FullOTA_Assertions")

  def FullOTA_InstallBegin(self):
    """Called at the start of full OTA installation."""
    return self._DoCall("FullOTA_InstallBegin")

  def FullOTA_InstallEnd(self):
    """Called at the end of full OTA installation; typically this is
    used to install the image for the device's baseband processor."""
    return self._DoCall("FullOTA_InstallEnd")

  def FullOTA_PostValidate(self):
    """Called after installing and validating /system; typically this is
    used to resize the system partition after a block based installation."""
    return self._DoCall("FullOTA_PostValidate")

  def IncrementalOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of an
    incremental OTA package. Implementations can add whatever
    additional assertions they like."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Called at the start of the verification phase of incremental
    OTA installation; additional checks can be placed here to abort
    the script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Called at the end of the verification phase of incremental OTA
    installation; additional checks can be placed here to abort the
    script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Called at the start of incremental OTA installation (after
    verification is complete)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_InstallEnd(self):
    """Called at the end of incremental OTA installation; typically
    this is used to install the image for the device's baseband
    processor."""
    return self._DoCall("IncrementalOTA_InstallEnd")

  def VerifyOTA_Assertions(self):
    """Forward the VerifyOTA_Assertions hook to the loaded module."""
    return self._DoCall("VerifyOTA_Assertions")
1287 def __init__(self, name, data):
1290 self.size = len(data)
1291 self.sha1 = sha1(data).hexdigest()
1294 def FromLocalFile(cls, name, diskname):
1295 f = open(diskname, "rb")
1298 return File(name, data)
1300 def WriteToTemp(self):
1301 t = tempfile.NamedTemporaryFile()
1306 def AddToZip(self, z, compression=None):
1307 ZipWriteStr(z, self.name, self.data, compress_type=compression)
# Map a target filename extension to the external diff tool invocation
# used for it; Difference.ComputePatch() falls back to "bsdiff" for any
# extension not listed here. (The closing brace of this literal was
# missing from the visible listing; further entries may have been elided.)
DIFF_PROGRAM_BY_EXT = {
    ".zip" : ["imgdiff", "-z"],
    ".jar" : ["imgdiff", "-z"],
    ".apk" : ["imgdiff", "-z"],
    }
class Difference(object):
  """Compute the binary patch turning source file `sf` into target `tf`.

  NOTE(review): interior lines are elided in this listing (constructor
  attribute assignments, the `run()` closure header, the `GetPatch()`
  header); the evident gaps are flagged inline.
  """

  def __init__(self, tf, sf, diff_program=None):
    # [assignments of self.tf / self.sf / self.patch elided in listing;
    # later code reads all three]
    self.diff_program = diff_program

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf. Returns the same tuple as GetPatch()."""
    # [local bindings of tf/sf to self.tf/self.sf elided in listing]
    if self.diff_program:
      # A caller-supplied diff tool takes precedence.
      diff_program = self.diff_program
      # [else-branch header elided] — choose the tool by the target's
      # extension, defaulting to plain bsdiff.
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    # Write both blobs to temp files so the external tool can read them.
    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    ext = os.path.splitext(tf.name)[1]

    # Temp file that receives the produced patch.
    ptemp = tempfile.NamedTemporaryFile()
    if isinstance(diff_program, list):
      cmd = copy.copy(diff_program)
      # [else-branch header elided] — wrap a bare program name in a list.
      cmd = [diff_program]
    cmd.append(stemp.name)
    cmd.append(ttemp.name)
    cmd.append(ptemp.name)
    # [the `def run():` closure header appears to be elided around here]
    p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    _, e = p.communicate()

    # Run the diff in a thread so it can be abandoned after a timeout.
    th = threading.Thread(target=run)
    th.join(timeout=300)   # 5 mins
    # [timeout-branch header elided; this warning is that branch's body]
    print("WARNING: diff command timed out")

    if err or p.returncode != 0:
      print("WARNING: failure running %s:\n%s\n" % (
          diff_program, "".join(err)))
      # Failure: GetPatch() will report patch_data as None.
      return None, None, None
    return self.tf, self.sf, self.patch

    # [`def GetPatch(self):` header elided above this docstring]
    """Return a tuple (target_file, source_file, patch_data).
    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed."""
    return self.tf, self.sf, self.patch
def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'."""
  # NOTE(review): the `worker()` closure's header and its dispatch loop
  # are elided in this listing; the evident gaps are flagged inline.
  print(len(diffs), "diffs to compute")

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)     # accessed under lock

  # [`def worker():` and its fetch-next-diff loop elided; the lines below
  # are the interior of that worker]
  dur = time.time() - start
  tf, sf, patch = d.GetPatch()
  if sf.name == tf.name:
    # [true-branch body elided — presumably name = tf.name]
  name = "%s (%s)" % (tf.name, sf.name)
  # [patch-is-None branch header elided; this print is the failure case]
  print("patching failed! %s" % name)
  print("%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
      dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name))
  # [enclosing try header elided]
  except Exception as e:
    # [handler body elided in listing]

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  # [thread start loop and join loop header elided]
  threads.pop().join()
class BlockDifference(object):
  """Wraps blockimgdiff to produce a block-based OTA for one partition.

  Computes the transfer list / new-data / patch-data files for `partition`
  and emits the edify script fragments that verify and apply them.

  NOTE(review): this listing elides many interior lines (else-headers,
  guards, `ctx = sha1()` initializations); evident gaps are flagged inline.
  """

  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None, disable_imgdiff=False):
    self.partition = partition
    self.check_first_block = check_first_block
    self.disable_imgdiff = disable_imgdiff
    # [assignments of self.tgt/self.src and the version-default lines are
    # elided in listing]
    if OPTIONS.info_dict:
      # [the `version = max(int(i) for i in ...` wrapper is elided; this
      # line is its trailing argument]
      OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    self.version = version

    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                                    version=self.version,
                                    disable_imgdiff=self.disable_imgdiff)
    # Compute into a fresh temp dir; registered for cleanup via
    # OPTIONS.tempfiles.
    tmpdir = tempfile.mkdtemp()
    OPTIONS.tempfiles.append(tmpdir)
    self.path = os.path.join(tmpdir, partition)
    b.Compute(self.path)
    self._required_cache = b.max_stashed_size
    self.touched_src_ranges = b.touched_src_ranges
    self.touched_src_sha1 = b.touched_src_sha1

    # [an `if src is None:`/`else:` split appears to be elided around the
    # two lookups below — full OTA uses the target info, incremental the
    # source info]
    _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
    _, self.device = GetTypeAndDevice("/" + partition,
                                      OPTIONS.source_info_dict)

  # [@property decorator elided in listing]
  def required_cache(self):
    # Maximum stash size blockimgdiff needs; checked against cache_size.
    return self._required_cache

  def WriteScript(self, script, output_zip, progress=None):
    # [`if not self.src:` header elided]
    # write the output unconditionally
    script.Print("Patching %s image unconditionally..." % (self.partition,))
    # [else-branch header elided]
    script.Print("Patching %s image after verification." % (self.partition,))

    # [`if progress:` guard elided]
    script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)
    self._WritePostInstallVerifyScript(script)

  def WriteStrictVerifyScript(self, script):
    """Verify all the blocks in the care_map, including clobbered blocks.

    This differs from the WriteVerifyScript() function: a) it prints different
    error messages; b) it doesn't allow half-way updated images to pass the
    verification."""
    partition = self.partition
    script.Print("Verifying %s..." % (partition,))
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    # [closing argument/paren of this call elided in listing]
    script.AppendExtra('range_sha1("%s", "%s") == "%s" && '
                       'ui_print(" Verified.") || '
                       'ui_print("\\"%s\\" has unexpected contents.");' % (
                           self.device, ranges_str,
                           self.tgt.TotalSha1(include_clobbered_blocks=True),
    script.AppendExtra("")

  def WriteVerifyScript(self, script, touched_blocks_only=False):
    partition = self.partition
    # [`if not self.src:` header elided — full OTA has nothing to verify]
    script.Print("Image %s will be patched unconditionally." % (partition,))
    # [else-branch header elided; the rest is the incremental path]
    if touched_blocks_only and self.version >= 3:
      # Only check the blocks the transfer list actually reads.
      ranges = self.touched_src_ranges
      expected_sha1 = self.touched_src_sha1
    # [else-branch header elided]
    ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
    expected_sha1 = self.src.TotalSha1()

    # No blocks to be checked, skipping.
    # [the `if not ranges: return` guard itself is elided]

    ranges_str = ranges.to_string_raw()
    if self.version >= 4:
      script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                          'block_image_verify("%s", '
                          'package_extract_file("%s.transfer.list"), '
                          '"%s.new.dat", "%s.patch.dat")) then') % (
                              self.device, ranges_str, expected_sha1,
                              self.device, partition, partition, partition))
    elif self.version == 3:
      script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                          'block_image_verify("%s", '
                          'package_extract_file("%s.transfer.list"), '
                          '"%s.new.dat", "%s.patch.dat")) then') % (
                              self.device, ranges_str, expected_sha1,
                              self.device, partition, partition, partition))
    # [else-branch header (version < 3) elided]
    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
        self.device, ranges_str, self.src.TotalSha1()))
    script.Print('Verified %s image...' % (partition,))
    script.AppendExtra('else')

    if self.version >= 4:
      # When generating incrementals for the system and vendor partitions in
      # version 4 or newer, explicitly check the first block (which contains
      # the superblock) of the partition to see if it's what we expect. If
      # this check fails, give an explicit log message about the partition
      # having been remounted R/W (the most likely explanation).
      if self.check_first_block:
        script.AppendExtra('check_first_block("%s");' % (self.device,))

      # If version >= 4, try block recovery before abort update
      if partition == "system":
        code = ErrorCode.SYSTEM_RECOVER_FAILURE
      # [else-branch header elided]
      code = ErrorCode.VENDOR_RECOVER_FAILURE
      script.AppendExtra((
          'ifelse (block_image_recover("{device}", "{ranges}") && '
          'block_image_verify("{device}", '
          'package_extract_file("{partition}.transfer.list"), '
          '"{partition}.new.dat", "{partition}.patch.dat"), '
          'ui_print("{partition} recovered successfully."), '
          'abort("E{code}: {partition} partition fails to recover"));\n'
          'endif;').format(device=self.device, ranges=ranges_str,
                           partition=partition, code=code))

    # Abort the OTA update. Note that the incremental OTA cannot be applied
    # even if it may match the checksum of the target partition.
    # a) If version < 3, operations like move and erase will make changes
    #    unconditionally and damage the partition.
    # b) If version >= 3, it won't even reach here.
    # [else-branch header elided]
    if partition == "system":
      code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
    # [else-branch header elided]
    code = ErrorCode.VENDOR_VERIFICATION_FAILURE
    script.AppendExtra((
        'abort("E%d: %s partition has unexpected contents");\n'
        'endif;') % (code, partition))

  def _WritePostInstallVerifyScript(self, script):
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
        self.device, ranges_str,
        self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
          self.device, ranges_str,
          self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      if partition == "system":
        code = ErrorCode.SYSTEM_NONZERO_CONTENTS
      # [else-branch header elided]
      code = ErrorCode.VENDOR_NONZERO_CONTENTS
      # [the surrounding script.AppendExtra('else\n' ... call lines are
      # partially elided; these are its string arguments]
      ' abort("E%d: %s partition has unexpected non-zero contents after '
      'endif;' % (code, partition))
    # [else-branch (no extended blocks) header elided]
    script.Print('Verified the updated %s image.' % (partition,))

    if partition == "system":
      code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
    # [else-branch header elided]
    code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS
    # [the surrounding script.AppendExtra('else\n' ... call lines are
    # partially elided; these are its string arguments]
    ' abort("E%d: %s partition has unexpected contents after OTA '
    'endif;' % (code, partition))

  def _WriteUpdate(self, script, output_zip):
    # Ship the three blockimgdiff outputs inside the OTA package. The
    # patch data is already compressed, so store it uncompressed.
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))
    ZipWrite(output_zip,
             '{}.new.dat'.format(self.path),
             '{}.new.dat'.format(self.partition))
    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    if self.partition == "system":
      code = ErrorCode.SYSTEM_UPDATE_FAILURE
    # [else-branch header elided]
    code = ErrorCode.VENDOR_UPDATE_FAILURE

    call = ('block_image_update("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat") ||\n'
            '  abort("E{code}: Failed to update {partition} image.");'.format(
                device=self.device, partition=self.partition, code=code))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges):  # pylint: disable=no-self-use
    data = source.ReadRangeSet(ranges)
    # [`ctx = sha1()` and the update loop over `data` are elided in listing]
    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks):  # pylint: disable=no-self-use
    """Return the hash value for all zero blocks."""
    zero_block = '\x00' * 4096
    # [`ctx = sha1()` initialization elided in listing]
    for _ in range(num_blocks):
      ctx.update(zero_block)
    return ctx.hexdigest()
# Re-export blockimgdiff's DataImage so callers can refer to it via this
# module.
DataImage = blockimgdiff.DataImage
1673 # map recovery.fstab's fs_types to mount/format "partition types"
def GetTypeAndDevice(mount_point, info):
  """Map a mount point (e.g. "/system") to its (partition_type, device).

  Looks `mount_point` up in info["fstab"] and translates its fs_type via
  the module-level PARTITION_TYPES table.

  Raises:
    KeyError: if the mount point is not in the fstab.
  """
  fstab = info["fstab"]
  # NOTE(review): a guard on a falsy fstab appears to be elided in this
  # listing — as written, fstab=None raises TypeError here; confirm.
  return (PARTITION_TYPES[fstab[mount_point].fs_type],
          fstab[mount_point].device)
def ParseCertificate(data):
  """Parse a PEM-format certificate string and return its DER bytes.

  Collects the base64 payload strictly between the BEGIN CERTIFICATE and
  END CERTIFICATE marker lines and decodes it; any text outside the
  markers is ignored.

  (The accumulator, flag, append, and return statements were missing from
  the visible listing; they are restored around the surviving lines.)
  """
  from codecs import decode
  cert = []
  save = False
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
      break
    if save:
      # Normalize each payload line to bytes so b"".join() works on
      # both Python 2 (str) and Python 3 (str -> bytes).
      l = line.encode() if hasattr(line, 'encode') else line
      cert.append(l)
    if "--BEGIN CERTIFICATE--" in line:
      # Start collecting from the line *after* the BEGIN marker.
      save = True
  cert = decode(b"".join(cert), 'base64')
  return cert
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
  # [continuation of the signature — presumably `info_dict=None):` — is
  # elided in this listing]
  """Generate a binary patch that creates the recovery image starting
  with the boot image. (Most of the space in these images is just the
  kernel, which is identical for the two, so the resulting patch
  should be efficient.) Add it to the output zip, along with a shell
  script that is run from init.rc on first boot to actually do the
  patching and install the new recovery image.

  recovery_img and boot_img should be File objects for the
  corresponding images. info should be the dictionary returned by
  common.LoadInfoDict() on the input target_files.

  NOTE(review): interior lines are elided in this listing; evident gaps
  are flagged inline (except inside the shell-script string literals,
  where comments cannot be inserted).
  """
  if info_dict is None:
    info_dict = OPTIONS.info_dict

  full_recovery_image = info_dict.get("full_recovery_image", None) == "true"
  system_root_image = info_dict.get("system_root_image", None) == "true"

  if full_recovery_image:
    # Ship the whole recovery image instead of a boot->recovery patch.
    output_sink("etc/recovery.img", recovery_img.data)
  # [else-branch header elided; the patch-based path follows]
  diff_program = ["imgdiff"]
  path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
  if os.path.exists(path):
    # Use the shared resource file as an imgdiff bonus file.
    diff_program.append("-b")
    diff_program.append(path)
    bonus_args = "-b /system/etc/recovery-resource.dat"
  # [else-branch (bonus_args = "") elided]

  d = Difference(recovery_img, boot_img, diff_program=diff_program)
  _, _, patch = d.ComputePatch()
  output_sink("recovery-from-boot.p", patch)

  # The following GetTypeAndDevice()s need to use the path in the target
  # info_dict instead of source_info_dict.
  boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
  recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)

  # [lines elided; the listing's shell scripts below are also missing
  # interior lines (fi/else), which cannot be annotated in-string]
  if full_recovery_image:
    sh = """#!/system/bin/sh
if [ -f /system/etc/recovery-transform.sh ]; then
  exec sh /system/etc/recovery-transform.sh %(recovery_size)d %(recovery_sha1)s %(boot_size)d %(boot_sha1)s
if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
  log -t recovery "Recovery image already installed"
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size}
  # [else-branch header elided; the patch-based install script follows]
  sh = """#!/system/bin/sh
if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
  log -t recovery "Recovery image already installed"
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device,
       'recovery_type': recovery_type,
       'recovery_device': recovery_device,
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin
  # in the L release. Parse init.*.rc files to find out where the
  # target-files expects it to be, and put it there.
  sh_location = "etc/install-recovery.sh"
  # [a `found = False` flag appears to be elided here]
  if system_root_image:
    init_rc_dir = os.path.join(input_dir, "ROOT")
  # [else-branch header elided]
  init_rc_dir = os.path.join(input_dir, "BOOT", "RAMDISK")
  init_rc_files = os.listdir(init_rc_dir)
  for init_rc_file in init_rc_files:
    if (not init_rc_file.startswith('init.') or
        not init_rc_file.endswith('.rc')):
      # [`continue` elided in listing]
    with open(os.path.join(init_rc_dir, init_rc_file)) as f:
      # [`for line in f:` loop header elided]
      m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
      # [`if m:` guard elided]
      sh_location = m.group(1)
      # [found/break bookkeeping elided in listing]
  print("putting script in", sh_location)

  output_sink(sh_location, sh)