1 # Copyright (C) 2008 The Android Open Source Project
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
34 from hashlib import sha1 as sha1
class Options(object):
  """Global option holder; a single instance is shared module-wide as
  OPTIONS and mutated by ParseOptions() below."""
  # Maps sys.platform values to the host out/ directory holding prebuilt
  # host tools and jars (signapk.jar, boot_signer, ...).
  platform_search_path = {
      "linux2": "out/host/linux-x86",
      "darwin": "out/host/darwin-x86",
  # NOTE(review): the dict's closing brace and the `def __init__(self):`
  # header are missing from this excerpt.
  self.search_path = platform_search_path.get(sys.platform, None)
  self.signapk_path = "framework/signapk.jar"  # Relative to search_path
  self.signapk_shared_library_path = "lib64"   # Relative to search_path
  self.extra_signapk_args = []
  self.java_path = "java"  # Use the one on the path by default.
  self.java_args = "-Xmx2048m"  # JVM Args
  self.public_key_suffix = ".x509.pem"
  self.private_key_suffix = ".pk8"
  # use otatools built boot_signer by default
  self.boot_signer_path = "boot_signer"
  self.boot_signer_args = []
  self.verity_signer_path = None
  self.verity_signer_args = []
  self.device_specific = None
  self.source_info_dict = None
  self.target_info_dict = None
  self.worker_threads = None
  # Stash size cannot exceed cache_size * threshold.
  self.cache_size = None
  self.stash_threshold = 0.8
# Values for "certificate" in apkcerts that mean special things.
# Consumed by GetKeyPasswords() and ReadApkCerts() below.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
class ErrorCode(object):
  """Define error_codes for failures that happen during the actual
  update package installation.

  Error codes 0-999 are reserved for failures before the package
  installation (i.e. low battery, package verification failure).
  Detailed code in 'bootable/recovery/error_code.h' """
  # 1xxx: system partition failures.
  SYSTEM_VERIFICATION_FAILURE = 1000
  SYSTEM_UPDATE_FAILURE = 1001
  SYSTEM_UNEXPECTED_CONTENTS = 1002
  SYSTEM_NONZERO_CONTENTS = 1003
  SYSTEM_RECOVER_FAILURE = 1004
  # 2xxx: vendor partition failures.
  VENDOR_VERIFICATION_FAILURE = 2000
  VENDOR_UPDATE_FAILURE = 2001
  VENDOR_UNEXPECTED_CONTENTS = 2002
  VENDOR_NONZERO_CONTENTS = 2003
  VENDOR_RECOVER_FAILURE = 2004
  # 3xxx: device/build mismatch and environment failures.
  # NOTE(review): codes 3003 and 3005 are absent from this excerpt.
  OEM_PROP_MISMATCH = 3000
  FINGERPRINT_MISMATCH = 3001
  THUMBPRINT_MISMATCH = 3002
  DEVICE_MISMATCH = 3004
  INSUFFICIENT_CACHE_SPACE = 3006
  TUNE_PARTITION_FAILURE = 3007
  APPLY_PATCH_FAILURE = 3008
class ExternalError(RuntimeError):
  """Raised when an external tool or command invoked by these scripts
  fails (see UnzipTemp, SignFile, GetMinSdkVersion, CheckSize)."""
def Run(args, **kwargs):
  """Create and return a subprocess.Popen object, printing the command
  line on the terminal if -v was specified."""
  # NOTE(review): in the full source this print is guarded by
  # `if OPTIONS.verbose:`; the guard line is absent from this excerpt.
  print " running: ", " ".join(args)
  return subprocess.Popen(args, **kwargs)
def CloseInheritedPipes():
  """ Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds
  before doing other work."""
  # Non-Darwin hosts are unaffected.  NOTE(review): the early return and
  # the os.fstat/os.close plumbing are missing from this excerpt.
  if platform.system() != "Darwin":
  # Scan a generous range of possibly-inherited descriptors.
  for d in range(3, 1025):
    # 0x1000 is the S_IFIFO bit of st_mode: presumably only FIFO (pipe)
    # descriptors get closed -- confirm against the full source.
    pipebit = stat[0] & 0x1000
def LoadInfoDict(input_file, input_dir=None):
  """Read and parse the META/misc_info.txt key/value pairs from the
  input target files and return a dict.

  input_file is either an open zipfile.ZipFile or the path of an
  unzipped target-files tree.  If input_dir is not None we are
  repacking, and link-style properties are redirected to real files
  under input_dir.
  """
  # Local helper that reads one entry either from the zip or from the
  # unzipped tree.  NOTE(review): the `def read_helper(fn):` header and
  # its try/except scaffolding are missing from this excerpt; the
  # indentation below is reconstructed.
  if isinstance(input_file, zipfile.ZipFile):
    return input_file.read(fn)
  path = os.path.join(input_file, *fn.split("/"))
  with open(path) as f:
  if e.errno == errno.ENOENT:

  d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  # ok if misc_info.txt doesn't exist

  # backwards compatibility: These values used to be in their own
  # files. Look for them, in case we're processing an old
  # target-files zip.
  if "mkyaffs2_extra_flags" not in d:
    d["mkyaffs2_extra_flags"] = read_helper(
        "META/mkyaffs2-extra-flags.txt").strip()
  # ok if flags don't exist

  if "recovery_api_version" not in d:
    d["recovery_api_version"] = read_helper(
        "META/recovery-api-version.txt").strip()
    # NOTE(review): upstream raises this only in the except branch; the
    # try/except lines are missing from this excerpt.
    raise ValueError("can't find recovery API version in input target-files")

  if "tool_extensions" not in d:
    d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
  # ok if extensions don't exist

  if "fstab_version" not in d:
    d["fstab_version"] = "1"

  # A few properties are stored as links to the files in the out/ directory.
  # It works fine with the build system. However, they are no longer available
  # when (re)generating from target_files zip. If input_dir is not None, we
  # are doing repacking. Redirect those properties to the actual files in the
  # unzipped directory.
  if input_dir is not None:
    # We carry a copy of file_contexts.bin under META/. If not available,
    # search BOOT/RAMDISK/. Note that sometimes we may need a different file
    # to build images than the one running on device, such as when enabling
    # system_root_image. In that case, we must have the one for image
    # generation copied to META/.
    fc_basename = os.path.basename(d.get("selinux_fc", "file_contexts"))
    fc_config = os.path.join(input_dir, "META", fc_basename)
    if d.get("system_root_image") == "true":
      assert os.path.exists(fc_config)
    if not os.path.exists(fc_config):
      fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", fc_basename)
      if not os.path.exists(fc_config):
    d["selinux_fc"] = fc_config

    # Similarly we need to redirect "ramdisk_dir" and "ramdisk_fs_config".
    if d.get("system_root_image") == "true":
      d["ramdisk_dir"] = os.path.join(input_dir, "ROOT")
      d["ramdisk_fs_config"] = os.path.join(
          input_dir, "META", "root_filesystem_config.txt")

    # Redirect {system,vendor}_base_fs_file.
    if "system_base_fs_file" in d:
      basename = os.path.basename(d["system_base_fs_file"])
      system_base_fs_file = os.path.join(input_dir, "META", basename)
      if os.path.exists(system_base_fs_file):
        d["system_base_fs_file"] = system_base_fs_file
      # NOTE(review): the `else:` introducing this warning branch is
      # missing from this excerpt.
      print "Warning: failed to find system base fs file: %s" % (
          system_base_fs_file,)
      del d["system_base_fs_file"]

    if "vendor_base_fs_file" in d:
      basename = os.path.basename(d["vendor_base_fs_file"])
      vendor_base_fs_file = os.path.join(input_dir, "META", basename)
      if os.path.exists(vendor_base_fs_file):
        d["vendor_base_fs_file"] = vendor_base_fs_file
      print "Warning: failed to find vendor base fs file: %s" % (
          vendor_base_fs_file,)
      del d["vendor_base_fs_file"]

  if "device_type" not in d:
    d["device_type"] = "MMC"
  data = read_helper("META/imagesizes.txt")
  for line in data.split("\n"):
    name, value = line.split(" ", 1)
    if name == "blocksize":
    d[name + "_size"] = value

  # Convert selected entries to int in place; radix 0 auto-detects hex.
  # NOTE(review): the `def makeint(key):` header and its guard are
  # missing from this excerpt.
  d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("fstab_version")

  if d.get("no_recovery", False) == "true":
  # NOTE(review): dict.get() accepts at most two arguments, so the 3-arg
  # call below would raise TypeError at runtime.  Upstream passes
  # d["device_type"] and the system_root_image flag as separate
  # arguments to LoadRecoveryFSTab -- confirm and fix in the full file.
  d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
                                 d.get("system_root_image", d["device_type"], False))
  d["build.prop"] = LoadBuildProp(read_helper)
def LoadBuildProp(read_helper):
  """Parse SYSTEM/build.prop from the target files into a dict."""
  # NOTE(review): the try/except wrapping this read is missing from this
  # excerpt; the warning below belongs to its except branch.
  data = read_helper("SYSTEM/build.prop")
  # NOTE(review): `zip` here is the Python builtin, so this message would
  # print "<built-in function zip>" rather than an archive name.
  print "Warning: could not find SYSTEM/build.prop in %s" % zip
  return LoadDictionaryFromLines(data.split("\n"))
def LoadDictionaryFromLines(lines):
  """Build a dict from "name=value" lines, skipping blank lines and
  lines starting with '#'."""
  # NOTE(review): the accumulator initialization, for-loop header, and
  # return statement are missing from this excerpt.
  if not line or line.startswith("#"):
  name, value = line.split("=", 1)
def LoadRecoveryFSTab(read_helper, fstab_version, type, system_root_image=False):
  """Parse RECOVERY/RAMDISK/etc/recovery.fstab (v1 or v2 format) into a
  dict mapping each mount point to a Partition record.

  NOTE(review): the parameter name `type` shadows the builtin.
  """
  class Partition(object):
    # Plain record describing one fstab entry.
    # NOTE(review): the self.device / self.length assignments are missing
    # from this excerpt.
    def __init__(self, mount_point, fs_type, device, length, device2, context):
      self.mount_point = mount_point
      self.fs_type = fs_type
      self.device2 = device2
      self.context = context

  # NOTE(review): the try/except around this read is missing.
  data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
  print "Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab"

  if fstab_version == 1:
    # v1 format: <mount_point> <fs_type> <device> [<device2>] [options]
    for line in data.split("\n"):
      if not line or line.startswith("#"):
      pieces = line.split()
      if not 3 <= len(pieces) <= 4:
        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
      # Optional 4th field: a second device if it starts with "/",
      # otherwise an options string.
      if pieces[3].startswith("/"):
      mount_point = pieces[0]
      options = options.split(",")
      if i.startswith("length="):
      print "%s: unknown option \"%s\"" % (mount_point, i)
      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
                                 device=pieces[2], length=length,
  elif fstab_version == 2:
    for line in data.split("\n"):
      if not line or line.startswith("#"):
      # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
      pieces = line.split()
      raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
      # Ignore entries that are managed by vold
      if "voldmanaged=" in options:
      # It's a good line, parse it
      options = options.split(",")
      if i.startswith("length="):
      # Ignore all unknown options in the unified fstab
      mount_flags = pieces[3]
      # Honor the SELinux context if present.
      for i in mount_flags.split(","):
        if i.startswith("context="):
      mount_point = pieces[1]
      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
                                 device=pieces[0], length=length,
                                 device2=None, context=context)
  # NOTE(review): the `else:` introducing this raise is missing.
    raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))

  # / is used for the system mount point when the root directory is included in
  # system. Other areas assume system is always at "/system" so point /system
  # at / in that case.
  if system_root_image:
    assert not d.has_key("/system") and d.has_key("/")
    d["/system"] = d["/"]
394 for k, v in sorted(d.items()):
395 print "%-25s = (%s) %s" % (k, type(v).__name__, v)
def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
  # NOTE(review): the continuation of this signature (a has_ramdisk-style
  # parameter, judging by the body and the caller in GetBootableImage) is
  # missing from this excerpt.
  """Build a bootable image from the specified sourcedir.

  Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
  'sourcedir'), and turn them into a boot image. Return the image data, or
  None if sourcedir does not appear to contains files for building the
  requested image.
  """

  # Local helper: pack sourcedir/RAMDISK into a minigzip'd cpio archive.
  # NOTE(review): the `def make_ramdisk():` header is missing here.
  ramdisk_img = tempfile.NamedTemporaryFile()

  if os.access(fs_config_file, os.F_OK):
    cmd = ["mkbootfs", "-f", fs_config_file,
           os.path.join(sourcedir, "RAMDISK")]
    # NOTE(review): the `else:` before this fallback is missing.
    cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
  p1 = Run(cmd, stdout=subprocess.PIPE)
  p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())

  assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
  assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)

  # A bootable image needs a kernel; bail out early if it is absent.
  if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):

  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  img = tempfile.NamedTemporaryFile()
  bootimg_key = os.getenv("PRODUCT_PRIVATE_KEY", None)

  ramdisk_img = make_ramdisk()

  """check if uboot is requested"""
  fn = os.path.join(sourcedir, "ubootargs")
  if os.access(fn, os.F_OK):
    # Build a mkimage-style command from the ubootargs file contents.
    for argument in open(fn).read().rstrip("\n").split(" "):
    cmd.append(os.path.join(sourcedir, "kernel") + ":" + ramdisk_img.name)

  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"

  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]

  # Each optional file under sourcedir contributes a mkbootimg flag.
  fn = os.path.join(sourcedir, "second")
  if os.access(fn, os.F_OK):
    cmd.append("--second")

  fn = os.path.join(sourcedir, "cmdline")
  if os.access(fn, os.F_OK):
    cmd.append("--cmdline")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "base")
  if os.access(fn, os.F_OK):
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "tagsaddr")
  if os.access(fn, os.F_OK):
    cmd.append("--tags-addr")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "tags_offset")
  if os.access(fn, os.F_OK):
    cmd.append("--tags_offset")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "ramdisk_offset")
  if os.access(fn, os.F_OK):
    cmd.append("--ramdisk_offset")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "dt")
  if os.access(fn, os.F_OK):

  fn = os.path.join(sourcedir, "pagesize")
  if os.access(fn, os.F_OK):
    kernel_pagesize = open(fn).read().rstrip("\n")
    cmd.append("--pagesize")
    cmd.append(kernel_pagesize)

  args = info_dict.get("mkbootimg_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  args = info_dict.get("mkbootimg_version_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  cmd.extend(["--ramdisk", ramdisk_img.name])

  # vboot builds write an unsigned image first, then sign it below.
  if info_dict.get("vboot", None):
    img_unsigned = tempfile.NamedTemporaryFile()
    cmd.extend(["--output", img_unsigned.name])
    # NOTE(review): the `else:` before this line is missing.
    cmd.extend(["--output", img.name])

  p = Run(cmd, stdout=subprocess.PIPE)
  assert p.returncode == 0, "mkbootimg of %s image failed" % (
      os.path.basename(sourcedir),)

  # Legacy signing path driven by $PRODUCT_PRIVATE_KEY: sha256 + RSA
  # sign the raw image and append the page-aligned signature.
  if bootimg_key and os.path.exists(bootimg_key) and kernel_pagesize > 0:
    print "Signing bootable image..."
    bootimg_key_passwords = {}
    bootimg_key_passwords.update(PasswordManager().GetPasswords(bootimg_key.split()))
    bootimg_key_password = bootimg_key_passwords[bootimg_key]
    if bootimg_key_password is not None:
      bootimg_key_password += "\n"
    img_sha256 = tempfile.NamedTemporaryFile()
    img_sig = tempfile.NamedTemporaryFile()
    img_sig_padded = tempfile.NamedTemporaryFile()
    img_secure = tempfile.NamedTemporaryFile()
    p = Run(["openssl", "dgst", "-sha256", "-binary", "-out", img_sha256.name, img.name],
            stdout=subprocess.PIPE)
    assert p.returncode == 0, "signing of bootable image failed"
    p = Run(["openssl", "rsautl", "-sign", "-in", img_sha256.name, "-inkey", bootimg_key, "-out",
             img_sig.name, "-passin", "stdin"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    p.communicate(bootimg_key_password)
    assert p.returncode == 0, "signing of bootable image failed"
    # Pad the signature to one kernel page with dd.
    p = Run(["dd", "if=/dev/zero", "of=%s" % img_sig_padded.name, "bs=%s" % kernel_pagesize,
             "count=1"], stdout=subprocess.PIPE)
    assert p.returncode == 0, "signing of bootable image failed"
    p = Run(["dd", "if=%s" % img_sig.name, "of=%s" % img_sig_padded.name, "conv=notrunc"],
            stdout=subprocess.PIPE)
    assert p.returncode == 0, "signing of bootable image failed"
    p = Run(["cat", img.name, img_sig_padded.name], stdout=img_secure.file.fileno())
    assert p.returncode == 0, "signing of bootable image failed"
    shutil.copyfile(img_secure.name, img.name)
    img_sig_padded.close()

  if (info_dict.get("boot_signer", None) == "true" and
      info_dict.get("verity_key", None)):
    path = "/" + os.path.basename(sourcedir).lower()
    cmd = [OPTIONS.boot_signer_path]
    cmd.extend(OPTIONS.boot_signer_args)
    cmd.extend([path, img.name,
                info_dict["verity_key"] + ".pk8",
                info_dict["verity_key"] + ".x509.pem", img.name])
    p = Run(cmd, stdout=subprocess.PIPE)
    assert p.returncode == 0, "boot_signer of %s image failed" % path

  # Sign the image if vboot is non-empty.
  elif info_dict.get("vboot", None):
    path = "/" + os.path.basename(sourcedir).lower()
    img_keyblock = tempfile.NamedTemporaryFile()
    # NOTE(review): the tail of this argument list is missing from this
    # excerpt.
    cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
           info_dict["vboot_key"] + ".vbprivk",
           info_dict["vboot_subkey"] + ".vbprivk",
    p = Run(cmd, stdout=subprocess.PIPE)
    assert p.returncode == 0, "vboot_signer of %s image failed" % path

  # Clean up the temp files.

  # NOTE(review): seek(offset, whence) -- these arguments look transposed
  # and only work because os.SEEK_SET == 0; prefer img.seek(0, os.SEEK_SET).
  img.seek(os.SEEK_SET, 0)
def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
  # NOTE(review): the rest of this signature (an info_dict parameter, per
  # the body below) is missing from this excerpt.
  """Return a File object with the desired bootable image.

  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
  the source files in 'unpack_dir'/'tree_subdir'."""

  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print "using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,)
    return File.FromLocalFile(name, prebuilt_path)

  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print "using prebuilt %s from IMAGES..." % (prebuilt_name,)
    return File.FromLocalFile(name, prebuilt_path)

  print "building image from target_files %s..." % (tree_subdir,)

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # With system_root_image == "true", we don't pack ramdisk into the boot image.
  # Unless "recovery_as_boot" is specified, in which case we carry the ramdisk
  # in the boot image.
  has_ramdisk = (info_dict.get("system_root_image") != "true" or
                 prebuilt_name != "boot.img" or
                 info_dict.get("recovery_as_boot") == "true")

  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
  data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
                             os.path.join(unpack_dir, fs_config),
                             info_dict, has_ramdisk)

  return File(name, data)
def UnzipTemp(filename, pattern=None):
  """Unzip the given archive into a temporary directory and return the name.

  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
  temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.

  Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
  main file), open for reading.
  """

  tmp = tempfile.mkdtemp(prefix="targetfiles-")
  OPTIONS.tempfiles.append(tmp)

  def unzip_to_dir(filename, dirname):
    # NOTE(review): this rm -rf receives `dirname + filename` (a plain
    # string concatenation with no path separator) and a literal,
    # unexpanded "targetfiles-*" argument (no shell, so no globbing) --
    # both look suspect; confirm intent against the full source.
    subprocess.call(["rm", "-rf", dirname + filename, "targetfiles-*"])
    cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
    if pattern is not None:
    p = Run(cmd, stdout=subprocess.PIPE)
    # NOTE(review): the p.communicate() call is missing from this excerpt.
    if p.returncode != 0:
      raise ExternalError("failed to unzip input target-files \"%s\"" %

  # "foo.zip+bar.zip" form: the second zip supplies BOOTABLE_IMAGES.
  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
  unzip_to_dir(m.group(1), tmp)
  unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
  filename = m.group(1)
  # NOTE(review): the `else:` before this plain-file path is missing.
  unzip_to_dir(filename, tmp)

  return tmp, zipfile.ZipFile(filename, "r")
def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them. Return a {key: password} dict. password
  will be None if the key has no password."""
  # NOTE(review): the initialization of no_passwords / need_passwords /
  # key_passwords and several continue/else lines are missing from this
  # excerpt; indentation below is reconstructed.

  devnull = open("/dev/null", "w+b")
  for k in sorted(keylist):
    # We don't need a password for things that aren't really keys.
    if k in SPECIAL_CERT_STRINGS:
      no_passwords.append(k)

    # First probe: -nocrypt succeeds only on an unencrypted key.
    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-nocrypt"],
            stdin=devnull.fileno(),
            stdout=devnull.fileno(),
            stderr=subprocess.STDOUT)
    if p.returncode == 0:
      # Definitely an unencrypted key.
      no_passwords.append(k)
      # Second probe: try decrypting with the empty passphrase.
      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
               "-inform", "DER", "-passin", "pass:"],
              stdin=devnull.fileno(),
              stdout=devnull.fileno(),
              stderr=subprocess.PIPE)
      _, stderr = p.communicate()
      if p.returncode == 0:
        # Encrypted key with empty string as password.
        key_passwords[k] = ''
      elif stderr.startswith('Error decrypting key'):
        # Definitely encrypted key.
        # It would have said "Error reading key" if it didn't parse correctly.
        need_passwords.append(k)
        # Potentially, a type of key that openssl doesn't understand.
        # We'll let the routines in signapk.jar handle it.
        no_passwords.append(k)

  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
  key_passwords.update(dict.fromkeys(no_passwords, None))
def GetMinSdkVersion(apk_name):
  """Get the minSdkVersion declared in the APK. This can be both a decimal number
  (API Level) or a codename.
  """
  p = Run(["aapt", "dump", "badging", apk_name], stdout=subprocess.PIPE)
  output, err = p.communicate()
  # NOTE(review): the returncode check guarding this raise, and the tail of
  # its format arguments, are missing from this excerpt.
  raise ExternalError("Failed to obtain minSdkVersion: aapt return code %s"

  for line in output.split("\n"):
    # Looking for lines such as sdkVersion:'23' or sdkVersion:'M'
    m = re.match(r'sdkVersion:\'([^\']*)\'', line)
  raise ExternalError("No minSdkVersion returned by aapt")
def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
  """Get the minSdkVersion declared in the APK as a number (API Level). If
  minSdkVersion is set to a codename, it is translated to a number using the
  provided codename_to_api_level_map.
  """
  version = GetMinSdkVersion(apk_name)
  # NOTE(review): the try/int(version) fast path and the except clause are
  # missing from this excerpt.
  # Not a decimal number. Codename?
  if version in codename_to_api_level_map:
    return codename_to_api_level_map[version]

  raise ExternalError("Unknown minSdkVersion: '%s'. Known codenames: %s"
                      % (version, codename_to_api_level_map))
def SignFile(input_name, output_name, key, password, min_api_level=None,
             codename_to_api_level_map=dict(),
  # NOTE(review): the final parameter (a whole_file flag, per the docstring)
  # is missing from this excerpt.  Also note the shared mutable default
  # dict(); it is only read below, so it is harmless as written.
  """Sign the input_name zip/jar/apk, producing output_name. Use the
  given key and password (the latter may be None if the key does not
  have a password).

  If whole_file is true, use the "-w" option to SignApk to embed a
  signature that covers the whole file in the archive comment of the
  zip file.

  min_api_level is the API Level (int) of the oldest platform this file may end
  up on. If not specified for an APK, the API Level is obtained by interpreting
  the minSdkVersion attribute of the APK's AndroidManifest.xml.

  codename_to_api_level_map is needed to translate the codename which may be
  encountered as the APK's minSdkVersion.
  """

  java_library_path = os.path.join(
      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)

  cmd = [OPTIONS.java_path, OPTIONS.java_args,
         "-Djava.library.path=" + java_library_path,
         os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)]
  cmd.extend(OPTIONS.extra_signapk_args)

  min_sdk_version = min_api_level
  if min_sdk_version is None:
    min_sdk_version = GetMinSdkVersionInt(
        input_name, codename_to_api_level_map)
  if min_sdk_version is not None:
    cmd.extend(["--min-sdk-version", str(min_sdk_version)])

  cmd.extend([key + OPTIONS.public_key_suffix,
              key + OPTIONS.private_key_suffix,
              input_name, output_name])

  p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  # NOTE(review): the body of this guard (appending a newline to the
  # password) is missing from this excerpt.
  if password is not None:
  p.communicate(password)
  if p.returncode != 0:
    raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))
def CheckSize(data, target, info_dict):
  """Check the data string passed against the max size limit, if
  any, for the given target. Raise exception if the data is too big.
  Print a warning if the data is nearing the maximum size."""
  # NOTE(review): size = len(data), the fs_type/device unpacking, and
  # several guard/else lines are missing from this excerpt.

  if target.endswith(".img"):
    mount_point = "/" + target

  if info_dict["fstab"]:
    # Both userdata variants are mounted at /data.
    if mount_point == "/userdata_extra":
      mount_point = "/data"
    if mount_point == "/userdata":
      mount_point = "/data"
    p = info_dict["fstab"][mount_point]
    # Strip the directory part, leaving just the device basename.
    device = device[device.rfind("/")+1:]
  limit = info_dict.get(device + "_size", None)
  if not fs_type or not limit:

  if fs_type == "yaffs2":
    # image size should be increased by 1/64th to account for the
    # spare area (64 bytes per 2k page)
    limit = limit / 2048 * (2048+64)

  pct = float(size) * 100.0 / limit
  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
  # NOTE(review): the percentage thresholds selecting between raise,
  # warning, and verbose print are missing from this excerpt.
  raise ExternalError(msg)
  print " WARNING: ", msg
  elif OPTIONS.verbose:
def ReadApkCerts(tf_zip):
  """Given a target_files ZipFile, parse the META/apkcerts.txt file
  and return a {package: cert} dict."""
  # NOTE(review): the certmap initialization, the blank/comment-line skip,
  # and the final return are missing from this excerpt.
  for line in tf_zip.read("META/apkcerts.txt").split("\n"):
    m = re.match(r'^name="(.*)"\s+certificate="(.*)"\s+'
                 r'private_key="(.*)"$', line)
    name, cert, privkey = m.groups()
    public_key_suffix_len = len(OPTIONS.public_key_suffix)
    private_key_suffix_len = len(OPTIONS.private_key_suffix)
    # Special certs (PRESIGNED / EXTERNAL) carry no private key.
    if cert in SPECIAL_CERT_STRINGS and not privkey:
    elif (cert.endswith(OPTIONS.public_key_suffix) and
          privkey.endswith(OPTIONS.private_key_suffix) and
          cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
      certmap[name] = cert[:-public_key_suffix_len]
      # NOTE(review): the `else:` introducing this raise is missing.
      raise ValueError("failed to parse line from apkcerts.txt:\n" + line)
872 COMMON_DOCSTRING = """
874 Prepend <dir>/bin to the list of places to search for binaries
875 run by this script, and expect to find jars in <dir>/framework.
877 -s (--device_specific) <file>
878 Path to the python module containing device-specific
881 -x (--extra) <key=value>
882 Add a key/value pair to the 'extras' dict, which device-specific
883 extension code may look at.
886 Show command lines being executed.
889 Display this usage message and exit.
892 def Usage(docstring):
893 print docstring.rstrip("\n")
894 print COMMON_DOCSTRING
def ParseOptions(argv,
  # NOTE(review): a `docstring` parameter (referenced by the docstring
  # below) is missing from this signature in this excerpt.
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler=None):
  """Parse the options in argv and return any arguments that aren't
  flags. docstring is the calling module's docstring, to be displayed
  for errors and -h. extra_opts and extra_long_opts are for flags
  defined by the caller, which are processed by passing them to
  extra_option_handler."""

  # NOTE(review): the `try:` wrapping this getopt call, the closing of the
  # long-options list, and the Usage()/sys.exit() error path are missing
  # from this excerpt.
  opts, args = getopt.getopt(
      argv, "hvp:s:x:" + extra_opts,
      ["help", "verbose", "path=", "signapk_path=",
       "signapk_shared_library_path=", "extra_signapk_args=",
       "java_path=", "java_args=", "public_key_suffix=",
       "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
       "verity_signer_path=", "verity_signer_args=", "device_specific=",
      list(extra_long_opts))
  except getopt.GetoptError as err:
    print "**", str(err), "**"

  # NOTE(review): the `for o, a in opts:` loop header is missing; the
  # chain below copies each recognized flag into the global OPTIONS.
  if o in ("-h", "--help"):
  elif o in ("-v", "--verbose"):
    OPTIONS.verbose = True
  elif o in ("-p", "--path"):
    OPTIONS.search_path = a
  elif o in ("--signapk_path",):
    OPTIONS.signapk_path = a
  elif o in ("--signapk_shared_library_path",):
    OPTIONS.signapk_shared_library_path = a
  elif o in ("--extra_signapk_args",):
    OPTIONS.extra_signapk_args = shlex.split(a)
  elif o in ("--java_path",):
    OPTIONS.java_path = a
  elif o in ("--java_args",):
    OPTIONS.java_args = a
  elif o in ("--public_key_suffix",):
    OPTIONS.public_key_suffix = a
  elif o in ("--private_key_suffix",):
    OPTIONS.private_key_suffix = a
  elif o in ("--boot_signer_path",):
    OPTIONS.boot_signer_path = a
  elif o in ("--boot_signer_args",):
    OPTIONS.boot_signer_args = shlex.split(a)
  elif o in ("--verity_signer_path",):
    OPTIONS.verity_signer_path = a
  elif o in ("--verity_signer_args",):
    OPTIONS.verity_signer_args = shlex.split(a)
  elif o in ("-s", "--device_specific"):
    OPTIONS.device_specific = a
  elif o in ("-x", "--extra"):
    key, value = a.split("=", 1)
    OPTIONS.extras[key] = value
    # NOTE(review): the `else:` introducing this fallthrough is missing.
    if extra_option_handler is None or not extra_option_handler(o, a):
      assert False, "unknown option \"%s\"" % (o,)

  # Make tools under <search_path>/bin win over the system ones.
  if OPTIONS.search_path:
    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                          os.pathsep + os.environ["PATH"])
def MakeTempFile(prefix=None, suffix=None):
  """Make a temp file and add it to the list of things to be deleted
  when Cleanup() is called. Return the filename."""
  fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  # NOTE(review): the os.close(fd) and `return fn` lines are missing from
  # this excerpt; without os.close the descriptor would leak.
  OPTIONS.tempfiles.append(fn)
978 for i in OPTIONS.tempfiles:
class PasswordManager(object):
  """Collects key passwords, preferring $ANDROID_PW_FILE edited via
  $EDITOR, falling back to interactive getpass prompts."""
  # NOTE(review): the `def __init__(self):` header is missing from this
  # excerpt.
  self.editor = os.getenv("EDITOR", None)
  self.pwfile = os.getenv("ANDROID_PW_FILE", None)

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict. (The dict may have keys in addition to the
    strings in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords. If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.
    """
    current = self.ReadFile()
    # NOTE(review): the retry loop header, missing-item scan, and return
    # are absent from this excerpt.
    if i not in current or not current[i]:
    # Are all the passwords already in the file?
    print "key file %s still missing some passwords." % (self.pwfile,)
    answer = raw_input("try to edit again? [y]> ").strip()
    if answer and answer[0] not in 'yY':
      raise RuntimeError("key passwords unavailable")
    current = self.UpdateAndReadFile(current)

  def PromptResult(self, current): # pylint: disable=no-self-use
    """Prompt the user to enter a value (password) for each key in
    'current' whose value is false. Returns a new dict with all the
    values filled in."""
    # NOTE(review): the result-dict initialization, the keep-existing
    # branch, and the return are missing from this excerpt.
    for k, v in sorted(current.iteritems()):
      result[k] = getpass.getpass(
          "Enter password for %s key> " % k).strip()

  def UpdateAndReadFile(self, current):
    # Write the known passwords to pwfile, open it in $EDITOR so the user
    # can fill in the blanks, then parse the file back.
    if not self.editor or not self.pwfile:
      return self.PromptResult(current)

    f = open(self.pwfile, "w")
    # 0600: the file holds secrets.
    os.chmod(self.pwfile, 0o600)
    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
    f.write("# (Additional spaces are harmless.)\n\n")

    # Sort entries lacking a password first so the cursor lands on one.
    sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
    for i, (_, k, v) in enumerate(sorted_list):
      f.write("[[[ %s ]]] %s\n" % (v, k))
      if not v and first_line is None:
        # position cursor on first line with no password.

    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
    _, _ = p.communicate()

    return self.ReadFile()

  # Parse pwfile back into a {key: password} dict.
  # NOTE(review): the `def ReadFile(self):` header, its try:, the result
  # initialization, the per-line loop header, and the return are missing
  # from this excerpt.
    if self.pwfile is None:
    f = open(self.pwfile, "r")
    if not line or line[0] == '#':
    m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
    print "failed to parse password file: ", line
    result[m.group(2)] = m.group(1)
  except IOError as e:
    # A missing password file is fine; anything else is reported.
    if e.errno != errno.ENOENT:
      print "error reading password file: ", str(e)
def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
             compress_type=None):
  """Add filename to zip_file with the given perms and a fixed timestamp,
  working around Python 2.7's spurious zip64 requirement."""
  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
  # for files larger than 2GiB. We can work around this by adjusting their
  # limit. Note that `zipfile.writestr()` will not work for strings larger than
  # 2GiB. The Python interpreter sometimes rejects strings that large (though
  # it isn't clear to me exactly what circumstances cause this).
  # `zipfile.write()` must be used directly to work around this.
  #
  # This mess can be avoided if we port to python3.
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if compress_type is None:
    compress_type = zip_file.compression

  saved_stat = os.stat(filename)

  # NOTE(review): a try/finally around the chmod/utime/write below is
  # missing from this excerpt.
  # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
  # file to be zipped and reset it when we're done.
  os.chmod(filename, perms)

  # Use a fixed timestamp so the output is repeatable.
  epoch = datetime.datetime.fromtimestamp(0)
  timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
  os.utime(filename, (timestamp, timestamp))

  zip_file.write(filename, arcname=arcname, compress_type=compress_type)

  # Restore the original mode/mtimes and the zip64 limit.
  os.chmod(filename, saved_stat.st_mode)
  os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
  zipfile.ZIP64_LIMIT = saved_zip64_limit
def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
                compress_type=None):
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.
  """
  # NOTE(review): a couple of else:/default-perms lines are missing from
  # this excerpt.

  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
    # Build a fresh ZipInfo for a bare archive name.
    zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
    zinfo.compress_type = zip_file.compression
    # NOTE(review): the `else:` before this line is missing; a ZipInfo
    # argument is used as-is.
    zinfo = zinfo_or_arcname

  # If compress_type is given, it overrides the value in zinfo.
  if compress_type is not None:
    zinfo.compress_type = compress_type

  # If perms is given, it has a priority.
  if perms is not None:
    # If perms doesn't set the file type, mark it as a regular file.
    if perms & 0o770000 == 0:
    # Unix mode bits live in the high 16 bits of external_attr.
    zinfo.external_attr = perms << 16

  # Use a fixed timestamp so the output is repeatable.
  zinfo.date_time = (2009, 1, 1, 0, 0, 0)

  zip_file.writestr(zinfo, data)
  zipfile.ZIP64_LIMIT = saved_zip64_limit
def ZipClose(zip_file):
  """Close `zip_file` with the zip64 limit temporarily raised.

  zipfile also refers to ZIP64_LIMIT during close() when it writes out the
  central directory, so the same workaround as in ZipWrite()/ZipWriteStr()
  is needed here.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1
  try:
    zip_file.close()
  finally:
    # Restore the limit even if close() raises, so later zip operations in
    # this process are not silently capped.
    zipfile.ZIP64_LIMIT = saved_zip64_limit
class DeviceSpecificParams(object):
  """Proxy for optional device-specific OTA extension hooks.

  Constructor keyword arguments become attributes of this object, which is
  passed as the first argument to every function in the device-specific
  module named by OPTIONS.device_specific. Hooks that the module does not
  define fall back to a default value via _DoCall().
  """

  def __init__(self, **kwargs):
    """Keyword arguments to the constructor become attributes of this
    object, which is passed to all functions in the device-specific
    module."""
    # NOTE(review): several statements of this constructor (the body of the
    # for-loop, the guard/try-except around module loading, and the else
    # branches) appear to have been elided from this copy of the file.
    for k, v in kwargs.iteritems():
    self.extras = OPTIONS.extras

    if self.module is None:
      path = OPTIONS.device_specific
      # A directory is expected to contain a "releasetools" module; a file
      # path is imported directly under the name "device_specific".
      if os.path.isdir(path):
        info = imp.find_module("releasetools", [path])
        d, f = os.path.split(path)
        b, x = os.path.splitext(f)
        info = imp.find_module(f, [d])
        print "loaded device-specific extensions from", path
        self.module = imp.load_module("device_specific", *info)
        print "unable to load device-specific module; assuming none"

  def _DoCall(self, function_name, *args, **kwargs):
    """Call the named function in the device-specific module, passing
    the given args and kwargs. The first argument to the call will be
    the DeviceSpecific object itself. If there is no module, or the
    module does not define the function, return the value of the
    'default' kwarg (which itself defaults to None)."""
    if self.module is None or not hasattr(self.module, function_name):
      return kwargs.get("default", None)
    return getattr(self.module, function_name)(*((self,) + args), **kwargs)

  def FullOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of a
    full OTA package. Implementations can add whatever additional
    assertions they like."""
    return self._DoCall("FullOTA_Assertions")

  def FullOTA_InstallBegin(self):
    """Called at the start of full OTA installation."""
    return self._DoCall("FullOTA_InstallBegin")

  def FullOTA_InstallEnd(self):
    """Called at the end of full OTA installation; typically this is
    used to install the image for the device's baseband processor."""
    return self._DoCall("FullOTA_InstallEnd")

  def FullOTA_PostValidate(self):
    """Called after installing and validating /system; typically this is
    used to resize the system partition after a block based installation."""
    return self._DoCall("FullOTA_PostValidate")

  def IncrementalOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of an
    incremental OTA package. Implementations can add whatever
    additional assertions they like."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Called at the start of the verification phase of incremental
    OTA installation; additional checks can be placed here to abort
    the script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Called at the end of the verification phase of incremental OTA
    installation; additional checks can be placed here to abort the
    script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Called at the start of incremental OTA installation (after
    verification is complete)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_InstallEnd(self):
    """Called at the end of incremental OTA installation; typically
    this is used to install the image for the device's baseband
    processor."""
    return self._DoCall("IncrementalOTA_InstallEnd")

  def VerifyOTA_Assertions(self):
    """Hook for additional assertions in a verification-only package."""
    return self._DoCall("VerifyOTA_Assertions")
1272 def __init__(self, name, data):
1275 self.size = len(data)
1276 self.sha1 = sha1(data).hexdigest()
1279 def FromLocalFile(cls, name, diskname):
1280 f = open(diskname, "rb")
1283 return File(name, data)
  def WriteToTemp(self):
    """Materialize this file's data in a NamedTemporaryFile.

    NOTE(review): the write/flush of self.data and the `return t` appear to
    be elided from this copy; callers (e.g. Difference.ComputePatch) use the
    returned object's .name, so this method is expected to return `t`.
    """
    t = tempfile.NamedTemporaryFile()
  def AddToZip(self, z, compression=None):
    """Store this file's contents in the open output zip `z`.

    Args:
      z: An open zipfile.ZipFile.
      compression: Optional compression method, forwarded to ZipWriteStr()
          as compress_type.
    """
    ZipWriteStr(z, self.name, self.data, compress_type=compression)
# Map a target file's extension to the external diff tool that handles it
# best; zip-based formats use imgdiff's -z mode. Extensions not listed fall
# back to plain bsdiff (see Difference.ComputePatch).
# NOTE(review): at least one entry and the closing brace of this literal are
# not visible in this copy of the file.
DIFF_PROGRAM_BY_EXT = {
    ".zip" : ["imgdiff", "-z"],
    ".jar" : ["imgdiff", "-z"],
    ".apk" : ["imgdiff", "-z"],
class Difference(object):
  """Computes the patch that turns a source File (sf) into a target File
  (tf), by running an external diff tool (imgdiff or bsdiff)."""

  def __init__(self, tf, sf, diff_program=None):
    # NOTE(review): the assignments of self.tf, self.sf and self.patch are
    # not visible in this copy; ComputePatch()/GetPatch() below read them.
    self.diff_program = diff_program

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf. Returns the same tuple as GetPatch()."""
    # Pick the diff tool: an explicit per-instance override wins; otherwise
    # choose by the target's extension, defaulting to plain bsdiff.
    # NOTE(review): the locals `tf`/`sf` used below and the else: branch
    # appear to be elided from this copy.
    if self.diff_program:
      diff_program = self.diff_program
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    # Materialize both blobs on disk so the external tool can read them.
    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    ext = os.path.splitext(tf.name)[1]

      # The patch output goes to its own temp file; diff_program may be a
      # bare executable name or an argv-prefix list.
      ptemp = tempfile.NamedTemporaryFile()
      if isinstance(diff_program, list):
        cmd = copy.copy(diff_program)
        cmd = [diff_program]
      cmd.append(stemp.name)
      cmd.append(ttemp.name)
      cmd.append(ptemp.name)
      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # Runs in a helper thread so the main thread can enforce a timeout
        # on the child process.
        _, e = p.communicate()
    th = threading.Thread(target=run)
    th.join(timeout=300)  # 5 mins
      print "WARNING: diff command timed out"

    # Either stderr output or a non-zero exit means the diff failed.
    if err or p.returncode != 0:
      print "WARNING: failure running %s:\n%s\n" % (
          diff_program, "".join(err))
      return None, None, None

    return self.tf, self.sf, self.patch

  # NOTE(review): the `def GetPatch(self):` header appears to be missing
  # from this copy; the docstring and return statement below belong to it.
    """Return a tuple (target_file, source_file, patch_data).
    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed."""
    return self.tf, self.sf, self.patch
def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'."""
  print len(diffs), "diffs to compute"

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)  # accessed under lock

        # NOTE(review): the `def worker():` and the loop pulling items off
        # diff_iter are not visible in this copy; the statements below are
        # the per-item timing/reporting inside that worker.
        dur = time.time() - start

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = "%s (%s)" % (tf.name, sf.name)
          print "patching failed! %s" % (name,)
          print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
    except Exception as e:

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
    threads.pop().join()
class BlockDifference(object):
  """Drives blockimgdiff for one partition and emits the edify script
  fragments that verify, apply and post-check the block-based update."""

  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None, disable_imgdiff=False):
    self.partition = partition
    self.check_first_block = check_first_block
    self.disable_imgdiff = disable_imgdiff

    # Pick the transfer-list version from "blockimgdiff_versions" in the
    # info dict when none was given.
    # NOTE(review): the expression consuming the split() result below is
    # truncated in this copy.
    if OPTIONS.info_dict:
          OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    self.version = version

    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                                    version=self.version,
                                    disable_imgdiff=self.disable_imgdiff)
    # Compute() writes <path>.transfer.list / .new.dat / .patch.dat, which
    # _WriteUpdate() later packs into the output zip.
    tmpdir = tempfile.mkdtemp()
    OPTIONS.tempfiles.append(tmpdir)
    self.path = os.path.join(tmpdir, partition)
    b.Compute(self.path)
    self._required_cache = b.max_stashed_size
    self.touched_src_ranges = b.touched_src_ranges
    self.touched_src_sha1 = b.touched_src_sha1

    # Resolve the block device backing this partition; the second form uses
    # the source info dict (presumably for incrementals — the surrounding
    # conditional is not visible in this copy).
      _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
      _, self.device = GetTypeAndDevice("/" + partition,
                                        OPTIONS.source_info_dict)

  # Bytes that must be stashed while applying this diff (the @property
  # decorator line appears to be elided in this copy).
  def required_cache(self):
    return self._required_cache

  def WriteScript(self, script, output_zip, progress=None):
      # write the output unconditionally
      script.Print("Patching %s image unconditionally..." % (self.partition,))
      script.Print("Patching %s image after verification." % (self.partition,))

      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)
      self._WritePostInstallVerifyScript(script)

  def WriteStrictVerifyScript(self, script):
    """Verify all the blocks in the care_map, including clobbered blocks.

    This differs from the WriteVerifyScript() function: a) it prints different
    error messages; b) it doesn't allow half-way updated images to pass the
    verification."""

    partition = self.partition
    script.Print("Verifying %s..." % (partition,))
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    # One range_sha1 check over the whole care map; mismatch prints rather
    # than aborts.
    script.AppendExtra('range_sha1("%s", "%s") == "%s" && '
                       'ui_print(" Verified.") || '
                       'ui_print("\\"%s\\" has unexpected contents.");' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True),
    # NOTE(review): the closing `self.device))` argument line of the call
    # above appears to be missing in this copy.
    script.AppendExtra("")

  def WriteVerifyScript(self, script, touched_blocks_only=False):
    partition = self.partition

    # Full OTA (no source image): nothing to verify, just announce.
      script.Print("Image %s will be patched unconditionally." % (partition,))

      # Incremental OTA: choose which source ranges must hash-match before
      # patching may start.
      if touched_blocks_only and self.version >= 3:
        ranges = self.touched_src_ranges
        expected_sha1 = self.touched_src_sha1
        ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
        expected_sha1 = self.src.TotalSha1()

      # No blocks to be checked, skipping.

      ranges_str = ranges.to_string_raw()
      # v3+: a partially-updated partition also passes (block_image_verify),
      # so an interrupted update can be resumed.
      if self.version >= 4:
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                            self.device, ranges_str, expected_sha1,
                            self.device, partition, partition, partition))
      elif self.version == 3:
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                            self.device, ranges_str, expected_sha1,
                            self.device, partition, partition, partition))
        script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
            self.device, ranges_str, self.src.TotalSha1()))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      if self.version >= 4:

        # When generating incrementals for the system and vendor partitions in
        # version 4 or newer, explicitly check the first block (which contains
        # the superblock) of the partition to see if it's what we expect. If
        # this check fails, give an explicit log message about the partition
        # having been remounted R/W (the most likely explanation).
        if self.check_first_block:
          script.AppendExtra('check_first_block("%s");' % (self.device,))

        # If version >= 4, try block recovery before abort update
        if partition == "system":
          code = ErrorCode.SYSTEM_RECOVER_FAILURE
          code = ErrorCode.VENDOR_RECOVER_FAILURE
        script.AppendExtra((
            'ifelse (block_image_recover("{device}", "{ranges}") && '
            'block_image_verify("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat"), '
            'ui_print("{partition} recovered successfully."), '
            'abort("E{code}: {partition} partition fails to recover"));\n'
            'endif;').format(device=self.device, ranges=ranges_str,
                             partition=partition, code=code))

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      #    unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
        if partition == "system":
          code = ErrorCode.SYSTEM_VERIFICATION_FAILURE
          code = ErrorCode.VENDOR_VERIFICATION_FAILURE
        script.AppendExtra((
            'abort("E%d: %s partition has unexpected contents");\n'
            'endif;') % (code, partition))

  def _WritePostInstallVerifyScript(self, script):
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
        self.device, ranges_str,
        self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
          self.device, ranges_str,
          self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      if partition == "system":
        code = ErrorCode.SYSTEM_NONZERO_CONTENTS
        code = ErrorCode.VENDOR_NONZERO_CONTENTS
          ' abort("E%d: %s partition has unexpected non-zero contents after '
          'endif;' % (code, partition))
      script.Print('Verified the updated %s image.' % (partition,))

    if partition == "system":
      code = ErrorCode.SYSTEM_UNEXPECTED_CONTENTS
      code = ErrorCode.VENDOR_UNEXPECTED_CONTENTS
        ' abort("E%d: %s partition has unexpected contents after OTA '
        'endif;' % (code, partition))

  def _WriteUpdate(self, script, output_zip):
    # Pack the three artifacts produced by BlockImageDiff.Compute() into
    # the output zip; the patch data is stored uncompressed (ZIP_STORED).
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))
    ZipWrite(output_zip,
             '{}.new.dat'.format(self.path),
             '{}.new.dat'.format(self.partition))
    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    if self.partition == "system":
      code = ErrorCode.SYSTEM_UPDATE_FAILURE
      code = ErrorCode.VENDOR_UPDATE_FAILURE

    # The actual update call; aborts the script with a partition-specific
    # error code on failure.
    call = ('block_image_update("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat") ||\n'
            '  abort("E{code}: Failed to update {partition} image.");'.format(
                device=self.device, partition=self.partition, code=code))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges):  # pylint: disable=no-self-use
    """Return the hex digest over the given ranges read from `source`.

    NOTE(review): the hash-context construction and update lines appear to
    be elided in this copy; only the read and the final hexdigest remain.
    """
    data = source.ReadRangeSet(ranges)

    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks):  # pylint: disable=no-self-use
    """Return the hash value for all zero blocks."""
    # Feed num_blocks worth of 4096-byte zero blocks through the hash.
    # NOTE(review): the hash-context construction is not visible here.
    zero_block = '\x00' * 4096
    for _ in range(num_blocks):
      ctx.update(zero_block)

    return ctx.hexdigest()
# Re-export blockimgdiff's DataImage so callers can keep using this module's
# namespace for it.
DataImage = blockimgdiff.DataImage

# map recovery.fstab's fs_types to mount/format "partition types"
def GetTypeAndDevice(mount_point, info):
  """Look up a mount point in the info dict's parsed fstab.

  Args:
    mount_point: The mount point, e.g. "/system".
    info: An info dict carrying a parsed "fstab" mapping.

  Returns:
    A (partition_type, device) tuple, or None when the info dict carries
    no fstab (instead of crashing on the lookup).
  """
  fstab = info["fstab"]
  if fstab:
    return (PARTITION_TYPES[fstab[mount_point].fs_type],
            fstab[mount_point].device)
  return None
def ParseCertificate(data):
  """Parse a PEM-format certificate and return its decoded (DER) payload.

  Collects the base64 body between the BEGIN/END CERTIFICATE markers and
  decodes it with the base64 module instead of the Python-2-only
  str.decode('base64') codec, so the function also works under Python 3
  (where it returns bytes; under Python 2 the result is a str, as before).

  Args:
    data: The PEM text as a string.

  Returns:
    The decoded certificate body.
  """
  # Local import: keeps this fix self-contained with respect to the
  # module's top-of-file import block.
  import base64

  cert = []
  save = False
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
      break
    if save:
      cert.append(line)
    if "--BEGIN CERTIFICATE--" in line:
      save = True
  return base64.b64decode("".join(cert))
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
  # NOTE(review): the continuation of this signature (presumably an
  # `info_dict=None` parameter and the closing `):`) is not visible in this
  # copy; `info_dict` is used below.
  """Generate a binary patch that creates the recovery image starting
  with the boot image. (Most of the space in these images is just the
  kernel, which is identical for the two, so the resulting patch
  should be efficient.) Add it to the output zip, along with a shell
  script that is run from init.rc on first boot to actually do the
  patching and install the new recovery image.

  recovery_img and boot_img should be File objects for the
  corresponding images. info should be the dictionary returned by
  common.LoadInfoDict() on the input target_files.
  """
  if info_dict is None:
    info_dict = OPTIONS.info_dict

  full_recovery_image = info_dict.get("full_recovery_image", None) == "true"
  system_root_image = info_dict.get("system_root_image", None) == "true"

  # Full image: ship recovery.img as-is; otherwise produce the
  # recovery-from-boot.p patch below (the else: branch line appears to be
  # elided in this copy).
  if full_recovery_image:
    output_sink("etc/recovery.img", recovery_img.data)

    diff_program = ["imgdiff"]
    path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
    if os.path.exists(path):
      # Feed imgdiff the shared resource file as a "bonus" input, and tell
      # the on-device applypatch invocation about it too.
      diff_program.append("-b")
      diff_program.append(path)
      bonus_args = "-b /system/etc/recovery-resource.dat"

    d = Difference(recovery_img, boot_img, diff_program=diff_program)
    _, _, patch = d.ComputePatch()
    output_sink("recovery-from-boot.p", patch)

  # The following GetTypeAndDevice()s need to use the path in the target
  # info_dict instead of source_info_dict.
  boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
  recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)

  # First-boot script: either install the full image directly, or patch it
  # out of the boot image with applypatch.
  if full_recovery_image:
    sh = """#!/system/bin/sh
if [ -f /system/etc/recovery-transform.sh ]; then
  exec sh /system/etc/recovery-transform.sh %(recovery_size)d %(recovery_sha1)s %(boot_size)d %(boot_sha1)s

if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
  log -t recovery "Recovery image already installed"
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size}
    sh = """#!/system/bin/sh
if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
  log -t recovery "Recovery image already installed"
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device,
       'recovery_type': recovery_type,
       'recovery_device': recovery_device,
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin
  # in the L release. Parse init.*.rc files to find out where the
  # target-files expects it to be, and put it there.
  sh_location = "etc/install-recovery.sh"

  if system_root_image:
    init_rc_dir = os.path.join(input_dir, "ROOT")
    init_rc_dir = os.path.join(input_dir, "BOOT", "RAMDISK")
  init_rc_files = os.listdir(init_rc_dir)
  for init_rc_file in init_rc_files:
    # Only files named init.*.rc can declare the flash_recovery service.
    if (not init_rc_file.startswith('init.') or
        not init_rc_file.endswith('.rc')):

    with open(os.path.join(init_rc_dir, init_rc_file)) as f:
        m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
          sh_location = m.group(1)

  print "putting script in", sh_location

  output_sink(sh_location, sh)