1 # Copyright (C) 2008 The Android Open Source Project
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
7 # http://www.apache.org/licenses/LICENSE-2.0
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
35 from hashlib import sha1 as sha1
class Options(object):
  """Shared option container; the releasetools scripts read/write a single
  module-level instance (OPTIONS)."""
  # NOTE(review): the `def __init__(self):` header, the search-path dict's
  # closing brace and a few attribute lines appear to be missing from this
  # excerpt -- confirm against the full file before editing.
    platform_search_path = {
        "linux2": "out/host/linux-x86",
        "darwin": "out/host/darwin-x86",

    # Host-tool locations; search_path defaults by host platform.
    self.search_path = platform_search_path.get(sys.platform, None)
    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
    self.extra_signapk_args = []
    self.java_path = "java"  # Use the one on the path by default.
    self.java_args = "-Xmx2048m"  # JVM Args
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"
    # use otatools built boot_signer by default
    self.boot_signer_path = "boot_signer"
    self.boot_signer_args = []
    self.verity_signer_path = None
    self.verity_signer_args = []
    # Path of the device-specific extensions module (-s/--device_specific).
    self.device_specific = None
    # Info dicts used when generating incremental OTAs.
    self.source_info_dict = None
    self.target_info_dict = None
    self.worker_threads = None
# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")


class ExternalError(RuntimeError):
  """Raised when an external tool or check fails (unzip, signapk, zipalign,
  image size limits -- see the raise sites below)."""
def Run(args, **kwargs):
  """Create and return a subprocess.Popen object, printing the command
  line on the terminal if -v was specified."""
  # NOTE(review): the `if OPTIONS.verbose:` guard appears to be missing from
  # this excerpt; as written the command line prints unconditionally.
  print " running: ", " ".join(args)
  # kwargs are forwarded verbatim to Popen (stdin/stdout/stderr, etc.).
  return subprocess.Popen(args, **kwargs)
def CloseInheritedPipes():
  """ Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds
  before doing other work."""
  # NOTE(review): several lines (the early return, the fstat try/except and
  # the close call) appear to be missing from this excerpt.
  if platform.system() != "Darwin":
  # Scan fds 3..1024, skipping stdin/stdout/stderr.
  for d in range(3, 1025):
      # 0x1000 is the S_IFIFO bit of st_mode: true when fd `d` is a pipe.
      pipebit = stat[0] & 0x1000
def LoadInfoDict(input_file):
  """Read and parse the META/misc_info.txt key/value pairs from the
  input target files and return a dict.

  `input_file` is either an open zipfile.ZipFile or the path of an
  unpacked target-files directory (see the isinstance check below).
  """
  # NOTE(review): several lines appear to be missing from this excerpt (the
  # read_helper def header, try/except wrappers, the makeint helper def and
  # the trailing return) -- confirm against the full file.

    # read_helper(fn): fetch member `fn` from the zip or the unpacked tree.
    if isinstance(input_file, zipfile.ZipFile):
      return input_file.read(fn)
      path = os.path.join(input_file, *fn.split("/"))
      with open(path) as f:
        if e.errno == errno.ENOENT:

    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
    # ok if misc_info.txt doesn't exist

  # backwards compatibility: These values used to be in their own
  # files. Look for them, in case we're processing an old
  if "mkyaffs2_extra_flags" not in d:
      d["mkyaffs2_extra_flags"] = read_helper(
          "META/mkyaffs2-extra-flags.txt").strip()
      # ok if flags don't exist

  if "recovery_api_version" not in d:
      d["recovery_api_version"] = read_helper(
          "META/recovery-api-version.txt").strip()
      raise ValueError("can't find recovery API version in input target-files")

  if "tool_extensions" not in d:
      d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
      # ok if extensions don't exist

  if "fstab_version" not in d:
    d["fstab_version"] = "1"

    # Older target-files carried per-image sizes in META/imagesizes.txt.
    data = read_helper("META/imagesizes.txt")
    for line in data.split("\n"):
        name, value = line.split(" ", 1)
      if name == "blocksize":
        d[name + "_size"] = value

    # makeint body: parse a stored string into an int (base auto-detected
    # by the explicit base argument 0).
    d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("fstab_version")

  d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"])
  d["build.prop"] = LoadBuildProp(read_helper)
def LoadBuildProp(read_helper):
  """Parse SYSTEM/build.prop (fetched via read_helper) into a dict."""
  # NOTE(review): the try/except around the read appears to be missing from
  # this excerpt.
    data = read_helper("SYSTEM/build.prop")
    # BUG(review): `zip` here is the Python builtin, not a filename -- the
    # warning prints the builtin's repr.  Worth fixing upstream.
    print "Warning: could not find SYSTEM/build.prop in %s" % zip
  return LoadDictionaryFromLines(data.split("\n"))
def LoadDictionaryFromLines(lines):
  """Parse "name=value" lines into a dict, skipping blank lines and
  # comments."""
  # NOTE(review): the accumulator, loop header and return appear to be
  # missing from this excerpt.
    if not line or line.startswith("#"):
      # Split on the first '=' only; values may themselves contain '='.
      name, value = line.split("=", 1)
def LoadRecoveryFSTab(read_helper, fstab_version):
  """Parse RECOVERY/RAMDISK/etc/recovery.fstab into a
  {mount_point: Partition} dict.  Supports fstab versions 1 and 2; any
  other version raises ValueError.
  """
  # NOTE(review): a number of lines are missing from this excerpt
  # (Partition field assignments, the try/except around the read, loop
  # bookkeeping and the final return) -- confirm against the full file.
  class Partition(object):
    # Plain record describing one fstab entry.
    def __init__(self, mount_point, fs_type, device, length, device2, context):
      self.mount_point = mount_point
      self.fs_type = fs_type
      self.device2 = device2
      self.context = context

    data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
    print "Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab"

  if fstab_version == 1:
    # v1 layout: <mount_point> <fs_type> <device> [4th field: device2 if it
    # starts with "/", otherwise comma-separated options].
    for line in data.split("\n"):
      if not line or line.startswith("#"):
      pieces = line.split()
      if not 3 <= len(pieces) <= 4:
        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
        if pieces[3].startswith("/"):
      mount_point = pieces[0]
        options = options.split(",")
          if i.startswith("length="):
            print "%s: unknown option \"%s\"" % (mount_point, i)
      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
                                 device=pieces[2], length=length,
  elif fstab_version == 2:
    for line in data.split("\n"):
      if not line or line.startswith("#"):
      # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
      pieces = line.split()
        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))

      # Ignore entries that are managed by vold
      if "voldmanaged=" in options:

      # It's a good line, parse it
        options = options.split(",")
          if i.startswith("length="):
        # Ignore all unknown options in the unified fstab

      mount_flags = pieces[3]
      # Honor the SELinux context if present.
      for i in mount_flags.split(","):
        if i.startswith("context="):

      mount_point = pieces[1]
      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
                                 device=pieces[0], length=length,
                                 device2=None, context=context)
  else:
    raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))
# NOTE(review): the enclosing `def` header (an info-dict dump helper?)
# appears to be missing from this excerpt.  The loop pretty-prints each
# key with the type name of its value.
  for k, v in sorted(d.items()):
    print "%-25s = (%s) %s" % (k, type(v).__name__, v)
def BuildBootableImage(sourcedir, fs_config_file, info_dict=None):
  """Take a kernel, cmdline, and ramdisk directory from the input (in
  'sourcedir'), and turn them into a boot image. Return the image
  data, or None if sourcedir does not appear to contains files for
  building the requested image."""
  # NOTE(review): several lines appear to be missing from this excerpt
  # (early return, else branches, waits on subprocesses, temp-file cleanup
  # and the final return of the image data).

  # Nothing to do unless the ramdisk dir and kernel are both present.
  if (not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK) or
      not os.access(os.path.join(sourcedir, "kernel"), os.F_OK)):

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  ramdisk_img = tempfile.NamedTemporaryFile()
  img = tempfile.NamedTemporaryFile()

  # mkbootfs serializes the RAMDISK dir; its output is piped to minigzip.
  if os.access(fs_config_file, os.F_OK):
    cmd = ["mkbootfs", "-f", fs_config_file, os.path.join(sourcedir, "RAMDISK")]
    cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
  p1 = Run(cmd, stdout=subprocess.PIPE)
  p2 = Run(["minigzip"],
           stdin=p1.stdout, stdout=ramdisk_img.file.fileno())

  assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
  assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)

  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"

  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]

  # Optional inputs: each flag is added only when its file exists.
  fn = os.path.join(sourcedir, "second")
  if os.access(fn, os.F_OK):
    cmd.append("--second")

  fn = os.path.join(sourcedir, "cmdline")
  if os.access(fn, os.F_OK):
    cmd.append("--cmdline")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "base")
  if os.access(fn, os.F_OK):
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "pagesize")
  if os.access(fn, os.F_OK):
    cmd.append("--pagesize")
    cmd.append(open(fn).read().rstrip("\n"))

  args = info_dict.get("mkbootimg_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  # With vboot, mkbootimg writes to an unsigned temp image which the
  # vboot signer later consumes; otherwise it writes to img directly.
  if info_dict.get("vboot", None):
    img_unsigned = tempfile.NamedTemporaryFile()
    cmd.extend(["--ramdisk", ramdisk_img.name,
                "--output", img_unsigned.name])
    cmd.extend(["--ramdisk", ramdisk_img.name,
                "--output", img.name])

  p = Run(cmd, stdout=subprocess.PIPE)
  assert p.returncode == 0, "mkbootimg of %s image failed" % (
      os.path.basename(sourcedir),)

  # Sign in place with boot_signer when configured with a verity key.
  if (info_dict.get("boot_signer", None) == "true" and
      info_dict.get("verity_key", None)):
    path = "/" + os.path.basename(sourcedir).lower()
    cmd = [OPTIONS.boot_signer_path]
    cmd.extend(OPTIONS.boot_signer_args)
    cmd.extend([path, img.name,
                info_dict["verity_key"] + ".pk8",
                info_dict["verity_key"] + ".x509.pem", img.name])
    p = Run(cmd, stdout=subprocess.PIPE)
    assert p.returncode == 0, "boot_signer of %s image failed" % path

  # Sign the image if vboot is non-empty.
  elif info_dict.get("vboot", None):
    path = "/" + os.path.basename(sourcedir).lower()
    img_keyblock = tempfile.NamedTemporaryFile()
    cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
           info_dict["vboot_key"] + ".vbprivk",
           info_dict["vboot_subkey"] + ".vbprivk",
    p = Run(cmd, stdout=subprocess.PIPE)
    assert p.returncode == 0, "vboot_signer of %s image failed" % path

  # Clean up the temp files.

  # NOTE(review): the arguments look transposed -- file.seek(offset, whence);
  # it only works because os.SEEK_SET == 0.  Worth fixing upstream.
  img.seek(os.SEEK_SET, 0)
def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
  # NOTE(review): the remainder of the signature (an info_dict parameter?)
  # appears to be missing from this excerpt.
  """Return a File object (with name 'name') with the desired bootable
  image. Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name
  'prebuilt_name', otherwise look for it under 'unpack_dir'/IMAGES,
  otherwise construct it from the source files in
  'unpack_dir'/'tree_subdir'."""

  # First choice: a prebuilt from the BOOTABLE_IMAGES overlay.
  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print "using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,)
    return File.FromLocalFile(name, prebuilt_path)

  # Second choice: a prebuilt under IMAGES.
  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print "using prebuilt %s from IMAGES..." % (prebuilt_name,)
    return File.FromLocalFile(name, prebuilt_path)

  # No prebuilt: build the image from the unpacked target-files sources.
  print "building image from target_files %s..." % (tree_subdir,)
  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
  data = BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
                            os.path.join(unpack_dir, fs_config),
  return File(name, data)
def UnzipTemp(filename, pattern=None):
  """Unzip the given archive into a temporary directory and return the name.

  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
  temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.

  Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
  main file), open for reading.
  """
  # NOTE(review): a few lines (the pattern extension of cmd, the error
  # string argument, the else branch of the "+zip" match) appear to be
  # missing from this excerpt.

  # Record the temp dir so a later cleanup pass can remove it.
  tmp = tempfile.mkdtemp(prefix="targetfiles-")
  OPTIONS.tempfiles.append(tmp)

  def unzip_to_dir(filename, dirname):
    cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
    if pattern is not None:
    p = Run(cmd, stdout=subprocess.PIPE)
    if p.returncode != 0:
      raise ExternalError("failed to unzip input target-files \"%s\"" %

  # "foo.zip+bar.zip": the second archive is overlaid at BOOTABLE_IMAGES.
  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
    unzip_to_dir(m.group(1), tmp)
    unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
    filename = m.group(1)
    unzip_to_dir(filename, tmp)

  return tmp, zipfile.ZipFile(filename, "r")
def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them. Return a {key: password} dict. password
  will be None if the key has no password."""
  # NOTE(review): list initializations, continue statements and some
  # branch headers appear to be missing from this excerpt.

  devnull = open("/dev/null", "w+b")
  for k in sorted(keylist):
    # We don't need a password for things that aren't really keys.
    if k in SPECIAL_CERT_STRINGS:
      no_passwords.append(k)

    # Probe 1: does the key parse with no decryption at all?
    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-nocrypt"],
            stdin=devnull.fileno(),
            stdout=devnull.fileno(),
            stderr=subprocess.STDOUT)
    if p.returncode == 0:
      # Definitely an unencrypted key.
      no_passwords.append(k)
      # Probe 2: does the key decrypt with the empty password?
      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
               "-inform", "DER", "-passin", "pass:"],
              stdin=devnull.fileno(),
              stdout=devnull.fileno(),
              stderr=subprocess.PIPE)
      _, stderr = p.communicate()
      if p.returncode == 0:
        # Encrypted key with empty string as password.
        key_passwords[k] = ''
      elif stderr.startswith('Error decrypting key'):
        # Definitely encrypted key.
        # It would have said "Error reading key" if it didn't parse correctly.
        need_passwords.append(k)
        # Potentially, a type of key that openssl doesn't understand.
        # We'll let the routines in signapk.jar handle it.
        no_passwords.append(k)

  # Merge: interactively collected passwords, then None for keys without.
  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
  key_passwords.update(dict.fromkeys(no_passwords, None))
def SignFile(input_name, output_name, key, password, align=None,
  """Sign the input_name zip/jar/apk, producing output_name. Use the
  given key and password (the latter may be None if the key does not
  have a password).

  If align is an integer > 1, zipalign is run to align stored files in
  the output zip on 'align'-byte boundaries.

  If whole_file is true, use the "-w" option to SignApk to embed a
  signature that covers the whole file in the archive comment of the
  zip file.
  """
  # NOTE(review): the remainder of the signature (whole_file flag?), some
  # branch headers and the "-w" handling appear to be missing from this
  # excerpt.

  # align of 0 or 1 means "no alignment requested".
  if align == 0 or align == 1:

    # When aligning, sign into a temp file; zipalign produces output_name.
    temp = tempfile.NamedTemporaryFile()
    sign_name = temp.name
    sign_name = output_name

  cmd = [OPTIONS.java_path, OPTIONS.java_args, "-jar",
         os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)]
  cmd.extend(OPTIONS.extra_signapk_args)
  cmd.extend([key + OPTIONS.public_key_suffix,
              key + OPTIONS.private_key_suffix,
              input_name, sign_name])

  # The password (if any) is fed to signapk on stdin.
  p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  if password is not None:
  p.communicate(password)
  if p.returncode != 0:
    raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))

    # -f overwrites the output; -p requests page alignment (zipalign docs).
    p = Run(["zipalign", "-f", "-p", str(align), sign_name, output_name])
    if p.returncode != 0:
      raise ExternalError("zipalign failed: return code %s" % (p.returncode,))
def CheckSize(data, target, info_dict):
  """Check the data string passed against the max size limit, if
  any, for the given target. Raise exception if the data is too big.
  Print a warning if the data is nearing the maximum size."""
  # NOTE(review): several lines appear to be missing from this excerpt
  # (size computation, fs_type/device extraction, the percentage
  # thresholds and the verbose message).

  if target.endswith(".img"):
    # Map "foo.img" to its mount point "/foo".
    mount_point = "/" + target
    if info_dict["fstab"]:
      # Recovery mounts userdata at /data.
      if mount_point == "/userdata":
        mount_point = "/data"
      p = info_dict["fstab"][mount_point]
      # Size limits are keyed on the block-device basename + "_size".
      device = device[device.rfind("/")+1:]
    limit = info_dict.get(device + "_size", None)
  if not fs_type or not limit:

  if fs_type == "yaffs2":
    # image size should be increased by 1/64th to account for the
    # spare area (64 bytes per 2k page)
    limit = limit / 2048 * (2048+64)

  pct = float(size) * 100.0 / limit
  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
    raise ExternalError(msg)
    print " WARNING: ", msg
  elif OPTIONS.verbose:
def ReadApkCerts(tf_zip):
  """Given a target_files ZipFile, parse the META/apkcerts.txt file
  and return a {package: cert} dict."""
  # NOTE(review): the certmap initialization, a couple of loop-control
  # lines, and the else: before the raise appear to be missing from this
  # excerpt.
  for line in tf_zip.read("META/apkcerts.txt").split("\n"):
    m = re.match(r'^name="(.*)"\s+certificate="(.*)"\s+'
                 r'private_key="(.*)"$', line)
      name, cert, privkey = m.groups()
      public_key_suffix_len = len(OPTIONS.public_key_suffix)
      private_key_suffix_len = len(OPTIONS.private_key_suffix)
      if cert in SPECIAL_CERT_STRINGS and not privkey:
      elif (cert.endswith(OPTIONS.public_key_suffix) and
            privkey.endswith(OPTIONS.private_key_suffix) and
            cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
        # Cert and key must share a basename; store it with suffix stripped.
        certmap[name] = cert[:-public_key_suffix_len]
        raise ValueError("failed to parse line from apkcerts.txt:\n" + line)
# Help text shared by all releasetools scripts; printed by Usage() after the
# calling module's own docstring.  NOTE(review): several option headings and
# the closing triple-quote appear to be missing from this excerpt; the
# closing quotes below are reconstructed so the module stays parseable.
COMMON_DOCSTRING = """
      Prepend <dir>/bin to the list of places to search for binaries
      run by this script, and expect to find jars in <dir>/framework.

  -s (--device_specific) <file>
      Path to the python module containing device-specific

  -x (--extra) <key=value>
      Add a key/value pair to the 'extras' dict, which device-specific
      extension code may look at.

      Show command lines being executed.

      Display this usage message and exit.
"""
659 def Usage(docstring):
660 print docstring.rstrip("\n")
661 print COMMON_DOCSTRING
def ParseOptions(argv,
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler=None):
  """Parse the options in argv and return any arguments that aren't
  flags. docstring is the calling module's docstring, to be displayed
  for errors and -h. extra_opts and extra_long_opts are for flags
  defined by the caller, which are processed by passing them to
  extra_option_handler."""
  # NOTE(review): the docstring parameter, the try: before getopt, parts of
  # the long-option list, the Usage/sys.exit calls, the `for o, a in opts:`
  # header and the final return appear to be missing from this excerpt.
    opts, args = getopt.getopt(
        argv, "hvp:s:x:" + extra_opts,
        ["help", "verbose", "path=", "signapk_path=", "extra_signapk_args=",
         "java_path=", "java_args=", "public_key_suffix=",
         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
         "verity_signer_path=", "verity_signer_args=", "device_specific=",
        list(extra_long_opts))
  except getopt.GetoptError as err:
    print "**", str(err), "**"

  # Each recognized flag updates the global OPTIONS object.
  if o in ("-h", "--help"):
  elif o in ("-v", "--verbose"):
    OPTIONS.verbose = True
  elif o in ("-p", "--path"):
    OPTIONS.search_path = a
  elif o in ("--signapk_path",):
    OPTIONS.signapk_path = a
  elif o in ("--extra_signapk_args",):
    OPTIONS.extra_signapk_args = shlex.split(a)
  elif o in ("--java_path",):
    OPTIONS.java_path = a
  elif o in ("--java_args",):
    OPTIONS.java_args = a
  elif o in ("--public_key_suffix",):
    OPTIONS.public_key_suffix = a
  elif o in ("--private_key_suffix",):
    OPTIONS.private_key_suffix = a
  elif o in ("--boot_signer_path",):
    OPTIONS.boot_signer_path = a
  elif o in ("--boot_signer_args",):
    OPTIONS.boot_signer_args = shlex.split(a)
  elif o in ("--verity_signer_path",):
    OPTIONS.verity_signer_path = a
  elif o in ("--verity_signer_args",):
    OPTIONS.verity_signer_args = shlex.split(a)
  elif o in ("-s", "--device_specific"):
    OPTIONS.device_specific = a
  elif o in ("-x", "--extra"):
    key, value = a.split("=", 1)
    OPTIONS.extras[key] = value
    # Unknown option: give the caller's handler a chance before failing.
    if extra_option_handler is None or not extra_option_handler(o, a):
      assert False, "unknown option \"%s\"" % (o,)

  if OPTIONS.search_path:
    # Let tools under search_path/bin take precedence on $PATH.
    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                          os.pathsep + os.environ["PATH"])
def MakeTempFile(prefix=None, suffix=None):
  """Make a temp file and add it to the list of things to be deleted
  when Cleanup() is called. Return the filename."""
  fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  # NOTE(review): the os.close(fd) and `return fn` lines appear to be
  # missing from this excerpt; without the close, the fd would leak.
  OPTIONS.tempfiles.append(fn)
# NOTE(review): the enclosing `def Cleanup():` header and loop body appear
# to be missing from this excerpt; presumably each recorded temp path is
# removed here -- confirm against the full file.
  for i in OPTIONS.tempfiles:
class PasswordManager(object):
  """Collects key passwords, either by letting the user edit
  $ANDROID_PW_FILE in $EDITOR or by interactive getpass prompts."""
  # NOTE(review): many lines are missing from this excerpt (the __init__
  # header, loop headers, the ReadFile def and its try/except) -- confirm
  # against the full file before editing.
    self.editor = os.getenv("EDITOR", None)
    self.pwfile = os.getenv("ANDROID_PW_FILE", None)

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict. (The dict may have keys in addition to the
    values in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords. If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.
    """
      current = self.ReadFile()
        if i not in current or not current[i]:
      # Are all the passwords already in the file?

        print "key file %s still missing some passwords." % (self.pwfile,)
        answer = raw_input("try to edit again? [y]> ").strip()
        if answer and answer[0] not in 'yY':
          raise RuntimeError("key passwords unavailable")
      current = self.UpdateAndReadFile(current)

  def PromptResult(self, current): # pylint: disable=no-self-use
    """Prompt the user to enter a value (password) for each key in
    'current' whose value is false. Returns a new dict with all the
    values filled in.
    """
    for k, v in sorted(current.iteritems()):
        result[k] = getpass.getpass(
            "Enter password for %s key> " % k).strip()

  def UpdateAndReadFile(self, current):
    # No editor or pw-file configured: fall back to interactive prompts.
    if not self.editor or not self.pwfile:
      return self.PromptResult(current)

    f = open(self.pwfile, "w")
    os.chmod(self.pwfile, 0o600)  # passwords: owner read/write only
    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
    f.write("# (Additional spaces are harmless.)\n\n")

    # Entries with missing passwords sort first (not v is True for them).
    sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
    for i, (_, k, v) in enumerate(sorted_list):
      f.write("[[[ %s ]]] %s\n" % (v, k))
      if not v and first_line is None:
        # position cursor on first line with no password.

    # "+N" asks the editor to open at line N.
    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
    _, _ = p.communicate()

    return self.ReadFile()

    # ReadFile body (the def header is missing from this excerpt): parse
    # "[[[ password ]]] key" lines back into a dict.
    if self.pwfile is None:
    f = open(self.pwfile, "r")
      if not line or line[0] == '#':
      m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
        print "failed to parse password file: ", line
        result[m.group(2)] = m.group(1)
      if e.errno != errno.ENOENT:
        print "error reading password file: ", str(e)
def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
  # NOTE(review): the remainder of the signature (compress_type=None?),
  # the docstring and the try/finally headers appear to be missing from
  # this excerpt.
  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
  # for files larger than 2GiB. We can work around this by adjusting their
  # limit. Note that `zipfile.writestr()` will not work for strings larger than
  # 2GiB. The Python interpreter sometimes rejects strings that large (though
  # it isn't clear to me exactly what circumstances cause this).
  # `zipfile.write()` must be used directly to work around this.
  # This mess can be avoided if we port to python3.
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if compress_type is None:
    compress_type = zip_file.compression

    # Remember the original mode/mtimes so they can be restored afterwards.
    saved_stat = os.stat(filename)

    # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
    # file to be zipped and reset it when we're done.
    os.chmod(filename, perms)

    # Use a fixed timestamp so the output is repeatable.
    epoch = datetime.datetime.fromtimestamp(0)
    timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
    os.utime(filename, (timestamp, timestamp))

    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
    # Restore the file's metadata and the module-level zip64 limit.
    os.chmod(filename, saved_stat.st_mode)
    os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
    zipfile.ZIP64_LIMIT = saved_zip64_limit
def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.
  """
  # NOTE(review): the remainder of the signature (compress_type=None?),
  # the default-perms branch and the else: header appear to be missing
  # from this excerpt.

  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
    # A bare arcname: synthesize a ZipInfo using the archive's compression.
    zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
    zinfo.compress_type = zip_file.compression
    zinfo = zinfo_or_arcname

  # If compress_type is given, it overrides the value in zinfo.
  if compress_type is not None:
    zinfo.compress_type = compress_type

  # If perms is given, it has a priority.
  if perms is not None:
    # The high 16 bits of external_attr carry the POSIX mode bits.
    zinfo.external_attr = perms << 16

  # Use a fixed timestamp so the output is repeatable.
  zinfo.date_time = (2009, 1, 1, 0, 0, 0)

  zip_file.writestr(zinfo, data)
  zipfile.ZIP64_LIMIT = saved_zip64_limit
def ZipClose(zip_file):
  """Close zip_file with the zip64 limit raised (same workaround as
  ZipWrite/ZipWriteStr)."""
  # zipfile also refers to ZIP64_LIMIT during close() when it writes out the
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1
  # NOTE(review): the zip_file.close() call appears to be missing from this
  # excerpt.
  zipfile.ZIP64_LIMIT = saved_zip64_limit
class DeviceSpecificParams(object):
  # NOTE(review): a class-level `module = None` attribute and several
  # __init__ lines (loop body, else branches, try/except) appear to be
  # missing from this excerpt.
  def __init__(self, **kwargs):
    """Keyword arguments to the constructor become attributes of this
    object, which is passed to all functions in the device-specific
    module."""
    for k, v in kwargs.iteritems():
    self.extras = OPTIONS.extras

    if self.module is None:
      path = OPTIONS.device_specific
      # A directory: import the "releasetools" module found inside it.
      if os.path.isdir(path):
        info = imp.find_module("releasetools", [path])
        # A file path: import it by basename with the extension stripped.
        d, f = os.path.split(path)
        b, x = os.path.splitext(f)
        info = imp.find_module(f, [d])
        print "loaded device-specific extensions from", path
        self.module = imp.load_module("device_specific", *info)
        print "unable to load device-specific module; assuming none"
970 def _DoCall(self, function_name, *args, **kwargs):
971 """Call the named function in the device-specific module, passing
972 the given args and kwargs. The first argument to the call will be
973 the DeviceSpecific object itself. If there is no module, or the
974 module does not define the function, return the value of the
975 'default' kwarg (which itself defaults to None)."""
976 if self.module is None or not hasattr(self.module, function_name):
977 return kwargs.get("default", None)
978 return getattr(self.module, function_name)(*((self,) + args), **kwargs)
  # --- Hook points, each dispatched into the device-specific module. ---

  def FullOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of a
    full OTA package. Implementations can add whatever additional
    assertions they like."""
    return self._DoCall("FullOTA_Assertions")

  def FullOTA_InstallBegin(self):
    """Called at the start of full OTA installation."""
    return self._DoCall("FullOTA_InstallBegin")

  def FullOTA_InstallEnd(self):
    """Called at the end of full OTA installation; typically this is
    used to install the image for the device's baseband processor."""
    return self._DoCall("FullOTA_InstallEnd")

  def IncrementalOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of an
    incremental OTA package. Implementations can add whatever
    additional assertions they like."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Called at the start of the verification phase of incremental
    OTA installation; additional checks can be placed here to abort
    the script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Called at the end of the verification phase of incremental OTA
    installation; additional checks can be placed here to abort the
    script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Called at the start of incremental OTA installation (after
    verification is complete)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_InstallEnd(self):
    """Called at the end of incremental OTA installation; typically
    this is used to install the image for the device's baseband
    processor."""
    return self._DoCall("IncrementalOTA_InstallEnd")
  # NOTE(review): the `class File` header, the self.name/self.data
  # assignments, the @classmethod decorator and parts of the method
  # bodies appear to be missing from this excerpt.
  def __init__(self, name, data):
    # Cache the payload's size and sha1 up front.
    self.size = len(data)
    self.sha1 = sha1(data).hexdigest()

  def FromLocalFile(cls, name, diskname):
    """Alternate constructor: build a File from the bytes of `diskname`."""
    f = open(diskname, "rb")
    return File(name, data)

  def WriteToTemp(self):
    """Write the data to a NamedTemporaryFile and return it (callers use
    the .name attribute to pass it to external tools)."""
    t = tempfile.NamedTemporaryFile()

  def AddToZip(self, z, compression=None):
    """Store this file in zip `z` (via the ZipWriteStr workaround)."""
    ZipWriteStr(z, self.name, self.data, compress_type=compression)
# Target-file extension -> external diff tool invocation.  Zip-like
# archives use "imgdiff -z"; anything unlisted falls back to bsdiff
# (see Difference.ComputePatch).  NOTE(review): additional entries and
# the dict's closing brace appear to be missing from this excerpt.
DIFF_PROGRAM_BY_EXT = {
    ".zip" : ["imgdiff", "-z"],
    ".jar" : ["imgdiff", "-z"],
    ".apk" : ["imgdiff", "-z"],
class Difference(object):
  """Computes the binary patch needed to turn source file `sf` into
  target file `tf`."""
  # NOTE(review): several lines are missing from this excerpt (the
  # self.tf/self.sf/self.patch assignments, the run() def header, the
  # timeout/terminate handling and the GetPatch def header).
  def __init__(self, tf, sf, diff_program=None):
    self.diff_program = diff_program

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf. Returns the same tuple as GetPatch()."""
    if self.diff_program:
      diff_program = self.diff_program
      # Pick the tool from the target's extension: imgdiff for zip-like
      # files (per DIFF_PROGRAM_BY_EXT), bsdiff otherwise.
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    ext = os.path.splitext(tf.name)[1]

      ptemp = tempfile.NamedTemporaryFile()
      # diff_program may be a bare command name or an argv prefix list.
      if isinstance(diff_program, list):
        cmd = copy.copy(diff_program)
        cmd = [diff_program]
      cmd.append(stemp.name)
      cmd.append(ttemp.name)
      cmd.append(ptemp.name)
      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        _, e = p.communicate()

      # Run the diff on a thread so a hung tool can be timed out.
      th = threading.Thread(target=run)
      th.join(timeout=300)  # 5 mins
        print "WARNING: diff command timed out"

      if err or p.returncode != 0:
        print "WARNING: failure running %s:\n%s\n" % (
            diff_program, "".join(err))
        return None, None, None

    return self.tf, self.sf, self.patch

    # GetPatch (def header missing from this excerpt):
    """Return a tuple (target_file, source_file, patch_data).
    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed."""
    return self.tf, self.sf, self.patch
def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'."""
  # NOTE(review): the worker() def header and several loop/exception lines
  # appear to be missing from this excerpt.
  print len(diffs), "diffs to compute"

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)  # accessed under lock

      dur = time.time() - start
      tf, sf, patch = d.GetPatch()
      # Report "target (source)" only when the names differ.
      if sf.name == tf.name:
        name = "%s (%s)" % (tf.name, sf.name)
        print "patching failed! %s" % (name,)
        print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
            dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
    except Exception as e:

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  threads.pop().join()
1174 class BlockDifference(object):
1175 def __init__(self, partition, tgt, src=None, check_first_block=False,
1179 self.partition = partition
1180 self.check_first_block = check_first_block
1182 # Due to http://b/20939131, check_first_block is disabled temporarily.
1183 assert not self.check_first_block
1187 if OPTIONS.info_dict:
1190 OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
1191 self.version = version
1193 b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
1194 version=self.version)
1195 tmpdir = tempfile.mkdtemp()
1196 OPTIONS.tempfiles.append(tmpdir)
1197 self.path = os.path.join(tmpdir, partition)
1198 b.Compute(self.path)
1201 _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
1203 _, self.device = GetTypeAndDevice("/" + partition,
1204 OPTIONS.source_info_dict)
1206 def WriteScript(self, script, output_zip, progress=None):
1208 # write the output unconditionally
1209 script.Print("Patching %s image unconditionally..." % (self.partition,))
1211 script.Print("Patching %s image after verification." % (self.partition,))
1214 script.ShowProgress(progress, 0)
1215 self._WriteUpdate(script, output_zip)
1216 self._WritePostInstallVerifyScript(script)
def WriteVerifyScript(self, script):
  """Append edify commands that verify the source partition before patching.

  For a full update there is nothing to verify.  For an incremental
  update, check that the source blocks still hash to the expected value;
  with transfer-list version >= 3 also accept a partially-updated
  partition (block_image_verify), which allows resuming an interrupted
  update.
  """
  partition = self.partition
  if not self.src:
    script.Print("Image %s will be patched unconditionally." % (partition,))
  else:
    # clobbered_blocks (e.g. the superblock) may change outside our
    # control, so exclude them from the pre-install hash.
    ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
    ranges_str = ranges.to_string_raw()
    if self.version >= 3:
      script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                          'block_image_verify("%s", '
                          'package_extract_file("%s.transfer.list"), '
                          '"%s.new.dat", "%s.patch.dat")) then') % (
                              self.device, ranges_str, self.src.TotalSha1(),
                              self.device, partition, partition, partition))
    else:
      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                         self.device, ranges_str, self.src.TotalSha1()))
    script.Print('Verified %s image...' % (partition,))
    script.AppendExtra('else')

    # When generating incrementals for the system and vendor partitions,
    # explicitly check the first block (which contains the superblock) of
    # the partition to see if it's what we expect. If this check fails,
    # give an explicit log message about the partition having been
    # remounted R/W (the most likely explanation) and the need to flash to
    # get OTAs working again.
    if self.check_first_block:
      self._CheckFirstBlock(script)

    # Abort the OTA update. Note that the incremental OTA cannot be applied
    # even if it may match the checksum of the target partition.
    # a) If version < 3, operations like move and erase will make changes
    #    unconditionally and damage the partition.
    # b) If version >= 3, it won't even reach here.
    script.AppendExtra(('abort("%s partition has unexpected contents");\n'
                        'endif;') % (partition,))
def _WritePostInstallVerifyScript(self, script):
  """Append edify commands that verify the partition after patching.

  Checks the full target care_map hash, and additionally verifies that
  any extended (padding) blocks really contain zeros.
  """
  partition = self.partition
  script.Print('Verifying the updated %s image...' % (partition,))
  # Unlike pre-install verification, clobbered_blocks should not be ignored.
  ranges = self.tgt.care_map
  ranges_str = ranges.to_string_raw()
  script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                     self.device, ranges_str,
                     self.tgt.TotalSha1(include_clobbered_blocks=True)))

  # Verify that extended blocks are really zeroed out.
  if self.tgt.extended:
    ranges_str = self.tgt.extended.to_string_raw()
    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                       self.device, ranges_str,
                       self._HashZeroBlocks(self.tgt.extended.size())))
    script.Print('Verified the updated %s image.' % (partition,))
    script.AppendExtra(
        'else\n'
        ' abort("%s partition has unexpected non-zero contents after OTA '
        'update");\n'
        'endif;' % (partition,))
  else:
    script.Print('Verified the updated %s image.' % (partition,))

  # Close the outer range_sha1 check.
  script.AppendExtra(
      'else\n'
      ' abort("%s partition has unexpected contents after OTA update");\n'
      'endif;' % (partition,))
def _WriteUpdate(self, script, output_zip):
  """Pack the transfer list and data blobs into the OTA zip and emit the
  edify block_image_update call that applies them on the device."""
  # Each artifact produced by BlockImageDiff is stored in the package
  # under the partition's name.  The patch data is already compressed,
  # so it is stored without recompression.
  artifacts = [
      ('{}.transfer.list', {}),
      ('{}.new.dat', {}),
      ('{}.patch.dat', {'compress_type': zipfile.ZIP_STORED}),
  ]
  for template, zip_kwargs in artifacts:
    ZipWrite(output_zip,
             template.format(self.path),
             template.format(self.partition),
             **zip_kwargs)

  call = ('block_image_update("{device}", '
          'package_extract_file("{partition}.transfer.list"), '
          '"{partition}.new.dat", "{partition}.patch.dat");\n'.format(
              device=self.device, partition=self.partition))
  script.AppendExtra(script.WordWrap(call))
def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
  """Return the hex SHA-1 digest over the given block ranges of source.

  source is an image object providing ReadRangeSet(); ranges is a
  RangeSet selecting which blocks to hash.
  """
  data = source.ReadRangeSet(ranges)
  ctx = sha1()

  # ReadRangeSet yields one chunk per range; feed them in order.
  for p in data:
    ctx.update(p)

  return ctx.hexdigest()
def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
  """Return the hash value for all zero blocks."""
  # Use a bytes literal (identical to str on Python 2) so the sha1
  # update also works under Python 3.
  zero_block = b'\x00' * 4096
  ctx = sha1()

  for _ in range(num_blocks):
    ctx.update(zero_block)

  return ctx.hexdigest()
# TODO(tbao): Due to http://b/20939131, block 0 may be changed without
# remounting R/W. Will change the checking to a finer-grained way to
# mask off those bits.
def _CheckFirstBlock(self, script):
  """Append an edify assertion that block 0 of the source partition still
  matches the source build; abort with a "remounted R/W" hint otherwise.

  Block 0 holds the filesystem superblock, which is rewritten when the
  partition is mounted read-write.
  """
  r = rangelib.RangeSet((0, 1))
  srchash = self._HashBlocks(self.src, r)

  script.AppendExtra(('(range_sha1("%s", "%s") == "%s") || '
                      'abort("%s has been remounted R/W; '
                      'reflash device to reenable OTA updates");')
                     % (self.device, r.to_string_raw(), srchash,
                        self.device))
# Re-export so callers can use common.DataImage for full-image "diffs".
DataImage = blockimgdiff.DataImage

# map recovery.fstab's fs_types to mount/format "partition types"
# NOTE(review): the dict body was lost in extraction; entries below are
# reconstructed from the AOSP releasetools sources — verify against the
# original file.
PARTITION_TYPES = {
    "yaffs2": "MTD",
    "mtd": "MTD",
    "ext4": "EMMC",
    "emmc": "EMMC",
    "f2fs": "EMMC",
    "squashfs": "EMMC",
}
def GetTypeAndDevice(mount_point, info):
  """Look up mount_point in the info dict's fstab.

  Args:
    mount_point: e.g. "/system" or "/boot".
    info: an info dict as returned by LoadInfoDict(), containing "fstab".

  Returns:
    A (partition_type, device) tuple, e.g. ("EMMC", "/dev/block/...").

  Raises:
    KeyError: if there is no fstab (callers such as MakeRecoveryPatch
      catch this and skip the recovery patch).
  """
  fstab = info["fstab"]
  if fstab:
    return (PARTITION_TYPES[fstab[mount_point].fs_type],
            fstab[mount_point].device)
  else:
    raise KeyError
def ParseCertificate(data):
  """Parse a PEM-format certificate and return its raw DER contents.

  Collects the base64 payload between the BEGIN CERTIFICATE and END
  CERTIFICATE markers; only the first certificate in data is decoded.
  """
  import base64
  cert = []
  save = False
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
      break
    if save:
      cert.append(line)
    # The BEGIN line itself is not part of the payload, so flip the flag
    # after the append check.
    if "--BEGIN CERTIFICATE--" in line:
      save = True
  # base64.b64decode returns the same bytes as the Python-2-only
  # str.decode('base64') and also works under Python 3.
  cert = base64.b64decode("".join(cert))
  return cert
1371 def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
1373 """Generate a binary patch that creates the recovery image starting
1374 with the boot image. (Most of the space in these images is just the
1375 kernel, which is identical for the two, so the resulting patch
1376 should be efficient.) Add it to the output zip, along with a shell
1377 script that is run from init.rc on first boot to actually do the
1378 patching and install the new recovery image.
1380 recovery_img and boot_img should be File objects for the
1381 corresponding images. info should be the dictionary returned by
1382 common.LoadInfoDict() on the input target_files.
1385 if info_dict is None:
1386 info_dict = OPTIONS.info_dict
1388 diff_program = ["imgdiff"]
1389 path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
1390 if os.path.exists(path):
1391 diff_program.append("-b")
1392 diff_program.append(path)
1393 bonus_args = "-b /system/etc/recovery-resource.dat"
1397 d = Difference(recovery_img, boot_img, diff_program=diff_program)
1398 _, _, patch = d.ComputePatch()
1399 output_sink("recovery-from-boot.p", patch)
1402 # The following GetTypeAndDevice()s need to use the path in the target
1403 # info_dict instead of source_info_dict.
1404 boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
1405 recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
1409 sh = """#!/system/bin/sh
1410 if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
1411 applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
1413 log -t recovery "Recovery image already installed"
1415 """ % {'boot_size': boot_img.size,
1416 'boot_sha1': boot_img.sha1,
1417 'recovery_size': recovery_img.size,
1418 'recovery_sha1': recovery_img.sha1,
1419 'boot_type': boot_type,
1420 'boot_device': boot_device,
1421 'recovery_type': recovery_type,
1422 'recovery_device': recovery_device,
1423 'bonus_args': bonus_args}
1425 # The install script location moved from /system/etc to /system/bin
1426 # in the L release. Parse init.*.rc files to find out where the
1427 # target-files expects it to be, and put it there.
1428 sh_location = "etc/install-recovery.sh"
1430 init_rc_dir = os.path.join(input_dir, "BOOT", "RAMDISK")
1431 init_rc_files = os.listdir(init_rc_dir)
1432 for init_rc_file in init_rc_files:
1433 if (not init_rc_file.startswith('init.') or
1434 not init_rc_file.endswith('.rc')):
1437 with open(os.path.join(init_rc_dir, init_rc_file)) as f:
1439 m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
1441 sh_location = m.group(1)
1448 print "putting script in", sh_location
1450 output_sink(sh_location, sh)