bg->first_free_block = 0;
bg->free_inodes = info.inodes_per_group;
bg->first_free_inode = 1;
- bg->flags = EXT4_BG_INODE_UNINIT;
+ bg->flags = 0;
if (reserve_blocks(bg, bg->first_free_block, bg->header_blocks) < 0)
error("failed to reserve %u blocks in block group %u\n", bg->header_blocks, i);
if (!strcmp(dir, "/data/user")) {
return 0;
}
+ if (!strcmp(dir, "/data/system_ce")) {
+ return 0;
+ }
UnencryptedProperties props("/data");
std::string policy = props.Get<std::string>(properties::ref);
result.c_str());
return 0;
}
+
+// Ask vold (via the "cryptfs create_user_key" command) to create an
+// encryption key for the given user.  |serial| is the user's serial
+// number; |ephemeral| requests a key that is not persisted to disk.
+// Always returns 0: vold's reply is only logged, not propagated.
+int e4crypt_create_user_key(userid_t user_id, int serial, bool ephemeral)
+{
+    auto command = std::string() + "cryptfs create_user_key "
+                   + std::to_string(user_id) + " "
+                   + std::to_string(serial) + " "
+                   + (ephemeral ? "1" : "0");
+    auto result = vold_command(command);
+    // ext4enc:TODO proper error handling
+    KLOG_INFO(TAG, "create_user_key returned with result %s\n",
+              result.c_str());
+    return 0;
+}
#include <sys/cdefs.h>
#include <stdbool.h>
+#include <cutils/multiuser.h>
__BEGIN_DECLS
bool e4crypt_non_default_key(const char* path);
int do_policy_set(const char *directory, const char *policy, int policy_length);
int e4crypt_set_user_crypto_policies(const char* path);
+int e4crypt_create_user_key(userid_t user_id, int serial, bool ephemeral);
__END_DECLS
int make_ext4fs_sparse_fd(int fd, long long len,
const char *mountpoint, struct selabel_handle *sehnd)
{
+ return make_ext4fs_sparse_fd_directory(fd, len, mountpoint, sehnd, NULL);
+}
+
+int make_ext4fs_sparse_fd_directory(int fd, long long len,
+ const char *mountpoint, struct selabel_handle *sehnd,
+ const char *directory)
+{
reset_ext4fs_info();
info.len = len;
- return make_ext4fs_internal(fd, NULL, NULL, mountpoint, NULL, 0, 1, 0, 0, 0, sehnd, 0, -1, NULL);
+ return make_ext4fs_internal(fd, directory, NULL, mountpoint, NULL,
+ 0, 1, 0, 0, 0,
+ sehnd, 0, -1, NULL);
}
int make_ext4fs(const char *filename, long long len,
const char *mountpoint, struct selabel_handle *sehnd)
{
+ return make_ext4fs_directory(filename, len, mountpoint, sehnd, NULL);
+}
+
+int make_ext4fs_directory(const char *filename, long long len,
+ const char *mountpoint, struct selabel_handle *sehnd,
+ const char *directory)
+{
int fd;
int status;
return EXIT_FAILURE;
}
- status = make_ext4fs_internal(fd, NULL, NULL, mountpoint, NULL, 0, 0, 0, 1, 0, sehnd, 0, -1, NULL);
+ status = make_ext4fs_internal(fd, directory, NULL, mountpoint, NULL,
+ 0, 0, 0, 1, 0,
+ sehnd, 0, -1, NULL);
close(fd);
return status;
info.feat_ro_compat |=
EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER |
- EXT4_FEATURE_RO_COMPAT_LARGE_FILE |
- EXT4_FEATURE_RO_COMPAT_GDT_CSUM;
+ EXT4_FEATURE_RO_COMPAT_LARGE_FILE;
info.feat_incompat |=
EXT4_FEATURE_INCOMPAT_EXTENTS |
int make_ext4fs(const char *filename, long long len,
const char *mountpoint, struct selabel_handle *sehnd);
+int make_ext4fs_directory(const char *filename, long long len,
+ const char *mountpoint, struct selabel_handle *sehnd,
+ const char *directory);
int make_ext4fs_sparse_fd(int fd, long long len,
const char *mountpoint, struct selabel_handle *sehnd);
+int make_ext4fs_sparse_fd_directory(int fd, long long len,
+ const char *mountpoint, struct selabel_handle *sehnd,
+ const char *directory);
#ifdef __cplusplus
}
#
LOCAL_PATH:= $(call my-dir)
-
include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := mmapPerf.cpp
+LOCAL_MODULE_PATH := $(TARGET_OUT_DATA)/local/tmp
LOCAL_MODULE := mmapPerf
-LOCAL_SRC_FILES_64 := mmapPerf.cpp
-LOCAL_SRC_FILES_32 := unsupported.cpp
+LOCAL_MULTILIB := both
+LOCAL_MODULE_STEM_32 := $(LOCAL_MODULE)
+LOCAL_MODULE_STEM_64 := $(LOCAL_MODULE)64
LOCAL_CLANG := true
LOCAL_CFLAGS += -g -Wall -Werror -std=c++11 -Wno-missing-field-initializers -Wno-sign-compare -O3
LOCAL_FORCE_STATIC_EXECUTABLE := true
LOCAL_CXX_STL := libc++_static
LOCAL_STATIC_LIBRARIES := libc
-LOCAL_MODULE_TAGS := optional
include $(BUILD_EXECUTABLE)
FileMap(const string &name, size_t size, Hint hint = FILE_MAP_HINT_NONE) : m_name{name}, m_size{size} {
int fd = open(name.c_str(), O_CREAT | O_RDWR, S_IRWXU);
if (fd < 0) {
- cerr << "open failed: " << fd << endl;
- return;
+ cout << "Error: open failed for " << name << ": " << strerror(errno) << endl;
+ exit(1);
}
m_fileFd.set(fd);
fallocate(m_fileFd.get(), 0, 0, size);
unlink(name.c_str());
m_ptr = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, m_fileFd.get(), 0);
if ((int)(uintptr_t)m_ptr == -1) {
- cerr << "mmap failed: " << (int)(uintptr_t)m_ptr << endl;
- m_ptr = nullptr;
- return;
+ cout << "Error: mmap failed: " << (int)(uintptr_t)m_ptr << ": " << strerror(errno) << endl;
+ exit(1);
}
switch (hint) {
case FILE_MAP_HINT_NONE: break;
fillPageJunk(targetPtr);
}
}
- void benchRandom(bool write) {
+ double benchRandom(bool write) {
size_t pagesTotal = m_size / pageSize;
size_t pagesToHit = pagesTotal / 128;
uint64_t nsTotal = 0;
}
end = chrono::high_resolution_clock::now();
nsTotal += chrono::duration_cast<chrono::nanoseconds>(end - start).count();
- //cout << "random: " << nsTotal / 1000.0 / (pagesToHit) << "us/page" << endl;
- cout << "random " << (write ? "write" : "read") << ": " << ((4096.0 * pagesToHit) / (1 << 20)) / (nsTotal / 1.0E9) << "MB/s" << endl;
+ return ((4096.0 * pagesToHit) / (1 << 20)) / (nsTotal / 1.0E9);
}
- void benchLinear(bool write) {
+ double benchLinear(bool write) {
int pagesTotal = m_size / pageSize;
int iterations = 4;
uint64_t nsTotal = 0;
}
end = chrono::high_resolution_clock::now();
nsTotal += chrono::duration_cast<chrono::nanoseconds>(end - start).count();
- //cout << "linear: " << nsTotal / 1000.0 / (pagesTotal * iterations) << "us/page" << endl;
- cout << "linear " << (write ? "write" : "read") << ": " << ((4096.0 * pagesTotal * iterations) / (1 << 20)) / (nsTotal / 1.0E9 ) << "MB/s" << endl;
+ return ((4096.0 * pagesTotal * iterations) / (1 << 20)) / (nsTotal / 1.0E9 );
}
void dropCache() {
int ret1 = msync(m_ptr, m_size, MS_SYNC | MS_INVALIDATE);
int main(int argc, char *argv[])
{
- (void)argc;
- (void)argv;
+ double randomRead, randomWrite, linearRead, linearWrite;
+ size_t fsize = 0;
srand(0);
+ if (argc == 1)
+ fsize = 1024 * (1ull << 20);
+ else if (argc == 2) {
+ long long sz = atoll(argv[1]);
+ if (sz > 0 && (sz << 20) < SIZE_MAX)
+ fsize = atoll(argv[1]) * (1ull << 20);
+ }
+
+ if (fsize <= 0) {
+ cout << "Error: invalid argument" << endl;
+ cerr << "Usage: " << argv[0] << " [fsize_in_MB]" << endl;
+ exit(1);
+ }
+ cerr << "Using filesize=" << fsize << endl;
+
{
- FileMap file{"/data/local/tmp/mmap_test", 16000 * (1ull << 20)};
- file.benchRandom(false);
+ cerr << "Running random_read..." << endl;
+ FileMap file{"/data/local/tmp/mmap_test", fsize};
+ randomRead = file.benchRandom(false);
}
{
- FileMap file{"/data/local/tmp/mmap_test", 16000 * (1ull << 20)};
- file.benchLinear(false);
+ cerr << "Running linear_read..." << endl;
+ FileMap file{"/data/local/tmp/mmap_test", fsize};
+ linearRead = file.benchLinear(false);
}
{
- FileMap file{"/data/local/tmp/mmap_test", 16000 * (1ull << 20)};
- file.benchRandom(true);
+ cerr << "Running random_write..." << endl;
+ FileMap file{"/data/local/tmp/mmap_test", fsize};
+ randomWrite = file.benchRandom(true);
}
{
- FileMap file{"/data/local/tmp/mmap_test", 16000 * (1ull << 20)};
- file.benchLinear(true);
+ cerr << "Running linear_write..." << endl;
+ FileMap file{"/data/local/tmp/mmap_test", fsize};
+ linearWrite = file.benchLinear(true);
}
+ cout << "Success" << endl;
+ cout << "random_read : " << randomRead << " : MB/s" << endl;
+ cout << "linear_read : " << linearRead << " : MB/s" << endl;
+ cout << "random_write : " << randomWrite << " : MB/s" << endl;
+ cout << "linear_write : " << linearWrite << " : MB/s" << endl;
return 0;
}
--- /dev/null
+# Copyright 2015 The Android Open Source Project
+
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= dumpcache.c
+LOCAL_SHARED_LIBRARIES := libcutils
+LOCAL_MODULE_PATH := $(TARGET_OUT_OPTIONAL_EXECUTABLES)
+LOCAL_MODULE_TAGS := debug
+LOCAL_MODULE:= dumpcache
+
+include $(BUILD_EXECUTABLE)
+
--- /dev/null
+
+ Copyright (c) 2005-2015, The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
--- /dev/null
+Pagecache tools.
+
+dumpcache.c: dumps complete pagecache of device.
+pagecache.py: shows live info on files going in/out of pagecache.
--- /dev/null
+#include <ftw.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <ctype.h>
+#include <stddef.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+
+// Initial size of the array holding struct file_info
+#define INITIAL_NUM_FILES 512
+
+// Max number of file descriptors to use for ntfw
+#define MAX_NUM_FD 1
+
+struct file_info {
+ char *name;
+ size_t file_size;
+ size_t num_cached_pages;
+};
+
+// Size of pages on this system
+static int g_page_size;
+
+// Total number of cached pages found so far
+static size_t g_total_cached = 0;
+
+// Total number of files scanned so far
+static size_t g_num_files = 0;
+
+// Scanned files and their associated cached page counts
+static struct file_info **g_files;
+
+// Current size of files array
+size_t g_files_size;
+
+// Record a newly-seen file in the global g_files table.
+// Doubles the pointer array when full, allocates a file_info entry,
+// duplicates fpath into it, and appends it.  Exits the process on any
+// allocation failure (acceptable policy for a one-shot dump tool).
+// NOTE(review): entries and names are never freed; fine for a
+// short-lived process, a leak if this is ever reused as a library.
+static struct file_info *get_file_info(const char* fpath, size_t file_size) {
+    struct file_info *info;
+    if (g_num_files >= g_files_size) {
+        // Overwriting g_files with realloc's result is safe only because
+        // a NULL result exits immediately below (no recovery attempted).
+        g_files = realloc(g_files, 2 * g_files_size * sizeof(struct file_info*));
+        if (!g_files) {
+            fprintf(stderr, "Couldn't allocate space for files array: %s\n", strerror(errno));
+            exit(EXIT_FAILURE);
+        }
+        g_files_size = 2 * g_files_size;
+    }
+
+    info = calloc(1, sizeof(*info));
+    if (!info) {
+        fprintf(stderr, "Couldn't allocate space for file struct: %s\n", strerror(errno));
+        exit(EXIT_FAILURE);
+    }
+
+    // Manual strdup: copy the path so it outlives nftw's buffer.
+    info->name = malloc(strlen(fpath) + 1);
+    if (!info->name) {
+        fprintf(stderr, "Couldn't allocate space for file struct: %s\n", strerror(errno));
+        exit(EXIT_FAILURE);
+    }
+    strcpy(info->name, fpath);
+
+    info->num_cached_pages = 0;
+    info->file_size = file_size;
+
+    g_files[g_num_files++] = info;
+
+    return info;
+}
+
+// Count how many pages of the regular file at fpath are resident in the
+// page cache (via mincore) and, if any, record them in g_files and add
+// them to g_total_cached.  Returns 0 on success (including files with
+// nothing cached or that cannot be mapped), -1 if the open fails.
+static int store_num_cached(const char* fpath, const struct stat *sb) {
+    int fd = open(fpath, O_RDONLY);
+
+    if (fd == -1) {
+        // Report to stderr with the file name; the original printed a
+        // bare "Could not open file." (no name, no newline) to stdout.
+        fprintf(stderr, "Could not open %s: %s\n", fpath, strerror(errno));
+        return -1;
+    }
+
+    // PROT_NONE suffices: mincore only needs a mapping, not access.
+    void* mapped_addr = mmap(NULL, sb->st_size, PROT_NONE, MAP_SHARED, fd, 0);
+
+    if (mapped_addr != MAP_FAILED) {
+        // One residency byte per page of the file, rounded up.
+        size_t num_file_pages = (sb->st_size + g_page_size - 1) / g_page_size;
+        unsigned char* mincore_data = calloc(1, num_file_pages);
+        // Check both the allocation and the mincore() return value,
+        // neither of which the original verified before dereferencing.
+        if (mincore_data && mincore(mapped_addr, sb->st_size, mincore_data) == 0) {
+            int num_cached = 0;
+            unsigned int page;
+            for (page = 0; page < num_file_pages; page++) {
+                if (mincore_data[page]) num_cached++;
+            }
+            if (num_cached > 0) {
+                struct file_info *info = get_file_info(fpath, sb->st_size);
+                info->num_cached_pages += num_cached;
+                g_total_cached += num_cached;
+            }
+        }
+        free(mincore_data);  // was leaked on every file in the original
+        munmap(mapped_addr, sb->st_size);
+    }
+
+    close(fd);
+    return 0;
+}
+
+// nftw() callback: scan only regular files (FTW_F); directories,
+// symlinks, and unreadable entries are skipped.  Always returns 0 so
+// the tree walk continues past per-file failures.
+static int scan_entry(const char *fpath, const struct stat *sb, int typeflag, struct FTW *ftwbuf) {
+    if (typeflag == FTW_F) {
+        store_num_cached(fpath, sb);
+    }
+    return 0;
+}
+
+// Three-way compare of two sizes without subtraction, which could
+// overflow/wrap for large size_t values.
+static int cmpsize(size_t a, size_t b) {
+    if (a < b) return -1;
+    if (a > b) return 1;
+    return 0;
+}
+
+// qsort() comparator over struct file_info* elements: orders entries by
+// cached page count, ascending, so the biggest consumers print last.
+static int cmpfiles(const void *a, const void *b) {
+    return cmpsize((*((struct file_info**)a))->num_cached_pages,
+                   (*((struct file_info**)b))->num_cached_pages);
+}
+
+// Walk /system, /vendor, and /data, then print every file with pages in
+// the page cache, sorted by cached page count, followed by the total.
+int main()
+{
+    size_t i;
+    g_page_size = getpagesize();
+
+    g_files = malloc(INITIAL_NUM_FILES * sizeof(struct file_info*));
+    if (!g_files) {
+        // The original used this pointer unchecked.
+        fprintf(stderr, "Couldn't allocate space for files array: %s\n", strerror(errno));
+        return EXIT_FAILURE;
+    }
+    g_files_size = INITIAL_NUM_FILES;
+
+    // Walk filesystem trees; per-entry failures are tolerated inside
+    // scan_entry, so the return values are intentionally ignored.
+    nftw("/system/", &scan_entry, MAX_NUM_FD, 0);
+    nftw("/vendor/", &scan_entry, MAX_NUM_FD, 0);
+    nftw("/data/", &scan_entry, MAX_NUM_FD, 0);
+
+    // Sort entries by cached page count (ascending)
+    qsort(g_files, g_num_files, sizeof(g_files[0]), &cmpfiles);
+
+    // Dump entries
+    for (i = 0; i < g_num_files; i++) {
+        struct file_info *info = g_files[i];
+        fprintf(stdout, "%s: %zu cached pages (%.2f MB, %zu%% of total file size.)\n", info->name,
+                info->num_cached_pages,
+                (float) (info->num_cached_pages * g_page_size) / 1024 / 1024,
+                (100 * info->num_cached_pages * g_page_size) / info->file_size);
+    }
+
+    // Use g_page_size (not a hard-coded 4096) so the total agrees with
+    // the per-file figures on systems with a different page size.
+    fprintf(stdout, "TOTAL CACHED: %zu pages (%f MB)\n", g_total_cached,
+            (float) (g_total_cached * g_page_size) / 1024 / 1024);
+    return 0;
+}
--- /dev/null
+#!/usr/bin/env python
+
+import curses
+import operator
+import optparse
+import os
+import re
+import subprocess
+import sys
+import threading
+import Queue
+
+STATS_UPDATE_INTERVAL = 0.2
+PAGE_SIZE = 4096
+
+class PagecacheStats():
+ """Holds pagecache stats by accounting for pages added and removed.
+
+ """
+ def __init__(self, inode_to_filename):
+ self._inode_to_filename = inode_to_filename
+ self._file_size = {}
+ self._file_pages_added = {}
+ self._file_pages_removed = {}
+ self._total_pages_added = 0
+ self._total_pages_removed = 0
+
+ def add_page(self, device_number, inode, offset):
+ # See if we can find the page in our lookup table
+ if (device_number, inode) in self._inode_to_filename:
+ filename, filesize = self._inode_to_filename[(device_number, inode)]
+ if filename not in self._file_pages_added:
+ self._file_pages_added[filename] = 1
+ else:
+ self._file_pages_added[filename] += 1
+ self._total_pages_added += 1
+
+ if filename not in self._file_size:
+ self._file_size[filename] = filesize
+
+ def remove_page(self, device_number, inode, offset):
+ if (device_number, inode) in self._inode_to_filename:
+ filename, filesize = self._inode_to_filename[(device_number, inode)]
+ if filename not in self._file_pages_removed:
+ self._file_pages_removed[filename] = 1
+ else:
+ self._file_pages_removed[filename] += 1
+ self._total_pages_removed += 1
+
+ if filename not in self._file_size:
+ self._file_size[filename] = filesize
+
+ def pages_to_mb(self, num_pages):
+ return "%.2f" % round(num_pages * PAGE_SIZE / 1024.0 / 1024.0, 2)
+
+ def bytes_to_mb(self, num_bytes):
+ return "%.2f" % round(int(num_bytes) / 1024.0 / 1024.0, 2)
+
+ def print_pages_and_mb(self, num_pages):
+ pages_string = str(num_pages) + ' (' + str(self.pages_to_mb(num_pages)) + ' MB)'
+ return pages_string
+
+ def reset_stats(self):
+ self._file_pages_removed.clear()
+ self._file_pages_added.clear()
+ self._total_pages_added = 0;
+ self._total_pages_removed = 0;
+
+ def print_stats(self, pad):
+ sorted_added = sorted(self._file_pages_added.items(), key=operator.itemgetter(1), reverse=True)
+ height, width = pad.getmaxyx()
+ pad.clear()
+ pad.addstr(0, 2, 'NAME'.ljust(68), curses.A_REVERSE)
+ pad.addstr(0, 70, 'ADDED (MB)'.ljust(12), curses.A_REVERSE)
+ pad.addstr(0, 82, 'REMOVED (MB)'.ljust(14), curses.A_REVERSE)
+ pad.addstr(0, 96, 'SIZE (MB)'.ljust(9), curses.A_REVERSE)
+ y = 1
+ for filename, added in sorted_added:
+ filesize = self._file_size[filename]
+ removed = 0
+ if filename in self._file_pages_removed:
+ removed = self._file_pages_removed[filename]
+ if (filename > 64):
+ filename = filename[-64:]
+ pad.addstr(y, 2, filename)
+ pad.addstr(y, 70, self.pages_to_mb(added).rjust(10))
+ pad.addstr(y, 80, self.pages_to_mb(removed).rjust(14))
+ pad.addstr(y, 96, self.bytes_to_mb(filesize).rjust(9))
+ y += 1
+ if y == height - 2:
+ pad.addstr(y, 4, "<more...>")
+ break
+ y += 1
+ pad.addstr(y, 2, 'TOTAL'.ljust(74), curses.A_REVERSE)
+ pad.addstr(y, 70, str(self.pages_to_mb(self._total_pages_added)).rjust(10), curses.A_REVERSE)
+ pad.addstr(y, 80, str(self.pages_to_mb(self._total_pages_removed)).rjust(14), curses.A_REVERSE)
+ pad.refresh(0,0, 0,0, height,width)
+
+class FileReaderThread(threading.Thread):
+ """Reads data from a file/pipe on a worker thread.
+
+ Use the standard threading. Thread object API to start and interact with the
+ thread (start(), join(), etc.).
+ """
+
+ def __init__(self, file_object, output_queue, text_file, chunk_size=-1):
+ """Initializes a FileReaderThread.
+
+ Args:
+ file_object: The file or pipe to read from.
+ output_queue: A Queue.Queue object that will receive the data
+ text_file: If True, the file will be read one line at a time, and
+ chunk_size will be ignored. If False, line breaks are ignored and
+ chunk_size must be set to a positive integer.
+ chunk_size: When processing a non-text file (text_file = False),
+ chunk_size is the amount of data to copy into the queue with each
+ read operation. For text files, this parameter is ignored.
+ """
+ threading.Thread.__init__(self)
+ self._file_object = file_object
+ self._output_queue = output_queue
+ self._text_file = text_file
+ self._chunk_size = chunk_size
+ assert text_file or chunk_size > 0
+
+ def run(self):
+ """Overrides Thread's run() function.
+
+ Returns when an EOF is encountered.
+ """
+ if self._text_file:
+ # Read a text file one line at a time.
+ for line in self._file_object:
+ self._output_queue.put(line)
+ else:
+ # Read binary or text data until we get to EOF.
+ while True:
+ chunk = self._file_object.read(self._chunk_size)
+ if not chunk:
+ break
+ self._output_queue.put(chunk)
+
+ def set_chunk_size(self, chunk_size):
+ """Change the read chunk size.
+
+ This function can only be called if the FileReaderThread object was
+ created with an initial chunk_size > 0.
+ Args:
+ chunk_size: the new chunk size for this file. Must be > 0.
+ """
+ # The chunk size can be changed asynchronously while a file is being read
+ # in a worker thread. However, type of file can not be changed after the
+ # the FileReaderThread has been created. These asserts verify that we are
+ # only changing the chunk size, and not the type of file.
+ assert not self._text_file
+ assert chunk_size > 0
+ self._chunk_size = chunk_size
+
+class AdbUtils():
+ @staticmethod
+ def add_adb_serial(adb_command, device_serial):
+ if device_serial is not None:
+ adb_command.insert(1, device_serial)
+ adb_command.insert(1, '-s')
+
+ @staticmethod
+ def construct_adb_shell_command(shell_args, device_serial):
+ adb_command = ['adb', 'shell', ' '.join(shell_args)]
+ AdbUtils.add_adb_serial(adb_command, device_serial)
+ return adb_command
+
+ @staticmethod
+ def run_adb_shell(shell_args, device_serial):
+ """Runs "adb shell" with the given arguments.
+
+ Args:
+ shell_args: array of arguments to pass to adb shell.
+ device_serial: if not empty, will add the appropriate command-line
+ parameters so that adb targets the given device.
+ Returns:
+ A tuple containing the adb output (stdout & stderr) and the return code
+ from adb. Will exit if adb fails to start.
+ """
+ adb_command = AdbUtils.construct_adb_shell_command(shell_args, device_serial)
+
+ adb_output = []
+ adb_return_code = 0
+ try:
+ adb_output = subprocess.check_output(adb_command, stderr=subprocess.STDOUT,
+ shell=False, universal_newlines=True)
+ except OSError as error:
+ # This usually means that the adb executable was not found in the path.
+ print >> sys.stderr, ('\nThe command "%s" failed with the following error:'
+ % ' '.join(adb_command))
+ print >> sys.stderr, ' %s' % str(error)
+ print >> sys.stderr, 'Is adb in your path?'
+ adb_return_code = error.errno
+ adb_output = error
+ except subprocess.CalledProcessError as error:
+ # The process exited with an error.
+ adb_return_code = error.returncode
+ adb_output = error.output
+
+ return (adb_output, adb_return_code)
+
+ @staticmethod
+ def do_preprocess_adb_cmd(command, serial):
+ args = [command]
+ dump, ret_code = AdbUtils.run_adb_shell(args, serial)
+ if ret_code != 0:
+ return None
+
+ dump = ''.join(dump)
+ return dump
+
+def parse_atrace_line(line, pagecache_stats):
+ # Find a mm_filemap_add_to_page_cache entry
+ m = re.match('.* (mm_filemap_add_to_page_cache|mm_filemap_delete_from_page_cache): dev (\d+):(\d+) ino ([0-9a-z]+) page=([0-9a-z]+) pfn=\d+ ofs=(\d+).*', line)
+ if m != None:
+ # Get filename
+ device_number = int(m.group(2)) << 8 | int(m.group(3))
+ if device_number == 0:
+ return
+ inode = int(m.group(4), 16)
+ if m.group(1) == 'mm_filemap_add_to_page_cache':
+ pagecache_stats.add_page(device_number, inode, m.group(4))
+ elif m.group(1) == 'mm_filemap_delete_from_page_cache':
+ pagecache_stats.remove_page(device_number, inode, m.group(4))
+
+def build_inode_lookup_table(inode_dump):
+ inode2filename = {}
+ text = inode_dump.splitlines()
+ for line in text:
+ result = re.match('([0-9]+) ([0-9]+) ([0-9]+) (.*)', line)
+ if result:
+ inode2filename[(int(result.group(1)), int(result.group(2)))] = (result.group(4), result.group(3))
+
+ return inode2filename;
+
+def get_inode_data(datafile, dumpfile, adb_serial):
+ if datafile is not None and os.path.isfile(datafile):
+ print('Using cached inode data from ' + datafile)
+ f = open(datafile, 'r')
+ stat_dump = f.read();
+ else:
+ # Build inode maps if we were tracing page cache
+ print('Downloading inode data from device')
+ stat_dump = AdbUtils.do_preprocess_adb_cmd('find /system /data /vendor ' +
+ '-exec stat -c "%d %i %s %n" {} \;', adb_serial)
+ if stat_dump is None:
+ print 'Could not retrieve inode data from device.'
+ sys.exit(1)
+
+ if dumpfile is not None:
+ print 'Storing inode data in ' + dumpfile
+ f = open(dumpfile, 'w')
+ f.write(stat_dump)
+ f.close()
+
+ sys.stdout.write('Done.\n')
+
+ return stat_dump
+
+def read_and_parse_trace_data(atrace, pagecache_stats):
+ # Start reading trace data
+ stdout_queue = Queue.Queue(maxsize=128)
+ stderr_queue = Queue.Queue()
+
+ stdout_thread = FileReaderThread(atrace.stdout, stdout_queue,
+ text_file=True, chunk_size=64)
+ stderr_thread = FileReaderThread(atrace.stderr, stderr_queue,
+ text_file=True)
+ stdout_thread.start()
+ stderr_thread.start()
+
+ stdscr = curses.initscr()
+
+ try:
+ height, width = stdscr.getmaxyx()
+ curses.noecho()
+ curses.cbreak()
+ stdscr.keypad(True)
+ stdscr.nodelay(True)
+ stdscr.refresh()
+ # We need at least a 30x100 window
+ used_width = max(width, 100)
+ used_height = max(height, 30)
+
+ # Create a pad for pagecache stats
+ pagecache_pad = curses.newpad(used_height - 2, used_width)
+
+ stdscr.addstr(used_height - 1, 0, 'KEY SHORTCUTS: (r)eset stats, CTRL-c to quit')
+ while (stdout_thread.isAlive() or stderr_thread.isAlive() or
+ not stdout_queue.empty() or not stderr_queue.empty()):
+ while not stderr_queue.empty():
+ # Pass along errors from adb.
+ line = stderr_queue.get()
+ sys.stderr.write(line)
+ while True:
+ try:
+ line = stdout_queue.get(True, STATS_UPDATE_INTERVAL)
+ parse_atrace_line(line, pagecache_stats)
+ except Queue.Empty:
+ break
+
+ key = ''
+ try:
+ key = stdscr.getkey()
+ except:
+ pass
+
+ if key == 'r':
+ pagecache_stats.reset_stats()
+
+ pagecache_stats.print_stats(pagecache_pad)
+ except Exception, e:
+ curses.endwin()
+ print e
+ finally:
+ curses.endwin()
+ # The threads should already have stopped, so this is just for cleanup.
+ stdout_thread.join()
+ stderr_thread.join()
+
+ atrace.stdout.close()
+ atrace.stderr.close()
+
+
+def parse_options(argv):
+ usage = 'Usage: %prog [options]'
+ desc = 'Example: %prog'
+ parser = optparse.OptionParser(usage=usage, description=desc)
+ parser.add_option('-d', dest='inode_dump_file', metavar='FILE',
+ help='Dump the inode data read from a device to a file.'
+ ' This file can then be reused with the -i option to speed'
+ ' up future invocations of this script.')
+ parser.add_option('-i', dest='inode_data_file', metavar='FILE',
+                    help='Read cached inode data from a file saved earlier with the'
+ ' -d option.')
+ parser.add_option('-s', '--serial', dest='device_serial', type='string',
+ help='adb device serial number')
+ options, categories = parser.parse_args(argv[1:])
+ if options.inode_dump_file and options.inode_data_file:
+ parser.error('options -d and -i can\'t be used at the same time')
+ return (options, categories)
+
+def main():
+ options, categories = parse_options(sys.argv)
+
+ # Load inode data for this device
+ inode_data = get_inode_data(options.inode_data_file, options.inode_dump_file,
+ options.device_serial)
+ # Build (dev, inode) -> filename hash
+ inode_lookup_table = build_inode_lookup_table(inode_data)
+ # Init pagecache stats
+ pagecache_stats = PagecacheStats(inode_lookup_table)
+
+ # Construct and execute trace command
+ trace_cmd = AdbUtils.construct_adb_shell_command(['atrace', '--stream', 'pagecache'],
+ options.device_serial)
+
+ try:
+ atrace = subprocess.Popen(trace_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ except OSError as error:
+ print >> sys.stderr, ('The command failed')
+ sys.exit(1)
+
+ read_and_parse_trace_data(atrace, pagecache_stats)
+
+if __name__ == "__main__":
+ main()
}
}
+// Returns zram's total memory usage in bytes, preferring the newer
+// sysfs mm_stat file (third whitespace-separated field) and falling
+// back to the legacy mem_used_total file.  Returns 0 when neither
+// file exists or can be opened.
+static uint64_t get_zram_mem_used() {
+#define ZRAM_SYSFS "/sys/block/zram0/"
+    FILE *f = fopen(ZRAM_SYSFS "mm_stat", "r");
+    if (f) {
+        uint64_t mem_used_total = 0;
+
+        // mm_stat layout: the third field is mem_used_total; the other
+        // fields are skipped with "%*d".
+        int matched = fscanf(f, "%*d %*d %" SCNu64 " %*d %*d %*d %*d", &mem_used_total);
+        if (matched != 1)
+            fprintf(stderr, "warning: failed to parse " ZRAM_SYSFS "mm_stat\n");
+
+        fclose(f);
+        return mem_used_total;
+    }
+
+    // Older kernels expose a dedicated mem_used_total file instead.
+    f = fopen(ZRAM_SYSFS "mem_used_total", "r");
+    if (f) {
+        uint64_t mem_used_total = 0;
+
+        int matched = fscanf(f, "%" SCNu64, &mem_used_total);
+        if (matched != 1)
+            fprintf(stderr, "warning: failed to parse " ZRAM_SYSFS "mem_used_total\n");
+
+        fclose(f);
+        return mem_used_total;
+    }
+
+    return 0;
+}
+
int main(int argc, char *argv[]) {
pm_kernel_t *ker;
pm_process_t *proc;
uint64_t mem[MEMINFO_COUNT] = { };
pm_proportional_swap_t *p_swap;
- int fd, len;
- char buffer[1024];
float zram_cr = 0.0;
signal(SIGPIPE, SIG_IGN);
qsort(procs, num_procs, sizeof(procs[0]), compfn);
if (has_swap) {
- fd = open("/sys/block/zram0/mem_used_total", O_RDONLY);
- if (fd >= 0) {
- len = read(fd, buffer, sizeof(buffer)-1);
- close(fd);
- if (len > 0) {
- buffer[len] = 0;
- mem[MEMINFO_ZRAM_TOTAL] = atoll(buffer)/1024;
- zram_cr = (float) mem[MEMINFO_ZRAM_TOTAL] /
- (mem[MEMINFO_SWAP_TOTAL] - mem[MEMINFO_SWAP_FREE]);
- has_zram = true;
- }
+ uint64_t zram_mem_used = get_zram_mem_used();
+ if (zram_mem_used) {
+ mem[MEMINFO_ZRAM_TOTAL] = zram_mem_used/1024;
+ zram_cr = (float) mem[MEMINFO_ZRAM_TOTAL] /
+ (mem[MEMINFO_SWAP_TOTAL] - mem[MEMINFO_SWAP_FREE]);
+ has_zram = true;
}
}
#include <cerrno>
#include <grp.h>
#include <iostream>
+#include <iomanip>
#include <libgen.h>
#include <time.h>
#include <unistd.h>
int cpu_;
};
+struct Duration {
+ double value;
+ friend std::ostream& operator<<(std::ostream& stream, const Duration& d) {
+ static const char *SUFFIXES[] = {"s", "ms", "us", "ns"};
+ size_t suffix = 0;
+ double temp = d.value;
+ while (temp < .1 && suffix < 3) {
+ temp *= 1000;
+ suffix++;
+ }
+ stream << temp << SUFFIXES[suffix];
+ return stream;
+ }
+};
+
// File scope function prototypes
-static void server(void);
+static bool server(void);
static void client(void);
static void bindCPU(unsigned int cpu);
static ostream &operator<<(ostream &stream, const String16& str);
return 0;
default: // Parent
- server();
+ if (!server()) { break; }
// Wait for all children to end
do {
return 0;
}
-static void server(void)
+static bool server(void)
{
int rv;
new AddIntsService(options.serverCPU))) != 0) {
cerr << "addService " << serviceName << " failed, rv: " << rv
<< " errno: " << errno << endl;
+ return false;
}
// Start threads to handle server work
proc->startThreadPool();
+ return true;
}
static void client(void)
// Attach to service
sp<IBinder> binder;
- do {
+ for (int i = 0; i < 3; i++) {
binder = sm->getService(serviceName);
if (binder != 0) break;
cout << serviceName << " not published, waiting..." << endl;
usleep(500000); // 0.5 s
- } while(true);
+ }
+
+ if (binder == 0) {
+ cout << serviceName << " failed to publish, aborting" << endl;
+ return;
+ }
// Perform the IPC operations
for (unsigned int iter = 0; iter < options.iterations; iter++) {
}
// Display the results
- cout << "Time per iteration min: " << min
- << " avg: " << (total / options.iterations)
- << " max: " << max
+ cout << fixed << setprecision(2)
+ << "Time per iteration min: " << Duration{min}
+ << " avg: " << Duration{total / options.iterations}
+ << " max: " << Duration{max}
<< endl;
}
--- /dev/null
+#
+# Script to start 3 chrome tabs, fling each of them, repeat
+# For each iteration, Total frames and janky frames are reported.
+#
+# Options are described below.
+#
+iterations=10
+startapps=1
+capturesystrace=0
+waittime=4
+app=chrome
+
+function processLocalOption {
+ ret=0
+ case "$1" in
+ (-N) startapps=0;;
+ (-A) unset appList;;
+ (-L) appList=$2; shift; ret=1;;
+ (-T) capturesystrace=1;;
+ (-W) waittime=$2; shift; ret=1;;
+ (*)
+ echo "$0: unrecognized option: $1"
+ echo; echo "Usage: $0 [options]"
+ echo "-A : use all known applications"
+ echo "-L applist : list of applications"
+ echo " default: $appList"
+ echo "-N : no app startups, just fling"
+ echo "-g : generate activity strings"
+ echo "-i iterations"
+ echo "-T : capture systrace on each iteration"
+ echo "-d device : device type (shamu, volantis, bullhead,...)"
+ exit 1;;
+ esac
+ return $ret
+}
+
+CMDDIR=$(dirname $0 2>/dev/null)
+CMDDIR=${CMDDIR:=.}
+. $CMDDIR/defs.sh
+
+case $DEVICE in
+(hammerhead)
+ flingtime=300
+ downCount=2
+ upCount=6
+ UP="70 400 70 100 $flingtime"
+ DOWN="70 100 70 400 $flingtime";;
+(shamu)
+ flingtime=100
+ downCount=2
+ upCount=2
+ UP="700 1847 700 400 $flingtime"
+ DOWN="700 400 700 1847 $flingtime";;
+(angler)
+ flingtime=150
+ downCount=4
+ upCount=3
+ UP="500 1200 500 550 $flingtime"
+ DOWN="500 550 500 1200 $flingtime";;
+(bullhead|volantis)
+ flingtime=200
+ downCount=5
+ upCount=5
+ UP="500 1400 500 400 $flingtime"
+ DOWN="500 400 500 1400 $flingtime";;
+(*)
+ echo "Error: No display information available for $DEVICE"
+ exit 1;;
+esac
+
+function swipe {
+ count=0
+ while [ $count -lt $2 ]
+ do
+ doSwipe $1
+ ((count=count+1))
+ done
+ sleep 1
+}
+
+cur=1
+frameSum=0
+jankSum=0
+latency90Sum=0
+latency95Sum=0
+latency99Sum=0
+
+doKeyevent HOME
+sleep 0.5
+resetJankyFrames $(getPackageName $app)
+
+while [ $cur -le $iterations ]
+do
+ if [ $capturesystrace -gt 0 ]; then
+ ${ADB}atrace --async_start -z -c -b 16000 freq gfx view idle sched
+ fi
+ t=$(startActivity $app)
+ sleep $waittime
+ swipe "$UP" $upCount
+
+ t=$(startActivity $app)
+ sleep $waittime
+ swipe "$UP" $upCount
+
+ t=$(startActivity $app)
+ sleep $waittime
+ swipe "$UP" $upCount
+
+ doKeyevent BACK
+ sleep $waittime
+ swipe "$DOWN" $downCount
+
+ doKeyevent BACK
+ sleep $waittime
+ swipe "$DOWN" $downCount
+
+ doKeyevent BACK
+ sleep 0.5
+
+ if [ $capturesystrace -gt 0 ]; then
+ ${ADB}atrace --async_dump -z -c -b 16000 freq gfx view idle sched > trace.${cur}.out
+ fi
+ doKeyevent HOME
+ sleep 0.5
+
+ set -- $(getJankyFrames $(getPackageName $app))
+ totalDiff=$1
+ jankyDiff=$2
+ latency90=$3
+ latency95=$4
+ latency99=$5
+ if [ ${totalDiff:=0} -eq 0 ]; then
+ echo Error: could not read frame info with \"dumpsys gfxinfo\"
+ exit 1
+ fi
+
+ ((frameSum=frameSum+totalDiff))
+ ((jankSum=jankSum+jankyDiff))
+ ((latency90Sum=latency90Sum+latency90))
+ ((latency95Sum=latency95Sum+latency95))
+ ((latency99Sum=latency99Sum+latency99))
+ if [ "$totalDiff" -eq 0 ]; then
+ echo Error: no frames detected. Is the display off?
+ exit 1
+ fi
+ ((jankPct=jankyDiff*100/totalDiff))
+ resetJankyFrames $(getPackageName $app)
+
+
+ echo Frames: $totalDiff latency: $latency90/$latency95/$latency99 Janks: $jankyDiff\(${jankPct}%\)
+ ((cur=cur+1))
+done
+doKeyevent HOME
+((aveJankPct=jankSum*100/frameSum))
+((aveJanks=jankSum/iterations))
+((aveFrames=frameSum/iterations))
+((aveLatency90=latency90Sum/iterations))
+((aveLatency95=latency95Sum/iterations))
+((aveLatency99=latency99Sum/iterations))
+echo AVE: Frames: $aveFrames latency: $aveLatency90/$aveLatency95/$aveLatency99 Janks: $aveJanks\(${aveJankPct}%\)
cameraActivity='com.google.android.GoogleCamera/com.android.camera.CameraActivity'
playActivity='com.android.vending/com.google.android.finsky.activities.MainActivity'
feedlyActivity='com.devhd.feedly/com.devhd.feedly.Main'
-photosActivity='com.google.android.apps.plus/com.google.android.apps.photos.phone.PhotosHomeActivity'
+photosActivity='com.google.android.apps.photos/com.google.android.apps.photos.home.HomeActivity'
mapsActivity='com.google.android.apps.maps/com.google.android.maps.MapsActivity'
calendarActivity='com.google.android.calendar/com.android.calendar.AllInOneActivity'
earthActivity='com.google.earth/com.google.earth.EarthActivity'
-calculatorActivity='com.android.calculator2/com.android.calculator2.Calculator'
+calculatorActivity='com.google.android.calculator/com.android.calculator2.Calculator'
sheetsActivity='com.google.android.apps.docs.editors.sheets/com.google.android.apps.docs.app.NewMainProxyActivity'
docsActivity='com.google.android.apps.docs.editors.docs/com.google.android.apps.docs.app.NewMainProxyActivity'
operaActivity='com.opera.mini.native/com.opera.mini.android.Browser'
firefoxActivity='org.mozilla.firefox/org.mozilla.firefox.App'
+suntempleActivity='com.BrueComputing.SunTemple/com.epicgames.ue4.GameActivity'
homeActivity='com.google.android.googlequicksearchbox/com.google.android.launcher.GEL'
function showUsage {
isOnDevice=0
fi
+if [ $isOnDevice -gt 0 ]; then
+ case "$DEVICE" in
+ (bullhead|angler)
+ if ! echo $$ > /dev/cpuset/background/tasks; then
+ echo Could not put PID $$ in background
+ fi
+ ;;
+ (*)
+ ;;
+ esac
+fi
+
# default values if not set by options or calling script
appList=${appList:=$dfltAppList}
savetmpfiles=${savetmpfiles:=0}
output=${output:="./out"}
# clear the output file
-> $output
+if [ -f $output ]; then
+ > $output
+fi
# ADB commands
AM_FORCE_START="${ADB}am start -W -S"
in=$1
in=${in:=0.0}
set -- $(echo $in | tr . " ")
+
# shell addition via (( )) doesn't like leading zeroes in msecs
# field so remove leading zeroes
msecfield=$(expr 0 + $2)
function resetJankyFrames {
_gfxapp=$1
- _gfxapp=${app:="com.android.systemui"}
+ _gfxapp=${_gfxapp:="com.android.systemui"}
${ADB}dumpsys gfxinfo $_gfxapp reset 2>&1 >/dev/null
}
}
function startInstramentation {
+ _iter=$1
+ _iter=${_iter:=0}
+ enableAtrace=$2
+ enableAtrace=${enableAtrace:=1}
# Called at beginning of loop. Turn on instramentation like atrace
vout start instramentation $(date)
echo =============================== >> $output
- echo Before iteration >> $output
+ echo Before iteration $_iter >> $output
echo =============================== >> $output
${ADB}cat /proc/meminfo 2>&1 >> $output
${ADB}dumpsys meminfo 2>&1 >> $output
- if [ "$user" = root ]; then
+ if [ "$DEVICE" = volantis ]; then
+ ${ADB}cat /d/nvmap/iovmm/procrank 2>&1 >> $output
+ fi
+ if [ "$user" = root -a $enableAtrace -gt 0 ]; then
vout ${ADB}atrace -b 32768 --async_start $tracecategories
${ADB}atrace -b 32768 --async_start $tracecategories >> $output
echo >> $output
}
function stopInstramentation {
- if [ "$user" = root ]; then
+ enableAtrace=$1
+ enableAtrace=${enableAtrace:=1}
+ if [ "$user" = root -a $enableAtrace -gt 0 ]; then
vout ${ADB}atrace --async_stop
${ADB}atrace --async_stop > /dev/null
fi
}
function stopAndDumpInstramentation {
- # Called at beginning of loop. Turn on instramentation like atrace
vout stop instramentation $(date)
echo =============================== >> $output
echo After iteration >> $output
python $UNCOMPRESS $tmpTrace >> $traceout
rm -f $tmpTrace
else
- ${ADB}atrace $zarg -b 32768 --async_dump >> $traceout
+ ${ADB}atrace -b 32768 --async_dump > $traceout
fi
- vout ${ADB}atrace $zarg --async_dump
+ vout ${ADB}atrace $zarg -b 32768 --async_dump
vout ${ADB}atrace --async_stop
${ADB}atrace --async_stop > /dev/null
fi
function doSwipe {
vout ${ADB}input swipe $*
- ${ADB}input swipe $*
+ ${ADB}nice input swipe $*
+}
+
+function doText {
+ echo $* > ./tmpOutput
+ vout ${ADB}input text \"$*\"
+ ${ADB}input text "$(cat ./tmpOutput)"
+ rm -f ./tmpOutput
}
function doTap {
--- /dev/null
+#!/usr/bin/python
+
+import sys
+import getopt
+
+def usage():
+ print "powersum.py [OPTIONS] HZ VOLTAGE [FILE]"
+ print "OPTIONS: "
+ print "-o OFFSET: subtract OFFSET from all data points"
+ print "\nHZ: samples per second in FILE or stdin"
+ sys.exit(0)
+
+offset = 0.0
+voltage = 4.3
+
+parsedargv,argvrem = getopt.getopt(sys.argv[1:], "vo:w:l:h", ["help"])
+for o,a in parsedargv:
+ if o == '-o': offset = float(a)
+ if o == '-h' or o == '--help': usage()
+
+hz = float(argvrem[0])
+voltage = float(argvrem[1])
+# argvrem is [HZ, VOLTAGE, FILE]; FILE is optional, so only open it when a
+# third positional argument is actually present ("> 1" raised IndexError).
+if len(argvrem) > 2:
+ f = open(argvrem[2], "r")
+else:
+ f = sys.stdin
+
+totalpower = 0.0
+samplectr = 0
+
+for line in f:
+ try:
+ val = float(line.split(" ")[1]) # xxx take 2nd arg in line
+ val -= offset
+ except:
+ print "Can't parse data line, did you remember the timestamp?"
+ print "data was: %s" % line
+ sys.exit(1)
+
+ samplectr+=1
+ totalpower += val/hz
+
+avecurrent = totalpower * hz *1000 / samplectr
+avepower = avecurrent * voltage
+
+print "%.3f %.3f" % (avecurrent, avepower)
--- /dev/null
+# print summary of output generated by pwrtest.sh
+#
+# default results directories are <device>-<date>[-experiment]. By default
+# match any device and the year 201*.
+#
+# Examples:
+#
+# - show output for all bullhead tests in july 2015:
+# ./pwrsummary.sh -r "bh-201507*"
+#
+# - generate CSV file for import into spreadsheet:
+# ./pwrsummary.sh -o csv
+#
+
+CMDDIR=$(dirname $0 2>/dev/null)
+CMDDIR=${CMDDIR:=.}
+cd $CMDDIR
+CMDDIR=$(pwd)
+cd -
+POWERAVE="python $CMDDIR/powerave.py"
+
+defaultPattern="*-201*"
+defaultVoltage=4.3
+defaultFrequency=5
+
+function Usage {
+ echo "$0 [-o format] [-v voltage] [-h freq] [-f resultsDirectories]"
+}
+
+while [ $# -gt 0 ]
+do
+ case "$1" in
+ (-o) format=$2; shift;;
+ (-v) voltage=$2; shift;;
+ (-h) hz=$2; shift;;
+ (-r) testResults="$2"; shift;;
+ (--help) Usage; exit 0;;
+ (--) shift; break;;
+ (*)
+ echo Unknown option: $1
+ Usage
+ exit 1;;
+ esac
+ shift
+done
+
+testResults=${testResults:=$defaultPattern}
+voltage=${voltage:=$defaultVoltage}
+hz=${hz:=$defaultFrequency}
+
+function printHeader {
+ workload=$1
+ units="unknown"
+ case $workload in
+ (suntemple|shadowgrid2)
+ units="FPS";;
+ (recentfling|youtube|chrome)
+ units="FPS from app point of view: 1/(90th percentile render time)";;
+ (sysapps)
+ units="App start/switch per second";;
+ esac
+
+ echo "Performance unit for $workload is: $units"
+ if [ "$format" = csv ]; then
+ printf "%s,%s,%s,%s,%s,%s,%s,%s,%s\n" " " build min ave max net-mA@${voltage}v base-mW net-mW perf/W
+ else
+ printf "%-30s %-8s %12.12s %12.12s %12.12s %12.12s %12.12s %12.12s %12.12s\n" " " build min ave max net-mA@${voltage}v base-mW net-mW perf/W
+ fi
+}
+
+function average {
+ awk 'BEGIN { count=0; sum=0; max=-1000000000; min=1000000000; }
+ {
+ cur = $1;
+ sum = sum + cur;
+ if (cur > max) max = cur;
+ if (cur < min) min = cur;
+ count++;
+ }
+
+ END {
+ if (count > 0) {
+ ave = sum / count;
+ printf "%.2f %.2f %.2f\n", min, ave, max;
+ }
+ }'
+}
+
+function hwuiOutputParser {
+ # Stats since: 60659316905953ns
+ # Total frames rendered: 150
+ # Janky frames: 89 (59.33%)
+ # 90th percentile: 23ms
+ # 95th percentile: 27ms
+ # 99th percentile: 32ms
+ # Number Missed Vsync: 0
+ # Number High input latency: 0
+ # Number Slow UI thread: 0
+ # Number Slow bitmap uploads: 12
+ # Number Slow draw: 89
+ # use with "stdbuf -o0 " to disable pipe buffering
+ # stdbuf -o0 adb shell /data/hwuitest shadowgrid2 400 | stdbuf -o0 ./hwuitestfilter.sh | tee t.csv
+ sed -e 's/ns//' -e 's/[\(\)%]/ /g' | awk '
+ BEGIN { startTime=0; lastTime=0; }
+ /^Stats since:/ {
+ curTime = $3;
+ if (startTime == 0) {
+ startTime = curTime;
+ }
+ if (lastTime) {
+ interval = curTime - lastTime;
+ fps = totalFrames*1000000000 / interval;
+ diffTime = curTime - startTime;
+ printf "%.2f, %.2f, ",diffTime/1000000, fps;
+ }
+ }
+ /^Total frames/ { totalFrames=$4; }
+ /^Janky frames:/ {
+ if (lastTime) {
+ printf "%.2f\n",$4; lastTime=curTime;
+ }
+ lastTime = curTime;
+ }'
+}
+
+function sysappOutputParser {
+ awk '
+ BEGIN { fmt=0; count=0; sum=0; }
+ /^App/ {
+ if (count != 0) {
+ if (fmt > 2) printf "Ave: %0.2fms\n", sum/count;
+ else printf " %0.2f\n", sum/count;
+ count = 0;
+ sum = 0;
+ }
+ }
+ /^[a-z]/ { val=$2; if (val != 0) { count++; sum+=val; } }
+ /^Iteration/ { if (fmt > 2) printf "%s : ", $0; else if (fmt) printf "%d ", $2; }
+ '
+}
+
+function calcPerfData {
+ testdir=$1
+ workload=$2
+ baselineCurrent=$3
+ baselinePower=$4
+
+ file=${workload}.out
+ powerfile=${workload}-power.out
+ build="$(cat build 2>/dev/null)"
+ build=${build:="Unknown"}
+
+ lines=$(wc -l $file 2>/dev/null | cut -f1 -d\ )
+
+ if [ ${lines:=0} -eq -0 ]; then
+ # No performance data captured
+ if [ "$format" = csv ]; then
+ printf "%s,%s,%s\n" $testdir "$build" "no data"
+ else
+ printf "%-30s %-8s %12.12s\n" $testdir "$build" "no data"
+ fi
+ return 1
+ fi
+
+ set -- $($POWERAVE $hz $voltage $powerfile)
+ current=$(echo $1 $baselineCurrent | awk '{ printf "%.2f", $1-$2; }')
+ power=$(echo $2 $baselinePower | awk '{ printf "%.2f", $1-$2; }')
+
+ case $workload in
+ (idle)
+ set -- 0 0 0
+ ;;
+ (suntemple)
+ # units are fps
+ set -- $(grep "FPS average" $file | sed 's/^.*seconds for a //' | awk '{ print $1; }' | average)
+ ;;
+ (recentfling|youtube|chrome)
+ # units are ms, so need to convert to app/ms
+ set -- $(grep ^Frames: $file | tr "/" " " | awk '{ print $4; }' | average | awk '{ printf "%.3f %.3f %.3f\n", 1000/$3, 1000/$2, 1000/$1;}' )
+ ;;
+ (sysapps)
+ # units are ms, so need to convert to app/ms
+ set -- $(cat $file | sysappOutputParser | average | awk '{ printf "%.3f %.3f %.3f\n", 1000/$3, 1000/$2, 1000/$1;}' )
+ ;;
+ (shadowgrid2)
+ # units are fps
+ set -- $(cat $file | hwuiOutputParser | tr ',' ' ' | awk '{print $2;}' | average)
+ ;;
+ esac
+
+ minperf=$1
+ aveperf=$2
+ maxperf=$3
+ perfPerWatt=$(echo $aveperf $power | awk '{ if ($2) { val=$1*1000/$2; printf "%.3f\n", val; } else print "unknown"; }')
+ if [ "$format" = csv ]; then
+ printf "%s,%s,%f,%f,%f,%f,%f,%f," $testdir "$build" $minperf $aveperf $maxperf $current $baselinePower $power
+ printf "%s\n" $perfPerWatt
+ else
+ printf "%-30s %-8s %12.2f %12.2f %12.2f %12.2f %12.2f %12.2f " $testdir "$build" $minperf $aveperf $maxperf $current $baselinePower $power
+ printf "%12s\n" $perfPerWatt
+ fi
+}
+
+function calcBaselinePower {
+ workload=$1
+ defaultPowerFile="idle-display-power.out"
+ powerFile=$defaultPowerFile
+ case $workload in
+ (shadowgrid2|suntemple|recentfling)
+ powerFile="idle-airplane-display-power.out"
+ if [ ! -f $powerFile ]; then
+ powerFile=$defaultPowerFile
+ fi;;
+ esac
+ if [ -f $powerFile ]; then
+ $POWERAVE 5 4.3 $powerFile
+ fi
+}
+
+for t in $(cat tests)
+do
+ echo .======================= $t ================================
+ printHeader $t
+ for i in $testResults
+ do
+ cd $i
+ baseline="$(calcBaselinePower $t)"
+ if [ "$baseline" != "" ]; then
+ calcPerfData $i $t $baseline
+ else
+ echo "$i : no baseline current"
+ fi
+ cd - > /dev/null
+ done
+done
--- /dev/null
+# Script to gather perf and perf/watt data for several workloads
+#
+# Setup:
+#
+# - device connected to monsoon with USB passthrough enabled
+# - network enabled (baseline will be measured and subtracted
+# from results) (network needed for chrome, youtube tests)
+# - the device is rebooted after each test (can be inhibited
+# with "-r 0")
+#
+# Default behavior is to run each of the known workloads for
+# 30 minutes gathering both performance and power data.
+#
+# The default time can be overridden with the -t option. To
+# change individual test times, a config file can be specifed
+# via -f with times for individual tests. Example file contents:
+#
+# idleTime=60
+# recentflingTime=60
+# chromeTime=60
+# youtubeTime=0
+# sysappsTime=60
+# suntempleTime=5
+#
+# Output goes to the current directory.
+#
+# Examples:
+#
+# - Run all tests for 15 minutes (default is 30): ./pwrtest.sh -t 15 -R MDA20
+#
+# - Use a config file for test times: ./pwrtest.sh -f ./myconfig -R MDA20
+#
+# - Use a init file to setup device tuneables after each restart (this is
+# a bash script which should include adb commands to set up device):
+# ./pwrtest.sh -F devtunables
+#
+defaultTime=30
+garbageminutes=8
+
+function Usage {
+ echo "Usage: $0 [OPTIONS]"
+ echo "-d device : device type (shamu, bullhead, ...)"
+ echo "-f configFile : config file to override individual test times"
+ echo "-g garbageMinutes : time to skip power measurement at beginning of test"
+ echo " default=$garbageminutes minutes"
+ echo "-r restart : 0=no reboot between tests, 1=reboot (default)"
+ echo "-t defaultTimeMin : default time to run each test"
+ echo " default=$defaultTime minutes"
+ echo "-D cmddir : directory to find defs.sh"
+ echo "-F restartHookFile : file of commands to set device tunables after restart (optional)"
+ echo "-R release : release running on device (MDA20, 2054728, etc)"
+}
+
+restart=1
+hz=5
+shadowgrid2TimeMax=25
+
+CMDDIR=$(dirname $0 2>/dev/null)
+CMDDIR=${CMDDIR:=.}
+
+MONSOON=monsoon.par
+
+while [ $# -gt 0 ]
+do
+ case "$1" in
+ (-D) CMDDIR=$2; shift;;
+ (-r) restart=$2; shift;;
+ (-t) defaultTime=$2; shift;;
+ (-F) restartfile=$2; shift;;
+ (-g) garbageminutes=$2; shift;;
+ (-f)
+ configFile=$2;
+ echo "Reading configs from $configFile..."
+ . ./$configFile
+ shift;;
+ (-R) echo $2 > ./build; shift;;
+ (--) ;;
+ (--help)
+ Usage
+ exit 0;;
+ (*)
+ echo "Unknown option: $1"
+ Usage
+ exit 1;;
+ esac
+ shift
+done
+
+. $CMDDIR/defs.sh --
+
+devdir="/data/local/tmp"
+suntempledir=${CMDDIR}/suntemple
+
+case $DEVICE in
+(shamu|hammerhead)
+ HWUITEST=hwuitest
+ onSwipe="700 1847 700 400 50"
+ ;;
+(*)
+ HWUITEST=hwuitest64
+ onSwipe="500 1200 500 550 150"
+ ;;
+esac
+
+scripts="defs.sh systemapps.sh recentfling.sh youtube.sh chromefling.sh $HWUITEST"
+
+if ! $MONSOON >/dev/null 2>&1; then
+ echo $MONSOON must be in your PATH >&2
+ exit 1
+fi
+
+function usbpassthru {
+ if [ "$1" = off ]; then
+ state=off
+ else
+ state=on
+ fi
+ echo Setting usb pass-thru to $state
+ monsoon.par --usbpassthrough=$state
+}
+
+function pwrcollect {
+ collectmin=$1
+ collectmin=${collectmin:=60}
+ # samples = hz * 60 * minutes
+ ((samples=5*60*collectmin))
+ monsoon.par --timestamp --samples $samples --hz 5
+}
+
+function copy_files {
+ adb shell mkdir -p $devdir
+ for file in $scripts
+ do
+ adb push $CMDDIR/$file $devdir
+ done
+}
+
+function install_suntemple {
+ echo Checking for suntemple installation...
+ #stdest=/storage/sdcard0/obb/com.BrueComputing.SunTemple
+ stdest=/storage/emulated/0/obb/com.BrueComputing.SunTemple
+ dircontents=$(adb ls $stdest 2>/dev/null)
+ if [ "$dircontents" = "" ]; then
+ echo Installing suntemple...
+ adb install $suntempledir/*.apk
+ adb shell mkdir -p $stdest
+ adb push $suntempledir/main*obb $stdest
+ else
+ echo dircontents=$dircontents
+ echo Suntemple already installed.
+ fi
+}
+
+function run_test {
+ testName=$1
+ collectMinutes=$2
+ collectOutput=${testName}-power-raw.out
+ powerOutput=${testName}-power.out
+ echo -----------------------------------------------------
+ echo TEST: $testName
+ echo enabled Cores $(adb shell "cat /sys/devices/system/cpu/online")
+ date
+ echo -----------------------------------------------------
+ usbpassthru off
+ pwrcollect $collectMinutes > $collectOutput 2>/dev/null
+ # take off the first 2min of samples
+ totalSamples=$(cat $collectOutput | wc -l)
+ # we throw away the first "garbageminutes" of the data
+ # since it is volatile
+ ((garbage=hz*60*garbageminutes))
+ ((remaining=totalSamples-garbage))
+ if [ $remaining -gt 0 ]; then
+ tail -$remaining $collectOutput > $powerOutput
+ else
+ cp $collectOutput $powerOutput
+ fi
+ echo power data for $testName copied to $powerOutput
+ usbpassthru on
+ sleep 10
+ adb devices
+ sleep 10
+}
+
+function start_job {
+ cmdline="$1"
+ echo Running $cmdline
+ (adb shell "cd $devdir && nohup $cmdline > test.out") &
+ sleep 5
+ kill %1 2>/dev/null
+}
+
+function cleanup_job {
+ testName=$1
+ processName=$2
+ processName=${processName:=" sh "}
+ set -- $(adb shell ps | tr "\r" " " | grep "$processName")
+ echo killing PID=$2...
+ adb shell kill $2
+ sleep 1
+ echo copying test output to $testName...
+ adb pull $devdir/test.out
+ mv test.out ${testName}.out
+ if [ $restart -gt 0 ]; then
+ restart_device
+ else
+ doKeyevent HOME
+ fi
+}
+
+function airplane_mode {
+ if [ "$1" = "on" ]; then
+ mode=true
+ setting=1
+ else
+ mode=false
+ setting=0
+ fi
+ adb shell settings put global airplane_mode_on $setting
+ adb shell am broadcast -a android.intent.action.AIRPLANE_MODE --ez state $mode
+ echo Set airplane mode to $mode
+}
+
+function restart_device {
+ adb reboot
+ echo Wait 60s for device to restart...
+ sleep 60
+ while ! adb root
+ do
+ echo Waiting for device to come up...
+ sleep 10
+ done
+ echo Wait 30s to complete boot activities...
+ sleep 30
+ echo Restart complete.
+ doSwipe $onSwipe
+ restartfile=${restartfile:="./restarthook"}
+ if [ -f $restartfile ]; then
+ # hook to change tunables after a restart
+ . $restartfile
+ fi
+}
+
+usbpassthru on
+adb devices 2>/dev/null
+
+airplane_mode off
+if [ $restart -gt 0 ]; then
+ restart_device
+fi
+
+echo Copying $scripts to device $devdir...
+copy_files
+tests=""
+
+# measure background power
+idleTime=${idleTime:=$defaultTime}
+if [ $idleTime -gt 0 ]; then
+ echo Test 1 : measure idle power for $idleTime minutes
+ run_test idle $idleTime
+ airplane_mode on
+ echo Restarting for power baseline in airplane mode...
+ restart_device
+ run_test idle-airplane $idleTime
+ airplane_mode off
+ # the screen blanks after 30 minutes. The first 2 minutes of the test
+ # have already been filtered off. For our power baseline, keep the first
+ # 20 minutes of the results
+ ((twentyminutes=hz*20*60))
+ powerOutput="idle-power.out"
+ displayPowerOutput="idle-display-power.out"
+ airplanePowerOutput="idle-airplane-power.out"
+ airplaneDisplayPowerOutput="idle-airplane-display-power.out"
+ totalSamples=$(cat $powerOutput | wc -l)
+ if [ $twentyminutes -lt $totalSamples ]; then
+ head -$twentyminutes $powerOutput > $displayPowerOutput
+ head -$twentyminutes $airplanePowerOutput > $airplaneDisplayPowerOutput
+ else
+ cp $powerOutput $displayPowerOutput
+ cp $airplanePowerOutput $airplaneDisplayPowerOutput
+ fi
+ tests="$tests idle"
+fi
+
+recentflingTime=${recentflingTime:=$defaultTime}
+if [ $recentflingTime -gt 0 ]; then
+ echo $(date) Test 2 : recents fling for $recentflingTime minutes
+ airplane_mode on
+ adb shell "cd $devdir && ./systemapps.sh -A -T -i 1"
+ start_job "./recentfling.sh -N -i 1000 -d $DEVICE"
+ run_test recentfling $recentflingTime
+ cleanup_job recentfling
+ airplane_mode off
+ date
+ tests="$tests recentfling"
+fi
+
+suntempleTime=${suntempleTime:=$defaultTime}
+if [ $suntempleTime -gt 0 ]; then
+ echo $(date) Test 2 : run Sun Temple $suntempleTime minutes
+ airplane_mode on
+ install_suntemple
+ adb shell "am start $suntempleActivity"
+ run_test suntemple $suntempleTime
+ adb pull /sdcard/SunTemple/SunTemple/Saved/Logs/SunTemple.log
+ cleanup_job suntemple BrueComp
+ airplane_mode off
+ mv SunTemple.log suntemple.out
+ # grab the suntemple log
+ date
+ tests="$tests suntemple"
+fi
+
+chromeTime=${chromeTime:=$defaultTime}
+if [ $chromeTime -gt 0 ]; then
+ echo $(date) Test 3 : chrome fling for $chromeTime minutes
+ start_job "./chromefling.sh -i 1000 -d $DEVICE"
+ run_test chrome $chromeTime
+ cleanup_job chrome
+ date
+ tests="$tests chrome"
+fi
+
+shadowgrid2Time=${shadowgrid2Time:=$defaultTime}
+if [ $shadowgrid2Time -gt $shadowgrid2TimeMax ]; then
+ # we cap shadowgrid2 time since the display goes
+ # off after 30 minutes
+ shadowgrid2Time=$shadowgrid2TimeMax
+fi
+if [ $shadowgrid2Time -gt 0 ]; then
+ airplane_mode on
+ echo $(date) Test 4 : shadowgrid2 for $shadowgrid2Time minutes
+ start_job "./$HWUITEST shadowgrid2 100000"
+ run_test shadowgrid2 $shadowgrid2Time
+ cleanup_job shadowgrid2 $HWUITEST
+ airplane_mode off
+ date
+ tests="$tests shadowgrid2"
+fi
+
+youtubeTime=${youtubeTime:=$defaultTime}
+if [ $youtubeTime -gt 0 ]; then
+ echo $(date) Test 5 : youtube for $youtubeTime minutes
+ start_job "./youtube.sh -i 1000 -d $DEVICE"
+ run_test youtube $youtubeTime
+ cleanup_job youtube
+ date
+ tests="$tests youtube"
+fi
+
+sysappsTime=${sysappsTime:=$defaultTime}
+if [ $sysappsTime -gt 0 ]; then
+ echo $(date) Test 6 : app switching for $sysappsTime minutes
+ start_job "./systemapps.sh -T -i 1000 -d $DEVICE"
+ run_test sysapps $sysappsTime
+ cleanup_job sysapps
+ date
+ tests="$tests sysapps"
+fi
+
+echo Ran tests: $tests
+echo $tests > tests
+
upCount=6
UP="70 400 70 100 $flingtime"
DOWN="70 100 70 400 $flingtime";;
+(angler)
+ flingtime=150
+ downCount=4
+ upCount=3
+ UP="500 1200 500 550 $flingtime"
+ DOWN="500 550 500 1200 $flingtime";;
(bullhead)
flingtime=200
downCount=5
latency99=$5
if [ ${totalDiff:=0} -eq 0 ]; then
echo Error: could not read frame info with \"dumpsys gfxinfo\"
- exit 1
fi
((frameSum=frameSum+totalDiff))
((latency99Sum=latency99Sum+latency99))
if [ "$totalDiff" -eq 0 ]; then
echo Error: no frames detected. Is the display off?
- exit 1
fi
((jankPct=jankyDiff*100/totalDiff))
resetJankyFrames
# Other options are described below.
#
iterations=1
-tracecategories="gfx view am input memreclaim"
+tracecategories="gfx am memreclaim"
totaltimetest=0
forcecoldstart=0
waitTime=3.0
+memstats=0
-appList="gmail hangouts chrome youtube play home"
+appList="gmail maps chrome youtube play home"
function processLocalOption {
ret=0
(-L) appList=$2; shift; ret=1;;
(-T) totaltimetest=1;;
(-W) waitTime=$2; shift; ret=1;;
+ (-M) memstats=1;;
(*)
echo "$0: unrecognized option: $1"
echo; echo "Usage: $0 [options]"
if [ $iterations -gt 1 ]; then
echo =========================================
echo Iteration $cur of $iterations
+ date
echo =========================================
fi
if [ $iterations -gt 1 -o $cur -eq 1 ]; then
if [ $totaltimetest -eq 0 ]; then
tmpTraceOut="$tmpTraceOutBase-$app.out"
>$tmpTraceOut
- startInstramentation
+ startInstramentation "$app-$cur"
else
+ if [ "$memstats" -gt 0 ]; then
+ startInstramentation "$app-$cur" 0
+ fi
if [ $appnum -eq 0 ]; then
printf "%-8s %5s(ms) %3s(ms) %s %s\n" App Start Iter Jank Latency
fi
printf "%-10s %5.0f %5.0f\n" TOTAL $totaltime $diffTime
fi
+overallSum=0
+appCount=0
if [ $iterations -gt 1 -a $totaltimetest -eq 0 ]; then
echo
echo =========================================
((ave90=l90/iterations))
((ave95=l95/iterations))
((ave99=l99/iterations))
- ((jankPct=100*janks/frames))
+ if [ $frames -gt 0 ]; then
+ ((jankPct=100*janks/frames))
+ fi
printf "%-12s %5d %5d %5d %5d %5d %5d(%d%%) %d/%d/%d\n" $app $1 $ave $2 $4 $5 $janks $jankPct $ave90 $ave95 $ave99
+ ((overallSum=overallSum+ave))
+ ((appCount=appCount+1))
done
+ if [ $appCount -gt 0 ]; then
+ printf "Average Start Time: %.2f\n" $(echo $overallSum $appCount | awk '{ printf "%.2f\n", $1/$2 }')
+ fi
fi
--- /dev/null
+#
+# Script to play a john oliver youtube video N times.
+# For each iteration, Total frames and janky frames are reported.
+#
+# Options are described below.
+#
+iterations=10
+app=youtube
+searchText="last week tonight with john oliver: online harassment"
+
+function processLocalOption {
+ ret=0
+ case "$1" in
+ (-S) searchText="$2"; shift; ret=1;;
+ (*)
+ echo "$0: unrecognized option: $1"
+ echo; echo "Usage: $0 [options]"
+ echo "-i iterations"
+ echo "-S youtube search text"
+ echo "-d device"
+ exit 1;;
+ esac
+ return $ret
+}
+
+CMDDIR=$(dirname $0 2>/dev/null)
+CMDDIR=${CMDDIR:=.}
+. $CMDDIR/defs.sh
+
+case $DEVICE in
+(angler)
+ searchButton="860 177"
+ selectFirstVideo="225 400"
+ enableControls="1000 610"
+ fullScreen="1011 632"
+ ;;
+(shamu)
+ searchButton="1200 160"
+ selectFirstVideo="480 653"
+ enableControls="1377 812"
+ fullScreen="1377 812"
+ ;;
+(bullhead|hammerhead)
+ searchButton="860 177"
+ selectFirstVideo="225 400"
+ enableControls="1000 610"
+ fullScreen="1011 632"
+ ;;
+(volantis)
+ searchButton="1356 93"
+ selectFirstVideo="378 264"
+ enableControls="1464 812"
+ fullScreen="1480 835"
+ ;;
+(*)
+ echo "Error: No display information available for $DEVICE"
+ exit 1;;
+esac
+
+function swipe {
+ count=0
+ while [ $count -lt $2 ]
+ do
+ echo doSwipe...
+ doSwipe $1
+ ((count=count+1))
+ done
+ sleep 1
+}
+
+cur=1
+frameSum=0
+jankSum=0
+latency90Sum=0
+latency95Sum=0
+latency99Sum=0
+
+doKeyevent HOME
+sleep 0.5
+resetJankyFrames $(getPackageName $app)
+
+while [ $cur -le $iterations ]
+do
+ t=$(startActivity $app)
+ sleep 4.0
+ doTap $searchButton
+ sleep 1.0
+ doText "$searchText"
+ sleep 1.0
+ doKeyevent ENTER
+ sleep 5.0
+ doTap $selectFirstVideo
+ sleep 10.0
+ doTap $fullScreen
+ sleep 0.5
+ doTap $fullScreen
+ # 15 minutes
+ ((vidTime=60*15))
+ sleep $vidTime
+ doKeyevent BACK
+ sleep 0.5
+ doKeyevent BACK
+ sleep 0.5
+ doKeyevent BACK
+ sleep 0.5
+
+ set -- $(getJankyFrames $(getPackageName $app))
+ totalDiff=$1
+ jankyDiff=$2
+ latency90=$3
+ latency95=$4
+ latency99=$5
+ if [ ${totalDiff:=0} -eq 0 ]; then
+ echo Error: could not read frame info with \"dumpsys gfxinfo\"
+ fi
+
+ ((frameSum=frameSum+totalDiff))
+ ((jankSum=jankSum+jankyDiff))
+ ((latency90Sum=latency90Sum+latency90))
+ ((latency95Sum=latency95Sum+latency95))
+ ((latency99Sum=latency99Sum+latency99))
+ if [ "$totalDiff" -eq 0 ]; then
+ echo Error: no frames detected. Is the display off?
+ fi
+ ((jankPct=totalDiff>0 ? jankyDiff*100/totalDiff : 0))
+ resetJankyFrames $(getPackageName $app)
+
+
+ echo Frames: $totalDiff latency: $latency90/$latency95/$latency99 Janks: $jankyDiff\(${jankPct}%\)
+ ((cur=cur+1))
+done
+doKeyevent HOME
+((aveJankPct=frameSum>0 ? jankSum*100/frameSum : 0))
+((aveJanks=jankSum/iterations))
+((aveFrames=frameSum/iterations))
+((aveLatency90=latency90Sum/iterations))
+((aveLatency95=latency95Sum/iterations))
+((aveLatency99=latency99Sum/iterations))
+echo AVE: Frames: $aveFrames latency: $aveLatency90/$aveLatency95/$aveLatency99 Janks: $aveJanks\(${aveJankPct}%\)