am dfff022d: Merge "ext4_utils: Wrap wipe.h to be C++ compatible."
* commit 'dfff022dd84587666909c518123b55a7f73accca':
ext4_utils: Wrap wipe.h to be C++ compatible.
diff --git a/ANRdaemon/ANRdaemon.cpp b/ANRdaemon/ANRdaemon.cpp
new file mode 100644
index 0000000..5a4f8bf
--- /dev/null
+++ b/ANRdaemon/ANRdaemon.cpp
@@ -0,0 +1,637 @@
+/*
+ * Copyright (c) 2015, The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google, Inc. nor the names of its contributors
+ * may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <binder/IBinder.h>
+#include <binder/IServiceManager.h>
+#include <binder/Parcel.h>
+
+#include <ctime>
+#include <cutils/properties.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <utils/Log.h>
+#include <utils/String8.h>
+#include <utils/Trace.h>
+#include <zlib.h>
+
+using namespace android;
+
+#define CHECK_PERIOD 1 // in sec
+#define TRACING_CHECK_PERIOD 500000 // in micro sec
+#define MIN_BUFFER_SIZE 4
+#define MIN_BUFFER_SIZE_STR "4"
+#define MAX_BUFFER_SIZE 128
+#define MAX_BUFFER_SIZE_STR "128"
+#define CPU_STAT_ENTRIES 7 // number of cpu stat entries
+
+#ifdef LOG_TAG
+#undef LOG_TAG
+#endif
+
+#define LOG_TAG "anrdaemon"
+
+typedef struct cpu_stat {
+ unsigned long utime, ntime, stime, itime;
+ unsigned long iowtime, irqtime, sirqtime, steal;
+ unsigned long total;
+} cpu_stat_t;
+
+/* Make the logging on/off threshold equal to 95% cpu usage. */
+static int idle_threshold = 5;
+
+static bool quit = false;
+static bool suspend= false;
+static bool err = false;
+static char err_msg[100];
+static bool tracing = false;
+
+static const char *buf_size_kb = "16";
+static uint64_t tag = 0;
+static const char* apps = "";
+
+static cpu_stat_t new_cpu;
+static cpu_stat_t old_cpu;
+static Vector<String16> targets;
+
+/* Log certain kernel activity when enabled */
+static bool log_sched = false;
+static bool log_stack = false;
+static bool log_irq = false;
+static bool log_sync = false;
+static bool log_workq = false;
+
+/* Paths for debugfs controls*/
+static const char* dfs_trace_output_path =
+ "/d/tracing/trace";
+static const char* dfs_irq_path =
+ "/d/tracing/events/irq/enable";
+static const char* dfs_sync_path =
+ "/d/tracing/events/sync/enable";
+static const char* dfs_workq_path =
+ "/d/tracing/events/workqueue/enable";
+static const char* dfs_stack_path =
+ "/d/tracing/options/stacktrace";
+static const char* dfs_sched_switch_path =
+ "/d/tracing/events/sched/sched_switch/enable";
+static const char* dfs_sched_wakeup_path =
+ "/d/tracing/events/sched/sched_wakeup/enable";
+static const char* dfs_control_path =
+ "/d/tracing/tracing_on";
+static const char* dfs_buffer_size_path =
+ "/d/tracing/buffer_size_kb";
+static const char* dfs_tags_property = "debug.atrace.tags.enableflags";
+static const char* dfs_apps_property = "debug.atrace.app_cmdlines";
+
+/*
+ * Read accumulated cpu time counters from /proc/stat into *cpu and
+ * compute their sum in cpu->total.
+ * On open failure sets the global err flag and err_msg (terminating the
+ * main loop); on a short parse *cpu is left untouched so the caller's
+ * next is_heavy_load() comparison sees no load.
+ */
+static void get_cpu_stat(cpu_stat_t *cpu) {
+    FILE *fp = NULL;
+    const char *params = "cpu %lu %lu %lu %lu %lu %lu %lu %*d %*d %*d\n";
+
+    if ((fp = fopen("/proc/stat", "r")) == NULL) {
+        err = true;
+        snprintf(err_msg, sizeof(err_msg),
+                "can't read from /proc/stat with errno %d", errno);
+        return;
+    }
+
+    if (fscanf(fp, params, &cpu->utime, &cpu->ntime,
+            &cpu->stime, &cpu->itime, &cpu->iowtime, &cpu->irqtime,
+            &cpu->sirqtime) != CPU_STAT_ENTRIES) {
+        /*
+         * If failed in getting status, new_cpu won't be updated and
+         * is_heavy_loaded() will return false.
+         */
+        ALOGE("Error in getting cpu status. Skipping this check.");
+        fclose(fp); /* was leaked on this early-return path before */
+        return;
+    }
+
+    cpu->total = cpu->utime + cpu->ntime + cpu->stime + cpu->itime
+            + cpu->iowtime + cpu->irqtime + cpu->sirqtime;
+
+    fclose(fp);
+}
+
+/*
+ * Compare idle time against total cpu time over the last interval to
+ * decide whether the system is heavily loaded.
+ * While tracing is active the idle threshold is raised by 1% so that
+ * logging does not flap on and off when the load sits right at the
+ * boundary.
+ */
+static bool is_heavy_load(void) {
+    int threshold = idle_threshold;
+    if (tracing)
+        threshold++;
+
+    get_cpu_stat(&new_cpu);
+    unsigned long delta_idle = new_cpu.itime - old_cpu.itime;
+    unsigned long delta_total = new_cpu.total - old_cpu.total;
+    old_cpu = new_cpu;
+
+    /* Heavy when the idle fraction is below threshold percent. */
+    return delta_idle * 100 < delta_total * threshold;
+}
+
+/*
+ * Force the userland processes to refresh their property for logging.
+ * Pokes services with SYSPROPS_TRANSACTION so they re-read the
+ * debug.atrace.* system properties set by dfs_set_property().
+ * NOTE(review): the 'services' parameter is immediately overwritten by
+ * sm->listServices(), so the caller's target list is ignored and ALL
+ * registered services are poked -- confirm whether that is intended
+ * (the 'targets' list built in get_options() is otherwise unused here).
+ */
+static void dfs_poke_binder(Vector<String16> services) {
+    sp<IServiceManager> sm = defaultServiceManager();
+    services = sm->listServices();
+    for (size_t i = 0; i < services.size(); i++) {
+        sp<IBinder> obj = sm->checkService(services[i]);
+        if (obj != NULL) {
+            Parcel data;
+            obj->transact(IBinder::SYSPROPS_TRANSACTION, data, NULL, 0);
+        }
+    }
+}
+
+/*
+ * Enable/disable a debugfs property by writing "1"/"0" to its path.
+ * Returns 0 on success, -1 on failure (the global err flag and err_msg
+ * are also set so the main loop terminates).
+ */
+static int dfs_enable(bool enable, const char* path) {
+    int fd = open(path, O_WRONLY);
+    if (fd == -1) {
+        err = true;
+        snprintf(err_msg, sizeof(err_msg), "Can't open %s. Error: %d", path, errno);
+        return -1;
+    }
+    const char* control = (enable?"1":"0");
+    ssize_t len = strlen(control);
+    int max_try = 10; // Fail if write was interrupted for 10 times
+    bool write_failed = false;
+    while (write(fd, control, len) != len) {
+        if (errno == EINTR && max_try-- > 0)
+            continue;
+
+        err = true;
+        write_failed = true;
+        snprintf(err_msg, sizeof(err_msg), "Error %d in writing to %s.", errno, path);
+        /*
+         * Break on a persistent error: the previous version looped
+         * forever when write() kept failing with anything but EINTR.
+         */
+        break;
+    }
+    close(fd);
+    /*
+     * Report this call's outcome rather than the global err flag, which
+     * may already be set by an earlier, unrelated failure.
+     */
+    return (write_failed ? -1 : 0);
+}
+
+/*
+ * Set the userland tracing properties.
+ * Writes the atrace tag mask (and optionally the traced app cmdlines)
+ * into system properties, then toggles the selected kernel debugfs
+ * event files on or off.
+ * NOTE(review): "%#" PRIx64 requires <inttypes.h>, which is not included
+ * directly by this file -- presumably pulled in transitively; confirm.
+ */
+static void dfs_set_property(uint64_t mtag, const char* mapp, bool enable) {
+    char buf[64];
+    snprintf(buf, 64, "%#" PRIx64, mtag);
+    if (property_set(dfs_tags_property, buf) < 0) {
+        err = true;
+        sprintf(err_msg, "Failed to set debug tags system properties.");
+    }
+
+    /* Only touch the app cmdline property when one was actually given. */
+    if (strlen(mapp) > 0
+            && property_set(dfs_apps_property, mapp) < 0) {
+        err = true;
+        sprintf(err_msg, "Failed to set debug applications.");
+    }
+
+    /* Toggle each kernel event category the user asked for. */
+    if (log_sched) {
+        dfs_enable(enable, dfs_sched_switch_path);
+        dfs_enable(enable, dfs_sched_wakeup_path);
+    }
+    if (log_stack) {
+        dfs_enable(enable, dfs_stack_path);
+    }
+    if (log_irq) {
+        dfs_enable(enable, dfs_irq_path);
+    }
+    if (log_sync) {
+        dfs_enable(enable, dfs_sync_path);
+    }
+    if (log_workq) {
+        dfs_enable(enable, dfs_workq_path);
+    }
+}
+
+/*
+ * Start logging when cpu usage is high. Meanwhile, monitor the cpu usage
+ * and stop logging when it drops down.
+ * Sets the global 'tracing' flag for the duration of the session so that
+ * is_heavy_load() and dump_trace() can observe it.
+ */
+static void start_tracing(void) {
+    ALOGD("High cpu usage, start logging.");
+
+    /* Enable userland trace properties and poke services to re-read them. */
+    dfs_set_property(tag, apps, true);
+    dfs_poke_binder(targets);
+
+    if (dfs_enable(true, dfs_control_path) != 0) {
+        ALOGE("Failed to start tracing.");
+        return;
+    }
+    tracing = true;
+
+    /* Stop logging when cpu usage drops or the daemon is suspended.*/
+    do {
+        usleep(TRACING_CHECK_PERIOD);
+    } while (!suspend && is_heavy_load());
+
+    if (dfs_enable(false, dfs_control_path) != 0) {
+        ALOGE("Failed to stop tracing.");
+    }
+
+    /* Clear the properties and poke again so apps stop emitting events. */
+    dfs_set_property(0, "", false);
+    dfs_poke_binder(targets);
+
+    ALOGD("Usage back to low, stop logging.");
+    tracing = false;
+}
+
+/*
+ * Set the tracing log buffer size.
+ * Note the actual buffer size will be buf_size_kb * number of cores.
+ * E.g. for dory, the total buffer size is buf_size_kb * 4.
+ * Returns 0 on success, -1 on failure (err/err_msg are also set).
+ */
+static int set_tracing_buffer_size(void) {
+    int fd = open(dfs_buffer_size_path, O_WRONLY);
+    if (fd == -1) {
+        err = true;
+        snprintf(err_msg, sizeof(err_msg),
+                "Can't open atrace buffer size file under /d/tracing.");
+        return -1;
+    }
+    ssize_t len = strlen(buf_size_kb);
+    bool write_failed = false;
+    if (write(fd, buf_size_kb, len) != len) {
+        err = true;
+        write_failed = true;
+        snprintf(err_msg, sizeof(err_msg),
+                "Error in writing to atrace buffer size file.");
+    }
+    close(fd);
+    /*
+     * Report this call's result, not the global err flag, which may have
+     * been set by an earlier, unrelated failure.
+     */
+    return (write_failed ? -1 : 0);
+}
+
+/*
+ * Main monitoring loop: sample cpu usage once per CHECK_PERIOD and hand
+ * off to start_tracing() whenever the load crosses the threshold.
+ * Exits when 'quit' is set by SIGQUIT or a fatal error flags 'err'.
+ */
+static void start(void) {
+    if (set_tracing_buffer_size() != 0)
+        return;
+
+    /* Prime the baseline sample before the first comparison. */
+    get_cpu_stat(&old_cpu);
+    sleep(CHECK_PERIOD);
+
+    while (!quit && !err) {
+        if (!suspend && is_heavy_load()) {
+            /*
+             * Raise our priority so logging can be stopped promptly and
+             * the trace buffer is not overwritten.
+             */
+            setpriority(PRIO_PROCESS, 0, -20);
+            start_tracing();
+            setpriority(PRIO_PROCESS, 0, 0);
+        }
+        sleep(CHECK_PERIOD);
+    }
+}
+
+/*
+ * Dump the trace log in a zlib-compressed format for systrace to
+ * visualize. Triggered by SIGUSR1. Suspends monitoring for the duration
+ * of the dump and waits (up to 5 polls) for any active tracing session
+ * to stop first.
+ */
+static void dump_trace()
+{
+    int remain_attempts = 5;
+    suspend = true;
+    while (tracing) {
+        ALOGI("Waiting logging to stop.");
+        usleep(TRACING_CHECK_PERIOD);
+        remain_attempts--;
+        if (remain_attempts == 0) {
+            ALOGE("Can't stop logging after 5 attempts. Dump aborted.");
+            return;
+        }
+    }
+
+    /*
+     * Create a dump file "dump_of_anrdaemon.<current_time>" under /data/anr/
+     */
+    time_t now = time(0);
+    struct tm tstruct;
+    char time_buf[80];
+    char path_buf[200];
+    const char* header = " done\nTRACE:\n";
+    ssize_t header_len = strlen(header);
+    tstruct = *localtime(&now);
+    strftime(time_buf, sizeof(time_buf), "%Y-%m-%d.%X", &tstruct);
+    sprintf(path_buf, "/data/anr/dump_of_anrdaemon.%s", time_buf);
+    int output_fd = creat(path_buf, S_IRWXU);
+    if (output_fd == -1) {
+        ALOGE("Failed to create %s. Dump aborted.", path_buf);
+        return;
+    }
+
+    /* systrace expects this marker line before the raw trace data. */
+    if (write(output_fd, header, strlen(header)) != header_len) {
+        ALOGE("Failed to write the header.");
+        close(output_fd);
+        return;
+    }
+
+    int trace_fd = open(dfs_trace_output_path, O_RDWR);
+    if (trace_fd == -1) {
+        ALOGE("Failed to open %s. Dump aborted.", dfs_trace_output_path);
+        close(output_fd);
+        return;
+    }
+
+    /* Stream the trace file through zlib deflate in 64KB chunks. */
+    z_stream zs;
+    uint8_t *in, *out;
+    int result, flush;
+
+    memset(&zs, 0, sizeof(zs));
+    result = deflateInit(&zs, Z_DEFAULT_COMPRESSION);
+    if (result != Z_OK) {
+        ALOGE("error initializing zlib: %d\n", result);
+        close(trace_fd);
+        close(output_fd);
+        return;
+    }
+
+    /* NOTE(review): malloc results are not checked before use -- confirm
+     * whether a crash on OOM is acceptable for this daemon. */
+    const size_t bufSize = 64*1024;
+    in = (uint8_t*)malloc(bufSize);
+    out = (uint8_t*)malloc(bufSize);
+    flush = Z_NO_FLUSH;
+
+    zs.next_out = out;
+    zs.avail_out = bufSize;
+
+    do {
+        /* Refill the input buffer once deflate has consumed it all. */
+        if (zs.avail_in == 0) {
+            result = read(trace_fd, in, bufSize);
+            if (result < 0) {
+                ALOGE("error reading trace: %s", strerror(errno));
+                result = Z_STREAM_END;
+                break;
+            } else if (result == 0) {
+                /* EOF: tell deflate to flush its remaining output. */
+                flush = Z_FINISH;
+            } else {
+                zs.next_in = in;
+                zs.avail_in = result;
+            }
+        }
+
+        /* Drain the output buffer to the dump file when it fills up. */
+        if (zs.avail_out == 0) {
+            result = write(output_fd, out, bufSize);
+            if ((size_t)result < bufSize) {
+                ALOGE("error writing deflated trace: %s", strerror(errno));
+                result = Z_STREAM_END;
+                zs.avail_out = bufSize;
+                break;
+            }
+            zs.next_out = out;
+            zs.avail_out = bufSize;
+        }
+
+    } while ((result = deflate(&zs, flush)) == Z_OK);
+
+    if (result != Z_STREAM_END) {
+        ALOGE("error deflating trace: %s\n", zs.msg);
+    }
+
+    /* Flush whatever deflate left in the output buffer. */
+    if (zs.avail_out < bufSize) {
+        size_t bytes = bufSize - zs.avail_out;
+        result = write(output_fd, out, bytes);
+        if ((size_t)result < bytes) {
+            ALOGE("error writing deflated trace: %s", strerror(errno));
+        }
+    }
+
+    result = deflateEnd(&zs);
+    if (result != Z_OK) {
+        ALOGE("error cleaning up zlib: %d\n", result);
+    }
+
+    free(in);
+    free(out);
+
+    close(trace_fd);
+    close(output_fd);
+
+    suspend = false;
+    ALOGI("Finished dump. Output file stored at: %s", path_buf);
+}
+
+/*
+ * Dispatcher for the daemon's control signals (see register_sighandler).
+ * NOTE(review): dump_trace() does stdio/zlib/heap work and is not
+ * async-signal-safe; calling it directly from a handler can deadlock if
+ * the signal lands mid-malloc -- consider setting a flag and dumping
+ * from the main loop instead. Confirm before relying on SIGUSR1 dumps.
+ */
+static void handle_signal(int signo)
+{
+    switch (signo) {
+        case SIGQUIT:
+            /* Stop logging and let the main loop exit. */
+            suspend = true;
+            quit = true;
+            break;
+        case SIGSTOP:
+            /* Suspend monitoring without exiting. */
+            suspend = true;
+            break;
+        case SIGCONT:
+            /* Resume normal monitoring. */
+            suspend = false;
+            break;
+        case SIGUSR1:
+            /* Dump the current trace buffer in compressed form. */
+            dump_trace();
+    }
+}
+
+/*
+ * Set the signal handler:
+ * SIGQUIT: Reset debugfs and tracing property and terminate the daemon.
+ * SIGSTOP: Stop logging and suspend the daemon.
+ * SIGCONT: Resume the daemon as normal.
+ * SIGUSR1: Dump the logging to a compressed format for systrace to visualize.
+ *
+ * NOTE(review): SIGSTOP can never be caught or blocked per POSIX, so the
+ * sigaction(SIGSTOP, ...) call below fails (its return value is not
+ * checked) -- SIGTSTP may be what was intended; confirm.
+ */
+static void register_sighandler(void)
+{
+    struct sigaction sa;
+    sigset_t block_mask;
+
+    /* Block the other control signals while one handler is running. */
+    sigemptyset(&block_mask);
+    sigaddset (&block_mask, SIGQUIT);
+    sigaddset (&block_mask, SIGSTOP);
+    sigaddset (&block_mask, SIGCONT);
+    sigaddset (&block_mask, SIGUSR1);
+
+    sa.sa_flags = 0;
+    sa.sa_mask = block_mask;
+    sa.sa_handler = handle_signal;
+    sigaction(SIGQUIT, &sa, NULL);
+    sigaction(SIGSTOP, &sa, NULL);
+    sigaction(SIGCONT, &sa, NULL);
+    sigaction(SIGUSR1, &sa, NULL);
+}
+
+/*
+ * Print usage, option, category and signal-control help, then exit(0).
+ * (Fixes user-facing typos: "categoris", "show helps", "includes".)
+ */
+static void show_help(void) {
+
+    fprintf(stderr, "usage: ANRdaemon [options] [categories...]\n");
+    fprintf(stdout, "Options include:\n"
+                    "   -a appname  enable app-level tracing for a comma "
+                        "separated list of cmdlines\n"
+                    "   -t N        cpu threshold for logging to start "
+                        "(min = 50, max = 100, default = 95)\n"
+                    "   -s N        use a trace buffer size of N KB "
+                        "default to 16KB\n"
+                    "   -h          show help\n");
+    fprintf(stdout, "Categories include:\n"
+                    "    am         - activity manager\n"
+                    "    sm         - sync manager\n"
+                    "    input      - input\n"
+                    "    app        - application\n"
+                    "    dalvik     - dalvik VM\n"
+                    "    irq        - kernel irq events\n"
+                    "    sched      - kernel scheduler activity\n"
+                    "    stack      - kernel stack\n"
+                    "    sync       - kernel sync activity\n"
+                    "    workq      - kernel work queues\n");
+    fprintf(stdout, "Control includes:\n"
+                    "    SIGQUIT: terminate the process\n"
+                    "    SIGSTOP: suspend all function of the daemon\n"
+                    "    SIGCONT: resume the normal function\n"
+                    "    SIGUSR1: dump the current logging in a compressed form\n");
+    exit(0);
+}
+
+/*
+ * Parse command-line options and trailing category arguments.
+ * Returns 0 on success, 1 on any invalid option or category.
+ * Side effects: fills the globals tag/log_* flags, buf_size_kb,
+ * idle_threshold, apps and the binder poke target list.
+ */
+static int get_options(int argc, char *argv[]) {
+    int opt = 0;
+    int threshold;
+    while ((opt = getopt(argc, argv, "a:s:t:h")) >= 0) {
+        switch(opt) {
+            case 'a':
+                apps = optarg;
+                break;
+            case 's':
+                /* Clamp the requested buffer size to [MIN, MAX] KB. */
+                if (atoi(optarg) > MAX_BUFFER_SIZE)
+                    buf_size_kb = MAX_BUFFER_SIZE_STR;
+                else if (atoi(optarg) < MIN_BUFFER_SIZE)
+                    buf_size_kb = MIN_BUFFER_SIZE_STR;
+                else
+                    buf_size_kb = optarg;
+                break;
+            case 't':
+                /*
+                 * Stored as an idle percentage: logging starts when idle
+                 * time drops below (100 - threshold) percent.
+                 */
+                threshold = atoi(optarg);
+                if (threshold > 100 || threshold < 50) {
+                    fprintf(stderr, "logging threshold should be 50-100\n");
+                    return 1;
+                }
+                idle_threshold = 100 - threshold;
+                break;
+            case 'h':
+                show_help();
+                break;
+            default:
+                fprintf(stderr, "Error in getting options.\n"
+                        "run \"%s -h\" for usage.\n", argv[0]);
+                return 1;
+        }
+    }
+
+    /* Remaining positional arguments are trace categories. */
+    for (int i = optind; i < argc; i++) {
+        if (strcmp(argv[i], "am") == 0) {
+            tag |= ATRACE_TAG_ACTIVITY_MANAGER;
+        } else if (strcmp(argv[i], "input") == 0) {
+            tag |= ATRACE_TAG_INPUT;
+        } else if (strcmp(argv[i], "sm") == 0) {
+            tag |= ATRACE_TAG_SYNC_MANAGER;
+        } else if (strcmp(argv[i], "app") == 0) {
+            tag |= ATRACE_TAG_APP;
+        } else if (strcmp(argv[i], "dalvik") == 0) {
+            tag |= ATRACE_TAG_DALVIK;
+        } else if (strcmp(argv[i], "sched") == 0) {
+            log_sched = true;
+        } else if (strcmp(argv[i], "stack") == 0) {
+            log_stack = true;
+        } else if (strcmp(argv[i], "workq") == 0) {
+            log_workq = true;
+        } else if (strcmp(argv[i], "irq") == 0) {
+            log_irq = true;
+        } else if (strcmp(argv[i], "sync") == 0) {
+            log_sync = true;
+        } else {
+            fprintf(stderr, "invalid category: %s\n"
+                    "run \"%s -h\" for usage.\n", argv[i], argv[0]);
+            return 1;
+        }
+    }
+
+    bool kernel_log = log_sched || log_stack || log_workq || log_irq || log_sync;
+    /*
+     * NOTE(review): app_log is true when NO app-level tag was selected;
+     * the name reads inverted relative to its use below -- confirm the
+     * intended meaning before changing.
+     */
+    bool app_log = (tag == 0);
+
+    /*
+     * There are ~80 services. Too expensive to poke all of them. Just include
+     * service that may help high CPU ANR analysis.
+     */
+    if (app_log) {
+        targets.push_back(String16("activity"));
+        targets.push_back(String16("alarm"));
+        targets.push_back(String16("appops"));
+        targets.push_back(String16("cpuinfo"));
+        targets.push_back(String16("meminfo"));
+        targets.push_back(String16("procstats"));
+        targets.push_back(String16("input"));
+        targets.push_back(String16("lancherapps")); // presumably "launcherapps"? verify service name
+        targets.push_back(String16("bluetooth_manager"));
+        targets.push_back(String16("SurfaceFlinger"));
+        targets.push_back(String16("ClockworkProxyNativeService"));
+    }
+    /* Default: trace the activity manager when nothing was requested. */
+    if (!kernel_log && !app_log) {
+        tag |= ATRACE_TAG_ACTIVITY_MANAGER;
+        targets.push_back(String16("activity"));
+    }
+
+    return 0;
+}
+
+/*
+ * Entry point: parse options, daemonize, install signal handlers,
+ * truncate the previous trace log, then run the monitoring loop.
+ * Returns 1 on any setup failure or fatal runtime error, 0 otherwise.
+ */
+int main(int argc, char *argv[])
+{
+    if (get_options(argc, argv) != 0)
+        return 1;
+
+    /* Detach from the controlling terminal and run in the background. */
+    if (daemon(0, 0) != 0)
+        return 1;
+
+    register_sighandler();
+
+    /* Clear any previous trace log by overwriting it with a new file. */
+    /* NOTE(review): creat(..., 0) leaves the file with no permission
+     * bits -- presumably fine because the daemon runs as root; confirm. */
+    int fd = creat(dfs_trace_output_path, 0);
+    if (fd == -1) {
+        ALOGE("Failed to open and clean up previous log");
+        return 1;
+    }
+    close(fd);
+
+    ALOGI("ANRdaemon starting");
+    start();
+
+    if (err)
+        ALOGE("ANRdaemon stopped due to Error: %s", err_msg);
+
+    ALOGI("ANRdaemon terminated.");
+
+    return (err ? 1 : 0);
+}
diff --git a/ANRdaemon/Android.mk b/ANRdaemon/Android.mk
new file mode 100644
index 0000000..51bebc5
--- /dev/null
+++ b/ANRdaemon/Android.mk
@@ -0,0 +1,18 @@
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= ANRdaemon.cpp
+
+LOCAL_C_INCLUDES += external/zlib
+
+LOCAL_MODULE:= anrdaemon
+
+LOCAL_MODULE_TAGS:= optional
+
+LOCAL_SHARED_LIBRARIES := \
+ libbinder \
+ libcutils \
+ libutils \
+ libz \
+
+include $(BUILD_EXECUTABLE)
diff --git a/ext4_utils/canned_fs_config.c b/ext4_utils/canned_fs_config.c
index 69646b1..2165feb 100644
--- a/ext4_utils/canned_fs_config.c
+++ b/ext4_utils/canned_fs_config.c
@@ -80,7 +80,7 @@
return 0;
}
-void canned_fs_config(const char* path, int dir,
+void canned_fs_config(const char* path, int dir, const char* target_out_path,
unsigned* uid, unsigned* gid, unsigned* mode, uint64_t* capabilities) {
Path key;
key.path = path+1; // canned paths lack the leading '/'
@@ -99,7 +99,7 @@
unsigned c_uid, c_gid, c_mode;
uint64_t c_capabilities;
- fs_config(path, dir, &c_uid, &c_gid, &c_mode, &c_capabilities);
+ fs_config(path, dir, target_out_path, &c_uid, &c_gid, &c_mode, &c_capabilities);
if (c_uid != *uid) printf("%s uid %d %d\n", path, *uid, c_uid);
if (c_gid != *gid) printf("%s gid %d %d\n", path, *gid, c_gid);
diff --git a/ext4_utils/canned_fs_config.h b/ext4_utils/canned_fs_config.h
index aec923b..d9f51ca 100644
--- a/ext4_utils/canned_fs_config.h
+++ b/ext4_utils/canned_fs_config.h
@@ -20,7 +20,7 @@
#include <inttypes.h>
int load_canned_fs_config(const char* fn);
-void canned_fs_config(const char* path, int dir,
+void canned_fs_config(const char* path, int dir, const char* target_out_path,
unsigned* uid, unsigned* gid, unsigned* mode, uint64_t* capabilities);
#endif
diff --git a/ext4_utils/ext4_crypt_init_extensions.cpp b/ext4_utils/ext4_crypt_init_extensions.cpp
index 7ca471a..6e8695e 100644
--- a/ext4_utils/ext4_crypt_init_extensions.cpp
+++ b/ext4_utils/ext4_crypt_init_extensions.cpp
@@ -26,7 +26,7 @@
int sock = -1;
while (true) {
- sock = socket_local_client("vold",
+ sock = socket_local_client("cryptd",
ANDROID_SOCKET_NAMESPACE_RESERVED,
SOCK_STREAM);
if (sock >= 0) {
@@ -36,7 +36,7 @@
}
if (sock < 0) {
- KLOG_INFO(TAG, "Cannot open vold, failing command\n");
+ KLOG_INFO(TAG, "Cannot open vold, failing command (%s)\n", strerror(errno));
return "";
}
@@ -54,24 +54,25 @@
// framework is down, so this is (mostly) OK.
std::string actual_command = arbitrary_sequence_number + " " + command;
if (write(sock, actual_command.c_str(), actual_command.size() + 1) < 0) {
- KLOG_ERROR(TAG, "Cannot write command\n");
+ KLOG_ERROR(TAG, "Cannot write command (%s)\n", strerror(errno));
return "";
}
struct pollfd poll_sock = {sock, POLLIN, 0};
- int rc = poll(&poll_sock, 1, vold_command_timeout_ms);
+ int rc = TEMP_FAILURE_RETRY(poll(&poll_sock, 1, vold_command_timeout_ms));
if (rc < 0) {
- KLOG_ERROR(TAG, "Error in poll %s\n", strerror(errno));
+ KLOG_ERROR(TAG, "Error in poll (%s)\n", strerror(errno));
return "";
}
+
if (!(poll_sock.revents & POLLIN)) {
KLOG_ERROR(TAG, "Timeout\n");
return "";
}
char buffer[4096];
memset(buffer, 0, sizeof(buffer));
- rc = read(sock, buffer, sizeof(buffer));
+ rc = TEMP_FAILURE_RETRY(read(sock, buffer, sizeof(buffer)));
if (rc <= 0) {
if (rc == 0) {
KLOG_ERROR(TAG, "Lost connection to Vold - did it crash?\n");
@@ -102,7 +103,7 @@
// Make sure folder exists. Use make_dir to set selinux permissions.
if (ensure_dir_exists(UnencryptedProperties::GetPath(dir).c_str())) {
- KLOG_ERROR(TAG, "Failed to create %s with error %s\n",
+ KLOG_ERROR(TAG, "Failed to create %s (%s)\n",
UnencryptedProperties::GetPath(dir).c_str(),
strerror(errno));
return -1;
@@ -122,7 +123,7 @@
KEY_SPEC_SESSION_KEYRING);
if (device_keyring == -1) {
- KLOG_ERROR(TAG, "Failed to create keyring\n");
+ KLOG_ERROR(TAG, "Failed to create keyring (%s)\n", strerror(errno));
return -1;
}
@@ -163,3 +164,13 @@
return 0;
}
+
+int e4crypt_set_user_crypto_policies(const char* dir)
+{
+ auto command = std::string() + "cryptfs setusercryptopolicies " + dir;
+ auto result = vold_command(command);
+ // ext4enc:TODO proper error handling
+ KLOG_INFO(TAG, "setusercryptopolicies returned with result %s\n",
+ result.c_str());
+ return 0;
+}
diff --git a/ext4_utils/ext4_crypt_init_extensions.h b/ext4_utils/ext4_crypt_init_extensions.h
index 7931124..d02d181 100644
--- a/ext4_utils/ext4_crypt_init_extensions.h
+++ b/ext4_utils/ext4_crypt_init_extensions.h
@@ -11,5 +11,6 @@
int e4crypt_set_directory_policy(const char* path);
bool e4crypt_non_default_key(const char* path);
int do_policy_set(const char *directory, const char *policy, int policy_length);
+int e4crypt_set_user_crypto_policies(const char* path);
__END_DECLS
diff --git a/ext4_utils/ext4_utils.h b/ext4_utils/ext4_utils.h
index 8cc8c2c..0159dbe 100644
--- a/ext4_utils/ext4_utils.h
+++ b/ext4_utils/ext4_utils.h
@@ -152,12 +152,12 @@
void ext4_parse_sb_info(struct ext4_super_block *sb);
u16 ext4_crc16(u16 crc_in, const void *buf, int size);
-typedef void (*fs_config_func_t)(const char *path, int dir, unsigned *uid, unsigned *gid,
- unsigned *mode, uint64_t *capabilities);
+typedef void (*fs_config_func_t)(const char *path, int dir, const char *target_out_path,
+ unsigned *uid, unsigned *gid, unsigned *mode, uint64_t *capabilities);
struct selabel_handle;
-int make_ext4fs_internal(int fd, const char *directory,
+int make_ext4fs_internal(int fd, const char *directory, const char *_target_out_directory,
const char *mountpoint, fs_config_func_t fs_config_func, int gzip,
int sparse, int crc, int wipe, int real_uuid,
struct selabel_handle *sehnd, int verbose, time_t fixed_time,
diff --git a/ext4_utils/key_control.cpp b/ext4_utils/key_control.cpp
index 3d775b7..39bd140 100644
--- a/ext4_utils/key_control.cpp
+++ b/ext4_utils/key_control.cpp
@@ -5,8 +5,8 @@
#include <sys/syscall.h>
/* keyring keyctl commands */
+#define KEYCTL_REVOKE 3 /* revoke a key */
#define KEYCTL_SETPERM 5 /* set permissions for a key in a keyring */
-#define KEYCTL_UNLINK 9 /* unlink a key from a keyring */
#define KEYCTL_SEARCH 10 /* search for a key in a keyring */
static long keyctl(int cmd, ...)
@@ -32,6 +32,11 @@
return syscall(__NR_add_key, type, description, payload, plen, ringid);
}
+long keyctl_revoke(key_serial_t id)
+{
+ return keyctl(KEYCTL_REVOKE, id);
+}
+
long keyctl_setperm(key_serial_t id, int permissions)
{
return keyctl(KEYCTL_SETPERM, id, permissions);
diff --git a/ext4_utils/key_control.h b/ext4_utils/key_control.h
index 8e6e32b..bbf0ace 100644
--- a/ext4_utils/key_control.h
+++ b/ext4_utils/key_control.h
@@ -21,6 +21,8 @@
size_t plen,
key_serial_t ringid);
+long keyctl_revoke(key_serial_t id);
+
long keyctl_setperm(key_serial_t id, int permissions);
long keyctl_search(key_serial_t ringid, const char *type,
diff --git a/ext4_utils/make_ext4fs.c b/ext4_utils/make_ext4fs.c
index 52c3208..b4ebbce 100644
--- a/ext4_utils/make_ext4fs.c
+++ b/ext4_utils/make_ext4fs.c
@@ -123,7 +123,7 @@
that does not exist on disk (e.g. lost+found).
dir_path is an absolute path, with trailing slash, to the same directory
if the image were mounted at the specified mount point */
-static u32 build_directory_structure(const char *full_path, const char *dir_path,
+static u32 build_directory_structure(const char *full_path, const char *dir_path, const char *target_out_path,
u32 dir_inode, fs_config_func_t fs_config_func,
struct selabel_handle *sehnd, int verbose, time_t fixed_time)
{
@@ -201,7 +201,7 @@
unsigned int uid = 0;
unsigned int gid = 0;
int dir = S_ISDIR(stat.st_mode);
- fs_config_func(dentries[i].path, dir, &uid, &gid, &mode, &capabilities);
+ fs_config_func(dentries[i].path, dir, target_out_path, &uid, &gid, &mode, &capabilities);
dentries[i].mode = mode;
dentries[i].uid = uid;
dentries[i].gid = gid;
@@ -285,8 +285,8 @@
ret = asprintf(&subdir_dir_path, "%s/", dentries[i].path);
if (ret < 0)
critical_error_errno("asprintf");
- entry_inode = build_directory_structure(subdir_full_path,
- subdir_dir_path, inode, fs_config_func, sehnd, verbose, fixed_time);
+ entry_inode = build_directory_structure(subdir_full_path, subdir_dir_path, target_out_path,
+ inode, fs_config_func, sehnd, verbose, fixed_time);
free(subdir_full_path);
free(subdir_dir_path);
} else if (dentries[i].file_type == EXT4_FT_SYMLINK) {
@@ -404,7 +404,7 @@
reset_ext4fs_info();
info.len = len;
- return make_ext4fs_internal(fd, NULL, mountpoint, NULL, 0, 1, 0, 0, 0, sehnd, 0, -1, NULL);
+ return make_ext4fs_internal(fd, NULL, NULL, mountpoint, NULL, 0, 1, 0, 0, 0, sehnd, 0, -1, NULL);
}
int make_ext4fs(const char *filename, long long len,
@@ -422,7 +422,7 @@
return EXIT_FAILURE;
}
- status = make_ext4fs_internal(fd, NULL, mountpoint, NULL, 0, 0, 0, 1, 0, sehnd, 0, -1, NULL);
+ status = make_ext4fs_internal(fd, NULL, NULL, mountpoint, NULL, 0, 0, 0, 1, 0, sehnd, 0, -1, NULL);
close(fd);
return status;
@@ -488,7 +488,7 @@
return canonicalize_slashes(str, false);
}
-int make_ext4fs_internal(int fd, const char *_directory,
+int make_ext4fs_internal(int fd, const char *_directory, const char *_target_out_directory,
const char *_mountpoint, fs_config_func_t fs_config_func, int gzip,
int sparse, int crc, int wipe, int real_uuid,
struct selabel_handle *sehnd, int verbose, time_t fixed_time,
@@ -498,6 +498,7 @@
u16 root_mode;
char *mountpoint;
char *directory = NULL;
+ char *target_out_directory = NULL;
if (setjmp(setjmp_env))
return EXIT_FAILURE; /* Handle a call to longjmp() */
@@ -519,6 +520,10 @@
directory = canonicalize_rel_slashes(_directory);
}
+ if (_target_out_directory) {
+ target_out_directory = canonicalize_rel_slashes(_target_out_directory);
+ }
+
if (info.len <= 0)
info.len = get_file_size(fd);
@@ -607,7 +612,7 @@
root_inode_num = build_default_directory_structure(mountpoint, sehnd);
#else
if (directory)
- root_inode_num = build_directory_structure(directory, mountpoint, 0,
+ root_inode_num = build_directory_structure(directory, mountpoint, target_out_directory, 0,
fs_config_func, sehnd, verbose, fixed_time);
else
root_inode_num = build_default_directory_structure(mountpoint, sehnd);
diff --git a/ext4_utils/make_ext4fs_main.c b/ext4_utils/make_ext4fs_main.c
index f28e1b2..03872db 100644
--- a/ext4_utils/make_ext4fs_main.c
+++ b/ext4_utils/make_ext4fs_main.c
@@ -57,7 +57,7 @@
fprintf(stderr, " [ -L <label> ] [ -f ] [ -a <android mountpoint> ] [ -u ]\n");
fprintf(stderr, " [ -S file_contexts ] [ -C fs_config ] [ -T timestamp ]\n");
fprintf(stderr, " [ -z | -s ] [ -w ] [ -c ] [ -J ] [ -v ] [ -B <block_list_file> ]\n");
- fprintf(stderr, " <filename> [<directory>]\n");
+ fprintf(stderr, " <filename> [[<directory>] <target_out_directory>]\n");
}
int main(int argc, char **argv)
@@ -65,6 +65,7 @@
int opt;
const char *filename = NULL;
const char *directory = NULL;
+ const char *target_out_directory = NULL;
char *mountpoint = NULL;
fs_config_func_t fs_config_func = NULL;
const char *fs_config_file = NULL;
@@ -216,6 +217,9 @@
if (optind < argc)
directory = argv[optind++];
+ if (optind < argc)
+ target_out_directory = argv[optind++];
+
if (optind < argc) {
fprintf(stderr, "Unexpected argument: %s\n", argv[optind]);
usage(argv[0]);
@@ -232,7 +236,7 @@
fd = STDOUT_FILENO;
}
- exitcode = make_ext4fs_internal(fd, directory, mountpoint, fs_config_func, gzip,
+ exitcode = make_ext4fs_internal(fd, directory, target_out_directory, mountpoint, fs_config_func, gzip,
sparse, crc, wipe, real_uuid, sehnd, verbose, fixed_time, block_list_file);
close(fd);
if (block_list_file)
diff --git a/ext4_utils/mkuserimg.sh b/ext4_utils/mkuserimg.sh
index 3a6006e..8667013 100755
--- a/ext4_utils/mkuserimg.sh
+++ b/ext4_utils/mkuserimg.sh
@@ -6,7 +6,7 @@
cat<<EOT
Usage:
mkuserimg.sh [-s] SRC_DIR OUTPUT_FILE EXT_VARIANT MOUNT_POINT SIZE [-j <journal_size>]
- [-T TIMESTAMP] [-C FS_CONFIG] [-B BLOCK_LIST_FILE] [-L LABEL] [FILE_CONTEXTS]
+ [-T TIMESTAMP] [-C FS_CONFIG] [-D PRODUCT_OUT] [-B BLOCK_LIST_FILE] [-L LABEL] [FILE_CONTEXTS]
EOT
}
@@ -55,6 +55,12 @@
shift; shift
fi
+PRODUCT_OUT=
+if [[ "$1" == "-D" ]]; then
+ PRODUCT_OUT=$2
+ shift; shift
+fi
+
BLOCK_LIST=
if [[ "$1" == "-B" ]]; then
BLOCK_LIST=$2
@@ -98,7 +104,7 @@
OPT="$OPT -L $LABEL"
fi
-MAKE_EXT4FS_CMD="make_ext4fs $ENABLE_SPARSE_IMAGE -T $TIMESTAMP $OPT -l $SIZE $JOURNAL_FLAGS -a $MOUNT_POINT $OUTPUT_FILE $SRC_DIR"
+MAKE_EXT4FS_CMD="make_ext4fs $ENABLE_SPARSE_IMAGE -T $TIMESTAMP $OPT -l $SIZE $JOURNAL_FLAGS -a $MOUNT_POINT $OUTPUT_FILE $SRC_DIR $PRODUCT_OUT"
echo $MAKE_EXT4FS_CMD
$MAKE_EXT4FS_CMD
if [ $? -ne 0 ]; then
diff --git a/f2fs_utils/f2fs_sparseblock.c b/f2fs_utils/f2fs_sparseblock.c
index 950628c..e39a61f 100644
--- a/f2fs_utils/f2fs_sparseblock.c
+++ b/f2fs_utils/f2fs_sparseblock.c
@@ -262,13 +262,13 @@
struct f2fs_checkpoint *cp1, *cp2, *cur_cp;
int cur_cp_no;
- unsigned long blk_size;// = 1<<le32_to_cpu(info->sb->log_blocksize);
+ unsigned long blk_size;
unsigned long long cp1_version = 0, cp2_version = 0;
unsigned long long cp1_start_blk_no;
unsigned long long cp2_start_blk_no;
u32 bmp_size;
- blk_size = 1U<<le32_to_cpu(sb->log_blocksize);
+ blk_size = 1U << le32_to_cpu(sb->log_blocksize);
/*
* Find valid cp by reading both packs and finding most recent one.
@@ -489,7 +489,8 @@
u64 block;
unsigned int used, found, started = 0, i;
- for (block=startblock; block<info->total_blocks; block++) {
+ block = startblock;
+ while (block < info->total_blocks) {
/* TODO: Save only relevant portions of metadata */
if (block < info->main_blkaddr) {
if (func(block, data)) {
@@ -512,17 +513,24 @@
/* get SIT entry from SIT section */
if (!found) {
- sit_block_num_cur = segnum/SIT_ENTRY_PER_BLOCK;
+ sit_block_num_cur = segnum / SIT_ENTRY_PER_BLOCK;
sit_entry = &info->sit_blocks[sit_block_num_cur].entries[segnum % SIT_ENTRY_PER_BLOCK];
}
block_offset = (block - info->main_blkaddr) % info->blocks_per_segment;
+ if (block_offset == 0 && GET_SIT_VBLOCKS(sit_entry) == 0) {
+ block += info->blocks_per_segment;
+ continue;
+ }
+
used = f2fs_test_bit(block_offset, (char *)sit_entry->valid_map);
if(used)
if (func(block, data))
return -1;
}
+
+ block++;
}
return 0;
}
@@ -548,7 +556,7 @@
{
struct privdata *d = data;
char *buf;
- int pdone = (pos*100)/d->info->total_blocks;
+ int pdone = (pos * 100) / d->info->total_blocks;
if (pdone > d->done) {
d->done = pdone;
printf("Done with %d percent\n", d->done);
@@ -562,7 +570,7 @@
}
off64_t ret;
- ret = lseek64(d->outfd, pos*F2FS_BLKSIZE, SEEK_SET);
+ ret = lseek64(d->outfd, pos * F2FS_BLKSIZE, SEEK_SET);
if (ret < 0) {
SLOGE("failed to seek\n");
return ret;
diff --git a/simpleperf/workload.cpp b/simpleperf/workload.cpp
index 9138afa..6f07cda 100644
--- a/simpleperf/workload.cpp
+++ b/simpleperf/workload.cpp
@@ -108,10 +108,11 @@
TEMP_FAILURE_RETRY(write(exec_child_fd, &exec_child_failed, 1));
close(exec_child_fd);
errno = saved_errno;
- PLOG(FATAL) << "execvp(" << argv[0] << ") failed";
+ PLOG(ERROR) << "execvp(" << argv[0] << ") failed";
} else {
- PLOG(FATAL) << "child process failed to receive start_signal, nread = " << nread;
+ PLOG(DEBUG) << "child process failed to receive start_signal, nread = " << nread;
}
+ exit(1);
}
bool Workload::Start() {
diff --git a/squashfs_utils/mksquashfsimage.sh b/squashfs_utils/mksquashfsimage.sh
index dab80ba..1bc2b83 100755
--- a/squashfs_utils/mksquashfsimage.sh
+++ b/squashfs_utils/mksquashfsimage.sh
@@ -5,7 +5,7 @@
function usage() {
cat<<EOT
Usage:
-${0##*/} SRC_DIR OUTPUT_FILE [-s] [-m MOUNT_POINT] [-c FILE_CONTEXTS] [-b BLOCK_SIZE]
+${0##*/} SRC_DIR OUTPUT_FILE [-s] [-m MOUNT_POINT] [-d PRODUCT_OUT] [-c FILE_CONTEXTS] [-b BLOCK_SIZE] [-z COMPRESSOR] [-zo COMPRESSOR_OPT]
EOT
}
@@ -36,6 +36,12 @@
shift; shift
fi
+PRODUCT_OUT=
+if [[ "$1" == "-d" ]]; then
+ PRODUCT_OUT=$2
+ shift; shift
+fi
+
FILE_CONTEXTS=
if [[ "$1" == "-c" ]]; then
FILE_CONTEXTS=$2
@@ -48,10 +54,26 @@
shift; shift
fi
+COMPRESSOR="lz4"
+COMPRESSOR_OPT="-Xhc"
+if [[ "$1" == "-z" ]]; then
+ COMPRESSOR=$2
+ COMPRESSOR_OPT=
+ shift; shift
+fi
+
+if [[ "$1" == "-zo" ]]; then
+ COMPRESSOR_OPT=$2
+ shift; shift
+fi
+
OPT=""
if [ -n "$MOUNT_POINT" ]; then
OPT="$OPT -mount-point $MOUNT_POINT"
fi
+if [ -n "$PRODUCT_OUT" ]; then
+ OPT="$OPT -product-out $PRODUCT_OUT"
+fi
if [ -n "$FILE_CONTEXTS" ]; then
OPT="$OPT -context-file $FILE_CONTEXTS"
fi
@@ -59,7 +81,7 @@
OPT="$OPT -b $BLOCK_SIZE"
fi
-MAKE_SQUASHFS_CMD="mksquashfs $SRC_DIR/ $OUTPUT_FILE -no-progress -comp lz4 -Xhc -no-exports -noappend -no-recovery -android-fs-config $OPT"
+MAKE_SQUASHFS_CMD="mksquashfs $SRC_DIR/ $OUTPUT_FILE -no-progress -comp $COMPRESSOR $COMPRESSOR_OPT -no-exports -noappend -no-recovery -android-fs-config $OPT"
echo $MAKE_SQUASHFS_CMD
$MAKE_SQUASHFS_CMD
diff --git a/tests/workloads/atrace-uncompress.py b/tests/workloads/atrace-uncompress.py
new file mode 100644
index 0000000..5efb698
--- /dev/null
+++ b/tests/workloads/atrace-uncompress.py
@@ -0,0 +1,35 @@
+#
+# Uncompress a file generated via atrace -z
+#
+# Usage: python atrace-uncompress.py infile > outfile
+#
+import sys, zlib
+
+def main():
+
+ if len(sys.argv) != 2:
+ print >> sys.stderr, ('Usage: %s inputfile' % sys.argv[0])
+ sys.exit(1)
+
+ infile = open(sys.argv[1], "rb")
+ out = infile.read()
+ parts = out.split('\nTRACE:', 1)
+
+ data = ''.join(parts[1])
+
+ # Remove CR characters
+ if data.startswith('\r\n'):
+ data = data.replace('\r\n', '\n')
+
+ # Skip the initial newline.
+ data = data[1:]
+
+ if not data:
+ print >> sys.stderr, ('No trace data found')
+ sys.exit(1)
+
+ out = zlib.decompress(data)
+ print(out)
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/workloads/capture.sh b/tests/workloads/capture.sh
new file mode 100755
index 0000000..3b2f446
--- /dev/null
+++ b/tests/workloads/capture.sh
Binary files differ
diff --git a/tests/workloads/defs.sh b/tests/workloads/defs.sh
new file mode 100755
index 0000000..a2b7138
--- /dev/null
+++ b/tests/workloads/defs.sh
Binary files differ
diff --git a/tests/workloads/feedly-chrome.sh b/tests/workloads/feedly-chrome.sh
new file mode 100755
index 0000000..4c7002f
--- /dev/null
+++ b/tests/workloads/feedly-chrome.sh
@@ -0,0 +1,111 @@
+# Script to automate the following sequence:
+# - Open Feedly
+# - Open an article
+# - Scroll to bottome
+# - Open the same article in Chrome
+# - Scroll the article
+# - Back to Feely (should still be in memory)
+# - Home screen
+# ---- repeat ----
+#
+# Currently works on volantis only (verticle orientation)
+#
+
+CMDDIR=$(dirname $0 2>/dev/null)
+CMDDIR=${CMDDIR:=.}
+. $CMDDIR/defs.sh
+
+case "$DEVICE" in
+(volantis)
+ echo volantis...
+ feedlyArticle="500 700"
+ feedlyOptions="1480 100"
+ feedlyBrowserSelect="1350 650"
+ feedlyArticleSwipeUp="700 700 700 50 50"
+ feedlyArticleSwipeDown="700 200 700 700 50"
+ chromeSwipe="700 700 700 50 50"
+ ;;
+(shamu|*)
+ echo shamu...
+ feedlyArticle="676 500"
+ feedlyOptions="1327 207"
+ feedlyBrowserSelect="1278 1191"
+ feedlyArticleSwipeUp="700 1847 700 400 50"
+ feedlyArticleSwipeDown="700 400 700 1847 50"
+ chromeSwipe="700 1847 700 400 50"
+ ;;
+(hammerhead|*)
+ echo "Error: No feedly screen geometry information available for $DEVICE"
+ exit 1;;
+esac
+
+feedlySwitchToTime=600
+
+# start feedly, if not installed, error out
+t=$(forceStartActivity feedly)
+checkIsRunning feedly "initial start of feedly"
+echo Feedly start time = ${t}ms
+
+# start chrome, if not installed, error out
+t=$(forceStartActivity chrome)
+checkIsRunning chrome "initial start of chrome"
+echo Chrome start time = ${t}ms
+sleep 1
+
+feedlyStartTimes=0
+
+cur=1
+while [ $cur -le $iterations ]
+do
+ echo =======================================
+ echo Iteration $cur of $iterations
+ echo =======================================
+ startInstramentation
+ t=$(startActivity feedly)
+ if [ $(checkStartTime "$t" $feedlySwitchToTime) != true ]; then
+ handleError Feedly took too long to start: $t v $feedlySwitchToTime: $?
+ # for now, not fatal
+ # exit 1
+ fi
+ sleep 2
+ ((feedlyStartTimes=feedlyStartTimes+t))
+ echo feedly started in ${t}ms
+ checkIsRunning chrome "switch back to feedly"
+ checkIsRunning googlequicksearchbox "switch back to feedly"
+
+ # click on first article
+ doTap $feedlyArticle
+ sleep 2
+
+ # scroll through article
+ doSwipe $feedlyArticleSwipeUp
+ sleep 5
+ checkIsRunning chrome "feedly swipe"
+ checkIsRunning googlequicksearchbox "feedly swipe"
+
+ # scroll back to top
+ doSwipe $feedlyArticleSwipeDown
+ sleep 2
+
+ # switch to chrome
+ # 1. click on menu bar
+ doTap $feedlyOptions
+ sleep 1
+ # 2. click on browser
+ doTap $feedlyBrowserSelect
+ sleep 10
+
+ checkIsRunning feedly "switch to chrome"
+ checkIsRunning googlequicksearchbox "switch to chrome"
+
+ # Now we're back in chrome, swipe to bottom of article
+ doSwipe $chromeSwipe
+ sleep 2
+ checkIsRunning feedly "swiped chrome"
+ stopInstramentation
+ ((cur=cur+1))
+done
+((feedlyAve=feedlyStartTimes/iterations))
+echo Avg start times: feedly: ${feedlyAve}ms
+
+doKeyevent HOME
diff --git a/tests/workloads/recentfling.sh b/tests/workloads/recentfling.sh
new file mode 100755
index 0000000..092c8d9
--- /dev/null
+++ b/tests/workloads/recentfling.sh
@@ -0,0 +1,150 @@
+#
+# Script to start a set of apps, switch to recents and fling it back and forth.
+# For each iteration, Total frames and janky frames are reported.
+#
+# Options are described below.
+#
+# Works for volantis, shamu, and hammerhead. Can be pushed and executed on
+# the device.
+#
+iterations=10
+startapps=1
+capturesystrace=0
+
+function processLocalOption {
+ ret=0
+ case "$1" in
+ (-N) startapps=0;;
+ (-A) unset appList;;
+ (-L) appList=$2; shift; ret=1;;
+ (-T) capturesystrace=1;;
+ (*)
+ echo "$0: unrecognized option: $1"
+ echo; echo "Usage: $0 [options]"
+ echo "-A : use all known applications"
+ echo "-L applist : list of applications"
+ echo " default: $appList"
+ echo "-N : no app startups, just fling"
+ echo "-g : generate activity strings"
+ echo "-i iterations"
+ echo "-T : capture systrace on each iteration"
+ exit 1;;
+ esac
+ return $ret
+}
+
+CMDDIR=$(dirname $0 2>/dev/null)
+CMDDIR=${CMDDIR:=.}
+. $CMDDIR/defs.sh
+
+case $DEVICE in
+(shamu|hammerhead)
+ flingtime=300
+ downCount=2
+ upCount=6
+ UP="70 400 70 100 $flingtime"
+ DOWN="70 100 70 400 $flingtime";;
+(bullhead)
+ flingtime=200
+ downCount=5
+ upCount=5
+ UP="500 1200 500 550 $flingtime"
+ DOWN="500 550 500 1200 $flingtime";;
+(volantis)
+ flingtime=400
+ downCount=5
+ upCount=6
+ UP="70 400 70 70 $flingtime"
+ DOWN="70 70 70 400 $flingtime";;
+(*)
+ echo "Error: No display information available for $DEVICE"
+ exit 1;;
+esac
+
+doKeyevent HOME
+if [ $startapps -gt 0 ]; then
+
+ # start a bunch of apps
+ for app in $appList
+ do
+ echo Starting $app ...
+ t=$(startActivity $app)
+ done
+fi
+
+function swipe {
+ count=0
+ while [ $count -lt $2 ]
+ do
+ doSwipe $1
+ ((count=count+1))
+ done
+}
+
+cur=1
+frameSum=0
+jankSum=0
+latency90Sum=0
+latency95Sum=0
+latency99Sum=0
+
+echo Fling recents...
+doKeyevent HOME
+sleep 0.5
+resetJankyFrames
+
+while [ $cur -le $iterations ]
+do
+ if [ $capturesystrace -gt 0 ]; then
+ ${ADB}atrace --async_start -z -c -b 16000 freq gfx view idle sched
+ fi
+ doKeyevent APP_SWITCH
+ sleep 0.5
+ swipe "$DOWN" $downCount
+ sleep 1
+ swipe "$UP" $upCount
+ sleep 1
+ swipe "$DOWN" $downCount
+ sleep 1
+ swipe "$UP" $upCount
+ sleep 1
+ if [ $capturesystrace -gt 0 ]; then
+ ${ADB}atrace --async_dump -z -c -b 16000 freq gfx view idle sched > trace.${cur}.out
+ fi
+ doKeyevent HOME
+ sleep 0.5
+
+ set -- $(getJankyFrames)
+ totalDiff=$1
+ jankyDiff=$2
+ latency90=$3
+ latency95=$4
+ latency99=$5
+ if [ ${totalDiff:=0} -eq 0 ]; then
+ echo Error: could not read frame info with \"dumpsys gfxinfo\"
+ exit 1
+ fi
+
+ ((frameSum=frameSum+totalDiff))
+ ((jankSum=jankSum+jankyDiff))
+ ((latency90Sum=latency90Sum+latency90))
+ ((latency95Sum=latency95Sum+latency95))
+ ((latency99Sum=latency99Sum+latency99))
+ if [ "$totalDiff" -eq 0 ]; then
+ echo Error: no frames detected. Is the display off?
+ exit 1
+ fi
+ ((jankPct=jankyDiff*100/totalDiff))
+ resetJankyFrames
+
+ echo Frames: $totalDiff latency: $latency90/$latency95/$latency99 Janks: $jankyDiff\(${jankPct}%\)
+ ((cur=cur+1))
+done
+doKeyevent HOME
+((aveJankPct=jankSum*100/frameSum))
+((aveJanks=jankSum/iterations))
+((aveFrames=frameSum/iterations))
+((aveLatency90=latency90Sum/iterations))
+((aveLatency95=latency95Sum/iterations))
+((aveLatency99=latency99Sum/iterations))
+echo AVE: Frames: $aveFrames latency: $aveLatency90/$aveLatency95/$aveLatency99 Janks: $aveJanks\(${aveJankPct}%\)
diff --git a/tests/workloads/systemapps.sh b/tests/workloads/systemapps.sh
new file mode 100755
index 0000000..a263e7d
--- /dev/null
+++ b/tests/workloads/systemapps.sh
@@ -0,0 +1,264 @@
+# Script to start a set of apps in order and then in each iteration
+# switch the focus to each one. For each iteration, the time to start
+# the app is reported as measured using atrace events and via am ThisTime.
+# The output also reports if applications are restarted (eg, killed by
+# LMK since previous iteration) or if there were any direct reclaim
+# events.
+#
+# Variation: the "-T" option skips all of the atrace instramentation and
+# attempts to start the apps as quickly as possible.
+#
+# Example 1: start all default apps. 2 iterations
+#
+# ./systemapps.sh -i 2
+#
+# Example 2: just start chrome, feedly, and the home screen in a loop
+#
+# ./systemapps.sh -L "chrome feedly home" -i 5
+#
+# Example 3: just start the default apps as quickly as possible
+#
+# ./systemapps.sh -T
+#
+# Other options are described below.
+#
+iterations=1
+tracecategories="gfx view am input memreclaim"
+totaltimetest=0
+forcecoldstart=0
+waitTime=3.0
+
+appList="gmail hangouts chrome youtube play home"
+
+function processLocalOption {
+ ret=0
+ case "$1" in
+ (-A) unset appList;;
+ (-F) forcecoldstart=1;;
+ (-L) appList=$2; shift; ret=1;;
+ (-T) totaltimetest=1;;
+ (-W) waitTime=$2; shift; ret=1;;
+ (*)
+ echo "$0: unrecognized option: $1"
+ echo; echo "Usage: $0 [options]"
+ echo "-A : use all known applications"
+ echo "-F : force cold-start for all apps"
+ echo "-L applist : list of applications"
+ echo " default: $appList"
+ echo "-T : total time to start all apps"
+ echo "-W : time to wait between apps"
+ echo "-g : generate activity strings"
+ echo "-i iterations"
+ echo "-n : keep trace files"
+ echo "-o output file"
+ echo "-s : stop on error"
+ echo "-t trace categories"
+ exit 1;;
+ esac
+ return $ret
+}
+
+CMDDIR=$(dirname $0 2>/dev/null)
+CMDDIR=${CMDDIR:=.}
+. $CMDDIR/defs.sh
+
+tmpTraceOutBase=./tmptrace
+
+if [ $user != "root" -a $totaltimetest -eq 0 ]; then
+ handleError Must be root on device
+ exit 1
+fi
+doKeyevent HOME
+
+function computeStats {
+ label=$1
+ t=$2
+ restart=$3
+ reclaim=$4
+ frames=$5
+ janks=$6
+ l90=$7
+ l95=$8
+ l99=$9
+ curMax=$(eval "echo \$${label}max")
+ curMax=${curMax:=0}
+ curMin=$(eval "echo \$${label}min")
+ curMin=${curMin:=100000}
+ curSum=$(eval "echo \$${label}sum")
+ curSum=${curSum:=0}
+ curRestart=$(eval "echo \$${label}restart")
+ curRestart=${curRestart:=0}
+ curReclaim=$(eval "echo \$${label}reclaim")
+ curReclaim=${curReclaim:=0}
+ curFrames=$(eval "echo \$${label}frames")
+ curFrames=${curFrames:=0}
+ curJanks=$(eval "echo \$${label}janks")
+ curJanks=${curJanks:=0}
+ cur90=$(eval "echo \$${label}90")
+ cur90=${cur90:=0}
+ cur95=$(eval "echo \$${label}95")
+ cur95=${cur95:=0}
+ cur99=$(eval "echo \$${label}99")
+ cur99=${cur99:=0}
+ if [ $curMax -lt $t ]; then
+ eval "${label}max=$t"
+ fi
+ if [ $curMin -gt $t ]; then
+ eval "${label}min=$t"
+ fi
+ ((curSum=curSum+t))
+ eval "${label}sum=$curSum"
+
+ ((curRestart=curRestart+${restart:=0}))
+ eval "${label}restart=$curRestart"
+ ((curReclaim=curReclaim+${reclaim:=0}))
+ eval "${label}reclaim=$curReclaim"
+ ((curFrames=curFrames+${frames:=0}))
+ eval "${label}frames=$curFrames"
+ ((curJanks=curJanks+${janks:=0}))
+ eval "${label}janks=$curJanks"
+ ((cur90=cur90+${l90:=0}))
+ eval "${label}90=$cur90"
+ ((cur95=cur95+${l95:=0}))
+ eval "${label}95=$cur95"
+ ((cur99=cur99+${l99:=0}))
+ eval "${label}99=$cur99"
+}
+function getStats {
+ label=$1
+ echo $(eval "echo \$${label}max") $(eval "echo \$${label}min") $(eval "echo \$${label}sum") \
+ $(eval "echo \$${label}restart") $(eval "echo \$${label}reclaim") \
+ $(eval "echo \$${label}frames") $(eval "echo \$${label}janks") \
+ $(eval "echo \$${label}90") $(eval "echo \$${label}95") $(eval "echo \$${label}99")
+}
+
+cur=1
+totaltime=0
+startTimestamp=$(date +"%s %N")
+
+while [ $cur -le $iterations ]
+do
+ if [ $iterations -gt 1 ]; then
+ echo =========================================
+ echo Iteration $cur of $iterations
+ echo =========================================
+ fi
+ if [ $iterations -gt 1 -o $cur -eq 1 ]; then
+ if [ $totaltimetest -eq 0 ]; then
+ printf "%-6s %7s(ms) %6s(ms) %s %s %s %s\n" App Time AmTime Restart DirReclaim Jank Latency
+ fi
+ fi
+
+ appnum=-1
+ for app in $appList
+ do
+ vout Starting $app...
+ ((appnum=appnum+1))
+ loopTimestamp=$(date +"%s %N")
+ resetJankyFrames
+ resetJankyFrames $(getPackageName $app)
+ if [ $totaltimetest -eq 0 ]; then
+ tmpTraceOut="$tmpTraceOutBase-$app.out"
+ >$tmpTraceOut
+ startInstramentation
+ else
+ if [ $appnum -eq 0 ]; then
+ printf "%-8s %5s(ms) %3s(ms) %s %s\n" App Start Iter Jank Latency
+ fi
+ fi
+ if [ $forcecoldstart -eq 0 ]; then
+ t=$(startActivity $app)
+ else
+ t=$(forceStartActivity $app)
+ fi
+
+ # let app finish drawing before checking janks
+ sleep $waitTime
+ set -- $(getJankyFrames $(getPackageName $app))
+ frames=$1
+ janks=$2
+ l90=$3
+ l95=$4
+ l99=$5
+ set -- $(getJankyFrames)
+ systemFrames=$1
+ systemJanks=$2
+ s90=$3
+ s95=$4
+ s99=$5
+ ((frames=frames+systemFrames))
+ ((janks=janks+systemJanks))
+ ((l90=l90+s90))
+ ((l95=l95+s95))
+ ((l99=l99+s99))
+
+ loopEndTimestamp=$(date +"%s %N")
+ diffTime=$(computeTimeDiff $loopTimestamp $loopEndTimestamp)
+
+ if [ $frames -eq 0 ]; then
+ janks=0
+ jankPct=0
+ else
+ ((jankPct=100*janks/frames))
+ fi
+ if [ $totaltimetest -gt 0 ]; then
+ # Note: using %f since %d doesn't work correctly
+ # when running on lollipop
+ printf "%-10s %5.0f %5.0f %4.0f(%2.0f%%) %2.0f/%2.0f/%2.0f\n" $app $t $diffTime $janks $jankPct $l90 $l95 $l99
+ ((totaltime=totaltime+t))
+ continue
+ else
+ stopAndDumpInstramentation $tmpTraceOut
+ actName=$(getActivityName $app)
+ pkgName=$(getPackageName $app)
+ stime=$(getStartTime $actName $tmpTraceOut)
+ relaunch=$?
+ etime=$(getEndTime $pkgName $tmpTraceOut)
+ ((tdiff=$etime-$stime))
+ if [ $etime -eq 0 -o $stime -eq 0 ]; then
+ handleError $app : could not compute start time stime=$stime etime=$etime
+ # use AmTime so statistics make sense
+ tdiff=$t
+ fi
+ checkForDirectReclaim $actName $tmpTraceOut
+ directReclaim=$?
+
+ printf "%-12s %5d %5d %5d %5d %5d(%d%%) %d/%d/%d\n" "$app" "$tdiff" "$t" "$relaunch" "$directReclaim" "$janks" "$jankPct" $l90 $l95 $l99
+ computeStats "$app" "$tdiff" "$relaunch" "$directReclaim" "$frames" "$janks" $l90 $l95 $l99
+
+ if [ $savetmpfiles -eq 0 ]; then
+ rm -f $tmpTraceOut
+ fi
+ fi
+ done
+ ((cur=cur+1))
+done
+endTimestamp=$(date +"%s %N")
+diffTime=$(computeTimeDiff $startTimestamp $endTimestamp)
+if [ $totaltimetest -gt 0 ]; then
+ printf "%-10s %5.0f %5.0f\n" TOTAL $totaltime $diffTime
+fi
+
+if [ $iterations -gt 1 -a $totaltimetest -eq 0 ]; then
+ echo
+ echo =========================================
+ printf "Stats after $iterations iterations:\n"
+ echo =========================================
+ printf "%-6s %7s(ms) %6s(ms) %6s(ms) %s %s %s %s\n" App Max Ave Min Restart DirReclaim Jank Latency
+ for app in $appList
+ do
+ set -- $(getStats $app)
+ sum=$3
+ ((ave=sum/iterations))
+ frames=$6
+ janks=$7
+ l90=$8
+ l95=$9
+ l99=${10}
+ ((ave90=l90/iterations))
+ ((ave95=l95/iterations))
+ ((ave99=l99/iterations))
+ ((jankPct=100*janks/frames))
+ printf "%-12s %5d %5d %5d %5d %5d %5d(%d%%) %d/%d/%d\n" $app $1 $ave $2 $4 $5 $janks $jankPct $ave90 $ave95 $ave99
+ done
+fi
diff --git a/verity/Android.mk b/verity/Android.mk
index bbe74bb..586ca58 100644
--- a/verity/Android.mk
+++ b/verity/Android.mk
@@ -1,5 +1,6 @@
LOCAL_PATH:= $(call my-dir)
+ifeq ($(HOST_OS),linux)
include $(CLEAR_VARS)
LOCAL_MODULE := verify_boot_signature
LOCAL_SRC_FILES := verify_boot_signature.c
@@ -8,6 +9,7 @@
LOCAL_SHARED_LIBRARIES := libcrypto-host
LOCAL_C_INCLUDES += external/openssl/include system/extras/ext4_utils system/core/mkbootimg
include $(BUILD_HOST_EXECUTABLE)
+endif
include $(CLEAR_VARS)
LOCAL_MODULE := generate_verity_key
diff --git a/verity/Utils.java b/verity/Utils.java
index 3576e3b..937c206 100644
--- a/verity/Utils.java
+++ b/verity/Utils.java
@@ -35,6 +35,8 @@
import java.security.cert.Certificate;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
+import java.security.spec.ECPublicKeySpec;
+import java.security.spec.ECPrivateKeySpec;
import java.security.spec.X509EncodedKeySpec;
import java.security.spec.PKCS8EncodedKeySpec;
import java.security.spec.InvalidKeySpecException;
@@ -52,6 +54,7 @@
import org.bouncycastle.asn1.pkcs.PrivateKeyInfo;
import org.bouncycastle.asn1.pkcs.PKCSObjectIdentifiers;
import org.bouncycastle.asn1.x509.AlgorithmIdentifier;
+import org.bouncycastle.asn1.x9.X9ObjectIdentifiers;
import org.bouncycastle.util.encoders.Base64;
public class Utils {
@@ -63,10 +66,16 @@
ID_TO_ALG = new HashMap<String, String>();
ALG_TO_ID = new HashMap<String, String>();
+ ID_TO_ALG.put(X9ObjectIdentifiers.ecdsa_with_SHA256.getId(), "SHA256withECDSA");
+ ID_TO_ALG.put(X9ObjectIdentifiers.ecdsa_with_SHA384.getId(), "SHA384withECDSA");
+ ID_TO_ALG.put(X9ObjectIdentifiers.ecdsa_with_SHA512.getId(), "SHA512withECDSA");
ID_TO_ALG.put(PKCSObjectIdentifiers.sha1WithRSAEncryption.getId(), "SHA1withRSA");
ID_TO_ALG.put(PKCSObjectIdentifiers.sha256WithRSAEncryption.getId(), "SHA256withRSA");
ID_TO_ALG.put(PKCSObjectIdentifiers.sha512WithRSAEncryption.getId(), "SHA512withRSA");
+ ALG_TO_ID.put("SHA256withECDSA", X9ObjectIdentifiers.ecdsa_with_SHA256.getId());
+ ALG_TO_ID.put("SHA384withECDSA", X9ObjectIdentifiers.ecdsa_with_SHA384.getId());
+ ALG_TO_ID.put("SHA512withECDSA", X9ObjectIdentifiers.ecdsa_with_SHA512.getId());
ALG_TO_ID.put("SHA1withRSA", PKCSObjectIdentifiers.sha1WithRSAEncryption.getId());
ALG_TO_ID.put("SHA256withRSA", PKCSObjectIdentifiers.sha256WithRSAEncryption.getId());
ALG_TO_ID.put("SHA512withRSA", PKCSObjectIdentifiers.sha512WithRSAEncryption.getId());
@@ -208,15 +217,36 @@
}
}
- private static String getSignatureAlgorithm(Key key) {
- if ("RSA".equals(key.getAlgorithm())) {
+ private static String getSignatureAlgorithm(Key key) throws Exception {
+ if ("EC".equals(key.getAlgorithm())) {
+ int curveSize;
+ KeyFactory factory = KeyFactory.getInstance("EC");
+
+ if (key instanceof PublicKey) {
+ ECPublicKeySpec spec = factory.getKeySpec(key, ECPublicKeySpec.class);
+ curveSize = spec.getParams().getCurve().getField().getFieldSize();
+ } else if (key instanceof PrivateKey) {
+ ECPrivateKeySpec spec = factory.getKeySpec(key, ECPrivateKeySpec.class);
+ curveSize = spec.getParams().getCurve().getField().getFieldSize();
+ } else {
+ throw new InvalidKeySpecException();
+ }
+
+ if (curveSize <= 256) {
+ return "SHA256withECDSA";
+ } else if (curveSize <= 384) {
+ return "SHA384withECDSA";
+ } else {
+ return "SHA512withECDSA";
+ }
+ } else if ("RSA".equals(key.getAlgorithm())) {
return "SHA256withRSA";
} else {
throw new IllegalArgumentException("Unsupported key type " + key.getAlgorithm());
}
}
- static AlgorithmIdentifier getSignatureAlgorithmIdentifier(Key key) {
+ static AlgorithmIdentifier getSignatureAlgorithmIdentifier(Key key) throws Exception {
String id = ALG_TO_ID.get(getSignatureAlgorithm(key));
if (id == null) {
diff --git a/verity/VerityVerifier.java b/verity/VerityVerifier.java
index 5c9d7d2..6b3f49e 100644
--- a/verity/VerityVerifier.java
+++ b/verity/VerityVerifier.java
@@ -20,31 +20,83 @@
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
+import java.lang.Math;
import java.lang.Process;
import java.lang.Runtime;
+import java.math.BigInteger;
+import java.security.KeyFactory;
+import java.security.MessageDigest;
import java.security.PublicKey;
-import java.security.PrivateKey;
import java.security.Security;
import java.security.cert.X509Certificate;
+import java.security.interfaces.RSAPublicKey;
+import java.security.spec.RSAPublicKeySpec;
+import java.util.ArrayList;
+import java.util.Arrays;
+import javax.xml.bind.DatatypeConverter;
import org.bouncycastle.jce.provider.BouncyCastleProvider;
public class VerityVerifier {
+ private ArrayList<Integer> hashBlocksLevel;
+ private byte[] hashTree;
+ private byte[] rootHash;
+ private byte[] salt;
+ private byte[] signature;
+ private byte[] table;
+ private File image;
+ private int blockSize;
+ private int hashBlockSize;
+ private int hashOffsetForData;
+ private int hashSize;
+ private int hashTreeSize;
+ private long hashStart;
+ private long imageSize;
+ private MessageDigest digest;
+
private static final int EXT4_SB_MAGIC = 0xEF53;
private static final int EXT4_SB_OFFSET = 0x400;
private static final int EXT4_SB_OFFSET_MAGIC = EXT4_SB_OFFSET + 0x38;
private static final int EXT4_SB_OFFSET_LOG_BLOCK_SIZE = EXT4_SB_OFFSET + 0x18;
private static final int EXT4_SB_OFFSET_BLOCKS_COUNT_LO = EXT4_SB_OFFSET + 0x4;
private static final int EXT4_SB_OFFSET_BLOCKS_COUNT_HI = EXT4_SB_OFFSET + 0x150;
+ private static final int MINCRYPT_OFFSET_MODULUS = 0x8;
+ private static final int MINCRYPT_OFFSET_EXPONENT = 0x208;
+ private static final int MINCRYPT_MODULUS_SIZE = 0x100;
+ private static final int MINCRYPT_EXPONENT_SIZE = 0x4;
+ private static final int VERITY_FIELDS = 10;
private static final int VERITY_MAGIC = 0xB001B001;
private static final int VERITY_SIGNATURE_SIZE = 256;
private static final int VERITY_VERSION = 0;
+ public VerityVerifier(String fname) throws Exception {
+ digest = MessageDigest.getInstance("SHA-256");
+ hashSize = digest.getDigestLength();
+ hashBlocksLevel = new ArrayList<Integer>();
+ hashTreeSize = -1;
+ openImage(fname);
+ readVerityData();
+ }
+
+ /**
+ * Reverses the order of bytes in a byte array
+ * @param value Byte array to reverse
+ */
+ private static byte[] reverse(byte[] value) {
+ for (int i = 0; i < value.length / 2; i++) {
+ byte tmp = value[i];
+ value[i] = value[value.length - i - 1];
+ value[value.length - i - 1] = tmp;
+ }
+
+ return value;
+ }
+
/**
* Converts a 4-byte little endian value to a Java integer
* @param value Little endian integer to convert
*/
- public static int fromle(int value) {
+ private static int fromle(int value) {
byte[] bytes = ByteBuffer.allocate(4).putInt(value).array();
return ByteBuffer.wrap(bytes).order(ByteOrder.LITTLE_ENDIAN).getInt();
}
@@ -53,28 +105,51 @@
* Converts a 2-byte little endian value to Java a integer
* @param value Little endian short to convert
*/
- public static int fromle(short value) {
+ private static int fromle(short value) {
return fromle(value << 16);
}
/**
+ * Reads a 2048-bit RSA public key saved in mincrypt format, and returns
+ * a Java PublicKey for it.
+ * @param fname Name of the mincrypt public key file
+ */
+ private static PublicKey getMincryptPublicKey(String fname) throws Exception {
+ try (RandomAccessFile key = new RandomAccessFile(fname, "r")) {
+ byte[] binaryMod = new byte[MINCRYPT_MODULUS_SIZE];
+ byte[] binaryExp = new byte[MINCRYPT_EXPONENT_SIZE];
+
+ key.seek(MINCRYPT_OFFSET_MODULUS);
+ key.readFully(binaryMod);
+
+ key.seek(MINCRYPT_OFFSET_EXPONENT);
+ key.readFully(binaryExp);
+
+ BigInteger modulus = new BigInteger(1, reverse(binaryMod));
+ BigInteger exponent = new BigInteger(1, reverse(binaryExp));
+
+ RSAPublicKeySpec spec = new RSAPublicKeySpec(modulus, exponent);
+ KeyFactory factory = KeyFactory.getInstance("RSA");
+ return factory.generatePublic(spec);
+ }
+ }
+
+ /**
* Unsparses a sparse image into a temporary file and returns a
* handle to the file
* @param fname Path to a sparse image file
*/
- public static RandomAccessFile openImage(String fname) throws Exception {
- File tmp = File.createTempFile("system", ".raw");
- tmp.deleteOnExit();
+ private void openImage(String fname) throws Exception {
+ image = File.createTempFile("system", ".raw");
+ image.deleteOnExit();
Process p = Runtime.getRuntime().exec("simg2img " + fname +
- " " + tmp.getAbsoluteFile());
+ " " + image.getAbsoluteFile());
p.waitFor();
if (p.exitValue() != 0) {
throw new IllegalArgumentException("Invalid image: failed to unsparse");
}
-
- return new RandomAccessFile(tmp, "r");
}
/**
@@ -106,56 +181,234 @@
}
/**
- * Reads and validates verity metadata, and check the signature against the
+ * Calculates the size of the verity hash tree based on the image size
+ */
+ private int calculateHashTreeSize() {
+ if (hashTreeSize > 0) {
+ return hashTreeSize;
+ }
+
+ int totalBlocks = 0;
+ int hashes = (int) (imageSize / blockSize);
+
+ hashBlocksLevel.clear();
+
+ do {
+ hashBlocksLevel.add(0, hashes);
+
+ int hashBlocks =
+ (int) Math.ceil((double) hashes * hashSize / hashBlockSize);
+
+ totalBlocks += hashBlocks;
+
+ hashes = hashBlocks;
+ } while (hashes > 1);
+
+ hashTreeSize = totalBlocks * hashBlockSize;
+ return hashTreeSize;
+ }
+
+ /**
+ * Parses the verity mapping table and reads the hash tree from
+ * the image file
+ * @param img Handle to the image file
+ * @param table Verity mapping table
+ */
+ private void readHashTree(RandomAccessFile img, byte[] table)
+ throws Exception {
+ String tableStr = new String(table);
+ String[] fields = tableStr.split(" ");
+
+ if (fields.length != VERITY_FIELDS) {
+ throw new IllegalArgumentException("Invalid image: unexpected number of fields "
+ + "in verity mapping table (" + fields.length + ")");
+ }
+
+ String hashVersion = fields[0];
+
+ if (!"1".equals(hashVersion)) {
+ throw new IllegalArgumentException("Invalid image: unsupported hash format");
+ }
+
+ String alg = fields[7];
+
+ if (!"sha256".equals(alg)) {
+ throw new IllegalArgumentException("Invalid image: unsupported hash algorithm");
+ }
+
+ blockSize = Integer.parseInt(fields[3]);
+ hashBlockSize = Integer.parseInt(fields[4]);
+
+ int blocks = Integer.parseInt(fields[5]);
+ int start = Integer.parseInt(fields[6]);
+
+ if (imageSize != (long) blocks * blockSize) {
+ throw new IllegalArgumentException("Invalid image: size mismatch in mapping "
+ + "table");
+ }
+
+ rootHash = DatatypeConverter.parseHexBinary(fields[8]);
+ salt = DatatypeConverter.parseHexBinary(fields[9]);
+
+ hashStart = (long) start * blockSize;
+ img.seek(hashStart);
+
+ int treeSize = calculateHashTreeSize();
+
+ hashTree = new byte[treeSize];
+ img.readFully(hashTree);
+ }
+
+ /**
+ * Reads verity data from the image file
+ */
+ private void readVerityData() throws Exception {
+ try (RandomAccessFile img = new RandomAccessFile(image, "r")) {
+ imageSize = getMetadataPosition(img);
+ img.seek(imageSize);
+
+ int magic = fromle(img.readInt());
+
+ if (magic != VERITY_MAGIC) {
+ throw new IllegalArgumentException("Invalid image: verity metadata not found");
+ }
+
+ int version = fromle(img.readInt());
+
+ if (version != VERITY_VERSION) {
+ throw new IllegalArgumentException("Invalid image: unknown metadata version");
+ }
+
+ signature = new byte[VERITY_SIGNATURE_SIZE];
+ img.readFully(signature);
+
+ int tableSize = fromle(img.readInt());
+
+ table = new byte[tableSize];
+ img.readFully(table);
+
+ readHashTree(img, table);
+ }
+ }
+
+ /**
+ * Reads and validates verity metadata, and checks the signature against the
* given public key
- * @param img File handle to the image file
* @param key Public key to use for signature verification
*/
- public static boolean verifyMetaData(RandomAccessFile img, PublicKey key)
+ public boolean verifyMetaData(PublicKey key)
throws Exception {
- img.seek(getMetadataPosition(img));
- int magic = fromle(img.readInt());
-
- if (magic != VERITY_MAGIC) {
- throw new IllegalArgumentException("Invalid image: verity metadata not found");
- }
-
- int version = fromle(img.readInt());
-
- if (version != VERITY_VERSION) {
- throw new IllegalArgumentException("Invalid image: unknown metadata version");
- }
-
- byte[] signature = new byte[VERITY_SIGNATURE_SIZE];
- img.readFully(signature);
-
- int tableSize = fromle(img.readInt());
-
- byte[] table = new byte[tableSize];
- img.readFully(table);
-
- return Utils.verify(key, table, signature,
+ return Utils.verify(key, table, signature,
Utils.getSignatureAlgorithmIdentifier(key));
}
+ /**
+ * Hashes a block of data using a salt and checks of the results are expected
+ * @param hash The expected hash value
+ * @param data The data block to check
+ */
+ private boolean checkBlock(byte[] hash, byte[] data) {
+ digest.reset();
+ digest.update(salt);
+ digest.update(data);
+ return Arrays.equals(hash, digest.digest());
+ }
+
+ /**
+ * Verifies the root hash and the first N-1 levels of the hash tree
+ */
+ private boolean verifyHashTree() throws Exception {
+ int hashOffset = 0;
+ int dataOffset = hashBlockSize;
+
+ if (!checkBlock(rootHash, Arrays.copyOfRange(hashTree, 0, hashBlockSize))) {
+ System.err.println("Root hash mismatch");
+ return false;
+ }
+
+ for (int level = 0; level < hashBlocksLevel.size() - 1; level++) {
+ int blocks = hashBlocksLevel.get(level);
+
+ for (int i = 0; i < blocks; i++) {
+ byte[] hashBlock = Arrays.copyOfRange(hashTree,
+ hashOffset + i * hashSize,
+ hashOffset + i * hashSize + hashSize);
+
+ byte[] dataBlock = Arrays.copyOfRange(hashTree,
+ dataOffset + i * hashBlockSize,
+ dataOffset + i * hashBlockSize + hashBlockSize);
+
+ if (!checkBlock(hashBlock, dataBlock)) {
+ System.err.printf("Hash mismatch at tree level %d, block %d\n", level, i);
+ return false;
+ }
+ }
+
+ hashOffset = dataOffset;
+ hashOffsetForData = dataOffset;
+ dataOffset += blocks * hashBlockSize;
+ }
+
+ return true;
+ }
+
+ /**
+ * Validates the image against the hash tree
+ */
+ public boolean verifyData() throws Exception {
+ if (!verifyHashTree()) {
+ return false;
+ }
+
+ try (RandomAccessFile img = new RandomAccessFile(image, "r")) {
+ byte[] dataBlock = new byte[blockSize];
+ int hashOffset = hashOffsetForData;
+
+ for (int i = 0; (long) i * blockSize < imageSize; i++) {
+ byte[] hashBlock = Arrays.copyOfRange(hashTree,
+ hashOffset + i * hashSize,
+ hashOffset + i * hashSize + hashSize);
+
+ img.readFully(dataBlock);
+
+ if (!checkBlock(hashBlock, dataBlock)) {
+ System.err.printf("Hash mismatch at block %d\n", i);
+ return false;
+ }
+ }
+ }
+
+ return true;
+ }
+
+ /**
+ * Verifies the integrity of the image and the verity metadata
+ * @param key Public key to use for signature verification
+ */
+ public boolean verify(PublicKey key) throws Exception {
+ return (verifyMetaData(key) && verifyData());
+ }
+
public static void main(String[] args) throws Exception {
- if (args.length != 2) {
- System.err.println("Usage: VerityVerifier <sparse.img> <certificate.x509.pem>");
+ Security.addProvider(new BouncyCastleProvider());
+ PublicKey key = null;
+
+ if (args.length == 3 && "-mincrypt".equals(args[1])) {
+ key = getMincryptPublicKey(args[2]);
+ } else if (args.length == 2) {
+ X509Certificate cert = Utils.loadPEMCertificate(args[1]);
+ key = cert.getPublicKey();
+ } else {
+ System.err.println("Usage: VerityVerifier <sparse.img> <certificate.x509.pem> | -mincrypt <mincrypt_key>");
System.exit(1);
}
- Security.addProvider(new BouncyCastleProvider());
-
- X509Certificate cert = Utils.loadPEMCertificate(args[1]);
- PublicKey key = cert.getPublicKey();
- RandomAccessFile img = openImage(args[0]);
+ VerityVerifier verifier = new VerityVerifier(args[0]);
try {
- if (verifyMetaData(img, key)) {
+ if (verifier.verify(key)) {
System.err.println("Signature is VALID");
System.exit(0);
- } else {
- System.err.println("Signature is INVALID");
}
} catch (Exception e) {
e.printStackTrace(System.err);