AU: Include the old/new kernel/rootfs size/hash in the update metadata.
This is the server side change.
BUG=7348
TEST=unit test
Change-Id: I956e056826dbb13b0f97509f9992ebcedb48ea42
Review URL: http://codereview.chromium.org/3588015
diff --git a/delta_diff_generator.cc b/delta_diff_generator.cc
index 9a5219d..23c2dd0 100644
--- a/delta_diff_generator.cc
+++ b/delta_diff_generator.cc
@@ -29,6 +29,7 @@
#include "update_engine/filesystem_iterator.h"
#include "update_engine/graph_types.h"
#include "update_engine/graph_utils.h"
+#include "update_engine/omaha_hash_calculator.h"
#include "update_engine/payload_signer.h"
#include "update_engine/subprocess.h"
#include "update_engine/topological_sort.h"
@@ -586,6 +587,40 @@
return true;
}
+bool InitializePartitionInfo(const string& partition, PartitionInfo* info) {
+ const off_t size = utils::FileSize(partition);
+ TEST_AND_RETURN_FALSE(size >= 0);
+ info->set_size(size);
+ OmahaHashCalculator hasher;
+ TEST_AND_RETURN_FALSE(hasher.UpdateFile(partition, -1) == size);
+ TEST_AND_RETURN_FALSE(hasher.Finalize());
+ const vector<char>& hash = hasher.raw_hash();
+ info->set_hash(hash.data(), hash.size());
+ return true;
+}
+
+bool InitializePartitionInfos(const string& old_kernel,
+ const string& new_kernel,
+ const string& old_rootfs,
+ const string& new_rootfs,
+ DeltaArchiveManifest* manifest) {
+ if (!old_kernel.empty()) {
+ TEST_AND_RETURN_FALSE(
+ InitializePartitionInfo(old_kernel,
+ manifest->mutable_old_kernel_info()));
+ }
+ TEST_AND_RETURN_FALSE(
+ InitializePartitionInfo(new_kernel, manifest->mutable_new_kernel_info()));
+ if (!old_rootfs.empty()) {
+ TEST_AND_RETURN_FALSE(
+ InitializePartitionInfo(old_rootfs,
+ manifest->mutable_old_rootfs_info()));
+ }
+ TEST_AND_RETURN_FALSE(
+ InitializePartitionInfo(new_rootfs, manifest->mutable_new_rootfs_info()));
+ return true;
+}
+
namespace {
// Takes a collection (vector or RepeatedPtrField) of Extent and
@@ -679,7 +714,7 @@
graph->resize(graph->size() + 1);
cuts.back().old_src = it->first;
cuts.back().old_dst = it->second;
-
+
EdgeProperties& cut_edge_properties =
(*graph)[it->first].out_edges.find(it->second)->second;
@@ -903,7 +938,7 @@
edge_i != edge_e; ++edge_i) {
ranges.SubtractExtents(edge_i->second.extents);
}
-
+
uint64_t blocks_found = ranges.blocks();
if (blocks_found < blocks_needed) {
if (blocks_found > 0)
@@ -919,12 +954,12 @@
// depend on old_dst.
vector<Extent> real_extents =
ranges.GetExtentsForBlockCount(blocks_needed);
-
+
// Fix the old dest node w/ the real blocks
SubstituteBlocks(&(*graph)[node],
cuts[i].tmp_extents,
real_extents);
-
+
// Fix the new node w/ the real blocks. Since the new node is just a
// copy operation, we can replace all the dest extents w/ the real
// blocks.
@@ -932,7 +967,7 @@
&(*graph)[cuts[i].new_vertex].op;
op->clear_dst_extents();
StoreExtents(real_extents, op->mutable_dst_extents());
-
+
// Add an edge from the real-block supplier to the old dest block.
graph_utils::AddReadBeforeDepExtents(&(*graph)[test_node],
node,
@@ -960,7 +995,7 @@
}
new_op_indexes.push_back(cuts[i].old_dst);
op_indexes->swap(new_op_indexes);
-
+
GenerateReverseTopoOrderMap(*op_indexes, reverse_op_indexes);
}
if (i == e) {
@@ -1045,11 +1080,11 @@
int data_fd,
off_t* data_file_size) {
// Drop all incoming edges, keep all outgoing edges
-
+
// Keep all outgoing edges
Vertex::EdgeMap out_edges = (*graph)[cut.old_dst].out_edges;
graph_utils::DropWriteBeforeDeps(&out_edges);
-
+
TEST_AND_RETURN_FALSE(DeltaReadFile(graph,
cut.old_dst,
NULL,
@@ -1058,7 +1093,7 @@
(*graph)[cut.old_dst].file_name,
data_fd,
data_file_size));
-
+
(*graph)[cut.old_dst].out_edges = out_edges;
// Right now we don't have doubly-linked edges, so we have to scan
@@ -1132,7 +1167,7 @@
std::vector<Vertex::Index>* final_order) {
TEST_AND_RETURN_FALSE(chunk_size > 0);
TEST_AND_RETURN_FALSE((chunk_size % kBlockSize) == 0);
-
+
// Get the sizes early in the function, so we can fail fast if the user
// passed us bad paths.
const off_t image_size = utils::FileSize(new_image);
@@ -1166,17 +1201,17 @@
op = &kernel_ops->back();
}
LOG(INFO) << "have an op";
-
+
vector<char> buf(min(bytes_left, chunk_size));
LOG(INFO) << "buf size: " << buf.size();
ssize_t bytes_read = -1;
-
+
TEST_AND_RETURN_FALSE(utils::PReadAll(
in_fd, &buf[0], buf.size(), offset, &bytes_read));
TEST_AND_RETURN_FALSE(bytes_read == static_cast<ssize_t>(buf.size()));
-
+
vector<char> buf_compressed;
-
+
TEST_AND_RETURN_FALSE(BzipCompress(buf, &buf_compressed));
const bool compress = buf_compressed.size() < buf.size();
const vector<char>& use_buf = compress ? buf_compressed : buf;
@@ -1381,6 +1416,12 @@
kBlockSize);
}
+ TEST_AND_RETURN_FALSE(InitializePartitionInfos(old_kernel_part,
+ new_kernel_part,
+ old_image,
+ new_image,
+ &manifest));
+
// Serialize protobuf
string serialized_manifest;
diff --git a/delta_performer_unittest.cc b/delta_performer_unittest.cc
index 8717e07..5565c88 100755
--- a/delta_performer_unittest.cc
+++ b/delta_performer_unittest.cc
@@ -214,7 +214,7 @@
uint64_t manifest_metadata_size;
- // Check that the null signature blob exists
+ // Check the metadata.
{
LOG(INFO) << "delta size: " << delta.size();
DeltaArchiveManifest manifest;
@@ -242,6 +242,16 @@
&expected_sig_data_length));
EXPECT_EQ(expected_sig_data_length, manifest.signatures_size());
EXPECT_FALSE(signature.data().empty());
+
+ EXPECT_EQ(old_kernel_data.size(), manifest.old_kernel_info().size());
+ EXPECT_EQ(new_kernel_data.size(), manifest.new_kernel_info().size());
+ EXPECT_EQ(utils::FileSize(a_img), manifest.old_rootfs_info().size());
+ EXPECT_EQ(utils::FileSize(b_img), manifest.new_rootfs_info().size());
+
+ EXPECT_FALSE(manifest.old_kernel_info().hash().empty());
+ EXPECT_FALSE(manifest.new_kernel_info().hash().empty());
+ EXPECT_FALSE(manifest.old_rootfs_info().hash().empty());
+ EXPECT_FALSE(manifest.new_rootfs_info().hash().empty());
}
PrefsMock prefs;
diff --git a/omaha_hash_calculator.cc b/omaha_hash_calculator.cc
index bc71476..fdc70f0 100644
--- a/omaha_hash_calculator.cc
+++ b/omaha_hash_calculator.cc
@@ -4,10 +4,14 @@
#include "update_engine/omaha_hash_calculator.h"
+#include <fcntl.h>
+
+#include <base/eintr_wrapper.h>
+#include <base/logging.h>
#include <openssl/bio.h>
#include <openssl/buffer.h>
#include <openssl/evp.h>
-#include "base/logging.h"
+
#include "update_engine/utils.h"
using std::string;
@@ -31,6 +35,34 @@
return true;
}
+off_t OmahaHashCalculator::UpdateFile(const string& name, off_t length) {
+ int fd = HANDLE_EINTR(open(name.c_str(), O_RDONLY));
+ if (fd < 0) {
+ return -1;
+ }
+
+ const int kBufferSize = 128 * 1024; // 128 KiB
+ vector<char> buffer(kBufferSize);
+ off_t bytes_processed = 0;
+ while (length < 0 || bytes_processed < length) {
+ off_t bytes_to_read = buffer.size();
+ if (length >= 0 && bytes_to_read > length - bytes_processed) {
+ bytes_to_read = length - bytes_processed;
+ }
+ ssize_t rc = HANDLE_EINTR(read(fd, buffer.data(), bytes_to_read));
+ if (rc == 0) { // EOF
+ break;
+ }
+ if (rc < 0 || !Update(buffer.data(), rc)) {
+ bytes_processed = -1;
+ break;
+ }
+ bytes_processed += rc;
+ }
+ HANDLE_EINTR(close(fd));
+ return bytes_processed;
+}
+
// Call Finalize() when all data has been passed in. This mostly just
// calls OpenSSL's SHA256_Final() and then base64 encodes the hash.
bool OmahaHashCalculator::Finalize() {
diff --git a/omaha_hash_calculator.h b/omaha_hash_calculator.h
index 7aa37e4..5daba78 100644
--- a/omaha_hash_calculator.h
+++ b/omaha_hash_calculator.h
@@ -7,9 +7,10 @@
#include <string>
#include <vector>
+
#include <openssl/sha.h>
-#include "base/basictypes.h"
-#include "base/logging.h"
+#include <base/basictypes.h>
+#include <base/logging.h>
// Omaha uses base64 encoded SHA-256 as the hash. This class provides a simple
// wrapper around OpenSSL providing such a formatted hash of data passed in.
@@ -28,6 +29,11 @@
// Returns true on success.
bool Update(const char* data, size_t length);
+  // Updates the hash with up to |length| bytes of data from the file named
+  // |name|. If |length| is negative, reads in and updates with the whole
+  // file. Returns the number of bytes that the hash was updated with, or -1
+  // on error.
+ off_t UpdateFile(const std::string& name, off_t length);
+
// Call Finalize() when all data has been passed in. This method tells
// OpenSSl that no more data will come in and base64 encodes the resulting
// hash.
diff --git a/omaha_hash_calculator_unittest.cc b/omaha_hash_calculator_unittest.cc
index 597b21f..5ba3ac1 100644
--- a/omaha_hash_calculator_unittest.cc
+++ b/omaha_hash_calculator_unittest.cc
@@ -5,6 +5,7 @@
#include <math.h>
#include <unistd.h>
+#include <string>
#include <vector>
#include <glib.h>
@@ -12,7 +13,9 @@
#include "update_engine/libcurl_http_fetcher.h"
#include "update_engine/omaha_hash_calculator.h"
+#include "update_engine/utils.h"
+using std::string;
using std::vector;
namespace chromeos_update_engine {
@@ -54,13 +57,16 @@
TEST(OmahaHashCalculatorTest, ContextTest) {
OmahaHashCalculator calc;
calc.Update("h", 1);
+ string calc_context = calc.GetContext();
+ calc.Finalize();
OmahaHashCalculator calc_next;
- calc_next.SetContext(calc.GetContext());
+ calc_next.SetContext(calc_context);
calc_next.Update("i", 1);
calc_next.Finalize();
- // Generated by running this on a linux shell:
- // $ echo -n hi | openssl dgst -sha256 -binary | openssl base64
- EXPECT_EQ("j0NDRmSPa5bfid2pAcUXaxCm2Dlh3TwayItZstwyeqQ=", calc_next.hash());
+ EXPECT_EQ(kExpectedHash, calc_next.hash());
+ vector<char> raw_hash(kExpectedRawHash,
+ kExpectedRawHash + arraysize(kExpectedRawHash));
+ EXPECT_TRUE(raw_hash == calc_next.raw_hash());
}
TEST(OmahaHashCalculatorTest, BigTest) {
@@ -83,6 +89,37 @@
EXPECT_EQ("NZf8k6SPBkYMvhaX8YgzuMgbkLP1XZ+neM8K5wcSsf8=", calc.hash());
}
+TEST(OmahaHashCalculatorTest, UpdateFileSimpleTest) {
+ string data_path;
+ ASSERT_TRUE(
+ utils::MakeTempFile("/tmp/data.XXXXXX", &data_path, NULL));
+ ScopedPathUnlinker data_path_unlinker(data_path);
+ ASSERT_TRUE(utils::WriteFile(data_path.c_str(), "hi", 2));
+
+ static const int kLengths[] = { -1, 2, 10 };
+ for (size_t i = 0; i < arraysize(kLengths); i++) {
+ OmahaHashCalculator calc;
+ EXPECT_EQ(2, calc.UpdateFile(data_path, kLengths[i]));
+ EXPECT_TRUE(calc.Finalize());
+ EXPECT_EQ(kExpectedHash, calc.hash());
+ vector<char> raw_hash(kExpectedRawHash,
+ kExpectedRawHash + arraysize(kExpectedRawHash));
+ EXPECT_TRUE(raw_hash == calc.raw_hash());
+ }
+
+ OmahaHashCalculator calc;
+ EXPECT_EQ(0, calc.UpdateFile(data_path, 0));
+ EXPECT_EQ(1, calc.UpdateFile(data_path, 1));
+ EXPECT_TRUE(calc.Finalize());
+ // echo -n h | openssl dgst -sha256 -binary | openssl base64
+ EXPECT_EQ("qqlAJmTxpB9A67xSyZk+tmrrNmYClY/fqig7ceZNsSM=", calc.hash());
+}
+
+TEST(OmahaHashCalculatorTest, UpdateFileNonexistentTest) {
+ OmahaHashCalculator calc;
+ EXPECT_EQ(-1, calc.UpdateFile("/some/non-existent/file", -1));
+}
+
TEST(OmahaHashCalculatorTest, AbortTest) {
// Just make sure we don't crash and valgrind doesn't detect memory leaks
{
diff --git a/update_metadata.proto b/update_metadata.proto
index 213e740..40ba24f 100644
--- a/update_metadata.proto
+++ b/update_metadata.proto
@@ -1,4 +1,4 @@
-// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -85,6 +85,11 @@
repeated Signature signatures = 1;
}
+message PartitionInfo {
+  optional uint64 size = 1;  // Partition image size in bytes.
+  optional bytes hash = 2;   // Raw hash of the partition image.
+}
+
message DeltaArchiveManifest {
message InstallOperation {
enum Type {
@@ -126,4 +131,10 @@
// file.
optional uint64 signatures_offset = 4;
optional uint64 signatures_size = 5;
+
+ // Partition data that can be used to validate the update.
+ optional PartitionInfo old_kernel_info = 6;
+ optional PartitionInfo new_kernel_info = 7;
+ optional PartitionInfo old_rootfs_info = 8;
+ optional PartitionInfo new_rootfs_info = 9;
}