AU: MultiHttpFetcher, an HttpFetcher for specific byte ranges
MultiHttpFetcher takes an HttpFetcher class via template parameter,
and a collection of byte ranges. It hits up the URL multiple times,
once per range specified. For each time, it uses a new HttpFetcher of
the type specified, fast-forwards to the offset requested, and
aborts after enough bytes have been downloaded. Any range may
specify a length of -1, which means until the end of the file (as
dictated by the server). Thus, a single range of [0, -1] makes
MultiHttpFetcher a pass-through.
HttpFetcher change: ability to supply an offset.
LibcurlHttpFetcher changes: offset support (from HttpFetcher API),
ability to be terminated in a write-callback.
test_http_fetcher: support for failures in write() on the socket (at
least in the /big url case).
BUG=7391
TEST=unittests
Review URL: http://codereview.chromium.org/3591018
diff --git a/http_fetcher.h b/http_fetcher.h
index f8e510a..f61116a 100644
--- a/http_fetcher.h
+++ b/http_fetcher.h
@@ -42,6 +42,9 @@
post_data_.insert(post_data_.end(), char_data, char_data + size);
}
+ // Downloading should resume from this offset
+ virtual void SetOffset(off_t offset) = 0;
+
// Begins the transfer to the specified URL.
virtual void BeginTransfer(const std::string& url) = 0;
@@ -59,6 +62,11 @@
// Unpause() returns
virtual void Unpause() = 0;
+ // These two functions are overloaded in LibcurlHttpFetcher to speed
+ // testing.
+ virtual void set_idle_seconds(int seconds) {}
+ virtual void set_retry_seconds(int seconds) {}
+
protected:
// The URL we're actively fetching from
std::string url_;
diff --git a/http_fetcher_unittest.cc b/http_fetcher_unittest.cc
index d096267..06cabac 100644
--- a/http_fetcher_unittest.cc
+++ b/http_fetcher_unittest.cc
@@ -5,6 +5,7 @@
#include <unistd.h>
#include <string>
+#include <utility>
#include <vector>
#include "base/logging.h"
@@ -14,15 +15,18 @@
#include "gtest/gtest.h"
#include "update_engine/libcurl_http_fetcher.h"
#include "update_engine/mock_http_fetcher.h"
+#include "update_engine/multi_http_fetcher.h"
+using std::make_pair;
using std::string;
using std::vector;
namespace chromeos_update_engine {
namespace {
-// WARNING, if you update this, you must also update test_http_server.py
+// WARNING, if you update these, you must also update test_http_server.py
const char* const kServerPort = "8080";
+const int kBigSize = 100000;
string LocalServerUrlForPath(const string& path) {
return string("http://127.0.0.1:") + kServerPort + path;
}
@@ -36,6 +40,7 @@
string BigUrl() const = 0;
string SmallUrl() const = 0;
bool IsMock() const = 0;
+ bool IsMulti() const = 0;
};
class NullHttpServer {
@@ -63,6 +68,7 @@
return "unused://unused";
}
bool IsMock() const { return true; }
+ bool IsMulti() const { return false; }
typedef NullHttpServer HttpServer;
void IgnoreServerAborting(HttpServer* server) const {}
};
@@ -101,6 +107,7 @@
}
}
free(argv[0]);
+ LOG(INFO) << "gdb attach now!";
return;
}
~PythonHttpServer() {
@@ -123,7 +130,7 @@
template <>
class HttpFetcherTest<LibcurlHttpFetcher> : public ::testing::Test {
public:
- HttpFetcher* NewLargeFetcher() {
+ virtual HttpFetcher* NewLargeFetcher() {
LibcurlHttpFetcher *ret = new LibcurlHttpFetcher;
// Speed up test execution.
ret->set_idle_seconds(1);
@@ -140,6 +147,7 @@
return LocalServerUrlForPath("/foo");
}
bool IsMock() const { return false; }
+ bool IsMulti() const { return false; }
typedef PythonHttpServer HttpServer;
void IgnoreServerAborting(HttpServer* server) const {
PythonHttpServer *pyserver = reinterpret_cast<PythonHttpServer*>(server);
@@ -147,8 +155,28 @@
}
};
-typedef ::testing::Types<LibcurlHttpFetcher, MockHttpFetcher>
- HttpFetcherTestTypes;
+template <>
+class HttpFetcherTest<MultiHttpFetcher<LibcurlHttpFetcher> >
+ : public HttpFetcherTest<LibcurlHttpFetcher> {
+ public:
+ HttpFetcher* NewLargeFetcher() {
+ MultiHttpFetcher<LibcurlHttpFetcher> *ret =
+ new MultiHttpFetcher<LibcurlHttpFetcher>;
+ MultiHttpFetcher<LibcurlHttpFetcher>::RangesVect
+ ranges(1, make_pair(0, -1));
+ ret->set_ranges(ranges);
+ // Speed up test execution.
+ ret->set_idle_seconds(1);
+ ret->set_retry_seconds(1);
+ return ret;
+ }
+ bool IsMulti() const { return true; }
+};
+
+typedef ::testing::Types<LibcurlHttpFetcher,
+ MockHttpFetcher,
+ MultiHttpFetcher<LibcurlHttpFetcher> >
+HttpFetcherTestTypes;
TYPED_TEST_CASE(HttpFetcherTest, HttpFetcherTestTypes);
namespace {
@@ -333,7 +361,6 @@
g_main_loop_run(loop);
g_source_destroy(timeout_source_);
- EXPECT_EQ(0, fetcher->http_response_code());
}
g_main_loop_unref(loop);
}
@@ -550,4 +577,123 @@
RedirectTest(false, url, this->NewLargeFetcher());
}
+namespace {
+class MultiHttpFetcherTestDelegate : public HttpFetcherDelegate {
+ public:
+ MultiHttpFetcherTestDelegate(int expected_response_code)
+ : expected_response_code_(expected_response_code) {}
+ virtual void ReceivedBytes(HttpFetcher* fetcher,
+ const char* bytes, int length) {
+ data.append(bytes, length);
+ }
+ virtual void TransferComplete(HttpFetcher* fetcher, bool successful) {
+ EXPECT_EQ(expected_response_code_ != 0, successful);
+ if (expected_response_code_ != 0)
+ EXPECT_EQ(expected_response_code_, fetcher->http_response_code());
+ g_main_loop_quit(loop_);
+ }
+ int expected_response_code_;
+ string data;
+ GMainLoop* loop_;
+};
+
+void MultiTest(HttpFetcher* fetcher_in,
+ const string& url,
+ const MultiHttpFetcher<LibcurlHttpFetcher>::RangesVect& ranges,
+ const string& expected_prefix,
+ off_t expected_size,
+ int expected_response_code) {
+ GMainLoop* loop = g_main_loop_new(g_main_context_default(), FALSE);
+ {
+ MultiHttpFetcherTestDelegate delegate(expected_response_code);
+ delegate.loop_ = loop;
+ scoped_ptr<HttpFetcher> fetcher(fetcher_in);
+ MultiHttpFetcher<LibcurlHttpFetcher>* multi_fetcher =
+ dynamic_cast<MultiHttpFetcher<LibcurlHttpFetcher>*>(fetcher.get());
+ ASSERT_TRUE(multi_fetcher);
+ multi_fetcher->set_ranges(ranges);
+ fetcher->set_delegate(&delegate);
+
+ StartTransferArgs start_xfer_args = {fetcher.get(), url};
+
+ g_timeout_add(0, StartTransfer, &start_xfer_args);
+ g_main_loop_run(loop);
+
+ EXPECT_EQ(expected_size, delegate.data.size());
+ EXPECT_EQ(expected_prefix,
+ string(delegate.data.data(), expected_prefix.size()));
+ }
+ g_main_loop_unref(loop);
+}
+} // namespace {}
+
+TYPED_TEST(HttpFetcherTest, MultiHttpFetcherSimpleTest) {
+ if (!this->IsMulti())
+ return;
+ typename TestFixture::HttpServer server;
+ ASSERT_TRUE(server.started_);
+
+ MultiHttpFetcher<LibcurlHttpFetcher>::RangesVect ranges;
+ ranges.push_back(make_pair(0, 25));
+ ranges.push_back(make_pair(99, -1));
+ MultiTest(this->NewLargeFetcher(),
+ this->BigUrl(),
+ ranges,
+ "abcdefghijabcdefghijabcdejabcdefghijabcdef",
+ kBigSize - (99 - 25),
+ 206);
+}
+
+TYPED_TEST(HttpFetcherTest, MultiHttpFetcherLengthLimitTest) {
+ if (!this->IsMulti())
+ return;
+ typename TestFixture::HttpServer server;
+ ASSERT_TRUE(server.started_);
+
+ MultiHttpFetcher<LibcurlHttpFetcher>::RangesVect ranges;
+ ranges.push_back(make_pair(0, 24));
+ MultiTest(this->NewLargeFetcher(),
+ this->BigUrl(),
+ ranges,
+ "abcdefghijabcdefghijabcd",
+ 24,
+ 200);
+}
+
+TYPED_TEST(HttpFetcherTest, MultiHttpFetcherMultiEndTest) {
+ if (!this->IsMulti())
+ return;
+ typename TestFixture::HttpServer server;
+ ASSERT_TRUE(server.started_);
+
+ MultiHttpFetcher<LibcurlHttpFetcher>::RangesVect ranges;
+ ranges.push_back(make_pair(kBigSize - 2, -1));
+ ranges.push_back(make_pair(kBigSize - 3, -1));
+ MultiTest(this->NewLargeFetcher(),
+ this->BigUrl(),
+ ranges,
+ "ijhij",
+ 5,
+ 206);
+}
+
+TYPED_TEST(HttpFetcherTest, MultiHttpFetcherInsufficientTest) {
+ if (!this->IsMulti())
+ return;
+ typename TestFixture::HttpServer server;
+ ASSERT_TRUE(server.started_);
+
+ MultiHttpFetcher<LibcurlHttpFetcher>::RangesVect ranges;
+ ranges.push_back(make_pair(kBigSize - 2, 4));
+ for (int i = 0; i < 2; ++i) {
+ MultiTest(this->NewLargeFetcher(),
+ this->BigUrl(),
+ ranges,
+ "ij",
+ 2,
+ 0);
+ ranges.push_back(make_pair(0, 5));
+ }
+}
+
} // namespace chromeos_update_engine
diff --git a/libcurl_http_fetcher.cc b/libcurl_http_fetcher.cc
index 1dcea9e..c846812 100644
--- a/libcurl_http_fetcher.cc
+++ b/libcurl_http_fetcher.cc
@@ -81,7 +81,6 @@
// Begins the transfer, which must not have already been started.
void LibcurlHttpFetcher::BeginTransfer(const std::string& url) {
transfer_size_ = -1;
- bytes_downloaded_ = 0;
resume_offset_ = 0;
retry_count_ = 0;
http_response_code_ = 0;
@@ -90,7 +89,10 @@
}
void LibcurlHttpFetcher::TerminateTransfer() {
- CleanUp();
+ if (in_write_callback_)
+ terminate_requested_ = true;
+ else
+ CleanUp();
}
void LibcurlHttpFetcher::CurlPerformOnce() {
@@ -102,17 +104,18 @@
// returns, so we do. libcurl promises that curl_multi_perform will not block.
while (CURLM_CALL_MULTI_PERFORM == retcode) {
retcode = curl_multi_perform(curl_multi_handle_, &running_handles);
+ if (terminate_requested_) {
+ CleanUp();
+ return;
+ }
}
if (0 == running_handles) {
- long http_response_code = 0;
- if (curl_easy_getinfo(curl_handle_,
- CURLINFO_RESPONSE_CODE,
- &http_response_code) == CURLE_OK) {
- LOG(INFO) << "HTTP response code: " << http_response_code;
+ GetHttpResponseCode();
+ if (http_response_code_) {
+ LOG(INFO) << "HTTP response code: " << http_response_code_;
} else {
LOG(ERROR) << "Unable to get http response code.";
}
- http_response_code_ = static_cast<int>(http_response_code);
// we're done!
CleanUp();
@@ -135,8 +138,8 @@
} else {
if (delegate_) {
// success is when http_response_code is 2xx
- bool success = (http_response_code >= 200) &&
- (http_response_code < 300);
+ bool success = (http_response_code_ >= 200) &&
+ (http_response_code_ < 300);
delegate_->TransferComplete(this, success);
}
}
@@ -147,6 +150,7 @@
}
size_t LibcurlHttpFetcher::LibcurlWrite(void *ptr, size_t size, size_t nmemb) {
+ GetHttpResponseCode();
{
double transfer_size_double;
CHECK_EQ(curl_easy_getinfo(curl_handle_,
@@ -158,8 +162,10 @@
}
}
bytes_downloaded_ += size * nmemb;
+ in_write_callback_ = true;
if (delegate_)
delegate_->ReceivedBytes(this, reinterpret_cast<char*>(ptr), size * nmemb);
+ in_write_callback_ = false;
return size * nmemb;
}
@@ -294,4 +300,13 @@
transfer_in_progress_ = false;
}
+void LibcurlHttpFetcher::GetHttpResponseCode() {
+ long http_response_code = 0;
+ if (curl_easy_getinfo(curl_handle_,
+ CURLINFO_RESPONSE_CODE,
+ &http_response_code) == CURLE_OK) {
+ http_response_code_ = static_cast<int>(http_response_code);
+ }
+}
+
} // namespace chromeos_update_engine
diff --git a/libcurl_http_fetcher.h b/libcurl_http_fetcher.h
index 8908638..69e794f 100644
--- a/libcurl_http_fetcher.h
+++ b/libcurl_http_fetcher.h
@@ -27,13 +27,20 @@
curl_handle_(NULL),
timeout_source_(NULL),
transfer_in_progress_(false),
+ transfer_size_(0),
+ bytes_downloaded_(0),
+ resume_offset_(0),
retry_count_(0),
retry_seconds_(60),
- idle_seconds_(1) {}
+ idle_seconds_(1),
+ in_write_callback_(false),
+ terminate_requested_(false) {}
// Cleans up all internal state. Does not notify delegate
~LibcurlHttpFetcher();
+ void SetOffset(off_t offset) { bytes_downloaded_ = offset; }
+
// Begins the transfer if it hasn't already begun.
virtual void BeginTransfer(const std::string& url);
@@ -62,6 +69,9 @@
void set_retry_seconds(int seconds) { retry_seconds_ = seconds; }
private:
+ // Asks libcurl for the http response code and stores it in the object.
+ void GetHttpResponseCode();
+
// Resumes a transfer where it left off. This will use the
// HTTP Range: header to make a new connection from where the last
// left off.
@@ -135,6 +145,8 @@
// If we resumed an earlier transfer, data offset that we used for the
// new connection. 0 otherwise.
+ // In this class, resume refers to resuming a dropped HTTP connection,
+ // not to resuming an interrupted download.
off_t resume_offset_;
// Number of resumes performed.
@@ -146,6 +158,13 @@
// Seconds to wait before asking libcurl to "perform".
int idle_seconds_;
+ // If true, we are currently performing a write callback on the delegate.
+ bool in_write_callback_;
+
+ // We can't clean everything up while we're in a write callback, so
+ // if we get a terminate request, queue it until we can handle it.
+ bool terminate_requested_;
+
DISALLOW_COPY_AND_ASSIGN(LibcurlHttpFetcher);
};
diff --git a/mock_http_fetcher.h b/mock_http_fetcher.h
index 87cbe99..91d50dd 100644
--- a/mock_http_fetcher.h
+++ b/mock_http_fetcher.h
@@ -34,6 +34,9 @@
// Cleans up all internal state. Does not notify delegate
~MockHttpFetcher();
+ // Offset is ignored; MockHttpFetcher does not support resuming from an offset.
+ virtual void SetOffset(off_t offset) {}
+
// Begins the transfer if it hasn't already begun.
virtual void BeginTransfer(const std::string& url);
diff --git a/multi_http_fetcher.h b/multi_http_fetcher.h
new file mode 100644
index 0000000..cee7de0
--- /dev/null
+++ b/multi_http_fetcher.h
@@ -0,0 +1,189 @@
+// Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROMEOS_PLATFORM_UPDATE_ENGINE_MULTI_HTTP_FETCHER_H__
+#define CHROMEOS_PLATFORM_UPDATE_ENGINE_MULTI_HTTP_FETCHER_H__
+
+#include <tr1/memory>
+#include <utility>
+#include <vector>
+
+#include "update_engine/http_fetcher.h"
+
+// This class is a simple wrapper around an HttpFetcher. The client
+// specifies a vector of byte ranges. MultiHttpFetcher will fetch bytes
+// from those offsets. Pass -1 as a length to specify unlimited length.
+// It really only would make sense for the last range specified to have
+// unlimited length.
+
+namespace chromeos_update_engine {
+
+template<typename BaseHttpFetcher>
+class MultiHttpFetcher : public HttpFetcher, public HttpFetcherDelegate {
+ public:
+ typedef std::vector<std::pair<off_t, off_t> > RangesVect;
+
+ MultiHttpFetcher()
+ : sent_transfer_complete_(false),
+ current_index_(0),
+ bytes_received_this_fetcher_(0) {}
+ ~MultiHttpFetcher() {}
+
+ void set_ranges(const RangesVect& ranges) {
+ ranges_ = ranges;
+ fetchers_.resize(ranges_.size()); // Allocate the fetchers
+ for (typename std::vector<std::tr1::shared_ptr<BaseHttpFetcher>
+ >::iterator it = fetchers_.begin(), e = fetchers_.end();
+ it != e; ++it) {
+ (*it) = std::tr1::shared_ptr<BaseHttpFetcher>(new BaseHttpFetcher);
+ (*it)->set_delegate(this);
+ }
+ }
+
+ void SetOffset(off_t offset) {} // for now, doesn't support this
+
+ // Begins the transfer to the specified URL.
+ void BeginTransfer(const std::string& url) {
+ url_ = url;
+ if (ranges_.empty()) {
+ if (delegate_)
+ delegate_->TransferComplete(this, true);
+ return;
+ }
+ current_index_ = 0;
+ LOG(INFO) << "starting first transfer";
+ StartTransfer();
+ }
+
+ void TerminateTransfer() {
+ if (current_index_ < fetchers_.size())
+ fetchers_[current_index_]->TerminateTransfer();
+ current_index_ = ranges_.size();
+ sent_transfer_complete_ = true; // a fib
+ }
+
+ void Pause() {
+ if (current_index_ < fetchers_.size())
+ fetchers_[current_index_]->Pause();
+ }
+
+ void Unpause() {
+ if (current_index_ < fetchers_.size())
+ fetchers_[current_index_]->Unpause();
+ }
+
+ // These two functions are overloaded in LibcurlHttpFetcher to speed
+ // testing.
+ void set_idle_seconds(int seconds) {
+ for (typename std::vector<std::tr1::shared_ptr<BaseHttpFetcher> >::iterator
+ it = fetchers_.begin(),
+ e = fetchers_.end(); it != e; ++it) {
+ (*it)->set_idle_seconds(seconds);
+ }
+ }
+ void set_retry_seconds(int seconds) {
+ for (typename std::vector<std::tr1::shared_ptr<BaseHttpFetcher> >::iterator
+ it = fetchers_.begin(),
+ e = fetchers_.end(); it != e; ++it) {
+ (*it)->set_retry_seconds(seconds);
+ }
+ }
+
+ private:
+ void SendTransferComplete(HttpFetcher* fetcher, bool successful) {
+ if (sent_transfer_complete_)
+ return;
+ LOG(INFO) << "Sending transfer complete";
+ sent_transfer_complete_ = true;
+ http_response_code_ = fetcher->http_response_code();
+ if (delegate_)
+ delegate_->TransferComplete(this, successful);
+ }
+
+ void StartTransfer() {
+ if (current_index_ >= ranges_.size()) {
+ return;
+ }
+ LOG(INFO) << "Starting a transfer";
+ bytes_received_this_fetcher_ = 0;
+ fetchers_[current_index_]->SetOffset(ranges_[current_index_].first);
+ fetchers_[current_index_]->BeginTransfer(url_);
+ }
+
+ void ReceivedBytes(HttpFetcher* fetcher,
+ const char* bytes,
+ int length) {
+ if (current_index_ >= ranges_.size())
+ return;
+ if (fetcher != fetchers_[current_index_].get()) {
+ LOG(WARNING) << "Received bytes from invalid fetcher";
+ return;
+ }
+ off_t next_size = length;
+ if (ranges_[current_index_].second >= 0) {
+ next_size = std::min(next_size,
+ ranges_[current_index_].second -
+ bytes_received_this_fetcher_);
+ }
+ LOG_IF(WARNING, next_size <= 0) << "Asked to write length <= 0";
+ if (delegate_)
+ delegate_->ReceivedBytes(this, bytes, next_size);
+ bytes_received_this_fetcher_ += length;
+ if (ranges_[current_index_].second >= 0 &&
+ bytes_received_this_fetcher_ >= ranges_[current_index_].second) {
+ fetchers_[current_index_]->TerminateTransfer();
+ current_index_++;
+ if (current_index_ == ranges_.size()) {
+ SendTransferComplete(fetchers_[current_index_ - 1].get(), true);
+ } else {
+ StartTransfer();
+ }
+ }
+ }
+
+ void TransferComplete(HttpFetcher* fetcher, bool successful) {
+ LOG(INFO) << "Received transfer complete";
+ if (current_index_ >= ranges_.size()) {
+ SendTransferComplete(fetcher, true);
+ return;
+ }
+
+ if (ranges_[current_index_].second < 0) {
+ // We're done with the current operation
+ current_index_++;
+ if (current_index_ >= ranges_.size() || !successful) {
+ SendTransferComplete(fetcher, successful);
+ } else {
+ // Do the next transfer
+ StartTransfer();
+ }
+ return;
+ }
+
+ if (bytes_received_this_fetcher_ < ranges_[current_index_].second) {
+ LOG(WARNING) << "Received insufficient bytes from fetcher. "
+ << "Ending early";
+ SendTransferComplete(fetcher, false);
+ return;
+ } else {
+ LOG(INFO) << "Got spurious TransferComplete. Ignoring.";
+ }
+ }
+
+ // If true, do not send any more data or TransferComplete to the delegate.
+ bool sent_transfer_complete_;
+
+ RangesVect ranges_;
+ std::vector<std::tr1::shared_ptr<BaseHttpFetcher> > fetchers_;
+
+ RangesVect::size_type current_index_; // index into ranges_, fetchers_
+ off_t bytes_received_this_fetcher_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MultiHttpFetcher);
+};
+
+} // namespace chromeos_update_engine
+
+#endif // CHROMEOS_PLATFORM_UPDATE_ENGINE_MULTI_HTTP_FETCHER_H__
diff --git a/test_http_server.cc b/test_http_server.cc
index 9573c31..de5c619 100644
--- a/test_http_server.cc
+++ b/test_http_server.cc
@@ -10,19 +10,22 @@
// To use this, simply make an HTTP connection to localhost:port and
// GET a url.
-#include <netinet/in.h>
-#include <sys/socket.h>
-#include <sys/types.h>
#include <errno.h>
#include <inttypes.h>
+#include <netinet/in.h>
+#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <sys/socket.h>
+#include <sys/types.h>
#include <unistd.h>
+
#include <algorithm>
#include <string>
#include <vector>
-#include "base/logging.h"
+
+#include <base/logging.h>
using std::min;
using std::string;
@@ -101,19 +104,19 @@
return true;
}
-void WriteString(int fd, const string& str) {
+bool WriteString(int fd, const string& str) {
unsigned int bytes_written = 0;
while (bytes_written < str.size()) {
- ssize_t r = write(fd, str.c_str() + bytes_written,
+ ssize_t r = write(fd, str.data() + bytes_written,
str.size() - bytes_written);
- LOG(INFO) << "write() wrote " << r << " bytes";
if (r < 0) {
perror("write");
- return;
+ LOG(INFO) << "write failed";
+ return false;
}
bytes_written += r;
}
- LOG(INFO) << "WriteString wrote " << bytes_written << " bytes";
+ return true;
}
string Itoa(off_t num) {
@@ -145,16 +148,21 @@
}
void HandleBig(int fd, const HttpRequest& request, int big_length) {
+ LOG(INFO) << "starting big";
const off_t full_length = big_length;
WriteHeaders(fd, true, full_length, request.offset, request.return_code);
- const off_t content_length = full_length - request.offset;
int i = request.offset;
- for (; i % 10; i++)
- WriteString(fd, string(1, 'a' + (i % 10)));
- CHECK_EQ(i % 10, 0);
- for (; i < content_length; i += 10)
- WriteString(fd, "abcdefghij");
- CHECK_EQ(i, full_length);
+ bool success = true;
+ for (; (i % 10) && success; i++)
+ success = WriteString(fd, string(1, 'a' + (i % 10)));
+ if (success)
+ CHECK_EQ(i % 10, 0);
+ for (; (i < full_length) && success; i += 10) {
+ success = WriteString(fd, "abcdefghij");
+ }
+ if (success)
+ CHECK_EQ(i, full_length);
+ LOG(INFO) << "Done w/ big";
}
// This is like /big, but it writes at most 9000 bytes. Also,
@@ -252,6 +260,9 @@
using namespace chromeos_update_engine;
int main(int argc, char** argv) {
+ // Ignore SIGPIPE on write() to sockets.
+ signal(SIGPIPE, SIG_IGN);
+
socklen_t clilen;
struct sockaddr_in server_addr;
struct sockaddr_in client_addr;