// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/safe_browsing/protocol_manager.h"

#include "base/base64.h"
#include "base/environment.h"
#include "base/logging.h"
#include "base/metrics/histogram.h"
#include "base/rand_util.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/timer/timer.h"
#include "chrome/browser/safe_browsing/protocol_parser.h"
#include "chrome/common/chrome_version_info.h"
#include "chrome/common/env_vars.h"
#include "google_apis/google_api_keys.h"
#include "net/base/escape.h"
#include "net/base/load_flags.h"
#include "net/base/net_errors.h"
#include "net/url_request/url_fetcher.h"
#include "net/url_request/url_request_context_getter.h"
#include "net/url_request/url_request_status.h"
30 using base::TimeDelta;
// UpdateResult indicates what happened with the primary and/or backup update
// requests. The ordering of the values must stay the same for UMA consistency,
// and is also ordered in this way to match ProtocolManager::BackupUpdateReason.
// The (FAIL, SUCCESS) pairs per backup reason are relied on by
// UpdateFinished(), which computes BACKUP_START + reason * 2 + success.
enum UpdateResult {
  UPDATE_RESULT_FAIL,
  UPDATE_RESULT_SUCCESS,
  UPDATE_RESULT_BACKUP_CONNECT_FAIL,
  UPDATE_RESULT_BACKUP_CONNECT_SUCCESS,
  UPDATE_RESULT_BACKUP_HTTP_FAIL,
  UPDATE_RESULT_BACKUP_HTTP_SUCCESS,
  UPDATE_RESULT_BACKUP_NETWORK_FAIL,
  UPDATE_RESULT_BACKUP_NETWORK_SUCCESS,
  UPDATE_RESULT_MAX,
  UPDATE_RESULT_BACKUP_START = UPDATE_RESULT_BACKUP_CONNECT_FAIL,
};
50 void RecordUpdateResult(UpdateResult result) {
51 DCHECK(result >= 0 && result < UPDATE_RESULT_MAX);
52 UMA_HISTOGRAM_ENUMERATION("SB2.UpdateResult", result, UPDATE_RESULT_MAX);
// Minimum time, in seconds, from start up before we must issue an update query.
static const int kSbTimerStartIntervalSecMin = 60;

// Maximum time, in seconds, from start up before we must issue an update query.
static const int kSbTimerStartIntervalSecMax = 300;

// The maximum time, in seconds, to wait for a response to an update request.
static const int kSbMaxUpdateWaitSec = 30;

// Maximum back off multiplier.
static const int kSbMaxBackOff = 8;
69 // The default SBProtocolManagerFactory.
70 class SBProtocolManagerFactoryImpl : public SBProtocolManagerFactory {
72 SBProtocolManagerFactoryImpl() { }
73 virtual ~SBProtocolManagerFactoryImpl() { }
74 virtual SafeBrowsingProtocolManager* CreateProtocolManager(
75 SafeBrowsingProtocolManagerDelegate* delegate,
76 net::URLRequestContextGetter* request_context_getter,
77 const SafeBrowsingProtocolConfig& config) OVERRIDE {
78 return new SafeBrowsingProtocolManager(
79 delegate, request_context_getter, config);
82 DISALLOW_COPY_AND_ASSIGN(SBProtocolManagerFactoryImpl);
85 // SafeBrowsingProtocolManager implementation ----------------------------------
88 SBProtocolManagerFactory* SafeBrowsingProtocolManager::factory_ = NULL;
91 SafeBrowsingProtocolManager* SafeBrowsingProtocolManager::Create(
92 SafeBrowsingProtocolManagerDelegate* delegate,
93 net::URLRequestContextGetter* request_context_getter,
94 const SafeBrowsingProtocolConfig& config) {
96 factory_ = new SBProtocolManagerFactoryImpl();
97 return factory_->CreateProtocolManager(
98 delegate, request_context_getter, config);
101 SafeBrowsingProtocolManager::SafeBrowsingProtocolManager(
102 SafeBrowsingProtocolManagerDelegate* delegate,
103 net::URLRequestContextGetter* request_context_getter,
104 const SafeBrowsingProtocolConfig& config)
105 : delegate_(delegate),
106 request_type_(NO_REQUEST),
107 update_error_count_(0),
108 gethash_error_count_(0),
109 update_back_off_mult_(1),
110 gethash_back_off_mult_(1),
111 next_update_interval_(base::TimeDelta::FromSeconds(
112 base::RandInt(kSbTimerStartIntervalSecMin,
113 kSbTimerStartIntervalSecMax))),
114 update_state_(FIRST_REQUEST),
115 chunk_pending_to_write_(false),
116 version_(config.version),
118 client_name_(config.client_name),
119 request_context_getter_(request_context_getter),
120 url_prefix_(config.url_prefix),
121 backup_update_reason_(BACKUP_UPDATE_REASON_MAX),
122 disable_auto_update_(config.disable_auto_update),
124 DCHECK(!url_prefix_.empty());
126 backup_url_prefixes_[BACKUP_UPDATE_REASON_CONNECT] =
127 config.backup_connect_error_url_prefix;
128 backup_url_prefixes_[BACKUP_UPDATE_REASON_HTTP] =
129 config.backup_http_error_url_prefix;
130 backup_url_prefixes_[BACKUP_UPDATE_REASON_NETWORK] =
131 config.backup_network_error_url_prefix;
133 // Set the backoff multiplier fuzz to a random value between 0 and 1.
134 back_off_fuzz_ = static_cast<float>(base::RandDouble());
135 if (version_.empty())
136 version_ = SafeBrowsingProtocolManagerHelper::Version();
140 void SafeBrowsingProtocolManager::RecordGetHashResult(
141 bool is_download, ResultType result_type) {
143 UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResultDownload", result_type,
144 GET_HASH_RESULT_MAX);
146 UMA_HISTOGRAM_ENUMERATION("SB2.GetHashResult", result_type,
147 GET_HASH_RESULT_MAX);
151 bool SafeBrowsingProtocolManager::IsUpdateScheduled() const {
152 return update_timer_.IsRunning();
155 SafeBrowsingProtocolManager::~SafeBrowsingProtocolManager() {
156 // Delete in-progress SafeBrowsing requests.
157 STLDeleteContainerPairFirstPointers(hash_requests_.begin(),
158 hash_requests_.end());
159 hash_requests_.clear();
162 // We can only have one update or chunk request outstanding, but there may be
163 // multiple GetHash requests pending since we don't want to serialize them and
164 // slow down the user.
165 void SafeBrowsingProtocolManager::GetFullHash(
166 const std::vector<SBPrefix>& prefixes,
167 FullHashCallback callback,
169 DCHECK(CalledOnValidThread());
170 // If we are in GetHash backoff, we need to check if we're past the next
171 // allowed time. If we are, we can proceed with the request. If not, we are
172 // required to return empty results (i.e. treat the page as safe).
173 if (gethash_error_count_ && Time::Now() <= next_gethash_time_) {
174 RecordGetHashResult(is_download, GET_HASH_BACKOFF_ERROR);
175 std::vector<SBFullHashResult> full_hashes;
176 callback.Run(full_hashes, false);
179 GURL gethash_url = GetHashUrl();
180 net::URLFetcher* fetcher = net::URLFetcher::Create(
181 url_fetcher_id_++, gethash_url, net::URLFetcher::POST, this);
182 hash_requests_[fetcher] = FullHashDetails(callback, is_download);
184 std::string get_hash;
185 SafeBrowsingProtocolParser parser;
186 parser.FormatGetHash(prefixes, &get_hash);
188 fetcher->SetLoadFlags(net::LOAD_DISABLE_CACHE);
189 fetcher->SetRequestContext(request_context_getter_.get());
190 fetcher->SetUploadData("text/plain", get_hash);
194 void SafeBrowsingProtocolManager::GetNextUpdate() {
195 DCHECK(CalledOnValidThread());
196 if (!request_.get() && request_type_ == NO_REQUEST)
197 IssueUpdateRequest();
200 // net::URLFetcherDelegate implementation ----------------------------------
202 // All SafeBrowsing request responses are handled here.
203 // TODO(paulg): Clarify with the SafeBrowsing team whether a failed parse of a
204 // chunk should retry the download and parse of that chunk (and
205 // what back off / how many times to try), and if that effects the
206 // update back off. For now, a failed parse of the chunk means we
207 // drop it. This isn't so bad because the next UPDATE_REQUEST we
208 // do will report all the chunks we have. If that chunk is still
209 // required, the SafeBrowsing servers will tell us to get it again.
210 void SafeBrowsingProtocolManager::OnURLFetchComplete(
211 const net::URLFetcher* source) {
212 DCHECK(CalledOnValidThread());
213 scoped_ptr<const net::URLFetcher> fetcher;
214 bool parsed_ok = true;
216 HashRequests::iterator it = hash_requests_.find(source);
217 if (it != hash_requests_.end()) {
219 fetcher.reset(it->first);
220 const FullHashDetails& details = it->second;
221 std::vector<SBFullHashResult> full_hashes;
222 bool can_cache = false;
223 if (source->GetStatus().is_success() &&
224 (source->GetResponseCode() == 200 ||
225 source->GetResponseCode() == 204)) {
226 // For tracking our GetHash false positive (204) rate, compared to real
228 if (source->GetResponseCode() == 200)
229 RecordGetHashResult(details.is_download, GET_HASH_STATUS_200);
231 RecordGetHashResult(details.is_download, GET_HASH_STATUS_204);
233 gethash_error_count_ = 0;
234 gethash_back_off_mult_ = 1;
235 SafeBrowsingProtocolParser parser;
237 source->GetResponseAsString(&data);
238 parsed_ok = parser.ParseGetHash(
240 static_cast<int>(data.length()),
244 RecordGetHashResult(details.is_download, GET_HASH_PARSE_ERROR);
245 // TODO(cbentzel): Should can_cache be set to false here? (See
246 // http://crbug.com/360232.)
249 HandleGetHashError(Time::Now());
250 if (source->GetStatus().status() == net::URLRequestStatus::FAILED) {
251 RecordGetHashResult(details.is_download, GET_HASH_NETWORK_ERROR);
252 VLOG(1) << "SafeBrowsing GetHash request for: " << source->GetURL()
253 << " failed with error: " << source->GetStatus().error();
255 RecordGetHashResult(details.is_download, GET_HASH_HTTP_ERROR);
256 VLOG(1) << "SafeBrowsing GetHash request for: " << source->GetURL()
257 << " failed with error: " << source->GetResponseCode();
261 // Invoke the callback with full_hashes, even if there was a parse error or
262 // an error response code (in which case full_hashes will be empty). The
263 // caller can't be blocked indefinitely.
264 details.callback.Run(full_hashes, can_cache);
266 hash_requests_.erase(it);
268 // Update or chunk response.
269 fetcher.reset(request_.release());
271 if (request_type_ == UPDATE_REQUEST ||
272 request_type_ == BACKUP_UPDATE_REQUEST) {
273 if (!fetcher.get()) {
274 // We've timed out waiting for an update response, so we've cancelled
275 // the update request and scheduled a new one. Ignore this response.
279 // Cancel the update response timeout now that we have the response.
280 timeout_timer_.Stop();
283 net::URLRequestStatus status = source->GetStatus();
284 if (status.is_success() && source->GetResponseCode() == 200) {
285 // We have data from the SafeBrowsing service.
287 source->GetResponseAsString(&data);
288 parsed_ok = HandleServiceResponse(
289 source->GetURL(), data.data(), static_cast<int>(data.length()));
291 VLOG(1) << "SafeBrowsing request for: " << source->GetURL()
293 chunk_request_urls_.clear();
294 if (request_type_ == UPDATE_REQUEST &&
295 IssueBackupUpdateRequest(BACKUP_UPDATE_REASON_HTTP)) {
298 UpdateFinished(false);
301 switch (request_type_) {
304 chunk_request_urls_.pop_front();
305 if (chunk_request_urls_.empty() && !chunk_pending_to_write_)
306 UpdateFinished(true);
310 case BACKUP_UPDATE_REQUEST:
311 if (chunk_request_urls_.empty() && parsed_ok) {
312 // We are up to date since the servers gave us nothing new, so we
313 // are done with this update cycle.
314 UpdateFinished(true);
318 // This can happen if HandleServiceResponse fails above.
325 if (status.status() == net::URLRequestStatus::FAILED) {
326 VLOG(1) << "SafeBrowsing request for: " << source->GetURL()
327 << " failed with error: " << source->GetStatus().error();
329 VLOG(1) << "SafeBrowsing request for: " << source->GetURL()
330 << " failed with error: " << source->GetResponseCode();
332 if (request_type_ == CHUNK_REQUEST) {
333 // The SafeBrowsing service error, or very bad response code: back off.
334 chunk_request_urls_.clear();
335 } else if (request_type_ == UPDATE_REQUEST) {
336 BackupUpdateReason backup_update_reason = BACKUP_UPDATE_REASON_MAX;
337 if (status.is_success()) {
338 backup_update_reason = BACKUP_UPDATE_REASON_HTTP;
340 switch (status.error()) {
341 case net::ERR_INTERNET_DISCONNECTED:
342 case net::ERR_NETWORK_CHANGED:
343 backup_update_reason = BACKUP_UPDATE_REASON_NETWORK;
346 backup_update_reason = BACKUP_UPDATE_REASON_CONNECT;
350 if (backup_update_reason != BACKUP_UPDATE_REASON_MAX &&
351 IssueBackupUpdateRequest(backup_update_reason)) {
355 UpdateFinished(false);
359 // Get the next chunk if available.
363 bool SafeBrowsingProtocolManager::HandleServiceResponse(const GURL& url,
366 DCHECK(CalledOnValidThread());
367 SafeBrowsingProtocolParser parser;
369 switch (request_type_) {
371 case BACKUP_UPDATE_REQUEST: {
372 int next_update_sec = -1;
374 scoped_ptr<std::vector<SBChunkDelete> > chunk_deletes(
375 new std::vector<SBChunkDelete>);
376 std::vector<ChunkUrl> chunk_urls;
377 if (!parser.ParseUpdate(data, length, &next_update_sec,
378 &reset, chunk_deletes.get(), &chunk_urls)) {
382 base::TimeDelta next_update_interval =
383 base::TimeDelta::FromSeconds(next_update_sec);
384 last_update_ = Time::Now();
386 if (update_state_ == FIRST_REQUEST)
387 update_state_ = SECOND_REQUEST;
388 else if (update_state_ == SECOND_REQUEST)
389 update_state_ = NORMAL_REQUEST;
391 // New time for the next update.
392 if (next_update_interval > base::TimeDelta()) {
393 next_update_interval_ = next_update_interval;
394 } else if (update_state_ == SECOND_REQUEST) {
395 next_update_interval_ = base::TimeDelta::FromSeconds(
396 base::RandInt(15, 45));
399 // New chunks to download.
400 if (!chunk_urls.empty()) {
401 UMA_HISTOGRAM_COUNTS("SB2.UpdateUrls", chunk_urls.size());
402 for (size_t i = 0; i < chunk_urls.size(); ++i)
403 chunk_request_urls_.push_back(chunk_urls[i]);
406 // Handle the case were the SafeBrowsing service tells us to dump our
409 delegate_->ResetDatabase();
413 // Chunks to delete from our storage. Pass ownership of
415 if (!chunk_deletes->empty())
416 delegate_->DeleteChunks(chunk_deletes.release());
420 case CHUNK_REQUEST: {
421 UMA_HISTOGRAM_TIMES("SB2.ChunkRequest",
422 base::Time::Now() - chunk_request_start_);
424 const ChunkUrl chunk_url = chunk_request_urls_.front();
425 scoped_ptr<SBChunkList> chunks(new SBChunkList);
426 UMA_HISTOGRAM_COUNTS("SB2.ChunkSize", length);
427 update_size_ += length;
428 if (!parser.ParseChunk(chunk_url.list_name, data, length,
431 std::string data_str;
432 data_str.assign(data, length);
433 std::string encoded_chunk;
434 base::Base64Encode(data_str, &encoded_chunk);
435 VLOG(1) << "ParseChunk error for chunk: " << chunk_url.url
436 << ", Base64Encode(data): " << encoded_chunk
437 << ", length: " << length;
442 // Chunks to add to storage. Pass ownership of |chunks|.
443 if (!chunks->empty()) {
444 chunk_pending_to_write_ = true;
445 delegate_->AddChunks(
446 chunk_url.list_name, chunks.release(),
447 base::Bind(&SafeBrowsingProtocolManager::OnAddChunksComplete,
448 base::Unretained(this)));
461 void SafeBrowsingProtocolManager::Initialize() {
462 DCHECK(CalledOnValidThread());
463 // Don't want to hit the safe browsing servers on build/chrome bots.
464 scoped_ptr<base::Environment> env(base::Environment::Create());
465 if (env->HasVar(env_vars::kHeadless))
467 ScheduleNextUpdate(false /* no back off */);
470 void SafeBrowsingProtocolManager::ScheduleNextUpdate(bool back_off) {
471 DCHECK(CalledOnValidThread());
472 if (disable_auto_update_) {
473 // Unschedule any current timer.
474 update_timer_.Stop();
477 // Reschedule with the new update.
478 base::TimeDelta next_update_interval = GetNextUpdateInterval(back_off);
479 ForceScheduleNextUpdate(next_update_interval);
482 void SafeBrowsingProtocolManager::ForceScheduleNextUpdate(
483 base::TimeDelta interval) {
484 DCHECK(CalledOnValidThread());
485 DCHECK(interval >= base::TimeDelta());
486 // Unschedule any current timer.
487 update_timer_.Stop();
488 update_timer_.Start(FROM_HERE, interval, this,
489 &SafeBrowsingProtocolManager::GetNextUpdate);
492 // According to section 5 of the SafeBrowsing protocol specification, we must
493 // back off after a certain number of errors. We only change |next_update_sec_|
494 // when we receive a response from the SafeBrowsing service.
495 base::TimeDelta SafeBrowsingProtocolManager::GetNextUpdateInterval(
497 DCHECK(CalledOnValidThread());
498 DCHECK(next_update_interval_ > base::TimeDelta());
499 base::TimeDelta next = next_update_interval_;
501 next = GetNextBackOffInterval(&update_error_count_, &update_back_off_mult_);
503 // Successful response means error reset.
504 update_error_count_ = 0;
505 update_back_off_mult_ = 1;
510 base::TimeDelta SafeBrowsingProtocolManager::GetNextBackOffInterval(
511 int* error_count, int* multiplier) const {
512 DCHECK(CalledOnValidThread());
513 DCHECK(multiplier && error_count);
515 if (*error_count > 1 && *error_count < 6) {
516 base::TimeDelta next = base::TimeDelta::FromMinutes(
517 *multiplier * (1 + back_off_fuzz_) * 30);
519 if (*multiplier > kSbMaxBackOff)
520 *multiplier = kSbMaxBackOff;
523 if (*error_count >= 6)
524 return base::TimeDelta::FromHours(8);
525 return base::TimeDelta::FromMinutes(1);
528 // This request requires getting a list of all the chunks for each list from the
529 // database asynchronously. The request will be issued when we're called back in
530 // OnGetChunksComplete.
531 // TODO(paulg): We should get this at start up and maintain a ChunkRange cache
532 // to avoid hitting the database with each update request. On the
533 // otherhand, this request will only occur ~20-30 minutes so there
534 // isn't that much overhead. Measure!
535 void SafeBrowsingProtocolManager::IssueUpdateRequest() {
536 DCHECK(CalledOnValidThread());
537 request_type_ = UPDATE_REQUEST;
538 delegate_->UpdateStarted();
539 delegate_->GetChunks(
540 base::Bind(&SafeBrowsingProtocolManager::OnGetChunksComplete,
541 base::Unretained(this)));
544 // The backup request can run immediately since the chunks have already been
545 // retrieved from the DB.
546 bool SafeBrowsingProtocolManager::IssueBackupUpdateRequest(
547 BackupUpdateReason backup_update_reason) {
548 DCHECK(CalledOnValidThread());
549 DCHECK_EQ(request_type_, UPDATE_REQUEST);
550 DCHECK(backup_update_reason >= 0 &&
551 backup_update_reason < BACKUP_UPDATE_REASON_MAX);
552 if (backup_url_prefixes_[backup_update_reason].empty())
554 request_type_ = BACKUP_UPDATE_REQUEST;
555 backup_update_reason_ = backup_update_reason;
557 GURL backup_update_url = BackupUpdateUrl(backup_update_reason);
558 request_.reset(net::URLFetcher::Create(
559 url_fetcher_id_++, backup_update_url, net::URLFetcher::POST, this));
560 request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
561 request_->SetRequestContext(request_context_getter_.get());
562 request_->SetUploadData("text/plain", update_list_data_);
565 // Begin the update request timeout.
566 timeout_timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kSbMaxUpdateWaitSec),
568 &SafeBrowsingProtocolManager::UpdateResponseTimeout);
573 void SafeBrowsingProtocolManager::IssueChunkRequest() {
574 DCHECK(CalledOnValidThread());
575 // We are only allowed to have one request outstanding at any time. Also,
576 // don't get the next url until the previous one has been written to disk so
577 // that we don't use too much memory.
578 if (request_.get() || chunk_request_urls_.empty() || chunk_pending_to_write_)
581 ChunkUrl next_chunk = chunk_request_urls_.front();
582 DCHECK(!next_chunk.url.empty());
583 GURL chunk_url = NextChunkUrl(next_chunk.url);
584 request_type_ = CHUNK_REQUEST;
585 request_.reset(net::URLFetcher::Create(
586 url_fetcher_id_++, chunk_url, net::URLFetcher::GET, this));
587 request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
588 request_->SetRequestContext(request_context_getter_.get());
589 chunk_request_start_ = base::Time::Now();
593 void SafeBrowsingProtocolManager::OnGetChunksComplete(
594 const std::vector<SBListChunkRanges>& lists, bool database_error) {
595 DCHECK(CalledOnValidThread());
596 DCHECK_EQ(request_type_, UPDATE_REQUEST);
597 DCHECK(update_list_data_.empty());
598 if (database_error) {
599 // The update was not successful, but don't back off.
600 UpdateFinished(false, false);
604 // Format our stored chunks:
605 bool found_malware = false;
606 bool found_phishing = false;
607 for (size_t i = 0; i < lists.size(); ++i) {
608 update_list_data_.append(FormatList(lists[i]));
609 if (lists[i].name == safe_browsing_util::kPhishingList)
610 found_phishing = true;
612 if (lists[i].name == safe_browsing_util::kMalwareList)
613 found_malware = true;
616 // If we have an empty database, let the server know we want data for these
619 update_list_data_.append(FormatList(
620 SBListChunkRanges(safe_browsing_util::kPhishingList)));
623 update_list_data_.append(FormatList(
624 SBListChunkRanges(safe_browsing_util::kMalwareList)));
626 // Large requests are (probably) a sign of database corruption.
627 // Record stats to inform decisions about whether to automate
628 // deletion of such databases. http://crbug.com/120219
629 UMA_HISTOGRAM_COUNTS("SB2.UpdateRequestSize", update_list_data_.size());
631 GURL update_url = UpdateUrl();
632 request_.reset(net::URLFetcher::Create(
633 url_fetcher_id_++, update_url, net::URLFetcher::POST, this));
634 request_->SetLoadFlags(net::LOAD_DISABLE_CACHE);
635 request_->SetRequestContext(request_context_getter_.get());
636 request_->SetUploadData("text/plain", update_list_data_);
639 // Begin the update request timeout.
640 timeout_timer_.Start(FROM_HERE, TimeDelta::FromSeconds(kSbMaxUpdateWaitSec),
642 &SafeBrowsingProtocolManager::UpdateResponseTimeout);
645 // If we haven't heard back from the server with an update response, this method
646 // will run. Close the current update session and schedule another update.
647 void SafeBrowsingProtocolManager::UpdateResponseTimeout() {
648 DCHECK(CalledOnValidThread());
649 DCHECK(request_type_ == UPDATE_REQUEST ||
650 request_type_ == BACKUP_UPDATE_REQUEST);
652 if (request_type_ == UPDATE_REQUEST &&
653 IssueBackupUpdateRequest(BACKUP_UPDATE_REASON_CONNECT)) {
656 UpdateFinished(false);
659 void SafeBrowsingProtocolManager::OnAddChunksComplete() {
660 DCHECK(CalledOnValidThread());
661 chunk_pending_to_write_ = false;
663 if (chunk_request_urls_.empty()) {
664 UMA_HISTOGRAM_LONG_TIMES("SB2.Update", Time::Now() - last_update_);
665 UpdateFinished(true);
672 std::string SafeBrowsingProtocolManager::FormatList(
673 const SBListChunkRanges& list) {
674 std::string formatted_results;
675 formatted_results.append(list.name);
676 formatted_results.append(";");
677 if (!list.adds.empty()) {
678 formatted_results.append("a:" + list.adds);
679 if (!list.subs.empty())
680 formatted_results.append(":");
682 if (!list.subs.empty()) {
683 formatted_results.append("s:" + list.subs);
685 formatted_results.append("\n");
687 return formatted_results;
690 void SafeBrowsingProtocolManager::HandleGetHashError(const Time& now) {
691 DCHECK(CalledOnValidThread());
692 base::TimeDelta next = GetNextBackOffInterval(
693 &gethash_error_count_, &gethash_back_off_mult_);
694 next_gethash_time_ = now + next;
697 void SafeBrowsingProtocolManager::UpdateFinished(bool success) {
698 UpdateFinished(success, !success);
701 void SafeBrowsingProtocolManager::UpdateFinished(bool success, bool back_off) {
702 DCHECK(CalledOnValidThread());
703 UMA_HISTOGRAM_COUNTS("SB2.UpdateSize", update_size_);
705 bool update_success = success || request_type_ == CHUNK_REQUEST;
706 if (backup_update_reason_ == BACKUP_UPDATE_REASON_MAX) {
708 update_success ? UPDATE_RESULT_SUCCESS : UPDATE_RESULT_FAIL);
710 UpdateResult update_result = static_cast<UpdateResult>(
711 UPDATE_RESULT_BACKUP_START +
712 (static_cast<int>(backup_update_reason_) * 2) +
714 RecordUpdateResult(update_result);
716 backup_update_reason_ = BACKUP_UPDATE_REASON_MAX;
717 request_type_ = NO_REQUEST;
718 update_list_data_.clear();
719 delegate_->UpdateFinished(success);
720 ScheduleNextUpdate(back_off);
723 GURL SafeBrowsingProtocolManager::UpdateUrl() const {
724 std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
725 url_prefix_, "downloads", client_name_, version_, additional_query_);
729 GURL SafeBrowsingProtocolManager::BackupUpdateUrl(
730 BackupUpdateReason backup_update_reason) const {
731 DCHECK(backup_update_reason >= 0 &&
732 backup_update_reason < BACKUP_UPDATE_REASON_MAX);
733 DCHECK(!backup_url_prefixes_[backup_update_reason].empty());
734 std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
735 backup_url_prefixes_[backup_update_reason], "downloads", client_name_,
736 version_, additional_query_);
740 GURL SafeBrowsingProtocolManager::GetHashUrl() const {
741 std::string url = SafeBrowsingProtocolManagerHelper::ComposeUrl(
742 url_prefix_, "gethash", client_name_, version_, additional_query_);
746 GURL SafeBrowsingProtocolManager::NextChunkUrl(const std::string& url) const {
747 DCHECK(CalledOnValidThread());
748 std::string next_url;
749 if (!StartsWithASCII(url, "http://", false) &&
750 !StartsWithASCII(url, "https://", false)) {
751 // Use https if we updated via https, otherwise http (useful for testing).
752 if (StartsWithASCII(url_prefix_, "https://", false))
753 next_url.append("https://");
755 next_url.append("http://");
756 next_url.append(url);
760 if (!additional_query_.empty()) {
761 if (next_url.find("?") != std::string::npos) {
762 next_url.append("&");
764 next_url.append("?");
766 next_url.append(additional_query_);
768 return GURL(next_url);
771 SafeBrowsingProtocolManager::FullHashDetails::FullHashDetails()
776 SafeBrowsingProtocolManager::FullHashDetails::FullHashDetails(
777 FullHashCallback callback, bool is_download)
778 : callback(callback),
779 is_download(is_download) {
782 SafeBrowsingProtocolManager::FullHashDetails::~FullHashDetails() {
785 SafeBrowsingProtocolManagerDelegate::~SafeBrowsingProtocolManagerDelegate() {