1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "net/url_request/url_request_http_job.h"
7 #include "base/base_switches.h"
9 #include "base/bind_helpers.h"
10 #include "base/command_line.h"
11 #include "base/compiler_specific.h"
12 #include "base/file_version_info.h"
13 #include "base/message_loop/message_loop.h"
14 #include "base/metrics/field_trial.h"
15 #include "base/metrics/histogram.h"
16 #include "base/rand_util.h"
17 #include "base/strings/string_util.h"
18 #include "base/time/time.h"
19 #include "net/base/filter.h"
20 #include "net/base/host_port_pair.h"
21 #include "net/base/load_flags.h"
22 #include "net/base/mime_util.h"
23 #include "net/base/net_errors.h"
24 #include "net/base/net_util.h"
25 #include "net/base/network_delegate.h"
26 #include "net/base/sdch_manager.h"
27 #include "net/cert/cert_status_flags.h"
28 #include "net/cookies/cookie_monster.h"
29 #include "net/http/http_network_session.h"
30 #include "net/http/http_request_headers.h"
31 #include "net/http/http_response_headers.h"
32 #include "net/http/http_response_info.h"
33 #include "net/http/http_status_code.h"
34 #include "net/http/http_transaction.h"
35 #include "net/http/http_transaction_delegate.h"
36 #include "net/http/http_transaction_factory.h"
37 #include "net/http/http_util.h"
38 #include "net/ssl/ssl_cert_request_info.h"
39 #include "net/ssl/ssl_config_service.h"
40 #include "net/url_request/fraudulent_certificate_reporter.h"
41 #include "net/url_request/http_user_agent_settings.h"
42 #include "net/url_request/url_request.h"
43 #include "net/url_request/url_request_context.h"
44 #include "net/url_request/url_request_error_job.h"
45 #include "net/url_request/url_request_job_factory.h"
46 #include "net/url_request/url_request_redirect_job.h"
47 #include "net/url_request/url_request_throttler_header_adapter.h"
48 #include "net/url_request/url_request_throttler_manager.h"
// Request header used to advertise locally available SDCH dictionaries to the
// server (see AddExtraHeaders, which sets it alongside "sdch" in
// Accept-Encoding).
50 static const char kAvailDictionaryHeader[] = "Avail-Dictionary";
// Adapter that exposes the owning URLRequestHttpJob's state to the content
// filter machinery through the FilterContext interface. Each virtual simply
// forwards to |job_| (see the out-of-line definitions below).
// NOTE(review): this listing elides lines (e.g. access specifiers and the
// closing brace are not shown here).
54 class URLRequestHttpJob::HttpFilterContext : public FilterContext {
56 explicit HttpFilterContext(URLRequestHttpJob* job);
57 virtual ~HttpFilterContext();
59 // FilterContext implementation.
60 virtual bool GetMimeType(std::string* mime_type) const OVERRIDE;
61 virtual bool GetURL(GURL* gurl) const OVERRIDE;
62 virtual base::Time GetRequestTime() const OVERRIDE;
63 virtual bool IsCachedContent() const OVERRIDE;
64 virtual bool IsDownload() const OVERRIDE;
65 virtual bool IsSdchResponse() const OVERRIDE;
66 virtual int64 GetByteReadCount() const OVERRIDE;
67 virtual int GetResponseCode() const OVERRIDE;
68 virtual void RecordPacketStats(StatisticSelector statistic) const OVERRIDE;
70 // Method to allow us to reset filter context for a response that should have
71 // been SDCH encoded when there is an update due to an explicit HTTP header.
72 void ResetSdchResponseToFalse();
// Non-owning back-pointer to the job whose state this context reflects.
75 URLRequestHttpJob* job_;
77 DISALLOW_COPY_AND_ASSIGN(HttpFilterContext);
// Relays cache/network activity notifications from the HttpTransaction to the
// NetworkDelegate as RequestWaitState changes, tracking the current phase in
// |state_| for DCHECK-based sanity checking. Notifications stop once the
// request is detached (OnDetachRequest) or the delegate pointer is cleared.
// NOTE(review): this listing elides lines (ctor initializer for |request_|,
// the State enum definition, and various closing braces are not shown).
80 class URLRequestHttpJob::HttpTransactionDelegateImpl
81 : public HttpTransactionDelegate {
83 HttpTransactionDelegateImpl(URLRequest* request,
84 NetworkDelegate* network_delegate)
86 network_delegate_(network_delegate),
87 state_(NONE_ACTIVE) {}
// Destructor detaches so no notification outlives the delegate.
88 virtual ~HttpTransactionDelegateImpl() { OnDetachRequest(); }
89 void OnDetachRequest() {
90 if (!IsRequestAndDelegateActive())
92 NotifyStateChange(NetworkDelegate::REQUEST_WAIT_STATE_RESET);
// HttpTransactionDelegate implementation: each hook validates the expected
// prior state and forwards the corresponding wait-state transition.
96 virtual void OnCacheActionStart() OVERRIDE {
97 HandleStateChange(NONE_ACTIVE,
99 NetworkDelegate::REQUEST_WAIT_STATE_CACHE_START);
101 virtual void OnCacheActionFinish() OVERRIDE {
102 HandleStateChange(CACHE_ACTIVE,
104 NetworkDelegate::REQUEST_WAIT_STATE_CACHE_FINISH);
106 virtual void OnNetworkActionStart() OVERRIDE {
107 HandleStateChange(NONE_ACTIVE,
109 NetworkDelegate::REQUEST_WAIT_STATE_NETWORK_START);
111 virtual void OnNetworkActionFinish() OVERRIDE {
112 HandleStateChange(NETWORK_ACTIVE,
114 NetworkDelegate::REQUEST_WAIT_STATE_NETWORK_FINISH);
124 // Returns true if this object still has an active request and network
126 bool IsRequestAndDelegateActive() const {
127 return request_ && network_delegate_;
130 // Notifies the |network_delegate_| object of a change in the state of the
131 // |request_| to the state given by the |request_wait_state| argument.
132 void NotifyStateChange(NetworkDelegate::RequestWaitState request_wait_state) {
133 network_delegate_->NotifyRequestWaitStateChange(*request_,
137 // Checks the request and delegate are still active, changes |state_| from
138 // |expected_state| to |next_state|, and then notifies the network delegate of
139 // the change to |request_wait_state|.
140 void HandleStateChange(State expected_state,
142 NetworkDelegate::RequestWaitState request_wait_state) {
143 if (!IsRequestAndDelegateActive())
145 DCHECK_EQ(expected_state, state_);
147 NotifyStateChange(request_wait_state);
// Non-owning pointers; when either is NULL, notifications are suppressed.
150 URLRequest* request_;
151 NetworkDelegate* network_delegate_;
152 // Internal state tracking, for sanity checking.
155 DISALLOW_COPY_AND_ASSIGN(HttpTransactionDelegateImpl);
// Out-of-line HttpFilterContext definitions. Every accessor forwards to the
// owning job (|job_|); several tolerate a detached request by checking
// job_->request() first. NOTE(review): closing braces and some return
// statements are elided in this listing (e.g. GetURL's returns at the
// original lines 173/175).
158 URLRequestHttpJob::HttpFilterContext::HttpFilterContext(URLRequestHttpJob* job)
163 URLRequestHttpJob::HttpFilterContext::~HttpFilterContext() {
166 bool URLRequestHttpJob::HttpFilterContext::GetMimeType(
167 std::string* mime_type) const {
168 return job_->GetMimeType(mime_type);
171 bool URLRequestHttpJob::HttpFilterContext::GetURL(GURL* gurl) const {
// Guard: the job's request may already be detached/NULL.
172 if (!job_->request())
174 *gurl = job_->request()->url();
// Falls back to a null base::Time when the request is gone.
178 base::Time URLRequestHttpJob::HttpFilterContext::GetRequestTime() const {
179 return job_->request() ? job_->request()->request_time() : base::Time();
182 bool URLRequestHttpJob::HttpFilterContext::IsCachedContent() const {
183 return job_->is_cached_content_;
186 bool URLRequestHttpJob::HttpFilterContext::IsDownload() const {
187 return (job_->request_info_.load_flags & LOAD_IS_DOWNLOAD) != 0;
// Used when an explicit HTTP header proves the response is not SDCH-encoded
// after all; the flag must have been set for this to be called (DCHECK).
190 void URLRequestHttpJob::HttpFilterContext::ResetSdchResponseToFalse() {
191 DCHECK(job_->sdch_dictionary_advertised_);
192 job_->sdch_dictionary_advertised_ = false;
195 bool URLRequestHttpJob::HttpFilterContext::IsSdchResponse() const {
196 return job_->sdch_dictionary_advertised_;
199 int64 URLRequestHttpJob::HttpFilterContext::GetByteReadCount() const {
200 return job_->filter_input_byte_count();
203 int URLRequestHttpJob::HttpFilterContext::GetResponseCode() const {
204 return job_->GetResponseCode();
207 void URLRequestHttpJob::HttpFilterContext::RecordPacketStats(
208 StatisticSelector statistic) const {
209 job_->RecordPacketStats(statistic);
212 // TODO(darin): make sure the port blocking code is not lost
// Factory entry point for http/https jobs. Returns an error job when the
// context lacks a transaction factory, a 307 redirect job when HSTS upgrades
// the URL, and otherwise a new URLRequestHttpJob. Ownership of the returned
// job passes to the caller.
214 URLRequestJob* URLRequestHttpJob::Factory(URLRequest* request,
215 NetworkDelegate* network_delegate,
216 const std::string& scheme) {
217 DCHECK(scheme == "http" || scheme == "https");
219 if (!request->context()->http_transaction_factory()) {
220 NOTREACHED() << "requires a valid context";
221 return new URLRequestErrorJob(
222 request, network_delegate, ERR_INVALID_ARGUMENT);
// NOTE(review): the declaration of |redirect_url| (original line ~225) is
// elided from this listing.
226 if (request->GetHSTSRedirect(&redirect_url)) {
227 return new URLRequestRedirectJob(
228 request, network_delegate, redirect_url,
229 // Use status code 307 to preserve the method, so POST requests work.
230 URLRequestRedirectJob::REDIRECT_307_TEMPORARY_REDIRECT);
232 return new URLRequestHttpJob(request,
234 request->context()->http_user_agent_settings());
// Constructor: initializes all members to their idle defaults, binds the
// start/headers callbacks to |this| (Unretained is safe because the callbacks
// are owned by this object), and registers the URL with the throttler manager
// when one is present on the context.
237 URLRequestHttpJob::URLRequestHttpJob(
239 NetworkDelegate* network_delegate,
240 const HttpUserAgentSettings* http_user_agent_settings)
241 : URLRequestJob(request, network_delegate),
242 priority_(DEFAULT_PRIORITY),
243 response_info_(NULL),
244 response_cookies_save_index_(0),
245 proxy_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
246 server_auth_state_(AUTH_STATE_DONT_NEED_AUTH),
247 start_callback_(base::Bind(&URLRequestHttpJob::OnStartCompleted,
248 base::Unretained(this))),
249 notify_before_headers_sent_callback_(
250 base::Bind(&URLRequestHttpJob::NotifyBeforeSendHeadersCallback,
251 base::Unretained(this))),
252 read_in_progress_(false),
253 throttling_entry_(NULL),
254 sdch_dictionary_advertised_(false),
255 sdch_test_activated_(false),
256 sdch_test_control_(false),
257 is_cached_content_(false),
258 request_creation_time_(),
259 packet_timing_enabled_(false),
261 bytes_observed_in_packets_(0),
262 request_time_snapshot_(),
263 final_packet_time_(),
264 filter_context_(new HttpFilterContext(this)),
266 on_headers_received_callback_(
267 base::Bind(&URLRequestHttpJob::OnHeadersReceivedCallback,
268 base::Unretained(this))),
269 awaiting_callback_(false),
270 http_transaction_delegate_(
271 new HttpTransactionDelegateImpl(request, network_delegate)),
272 http_user_agent_settings_(http_user_agent_settings) {
// NOTE(review): the `if (manager)` guard around RegisterRequestUrl (original
// line ~274) is elided from this listing.
273 URLRequestThrottlerManager* manager = request->context()->throttler_manager();
275 throttling_entry_ = manager->RegisterRequestUrl(request->url());
// Destructor: records SDCH experiment statistics for non-cached responses,
// kicks off a deferred SDCH dictionary fetch if one was suggested by the
// response, and marks the request ABORTED for timing bookkeeping.
280 URLRequestHttpJob::~URLRequestHttpJob() {
// A pending delegate callback at destruction time would mean a use-after-free
// later; crash loudly instead.
281 CHECK(!awaiting_callback_);
283 DCHECK(!sdch_test_control_ || !sdch_test_activated_);
284 if (!is_cached_content_) {
285 if (sdch_test_control_)
286 RecordPacketStats(FilterContext::SDCH_EXPERIMENT_HOLDBACK);
287 if (sdch_test_activated_)
288 RecordPacketStats(FilterContext::SDCH_EXPERIMENT_DECODE);
290 // Make sure SDCH filters are told to emit histogram data while
291 // filter_context_ is still alive.
294 if (sdch_dictionary_url_.is_valid()) {
295 // Prior to reaching the destructor, request_ has been set to a NULL
296 // pointer, so request_->url() is no longer valid in the destructor, and we
297 // use an alternate copy |request_info_.url|.
298 SdchManager* manager = SdchManager::Global();
299 // To be extra safe, since this is a "different time" from when we decided
300 // to get the dictionary, we'll validate that an SdchManager is available.
301 // At shutdown time, care is taken to be sure that we don't delete this
302 // globally useful instance "too soon," so this check is just defensive
303 // coding to assure that IF the system is shutting down, we don't have any
304 // problem if the manager was deleted ahead of time.
305 if (manager) // Defensive programming.
306 manager->FetchDictionary(request_info_.url, sdch_dictionary_url_);
308 DoneWithRequest(ABORTED);
// Stores the new priority and propagates it to the live transaction.
// NOTE(review): the guard line between 312 and 314 (presumably
// `if (transaction_)`) is elided from this listing — confirm against the
// original file.
311 void URLRequestHttpJob::SetPriority(RequestPriority priority) {
312 priority_ = priority;
314 transaction_->SetPriority(priority_);
// Kicks off the job: snapshots url/method/load_flags from the URLRequest into
// |request_info_|, computes the initial privacy-mode decision, sanitizes the
// Referer header, fills in a default User-Agent, and then proceeds through
// the cookie-loading path (AddCookieHeaderAndStart) toward starting the
// transaction.
317 void URLRequestHttpJob::Start() {
318 DCHECK(!transaction_.get());
320 // URLRequest::SetReferrer ensures that we do not send username and password
321 // fields in the referrer.
322 GURL referrer(request_->referrer());
324 request_info_.url = request_->url();
325 request_info_.method = request_->method();
326 request_info_.load_flags = request_->load_flags();
327 // Enable privacy mode if cookie settings or flags tell us not send or
329 bool enable_privacy_mode =
330 (request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES) ||
331 (request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) ||
332 CanEnablePrivacyMode();
333 // Privacy mode could still be disabled in OnCookiesLoaded if we are going
334 // to send previously saved cookies.
335 request_info_.privacy_mode = enable_privacy_mode ?
336 kPrivacyModeEnabled : kPrivacyModeDisabled;
338 // Strip Referer from request_info_.extra_headers to prevent, e.g., plugins
339 // from overriding headers that are controlled using other means. Otherwise a
340 // plugin could set a referrer although sending the referrer is inhibited.
341 request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kReferer);
343 // Our consumer should have made sure that this is a safe referrer. See for
344 // instance WebCore::FrameLoader::HideReferrer.
345 if (referrer.is_valid()) {
346 request_info_.extra_headers.SetHeader(HttpRequestHeaders::kReferer,
// Only set User-Agent if the caller did not already supply one.
350 request_info_.extra_headers.SetHeaderIfMissing(
351 HttpRequestHeaders::kUserAgent,
352 http_user_agent_settings_ ?
353 http_user_agent_settings_->GetUserAgent(request_->url()) :
357 AddCookieHeaderAndStart();
// Cancels the job: detaches the transaction delegate, and if a transaction
// exists, invalidates pending weak-pointer callbacks and tears the
// transaction down before delegating to the base class.
360 void URLRequestHttpJob::Kill() {
361 http_transaction_delegate_->OnDetachRequest();
363 if (!transaction_.get())
366 weak_factory_.InvalidateWeakPtrs();
367 DestroyTransaction();
368 URLRequestJob::Kill();
// Called when response headers are available: caches the response info,
// updates throttling state for network (non-cached) responses, processes
// HSTS/HPKP security headers, records any SDCH Get-Dictionary suggestion,
// auto-restarts for auth when possible, and finally notifies the base class.
371 void URLRequestHttpJob::NotifyHeadersComplete() {
372 DCHECK(!response_info_);
374 response_info_ = transaction_->GetResponseInfo();
376 // Save boolean, as we'll need this info at destruction time, and filters may
377 // also need this info.
378 is_cached_content_ = response_info_->was_cached;
380 if (!is_cached_content_ && throttling_entry_.get()) {
381 URLRequestThrottlerHeaderAdapter response_adapter(GetResponseHeaders());
382 throttling_entry_->UpdateWithResponse(request_info_.url.host(),
386 // The ordering of these calls is not important.
387 ProcessStrictTransportSecurityHeader();
388 ProcessPublicKeyPinsHeader();
390 if (SdchManager::Global() &&
391 SdchManager::Global()->IsInSupportedDomain(request_->url())) {
392 const std::string name = "Get-Dictionary";
393 std::string url_text;
// NOTE(review): the declaration of |iter| (used at line 401) is elided from
// this listing.
395 // TODO(jar): We need to not fetch dictionaries the first time they are
396 // seen, but rather wait until we can justify their usefulness.
397 // For now, we will only fetch the first dictionary, which will at least
398 // require multiple suggestions before we get additional ones for this site.
399 // Eventually we should wait until a dictionary is requested several times
400 // before we even download it (so that we don't waste memory or bandwidth).
401 if (GetResponseHeaders()->EnumerateHeader(&iter, name, &url_text)) {
402 // request_->url() won't be valid in the destructor, so we use an
404 DCHECK_EQ(request_->url(), request_info_.url);
405 // Resolve suggested URL relative to request url.
406 sdch_dictionary_url_ = request_info_.url.Resolve(url_text);
410 // The HTTP transaction may be restarted several times for the purposes
411 // of sending authorization information. Each time it restarts, we get
412 // notified of the headers completion so that we can update the cookie store.
413 if (transaction_->IsReadyToRestartForAuth()) {
414 DCHECK(!response_info_->auth_challenge.get());
415 // TODO(battre): This breaks the webrequest API for
416 // URLRequestTestHTTP.BasicAuthWithCookies
417 // where OnBeforeSendHeaders -> OnSendHeaders -> OnBeforeSendHeaders
419 RestartTransactionWithAuth(AuthCredentials());
423 URLRequestJob::NotifyHeadersComplete();
// Records end-of-request timing/stats (FINISHED) before forwarding the final
// status to the base class.
426 void URLRequestHttpJob::NotifyDone(const URLRequestStatus& status) {
427 DoneWithRequest(FINISHED);
428 URLRequestJob::NotifyDone(status);
// Tears down the transaction mid-flight: records ABORTED stats, releases the
// transaction, and clears response state so a later restart begins clean.
431 void URLRequestHttpJob::DestroyTransaction() {
432 DCHECK(transaction_.get());
434 DoneWithRequest(ABORTED);
435 transaction_.reset();
// |response_info_| pointed into the transaction that was just destroyed.
436 response_info_ = NULL;
437 receive_headers_end_ = base::TimeTicks();
// Gives the NetworkDelegate a chance to modify/block the request headers
// before the transaction starts. Synchronous results go straight to
// MaybeStartTransactionInternal; ERR_IO_PENDING defers to the bound callback.
// Without a delegate, starts the transaction directly.
440 void URLRequestHttpJob::StartTransaction() {
441 if (network_delegate()) {
// NOTE(review): an OnCallToDelegate()-style bookkeeping line (original ~442)
// is elided from this listing.
443 int rv = network_delegate()->NotifyBeforeSendHeaders(
444 request_, notify_before_headers_sent_callback_,
445 &request_info_.extra_headers);
446 // If an extension blocks the request, we rely on the callback to
447 // MaybeStartTransactionInternal().
448 if (rv == ERR_IO_PENDING)
450 MaybeStartTransactionInternal(rv);
453 StartTransactionInternal();
// Completion callback for NetworkDelegate::NotifyBeforeSendHeaders; simply
// resumes the start sequence with the delegate's result.
456 void URLRequestHttpJob::NotifyBeforeSendHeadersCallback(int result) {
457 // Check that there are no callbacks to already canceled requests.
458 DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());
460 MaybeStartTransactionInternal(result);
// Continues after the before-send-headers delegate call: on success starts
// the transaction; on failure logs a CANCELLED event attributed to the
// delegate and reports the start error.
463 void URLRequestHttpJob::MaybeStartTransactionInternal(int result) {
464 OnCallToDelegateComplete();
// NOTE(review): the `if (result == OK)` / `else` structure around these lines
// (original 465/467) is elided from this listing.
466 StartTransactionInternal();
468 std::string source("delegate");
469 request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
470 NetLog::StringCallback("source", &source));
472 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
// Actually starts (or auth-restarts) the HTTP transaction. An existing
// transaction is restarted with |auth_credentials_|; otherwise a new one is
// created from the context's factory and started unless the throttler rejects
// the request (ERR_TEMPORARILY_THROTTLED). A synchronous result is re-posted
// to the message loop so the URLRequest delegate is always notified async.
476 void URLRequestHttpJob::StartTransactionInternal() {
477 // NOTE: This method assumes that request_info_ is already setup properly.
479 // If we already have a transaction, then we should restart the transaction
480 // with auth provided by auth_credentials_.
// NOTE(review): the declaration of |rv| (original ~482) is elided from this
// listing.
484 if (network_delegate()) {
485 network_delegate()->NotifySendHeaders(
486 request_, request_info_.extra_headers);
489 if (transaction_.get()) {
490 rv = transaction_->RestartWithAuth(auth_credentials_, start_callback_);
// Credentials are single-use; clear them after handing off.
491 auth_credentials_ = AuthCredentials();
493 DCHECK(request_->context()->http_transaction_factory());
495 rv = request_->context()->http_transaction_factory()->CreateTransaction(
496 priority_, &transaction_, http_transaction_delegate_.get());
498 if (!throttling_entry_.get() ||
499 !throttling_entry_->ShouldRejectRequest(*request_)) {
500 rv = transaction_->Start(
501 &request_info_, start_callback_, request_->net_log());
502 start_time_ = base::TimeTicks::Now();
504 // Special error code for the exponential back-off module.
505 rv = ERR_TEMPORARILY_THROTTLED;
510 if (rv == ERR_IO_PENDING)
513 // The transaction started synchronously, but we need to notify the
514 // URLRequest delegate via the message loop.
515 base::MessageLoop::current()->PostTask(
517 base::Bind(&URLRequestHttpJob::OnStartCompleted,
518 weak_factory_.GetWeakPtr(), rv));
// Fills in Accept-Encoding (with optional SDCH advertisement and the
// Avail-Dictionary header), runs the 1% SDCH holdback experiment selection,
// and adds a default Accept-Language — all only when the caller has not
// already supplied the corresponding headers.
521 void URLRequestHttpJob::AddExtraHeaders() {
522 // Supply Accept-Encoding field only if it is not already provided.
523 // It should be provided IF the content is known to have restrictions on
524 // potential encoding, such as streaming multi-media.
525 // For details see bug 47381.
526 // TODO(jar, enal): jpeg files etc. should set up a request header if
527 // possible. Right now it is done only by buffered_resource_loader and
528 // simple_data_source.
529 if (!request_info_.extra_headers.HasHeader(
530 HttpRequestHeaders::kAcceptEncoding)) {
531 bool advertise_sdch = SdchManager::Global() &&
532 SdchManager::Global()->IsInSupportedDomain(request_->url());
533 std::string avail_dictionaries;
534 if (advertise_sdch) {
535 SdchManager::Global()->GetAvailDictionaryList(request_->url(),
536 &avail_dictionaries);
538 // The AllowLatencyExperiment() is only true if we've successfully done a
539 // full SDCH compression recently in this browser session for this host.
540 // Note that for this path, there might be no applicable dictionaries,
541 // and hence we can't participate in the experiment.
542 if (!avail_dictionaries.empty() &&
543 SdchManager::Global()->AllowLatencyExperiment(request_->url())) {
544 // We are participating in the test (or control), and hence we'll
545 // eventually record statistics via either SDCH_EXPERIMENT_DECODE or
546 // SDCH_EXPERIMENT_HOLDBACK, and we'll need some packet timing data.
547 packet_timing_enabled_ = true;
548 if (base::RandDouble() < .01) {
549 sdch_test_control_ = true; // 1% probability.
550 advertise_sdch = false;
// The remaining 99% are the experiment group and keep SDCH enabled.
552 sdch_test_activated_ = true;
557 // Supply Accept-Encoding headers first so that it is more likely that they
558 // will be in the first transmitted packet. This can sometimes make it
559 // easier to filter and analyze the streams to assure that a proxy has not
560 // damaged these headers. Some proxies deliberately corrupt Accept-Encoding
562 if (!advertise_sdch) {
563 // Tell the server what compression formats we support (other than SDCH).
564 request_info_.extra_headers.SetHeader(
565 HttpRequestHeaders::kAcceptEncoding, "gzip,deflate");
567 // Include SDCH in acceptable list.
568 request_info_.extra_headers.SetHeader(
569 HttpRequestHeaders::kAcceptEncoding, "gzip,deflate,sdch");
570 if (!avail_dictionaries.empty()) {
571 request_info_.extra_headers.SetHeader(
572 kAvailDictionaryHeader,
574 sdch_dictionary_advertised_ = true;
575 // Since we're tagging this transaction as advertising a dictionary,
576 // we'll definitely employ an SDCH filter (or tentative sdch filter)
577 // when we get a response. When done, we'll record histograms via
578 // SDCH_DECODE or SDCH_PASSTHROUGH. Hence we need to record packet
580 packet_timing_enabled_ = true;
585 if (http_user_agent_settings_) {
586 // Only add default Accept-Language if the request didn't have it
588 std::string accept_language =
589 http_user_agent_settings_->GetAcceptLanguage();
590 if (!accept_language.empty()) {
591 request_info_.extra_headers.SetHeaderIfMissing(
592 HttpRequestHeaders::kAcceptLanguage,
// Begins the cookie-attach phase: marks the job IO_PENDING, then — unless
// cookies are suppressed by load flags or there is no cookie store — fetches
// cookies asynchronously (via the CookieMonster fast path when available)
// before starting the transaction. All paths funnel into DoStartTransaction.
598 void URLRequestHttpJob::AddCookieHeaderAndStart() {
599 // No matter what, we want to report our status as IO pending since we will
600 // be notifying our consumer asynchronously via OnStartCompleted.
601 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
603 // If the request was destroyed, then there is no more work to do.
607 CookieStore* cookie_store = request_->context()->cookie_store();
608 if (cookie_store && !(request_info_.load_flags & LOAD_DO_NOT_SEND_COOKIES)) {
609 net::CookieMonster* cookie_monster = cookie_store->GetCookieMonster();
610 if (cookie_monster) {
611 cookie_monster->GetAllCookiesForURLAsync(
613 base::Bind(&URLRequestHttpJob::CheckCookiePolicyAndLoad,
614 weak_factory_.GetWeakPtr()));
// Generic CookieStore: skip the policy pre-check and load directly.
616 CheckCookiePolicyAndLoad(CookieList());
619 DoStartTransaction();
// Asynchronously reads the cookie line (including HttpOnly cookies) for the
// request URL; OnCookiesLoaded attaches it to the request headers.
623 void URLRequestHttpJob::DoLoadCookies() {
624 CookieOptions options;
625 options.set_include_httponly();
626 request_->context()->cookie_store()->GetCookiesWithOptionsAsync(
627 request_->url(), options,
628 base::Bind(&URLRequestHttpJob::OnCookiesLoaded,
629 weak_factory_.GetWeakPtr()));
// Consults CanGetCookies on the candidate list; loads cookies when allowed,
// otherwise proceeds straight to starting the transaction without them.
632 void URLRequestHttpJob::CheckCookiePolicyAndLoad(
633 const CookieList& cookie_list) {
634 if (CanGetCookies(cookie_list))
// NOTE(review): the DoLoadCookies() call and the `else` between lines 634 and
// 637 are elided from this listing.
637 DoStartTransaction();
// Receives the serialized cookie line; attaches it as the Cookie header and,
// since cookies are being sent, turns privacy mode back off.
640 void URLRequestHttpJob::OnCookiesLoaded(const std::string& cookie_line) {
641 if (!cookie_line.empty()) {
642 request_info_.extra_headers.SetHeader(
643 HttpRequestHeaders::kCookie, cookie_line);
644 // Disable privacy mode as we are sending cookies anyway.
645 request_info_.privacy_mode = kPrivacyModeDisabled;
647 DoStartTransaction();
// Final gate before the transaction starts: only proceeds if the request was
// not canceled while cookies were being retrieved.
// NOTE(review): the body of the `if` (AddExtraHeaders()/StartTransaction(),
// original lines ~653-657) is elided from this listing.
650 void URLRequestHttpJob::DoStartTransaction() {
651 // We may have been canceled while retrieving cookies.
652 if (GetStatus().is_success()) {
// After headers are received (and the delegate approved them), extracts
// Set-Cookie values and the response Date, then begins persisting cookies
// one by one via SaveNextCookie. A delegate error short-circuits into
// NotifyStartError.
659 void URLRequestHttpJob::SaveCookiesAndNotifyHeadersComplete(int result) {
660 // End of the call started in OnStartCompleted.
661 OnCallToDelegateComplete();
663 if (result != net::OK) {
664 std::string source("delegate");
665 request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
666 NetLog::StringCallback("source", &source));
667 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
671 DCHECK(transaction_.get());
673 const HttpResponseInfo* response_info = transaction_->GetResponseInfo();
674 DCHECK(response_info);
676 response_cookies_.clear();
677 response_cookies_save_index_ = 0;
679 FetchResponseCookies(&response_cookies_);
// The server Date header feeds cookie expiry clock-skew correction; fall
// back to a null Time when absent.
681 if (!GetResponseHeaders()->GetDateValue(&response_date_))
682 response_date_ = base::Time();
684 // Now, loop over the response cookies, and attempt to persist each.
688 // If the save occurs synchronously, SaveNextCookie will loop and save the next
689 // cookie. If the save is deferred, the callback is responsible for continuing
690 // to iterate through the cookies.
691 // TODO(erikwright): Modify the CookieStore API to indicate via return value
692 // whether it completed synchronously or asynchronously.
693 // See http://crbug.com/131066.
// Two ref-counted booleans coordinate this method with OnCookieSaved so each
// can tell whether the other ran synchronously (see the comment block above
// OnCookieSaved).
694 void URLRequestHttpJob::SaveNextCookie() {
695 // No matter what, we want to report our status as IO pending since we will
696 // be notifying our consumer asynchronously via OnStartCompleted.
697 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
699 // Used to communicate with the callback. See the implementation of
701 scoped_refptr<SharedBoolean> callback_pending = new SharedBoolean(false);
702 scoped_refptr<SharedBoolean> save_next_cookie_running =
703 new SharedBoolean(true);
// Only persist cookies when allowed by load flags and a store exists.
705 if (!(request_info_.load_flags & LOAD_DO_NOT_SAVE_COOKIES) &&
706 request_->context()->cookie_store() &&
707 response_cookies_.size() > 0) {
708 CookieOptions options;
709 options.set_include_httponly();
710 options.set_server_time(response_date_);
712 net::CookieStore::SetCookiesCallback callback(
713 base::Bind(&URLRequestHttpJob::OnCookieSaved,
714 weak_factory_.GetWeakPtr(),
715 save_next_cookie_running,
718 // Loop through the cookies as long as SetCookieWithOptionsAsync completes
720 while (!callback_pending->data &&
721 response_cookies_save_index_ < response_cookies_.size()) {
// NOTE(review): the `if (CanSetCookie(...))` header of this condition
// (original line ~722) is elided from this listing.
723 response_cookies_[response_cookies_save_index_], &options)) {
724 callback_pending->data = true;
725 request_->context()->cookie_store()->SetCookieWithOptionsAsync(
726 request_->url(), response_cookies_[response_cookies_save_index_],
729 ++response_cookies_save_index_;
733 save_next_cookie_running->data = false;
// If no save is outstanding, the cookie phase is finished: reset state,
// clear IO_PENDING, and surface the headers to the consumer.
735 if (!callback_pending->data) {
736 response_cookies_.clear();
737 response_cookies_save_index_ = 0;
738 SetStatus(URLRequestStatus()); // Clear the IO_PENDING status
739 NotifyHeadersComplete();
744 // |save_next_cookie_running| is true when the callback is bound and set to
745 // false when SaveNextCookie exits, allowing the callback to determine if the
746 // save occurred synchronously or asynchronously.
747 // |callback_pending| is false when the callback is invoked and will be set to
748 // true by the callback, allowing SaveNextCookie to detect whether the save
749 // occurred synchronously.
750 // See SaveNextCookie() for more information.
751 void URLRequestHttpJob::OnCookieSaved(
752 scoped_refptr<SharedBoolean> save_next_cookie_running,
753 scoped_refptr<SharedBoolean> callback_pending,
754 bool cookie_status) {
755 callback_pending->data = false;
757 // If we were called synchronously, return.
758 if (save_next_cookie_running->data) {
762 // We were called asynchronously, so trigger the next save.
763 // We may have been canceled within OnSetCookie.
// NOTE(review): the SaveNextCookie()/NotifyCanceled branch bodies (original
// lines ~765-768) are elided from this listing.
764 if (GetStatus().is_success()) {
// Collects every Set-Cookie header value from the response into |cookies|.
// NOTE(review): the declarations of |iter| and |value| (original lines
// ~774-775) are elided from this listing.
771 void URLRequestHttpJob::FetchResponseCookies(
772 std::vector<std::string>* cookies) {
773 const std::string name = "Set-Cookie";
777 HttpResponseHeaders* headers = GetResponseHeaders();
778 while (headers->EnumerateHeader(&iter, name, &value)) {
780 cookies->push_back(value);
784 // NOTE: |ProcessStrictTransportSecurityHeader| and
785 // |ProcessPublicKeyPinsHeader| have very similar structures, by design.
// Feeds the first Strict-Transport-Security response header into the
// TransportSecurityState, but only for error-free HTTPS responses.
786 void URLRequestHttpJob::ProcessStrictTransportSecurityHeader() {
787 DCHECK(response_info_);
788 TransportSecurityState* security_state =
789 request_->context()->transport_security_state();
790 const SSLInfo& ssl_info = response_info_->ssl_info;
792 // Only accept HSTS headers on HTTPS connections that have no
793 // certificate errors.
// NOTE(review): the tail of this condition and its early return (original
// lines ~795-796) are elided from this listing.
794 if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) ||
798 // http://tools.ietf.org/html/draft-ietf-websec-strict-transport-sec:
800 // If a UA receives more than one STS header field in a HTTP response
801 // message over secure transport, then the UA MUST process only the
802 // first such header field.
803 HttpResponseHeaders* headers = GetResponseHeaders();
805 if (headers->EnumerateHeader(NULL, "Strict-Transport-Security", &value))
806 security_state->AddHSTSHeader(request_info_.url.host(), value);
// Mirror of ProcessStrictTransportSecurityHeader for Public-Key-Pins: the
// first HPKP header of an error-free HTTPS response is handed to the
// TransportSecurityState along with the connection's SSLInfo.
809 void URLRequestHttpJob::ProcessPublicKeyPinsHeader() {
810 DCHECK(response_info_);
811 TransportSecurityState* security_state =
812 request_->context()->transport_security_state();
813 const SSLInfo& ssl_info = response_info_->ssl_info;
815 // Only accept HPKP headers on HTTPS connections that have no
816 // certificate errors.
// NOTE(review): the tail of this condition and its early return (original
// lines ~818-819) are elided from this listing.
817 if (!ssl_info.is_valid() || IsCertStatusError(ssl_info.cert_status) ||
821 // http://tools.ietf.org/html/draft-ietf-websec-key-pinning:
823 // If a UA receives more than one PKP header field in an HTTP
824 // response message over secure transport, then the UA MUST process
825 // only the first such header field.
826 HttpResponseHeaders* headers = GetResponseHeaders();
828 if (headers->EnumerateHeader(NULL, "Public-Key-Pins", &value))
829 security_state->AddHPKPHeader(request_info_.url.host(), value, ssl_info);
// Transaction-start completion handler. Dispatches on |result|: reports
// pinning violations to the fraudulent-certificate reporter, lets the
// NetworkDelegate inspect/override received headers (possibly async), routes
// certificate errors to NotifySSLCertificateError (fatal vs overridable
// decided via TransportSecurityState), handles client-cert requests, and
// otherwise surfaces a start error.
832 void URLRequestHttpJob::OnStartCompleted(int result) {
835 // If the request was destroyed, then there is no more work to do.
839 // If the transaction was destroyed, then the job was cancelled, and
840 // we can just ignore this notification.
841 if (!transaction_.get())
844 receive_headers_end_ = base::TimeTicks::Now();
846 // Clear the IO_PENDING status
847 SetStatus(URLRequestStatus());
849 const URLRequestContext* context = request_->context();
// A pin violation with response info available is reported out-of-band even
// though the request itself fails.
851 if (result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN &&
852 transaction_->GetResponseInfo() != NULL) {
853 FraudulentCertificateReporter* reporter =
854 context->fraudulent_certificate_reporter();
855 if (reporter != NULL) {
856 const SSLInfo& ssl_info = transaction_->GetResponseInfo()->ssl_info;
857 bool sni_available = SSLConfigService::IsSNIAvailable(
858 context->ssl_config_service());
859 const std::string& host = request_->url().host();
861 reporter->SendReport(host, ssl_info, sni_available);
// NOTE(review): the `if (result == OK)` header for the success path (original
// line ~865) is elided from this listing.
866 scoped_refptr<HttpResponseHeaders> headers = GetResponseHeaders();
867 if (network_delegate()) {
868 // Note that |this| may not be deleted until
869 // |on_headers_received_callback_| or
870 // |NetworkDelegate::URLRequestDestroyed()| has been called.
872 int error = network_delegate()->NotifyHeadersReceived(
874 on_headers_received_callback_,
876 &override_response_headers_);
877 if (error != net::OK) {
878 if (error == net::ERR_IO_PENDING) {
879 awaiting_callback_ = true;
881 std::string source("delegate");
882 request_->net_log().AddEvent(NetLog::TYPE_CANCELLED,
883 NetLog::StringCallback("source",
885 OnCallToDelegateComplete();
886 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, error));
// No delegate (or delegate returned OK synchronously): continue directly.
892 SaveCookiesAndNotifyHeadersComplete(net::OK);
893 } else if (IsCertificateError(result)) {
894 // We encountered an SSL certificate error.
895 if (result == ERR_SSL_WEAK_SERVER_EPHEMERAL_DH_KEY ||
896 result == ERR_SSL_PINNED_KEY_NOT_IN_CERT_CHAIN) {
897 // These are hard failures. They're handled separately and don't have
898 // the correct cert status, so set it here.
899 SSLInfo info(transaction_->GetResponseInfo()->ssl_info);
900 info.cert_status = MapNetErrorToCertStatus(result);
901 NotifySSLCertificateError(info, true);
903 // Maybe overridable, maybe not. Ask the delegate to decide.
904 TransportSecurityState::DomainState domain_state;
905 const URLRequestContext* context = request_->context();
906 const bool fatal = context->transport_security_state() &&
907 context->transport_security_state()->GetDomainState(
908 request_info_.url.host(),
909 SSLConfigService::IsSNIAvailable(context->ssl_config_service()),
911 domain_state.ShouldSSLErrorsBeFatal();
912 NotifySSLCertificateError(
913 transaction_->GetResponseInfo()->ssl_info, fatal);
915 } else if (result == ERR_SSL_CLIENT_AUTH_CERT_NEEDED) {
916 NotifyCertificateRequested(
917 transaction_->GetResponseInfo()->cert_request_info.get());
// Any other error simply fails the start.
919 NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result));
// Async completion of NetworkDelegate::NotifyHeadersReceived; clears the
// pending-callback flag and resumes the cookie-save/headers-complete path.
923 void URLRequestHttpJob::OnHeadersReceivedCallback(int result) {
924 awaiting_callback_ = false;
926 // Check that there are no callbacks to already canceled requests.
927 DCHECK_NE(URLRequestStatus::CANCELED, GetStatus().status());
929 SaveCookiesAndNotifyHeadersComplete(result);
// Read completion handler: optionally patches up a mismatched
// Content-Length result, maps the outcome (done / error / more data) to the
// job status, and forwards the byte count to the consumer.
932 void URLRequestHttpJob::OnReadCompleted(int result) {
933 read_in_progress_ = false;
935 if (ShouldFixMismatchedContentLength(result))
// NOTE(review): the `result = OK` fix-up and the `if (result == OK)` branch
// header (original lines ~936-938) are elided from this listing.
939 NotifyDone(URLRequestStatus());
940 } else if (result < 0) {
941 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, result));
943 // Clear the IO_PENDING status
944 SetStatus(URLRequestStatus());
947 NotifyReadComplete(result);
// Restarts the existing transaction with the supplied credentials (consumed
// later by StartTransactionInternal). Response-side state is reset and the
// Cookie header is rebuilt, since the 401/407 response may have updated the
// cookie store.
950 void URLRequestHttpJob::RestartTransactionWithAuth(
951 const AuthCredentials& credentials) {
952 auth_credentials_ = credentials;
954 // These will be reset in OnStartCompleted.
955 response_info_ = NULL;
956 receive_headers_end_ = base::TimeTicks();
957 response_cookies_.clear();
961 // Update the cookies, since the cookie store may have been updated from the
962 // headers in the 401/407. Since cookies were already appended to
963 // extra_headers, we need to strip them out before adding them again.
964 request_info_.extra_headers.RemoveHeader(HttpRequestHeaders::kCookie);
966 AddCookieHeaderAndStart();
// Attaches the request body stream; only legal before the transaction starts.
969 void URLRequestHttpJob::SetUpload(UploadDataStream* upload) {
970 DCHECK(!transaction_.get()) << "cannot change once started";
971 request_info_.upload_data_stream = upload;
// Copies caller-supplied headers into the request; only legal before the
// transaction starts.
974 void URLRequestHttpJob::SetExtraRequestHeaders(
975 const HttpRequestHeaders& headers) {
976 DCHECK(!transaction_.get()) << "cannot change once started";
977 request_info_.extra_headers.CopyFrom(headers);
// Reports the transaction's load state, or idle if no transaction exists.
980 LoadState URLRequestHttpJob::GetLoadState() const {
981 return transaction_.get() ?
982 transaction_->GetLoadState() : LOAD_STATE_IDLE;
// Reports upload progress from the transaction, or a default-constructed
// (zero) UploadProgress if no transaction exists.
985 UploadProgress URLRequestHttpJob::GetUploadProgress() const {
986 return transaction_.get() ?
987 transaction_->GetUploadProgress() : UploadProgress();
// Extracts the MIME type from the (possibly overridden) response headers.
990 bool URLRequestHttpJob::GetMimeType(std::string* mime_type) const {
991 DCHECK(transaction_.get());
996 return GetResponseHeaders()->GetMimeType(mime_type);
// Extracts the charset from the response headers; requires a response.
999 bool URLRequestHttpJob::GetCharset(std::string* charset) {
1000 DCHECK(transaction_.get());
1002 if (!response_info_)
1005 return GetResponseHeaders()->GetCharset(charset);
// Copies the cached response info into |info|, substituting
// override_response_headers_ when one has been installed.
1008 void URLRequestHttpJob::GetResponseInfo(HttpResponseInfo* info) {
1010 DCHECK(transaction_.get());
1012 if (response_info_) {
1013 *info = *response_info_;
1014 if (override_response_headers_.get())
1015 info->headers = override_response_headers_;
// Fills |load_timing_info| from the transaction and stamps in the job's
// own receive_headers_end_ value.
1019 void URLRequestHttpJob::GetLoadTimingInfo(
1020 LoadTimingInfo* load_timing_info) const {
1021 // If haven't made it far enough to receive any headers, don't return
1022 // anything. This makes for more consistent behavior in the case of errors.
1023 if (!transaction_ || receive_headers_end_.is_null())
1025 if (transaction_->GetLoadTimingInfo(load_timing_info))
1026 load_timing_info->receive_headers_end = receive_headers_end_;
// Re-extracts the Set-Cookie values from the response headers into |cookies|.
1029 bool URLRequestHttpJob::GetResponseCookies(std::vector<std::string>* cookies) {
1030 DCHECK(transaction_.get());
1032 if (!response_info_)
1035 // TODO(darin): Why are we extracting response cookies again? Perhaps we
1036 // should just leverage response_cookies_.
1039 FetchResponseCookies(cookies);
// Returns the HTTP status code from the response headers; requires a
// response to be present.
1043 int URLRequestHttpJob::GetResponseCode() const {
1044 DCHECK(transaction_.get());
1046 if (!response_info_)
1049 return GetResponseHeaders()->response_code();
// Builds the content-decoding filter chain (gzip/deflate/SDCH) from the
// Content-Encoding response headers, with SDCH sanity handling.
// Returns NULL when no decoding is needed.
1052 Filter* URLRequestHttpJob::SetupFilter() const {
1053 DCHECK(transaction_.get());
1054 if (!response_info_)
1057 std::vector<Filter::FilterType> encoding_types;
1058 std::string encoding_type;
1059 HttpResponseHeaders* headers = GetResponseHeaders();
// Collect every Content-Encoding value, mapped to a filter type.
1061 while (headers->EnumerateHeader(&iter, "Content-Encoding", &encoding_type)) {
1062 encoding_types.push_back(Filter::ConvertEncodingToType(encoding_type));
1065 if (filter_context_->IsSdchResponse()) {
1066 // We are wary of proxies that discard or damage SDCH encoding. If a server
1067 // explicitly states that this is not SDCH content, then we can correct our
1068 // assumption that this is an SDCH response, and avoid the need to recover
1069 // as though the content is corrupted (when we discover it is not SDCH
1071 std::string sdch_response_status;
// "X-Sdch-Encode: 0" is the server's explicit claim that the body is
// not SDCH-encoded despite the advertised dictionary.
1073 while (headers->EnumerateHeader(&iter, "X-Sdch-Encode",
1074 &sdch_response_status)) {
1075 if (sdch_response_status == "0") {
1076 filter_context_->ResetSdchResponseToFalse();
1082 // Even if encoding types are empty, there is a chance that we need to add
1083 // some decoding, as some proxies strip encoding completely. In such cases,
1084 // we may need to add (for example) SDCH filtering (when the context suggests
1085 // it is appropriate).
1086 Filter::FixupEncodingTypes(*filter_context_, &encoding_types);
1088 return !encoding_types.empty()
1089 ? Filter::Factory(encoding_types, *filter_context_) : NULL;
// Decides whether redirecting to |location| is allowed: http/https always,
// other schemes only if the job factory vouches for them.
1092 bool URLRequestHttpJob::IsSafeRedirect(const GURL& location) {
1093 // HTTP is always safe.
1094 // TODO(pauljensen): Remove once crbug.com/146591 is fixed.
1095 if (location.is_valid() &&
1096 (location.scheme() == "http" || location.scheme() == "https")) {
1099 // Query URLRequestJobFactory as to whether |location| would be safe to
1101 return request_->context()->job_factory() &&
1102 request_->context()->job_factory()->IsSafeRedirectTarget(location);
// Returns whether the response demands proxy or server credentials, and
// transitions the corresponding auth state to NEED_AUTH (unless the user
// already canceled that challenge).
1105 bool URLRequestHttpJob::NeedsAuth() {
1106 int code = GetResponseCode();
1110 // Check if we need either Proxy or WWW Authentication. This could happen
1111 // because we either provided no auth info, or provided incorrect info.
1114 if (proxy_auth_state_ == AUTH_STATE_CANCELED)
1116 proxy_auth_state_ = AUTH_STATE_NEED_AUTH;
1119 if (server_auth_state_ == AUTH_STATE_CANCELED)
1121 server_auth_state_ = AUTH_STATE_NEED_AUTH;
// Hands the pending auth challenge to the caller.  Only valid while a
// 401/407 challenge is outstanding, which the DCHECKs enforce.
1127 void URLRequestHttpJob::GetAuthChallengeInfo(
1128 scoped_refptr<AuthChallengeInfo>* result) {
1129 DCHECK(transaction_.get());
1130 DCHECK(response_info_);
1133 DCHECK(proxy_auth_state_ == AUTH_STATE_NEED_AUTH ||
1134 server_auth_state_ == AUTH_STATE_NEED_AUTH);
1135 DCHECK((GetResponseHeaders()->response_code() == HTTP_UNAUTHORIZED) ||
1136 (GetResponseHeaders()->response_code() ==
1137 HTTP_PROXY_AUTHENTICATION_REQUIRED));
1139 *result = response_info_->auth_challenge;
// Accepts credentials for the pending challenge (proxy before server,
// mirroring challenge order) and restarts the transaction with them.
1142 void URLRequestHttpJob::SetAuth(const AuthCredentials& credentials) {
1143 DCHECK(transaction_.get());
1145 // Proxy gets set first, then WWW.
1146 if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
1147 proxy_auth_state_ = AUTH_STATE_HAVE_AUTH;
1149 DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
1150 server_auth_state_ = AUTH_STATE_HAVE_AUTH;
1153 RestartTransactionWithAuth(credentials);
// Called when the consumer declines to supply credentials for a 401/407.
// Marks the pending challenge canceled, clears per-response state, and
// posts OnStartCompleted(OK) so the consumer receives the error page.
1156 void URLRequestHttpJob::CancelAuth() {
1157 // Proxy gets set first, then WWW.
1158 if (proxy_auth_state_ == AUTH_STATE_NEED_AUTH) {
1159 proxy_auth_state_ = AUTH_STATE_CANCELED;
1161 DCHECK_EQ(server_auth_state_, AUTH_STATE_NEED_AUTH);
1162 server_auth_state_ = AUTH_STATE_CANCELED;
1165 // These will be reset in OnStartCompleted.
1166 response_info_ = NULL;
// Reset to the null TimeTicks (was erroneously TimeTicks::Now()): the
// value is being cleared ahead of OnStartCompleted, exactly as in
// RestartTransactionWithAuth(), ContinueWithCertificate() and
// ContinueDespiteLastError(); stamping the current time here would leave
// a bogus "headers received" timestamp in load timing.
1167 receive_headers_end_ = base::TimeTicks();
1168 response_cookies_.clear();
1172 // OK, let the consumer read the error page...
1174 // Because we set the AUTH_STATE_CANCELED flag, NeedsAuth will return false,
1175 // which will cause the consumer to receive OnResponseStarted instead of
1178 // We have to do this via InvokeLater to avoid "recursing" the consumer.
1180 base::MessageLoop::current()->PostTask(
1182 base::Bind(&URLRequestHttpJob::OnStartCompleted,
1183 weak_factory_.GetWeakPtr(), OK));
// Resumes a transaction paused on ERR_SSL_CLIENT_AUTH_CERT_NEEDED with the
// chosen client certificate (which may be NULL to continue without one).
1186 void URLRequestHttpJob::ContinueWithCertificate(
1187 X509Certificate* client_cert) {
1188 DCHECK(transaction_.get());
1190 DCHECK(!response_info_) << "should not have a response yet";
1191 receive_headers_end_ = base::TimeTicks();
1195 // No matter what, we want to report our status as IO pending since we will
1196 // be notifying our consumer asynchronously via OnStartCompleted.
1197 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
1199 int rv = transaction_->RestartWithCertificate(client_cert, start_callback_);
1200 if (rv == ERR_IO_PENDING)
1203 // The transaction started synchronously, but we need to notify the
1204 // URLRequest delegate via the message loop.
1205 base::MessageLoop::current()->PostTask(
1207 base::Bind(&URLRequestHttpJob::OnStartCompleted,
1208 weak_factory_.GetWeakPtr(), rv));
// Resumes a transaction paused on a (recoverable) error, e.g. an SSL
// certificate error the user chose to override.
1211 void URLRequestHttpJob::ContinueDespiteLastError() {
1212 // If the transaction was destroyed, then the job was cancelled.
1213 if (!transaction_.get())
1216 DCHECK(!response_info_) << "should not have a response yet";
1217 receive_headers_end_ = base::TimeTicks();
1221 // No matter what, we want to report our status as IO pending since we will
1222 // be notifying our consumer asynchronously via OnStartCompleted.
1223 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
1225 int rv = transaction_->RestartIgnoringLastError(start_callback_);
1226 if (rv == ERR_IO_PENDING)
1229 // The transaction started synchronously, but we need to notify the
1230 // URLRequest delegate via the message loop.
1231 base::MessageLoop::current()->PostTask(
1233 base::Bind(&URLRequestHttpJob::OnStartCompleted,
1234 weak_factory_.GetWeakPtr(), rv));
// Decides whether a content-length error should be forgiven: true when the
// bytes actually delivered exactly match the advertised Content-Length.
1237 bool URLRequestHttpJob::ShouldFixMismatchedContentLength(int rv) const {
1238 // Some servers send the body compressed, but specify the content length as
1239 // the uncompressed size. Although this violates the HTTP spec we want to
1240 // support it (as IE and FireFox do), but *only* for an exact match.
1241 // See http://crbug.com/79694.
1242 if (rv == net::ERR_CONTENT_LENGTH_MISMATCH ||
1243 rv == net::ERR_INCOMPLETE_CHUNKED_ENCODING) {
1244 if (request_ && request_->response_headers()) {
1245 int64 expected_length = request_->response_headers()->GetContentLength();
1246 VLOG(1) << __FUNCTION__ << "() "
1247 << "\"" << request_->url().spec() << "\""
1248 << " content-length = " << expected_length
1249 << " pre total = " << prefilter_bytes_read()
1250 << " post total = " << postfilter_bytes_read();
1251 if (postfilter_bytes_read() == expected_length) {
// Reads raw (pre-filter) response bytes from the transaction into |buf|.
// A pending read flips read_in_progress_ and reports IO_PENDING; failures
// are routed through NotifyDone().
// NOTE(review): interior lines are missing from this dump (the OK/bytes_read
// handling between the Read() call and the rv checks) — verify upstream.
1260 bool URLRequestHttpJob::ReadRawData(IOBuffer* buf, int buf_size,
1262 DCHECK_NE(buf_size, 0);
1264 DCHECK(!read_in_progress_);
1266 int rv = transaction_->Read(
1268 base::Bind(&URLRequestHttpJob::OnReadCompleted, base::Unretained(this)));
1270 if (ShouldFixMismatchedContentLength(rv))
1276 DoneWithRequest(FINISHED);
1280 if (rv == ERR_IO_PENDING) {
1281 read_in_progress_ = true;
1282 SetStatus(URLRequestStatus(URLRequestStatus::IO_PENDING, 0));
1284 NotifyDone(URLRequestStatus(URLRequestStatus::FAILED, rv));
// Tells the transaction to stop writing this response to the cache.
1290 void URLRequestHttpJob::StopCaching() {
1291 if (transaction_.get())
1292 transaction_->StopCaching();
// Retrieves the headers actually sent on the wire from the transaction.
1295 bool URLRequestHttpJob::GetFullRequestHeaders(
1296 HttpRequestHeaders* headers) const {
1300 return transaction_->GetFullRequestHeaders(headers);
// Signals that the consumer has finished reading the body, then records
// end-of-request bookkeeping.
1303 void URLRequestHttpJob::DoneReading() {
1304 if (transaction_.get())
1305 transaction_->DoneReading();
1306 DoneWithRequest(FINISHED);
// Returns the remote endpoint of the response, or an empty HostPortPair
// if no response has been received.
1309 HostPortPair URLRequestHttpJob::GetSocketAddress() const {
1310 return response_info_ ? response_info_->socket_address : HostPortPair();
// Records the time-to-first-byte histogram once per transaction and
// consumes request_creation_time_ so it cannot be recorded twice.
1313 void URLRequestHttpJob::RecordTimer() {
1314 if (request_creation_time_.is_null()) {
1316 << "The same transaction shouldn't start twice without new timing.";
1320 base::TimeDelta to_start = base::Time::Now() - request_creation_time_;
1321 request_creation_time_ = base::Time();
1323 UMA_HISTOGRAM_MEDIUM_TIMES("Net.HttpTimeToFirstByte", to_start);
// Re-arms the time-to-first-byte timer; complains if the previous value
// was never recorded.
1326 void URLRequestHttpJob::ResetTimer() {
1327 if (!request_creation_time_.is_null()) {
1329 << "The timer was reset before it was recorded.";
1332 request_creation_time_ = base::Time::Now();
// Tracks byte-arrival timing for SDCH statistics: notes the arrival time
// of the newest data and how many pre-filter bytes have been observed.
1335 void URLRequestHttpJob::UpdatePacketReadTimes() {
1336 if (!packet_timing_enabled_)
1339 if (filter_input_byte_count() <= bytes_observed_in_packets_) {
1340 DCHECK_EQ(filter_input_byte_count(), bytes_observed_in_packets_);
1341 return; // No new bytes have arrived.
1344 final_packet_time_ = base::Time::Now();
// Snapshot the request start time on the first observed byte only.
1345 if (!bytes_observed_in_packets_)
1346 request_time_snapshot_ = request_ ? request_->request_time() : base::Time();
1348 bytes_observed_in_packets_ = filter_input_byte_count();
// Emits SDCH UMA histograms (byte counts / durations) for the requested
// statistic, using the packet timing gathered in UpdatePacketReadTimes().
1351 void URLRequestHttpJob::RecordPacketStats(
1352 FilterContext::StatisticSelector statistic) const {
1353 if (!packet_timing_enabled_ || (final_packet_time_ == base::Time()))
1356 base::TimeDelta duration = final_packet_time_ - request_time_snapshot_;
1357 switch (statistic) {
1358 case FilterContext::SDCH_DECODE: {
1359 UMA_HISTOGRAM_CUSTOM_COUNTS("Sdch3.Network_Decode_Bytes_Processed_b",
1360 static_cast<int>(bytes_observed_in_packets_), 500, 100000, 100);
1363 case FilterContext::SDCH_PASSTHROUGH: {
1364 // Despite advertising a dictionary, we handled non-sdch compressed
1369 case FilterContext::SDCH_EXPERIMENT_DECODE: {
1370 UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Decode",
1372 base::TimeDelta::FromMilliseconds(20),
1373 base::TimeDelta::FromMinutes(10), 100);
1376 case FilterContext::SDCH_EXPERIMENT_HOLDBACK: {
1377 UMA_HISTOGRAM_CUSTOM_TIMES("Sdch3.Experiment2_Holdback",
1379 base::TimeDelta::FromMilliseconds(20),
1380 base::TimeDelta::FromMinutes(10), 100);
1389 // The common type of histogram we use for all compression-tracking histograms.
// Expands to a UMA_HISTOGRAM_CUSTOM_COUNTS call under "Net.Compress.<name>"
// with fixed bucket parameters (min 500, max 1000000, 100 buckets).
1390 #define COMPRESSION_HISTOGRAM(name, sample) \
1392 UMA_HISTOGRAM_CUSTOM_COUNTS("Net.Compress." name, sample, \
1393 500, 1000000, 100); \
// Records how often (and how well) downloaded resources were compressed,
// bucketed into SSL / proxied / direct groups, for successful, uncached,
// compressible http(s) responses above a minimum size.
1396 void URLRequestHttpJob::RecordCompressionHistograms() {
1401 if (is_cached_content_ || // Don't record cached content
1402 !GetStatus().is_success() || // Don't record failed content
1403 !IsCompressibleContent() || // Only record compressible content
1404 !prefilter_bytes_read()) // Zero-byte responses aren't useful.
1407 // Miniature requests aren't really compressible. Don't count them.
1408 const int kMinSize = 16;
1409 if (prefilter_bytes_read() < kMinSize)
1412 // Only record for http or https urls.
1413 bool is_http = request_->url().SchemeIs("http");
1414 bool is_https = request_->url().SchemeIs("https");
1415 if (!is_http && !is_https)
// Pre-filter bytes are what came off the network; post-filter bytes are
// after decompression.  A filter being present implies the response was
// served compressed.
1418 int compressed_B = prefilter_bytes_read();
1419 int decompressed_B = postfilter_bytes_read();
1420 bool was_filtered = HasFilter();
1422 // We want to record how often downloaded resources are compressed.
1423 // But, we recognize that different protocols may have different
1424 // properties. So, for each request, we'll put it into one of 3
1427 // Proxies cannot tamper with compression headers with SSL.
1428 // b) Non-SSL, loaded-via-proxy resources
1429 // In this case, we know a proxy might have interfered.
1430 // c) Non-SSL, loaded-without-proxy resources
1431 // In this case, we know there was no explicit proxy. However,
1432 // it is possible that a transparent proxy was still interfering.
1434 // For each group, we record the same 3 histograms.
1438 COMPRESSION_HISTOGRAM("SSL.BytesBeforeCompression", compressed_B);
1439 COMPRESSION_HISTOGRAM("SSL.BytesAfterCompression", decompressed_B);
1441 COMPRESSION_HISTOGRAM("SSL.ShouldHaveBeenCompressed", decompressed_B);
1446 if (request_->was_fetched_via_proxy()) {
1448 COMPRESSION_HISTOGRAM("Proxy.BytesBeforeCompression", compressed_B);
1449 COMPRESSION_HISTOGRAM("Proxy.BytesAfterCompression", decompressed_B);
1451 COMPRESSION_HISTOGRAM("Proxy.ShouldHaveBeenCompressed", decompressed_B);
1457 COMPRESSION_HISTOGRAM("NoProxy.BytesBeforeCompression", compressed_B);
1458 COMPRESSION_HISTOGRAM("NoProxy.BytesAfterCompression", decompressed_B);
1460 COMPRESSION_HISTOGRAM("NoProxy.ShouldHaveBeenCompressed", decompressed_B);
// True when the response MIME type is one we consider compressible
// (JavaScript or a supported non-image type).
1464 bool URLRequestHttpJob::IsCompressibleContent() const {
1465 std::string mime_type;
1466 return GetMimeType(&mime_type) &&
1467 (IsSupportedJavascriptMimeType(mime_type.c_str()) ||
1468 IsSupportedNonImageMimeType(mime_type.c_str()));
// Emits total-time UMA histograms for the job, split by completion cause
// (finished vs. canceled) and cache hit vs. miss, plus prefetch byte
// counts.  Clears start_time_ so this only fires once.
1471 void URLRequestHttpJob::RecordPerfHistograms(CompletionCause reason) {
1472 if (start_time_.is_null())
1475 base::TimeDelta total_time = base::TimeTicks::Now() - start_time_;
1476 UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTime", total_time);
1478 if (reason == FINISHED) {
1479 UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeSuccess", total_time);
1481 UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCancel", total_time);
1484 if (response_info_) {
1485 if (response_info_->was_cached) {
1486 UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeCached", total_time);
1488 UMA_HISTOGRAM_TIMES("Net.HttpJob.TotalTimeNotCached", total_time);
1492 if (request_info_.load_flags & LOAD_PREFETCH && !request_->was_cached())
1493 UMA_HISTOGRAM_COUNTS("Net.Prefetch.PrefilterBytesReadFromNetwork",
1494 prefilter_bytes_read());
// Prevent double-recording on a second call.
1496 start_time_ = base::TimeTicks();
// End-of-request bookkeeping: records perf histograms and, for finished
// requests, the received content length and compression stats.
1499 void URLRequestHttpJob::DoneWithRequest(CompletionCause reason) {
1503 RecordPerfHistograms(reason);
1504 if (reason == FINISHED) {
1505 request_->set_received_response_content_length(prefilter_bytes_read());
1506 RecordCompressionHistograms();
// Returns the effective response headers: the override set (if installed)
// takes precedence over the transaction's own headers.
1510 HttpResponseHeaders* URLRequestHttpJob::GetResponseHeaders() const {
1511 DCHECK(transaction_.get());
1512 DCHECK(transaction_->GetResponseInfo());
1513 return override_response_headers_.get() ?
1514 override_response_headers_.get() :
1515 transaction_->GetResponseInfo()->headers.get();
// The owning URLRequest is going away; stop expecting delegate callbacks.
1518 void URLRequestHttpJob::NotifyURLRequestDestroyed() {
1519 awaiting_callback_ = false;
1522 void URLRequestHttpJob::OnDetachRequest() {
1523 http_transaction_delegate_->OnDetachRequest();