/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <memory>
#include <mutex>
#include <set>
#include <sstream>
#include <thread>

#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>
#include <grpcpp/channel.h>
#include <grpcpp/client_context.h>
#include <grpcpp/create_channel.h>
#include <grpcpp/server.h>
#include <grpcpp/server_builder.h>

#include "src/core/ext/filters/client_channel/parse_address.h"
#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
#include "src/core/ext/filters/client_channel/server_address.h"
#include "src/core/ext/filters/client_channel/service_config.h"
#include "src/core/lib/gpr/env.h"
#include "src/core/lib/gprpp/ref_counted_ptr.h"
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/security/credentials/fake/fake_credentials.h"
#include "src/cpp/client/secure_credentials.h"
#include "src/cpp/server/secure_server_credentials.h"

#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
#include "test/cpp/end2end/test_service_impl.h"

#include "src/proto/grpc/lb/v1/load_balancer.grpc.pb.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"

#include <gmock/gmock.h>
#include <gtest/gtest.h>

// TODO(dgq): Other scenarios in need of testing:
// - Send a serverlist with faulty ip:port addresses (port > 2^16, etc).
// - Test reception of an invalid serverlist.
// - Test against a non-LB server.
// - Random LB server closing the stream unexpectedly.
//
// Findings from end-to-end testing to be covered here:
// - Handling of LB server restarts, including reconnection after backoff
//   retries.
// - Destruction of the load-balanced channel (and therefore of the grpclb
//   instance) while:
//   1) the internal LB call is still active. This should work by virtue
//   of the weak reference the LB call holds. The call should be terminated as
//   part of the grpclb shutdown process.
//   2) the retry timer is active. Again, the weak reference it holds should
//   prevent a premature call to \a glb_destroy.

using std::chrono::system_clock;

using grpc::lb::v1::LoadBalanceRequest;
using grpc::lb::v1::LoadBalanceResponse;
using grpc::lb::v1::LoadBalancer;

namespace grpc {
namespace testing {
namespace {

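// Decorates a service implementation with thread-safe request/response
// counters, so that tests can assert exactly how many RPCs each server saw.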
template <typename ServiceType>
class CountedService : public ServiceType {
 public:
  size_t request_count() {
    std::unique_lock<std::mutex> lock(mu_);
    return request_count_;
  }

  size_t response_count() {
    std::unique_lock<std::mutex> lock(mu_);
    return response_count_;
  }

  void IncreaseResponseCount() {
    std::unique_lock<std::mutex> lock(mu_);
    ++response_count_;
  }
  void IncreaseRequestCount() {
    std::unique_lock<std::mutex> lock(mu_);
    ++request_count_;
  }

  void ResetCounters() {
    std::unique_lock<std::mutex> lock(mu_);
    request_count_ = 0;
    response_count_ = 0;
  }

 protected:
  std::mutex mu_;

 private:
  size_t request_count_ = 0;
  size_t response_count_ = 0;
};

using BackendService = CountedService<TestServiceImpl>;
using BalancerService = CountedService<LoadBalancer::Service>;

const char g_kCallCredsMdKey[] = "Balancer should not ...";
const char g_kCallCredsMdValue[] = "... receive me";

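// Echo backend. Verifies that the per-call credentials metadata (which the
// client should attach to data-plane calls only) actually reaches the
// backend, and records the peer string of every client that connects.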
class BackendServiceImpl : public BackendService {
 public:
  BackendServiceImpl() {}

  Status Echo(ServerContext* context, const EchoRequest* request,
              EchoResponse* response) override {
    // Backend should receive the call credentials metadata.
    auto call_credentials_entry =
        context->client_metadata().find(g_kCallCredsMdKey);
    EXPECT_NE(call_credentials_entry, context->client_metadata().end());
    if (call_credentials_entry != context->client_metadata().end()) {
      EXPECT_EQ(call_credentials_entry->second, g_kCallCredsMdValue);
    }
    IncreaseRequestCount();
    const auto status = TestServiceImpl::Echo(context, request, response);
    IncreaseResponseCount();
    AddClient(context->peer());
    return status;
  }

  void Start() {}

  void Shutdown() {}

  std::set<grpc::string> clients() {
    std::unique_lock<std::mutex> lock(clients_mu_);
    return clients_;
  }

 private:
  void AddClient(const grpc::string& client) {
    std::unique_lock<std::mutex> lock(clients_mu_);
    clients_.insert(client);
  }

  std::mutex mu_;
  std::mutex clients_mu_;
  std::set<grpc::string> clients_;
};

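// Converts a dotted-quad IPv4 literal into the 4-byte, network-order packed
// form expected by the LB protocol's Server.ip_address field.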
grpc::string Ip4ToPackedString(const char* ip_str) {
  struct in_addr ip4;
  GPR_ASSERT(inet_pton(AF_INET, ip_str, &ip4) == 1);
  return grpc::string(reinterpret_cast<const char*>(&ip4), sizeof(ip4));
}

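// Client-side load-report totals, mirroring the ClientStats message from the
// LB protocol. operator+= lets tests merge the reports received by multiple
// balancers into a single aggregate.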
struct ClientStats {
  size_t num_calls_started = 0;
  size_t num_calls_finished = 0;
  size_t num_calls_finished_with_client_failed_to_send = 0;
  size_t num_calls_finished_known_received = 0;
  std::map<grpc::string, size_t> drop_token_counts;

  ClientStats& operator+=(const ClientStats& other) {
    num_calls_started += other.num_calls_started;
    num_calls_finished += other.num_calls_finished;
    num_calls_finished_with_client_failed_to_send +=
        other.num_calls_finished_with_client_failed_to_send;
    num_calls_finished_known_received +=
        other.num_calls_finished_known_received;
    for (const auto& p : other.drop_token_counts) {
      drop_token_counts[p.first] += p.second;
    }
    return *this;
  }

  void Reset() {
    num_calls_started = 0;
    num_calls_finished = 0;
    num_calls_finished_with_client_failed_to_send = 0;
    num_calls_finished_known_received = 0;
    drop_token_counts.clear();
  }
};

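// Fake balancer. BalanceLoad reads the client's initial request, optionally
// replies with an initial response carrying the load-reporting interval,
// streams whatever serverlists the test scheduled via add_response(), and
// then blocks until the test calls NotifyDoneWithServerlists(). If load
// reporting is enabled, it finally reads one client load report and
// accumulates it into client_stats_.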
class BalancerServiceImpl : public BalancerService {
 public:
  using Stream = ServerReaderWriter<LoadBalanceResponse, LoadBalanceRequest>;
  using ResponseDelayPair = std::pair<LoadBalanceResponse, int>;

  explicit BalancerServiceImpl(int client_load_reporting_interval_seconds)
      : client_load_reporting_interval_seconds_(
            client_load_reporting_interval_seconds) {}

  Status BalanceLoad(ServerContext* context, Stream* stream) override {
    // Balancer shouldn't receive the call credentials metadata.
    EXPECT_EQ(context->client_metadata().find(g_kCallCredsMdKey),
              context->client_metadata().end());
    gpr_log(GPR_INFO, "LB[%p]: BalanceLoad", this);
    LoadBalanceRequest request;
    std::vector<ResponseDelayPair> responses_and_delays;

    if (!stream->Read(&request)) {
      goto done;
    }
    IncreaseRequestCount();
    gpr_log(GPR_INFO, "LB[%p]: received initial message '%s'", this,
            request.DebugString().c_str());

    // TODO(juanlishen): Initial response should always be the first response.
    if (client_load_reporting_interval_seconds_ > 0) {
      LoadBalanceResponse initial_response;
      initial_response.mutable_initial_response()
          ->mutable_client_stats_report_interval()
          ->set_seconds(client_load_reporting_interval_seconds_);
      stream->Write(initial_response);
    }

    {
      std::unique_lock<std::mutex> lock(mu_);
      responses_and_delays = responses_and_delays_;
    }
    for (const auto& response_and_delay : responses_and_delays) {
      SendResponse(stream, response_and_delay.first, response_and_delay.second);
    }
    {
      std::unique_lock<std::mutex> lock(mu_);
      serverlist_cond_.wait(lock, [this] { return serverlist_done_; });
    }

    if (client_load_reporting_interval_seconds_ > 0) {
      request.Clear();
      if (stream->Read(&request)) {
        gpr_log(GPR_INFO, "LB[%p]: received client load report message '%s'",
                this, request.DebugString().c_str());
        GPR_ASSERT(request.has_client_stats());
        // We need to acquire the lock here in order to prevent the notify_one
        // below from firing before its corresponding wait is executed.
        std::lock_guard<std::mutex> lock(mu_);
        client_stats_.num_calls_started +=
            request.client_stats().num_calls_started();
        client_stats_.num_calls_finished +=
            request.client_stats().num_calls_finished();
        client_stats_.num_calls_finished_with_client_failed_to_send +=
            request.client_stats()
                .num_calls_finished_with_client_failed_to_send();
        client_stats_.num_calls_finished_known_received +=
            request.client_stats().num_calls_finished_known_received();
        for (const auto& drop_token_count :
             request.client_stats().calls_finished_with_drop()) {
          client_stats_
              .drop_token_counts[drop_token_count.load_balance_token()] +=
              drop_token_count.num_calls();
        }
        load_report_ready_ = true;
        load_report_cond_.notify_one();
      }
    }
  done:
    gpr_log(GPR_INFO, "LB[%p]: done", this);
    return Status::OK;
  }

  void add_response(const LoadBalanceResponse& response, int send_after_ms) {
    std::unique_lock<std::mutex> lock(mu_);
    responses_and_delays_.push_back(std::make_pair(response, send_after_ms));
  }

  void Start() {
    std::lock_guard<std::mutex> lock(mu_);
    serverlist_done_ = false;
    load_report_ready_ = false;
    responses_and_delays_.clear();
    client_stats_.Reset();
  }

  void Shutdown() {
    NotifyDoneWithServerlists();
    gpr_log(GPR_INFO, "LB[%p]: shut down", this);
  }

  static LoadBalanceResponse BuildResponseForBackends(
      const std::vector<int>& backend_ports,
      const std::map<grpc::string, size_t>& drop_token_counts) {
    LoadBalanceResponse response;
    for (const auto& drop_token_count : drop_token_counts) {
      for (size_t i = 0; i < drop_token_count.second; ++i) {
        auto* server = response.mutable_server_list()->add_servers();
        server->set_drop(true);
        server->set_load_balance_token(drop_token_count.first);
      }
    }
    for (const int& backend_port : backend_ports) {
      auto* server = response.mutable_server_list()->add_servers();
      server->set_ip_address(Ip4ToPackedString("127.0.0.1"));
      server->set_port(backend_port);
      static int token_count = 0;
      char* token;
      gpr_asprintf(&token, "token%03d", ++token_count);
      server->set_load_balance_token(token);
      gpr_free(token);
    }
    return response;
  }

  const ClientStats& WaitForLoadReport() {
    std::unique_lock<std::mutex> lock(mu_);
    load_report_cond_.wait(lock, [this] { return load_report_ready_; });
    load_report_ready_ = false;
    return client_stats_;
  }

  void NotifyDoneWithServerlists() {
    std::lock_guard<std::mutex> lock(mu_);
    if (!serverlist_done_) {
      serverlist_done_ = true;
      serverlist_cond_.notify_all();
    }
  }

 private:
  void SendResponse(Stream* stream, const LoadBalanceResponse& response,
                    int delay_ms) {
    gpr_log(GPR_INFO, "LB[%p]: sleeping for %d ms...", this, delay_ms);
    if (delay_ms > 0) {
      gpr_sleep_until(grpc_timeout_milliseconds_to_deadline(delay_ms));
    }
    gpr_log(GPR_INFO, "LB[%p]: Woke up! Sending response '%s'", this,
            response.DebugString().c_str());
    IncreaseResponseCount();
    stream->Write(response);
  }

  const int client_load_reporting_interval_seconds_;
  std::vector<ResponseDelayPair> responses_and_delays_;
  std::mutex mu_;
  std::condition_variable load_report_cond_;
  bool load_report_ready_ = false;
  std::condition_variable serverlist_cond_;
  bool serverlist_done_ = false;
  ClientStats client_stats_;
};

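// Base test fixture. Spins up real backend and balancer servers, each on its
// own thread, and wires the client channel to them through a fake resolver
// whose responses the tests control.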
class GrpclbEnd2endTest : public ::testing::Test {
 protected:
  GrpclbEnd2endTest(size_t num_backends, size_t num_balancers,
                    int client_load_reporting_interval_seconds)
      : server_host_("localhost"),
        num_backends_(num_backends),
        num_balancers_(num_balancers),
        client_load_reporting_interval_seconds_(
            client_load_reporting_interval_seconds) {
    // Make the backup poller poll very frequently in order to pick up
    // updates from all the subchannels' FDs.
    gpr_setenv("GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS", "1");
  }

  void SetUp() override {
    response_generator_ =
        grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
    // Start the backends.
    for (size_t i = 0; i < num_backends_; ++i) {
      backends_.emplace_back(new ServerThread<BackendServiceImpl>("backend"));
      backends_.back()->Start(server_host_);
    }
    // Start the load balancers.
    for (size_t i = 0; i < num_balancers_; ++i) {
      balancers_.emplace_back(new ServerThread<BalancerServiceImpl>(
          "balancer", client_load_reporting_interval_seconds_));
      balancers_.back()->Start(server_host_);
    }
    ResetStub();
  }

  void TearDown() override {
    ShutdownAllBackends();
    for (auto& balancer : balancers_) balancer->Shutdown();
  }

  void StartAllBackends() {
    for (auto& backend : backends_) backend->Start(server_host_);
  }

  void StartBackend(size_t index) { backends_[index]->Start(server_host_); }

  void ShutdownAllBackends() {
    for (auto& backend : backends_) backend->Shutdown();
  }

  void ShutdownBackend(size_t index) { backends_[index]->Shutdown(); }

  void ResetStub(int fallback_timeout = 0,
                 const grpc::string& expected_targets = "") {
    ChannelArguments args;
    if (fallback_timeout > 0) args.SetGrpclbFallbackTimeout(fallback_timeout);
    args.SetPointer(GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR,
                    response_generator_.get());
    if (!expected_targets.empty()) {
      args.SetString(GRPC_ARG_FAKE_SECURITY_EXPECTED_TARGETS, expected_targets);
    }
    std::ostringstream uri;
    uri << "fake:///" << kApplicationTargetName_;
    // TODO(dgq): templatize tests to run everything using both secure and
    // insecure channel credentials.
    grpc_channel_credentials* channel_creds =
        grpc_fake_transport_security_credentials_create();
    grpc_call_credentials* call_creds = grpc_md_only_test_credentials_create(
        g_kCallCredsMdKey, g_kCallCredsMdValue, false);
    std::shared_ptr<ChannelCredentials> creds(
        new SecureChannelCredentials(grpc_composite_channel_credentials_create(
            channel_creds, call_creds, nullptr)));
    call_creds->Unref();
    channel_creds->Unref();
    channel_ = CreateCustomChannel(uri.str(), creds, args);
    stub_ = grpc::testing::EchoTestService::NewStub(channel_);
  }

  void ResetBackendCounters() {
    for (auto& backend : backends_) backend->service_.ResetCounters();
  }

  ClientStats WaitForLoadReports() {
    ClientStats client_stats;
    for (auto& balancer : balancers_) {
      client_stats += balancer->service_.WaitForLoadReport();
    }
    return client_stats;
  }

  bool SeenAllBackends(size_t start_index = 0, size_t stop_index = 0) {
    if (stop_index == 0) stop_index = backends_.size();
    for (size_t i = start_index; i < stop_index; ++i) {
      if (backends_[i]->service_.request_count() == 0) return false;
    }
    return true;
  }

  void SendRpcAndCount(int* num_total, int* num_ok, int* num_failure,
                       int* num_drops) {
    const Status status = SendRpc();
    if (status.ok()) {
      ++*num_ok;
    } else {
      if (status.error_message() == "Call dropped by load balancing policy") {
        ++*num_drops;
      } else {
        ++*num_failure;
      }
    }
    ++*num_total;
  }

  std::tuple<int, int, int> WaitForAllBackends(int num_requests_multiple_of = 1,
                                               size_t start_index = 0,
                                               size_t stop_index = 0) {
    int num_ok = 0;
    int num_failure = 0;
    int num_drops = 0;
    int num_total = 0;
    while (!SeenAllBackends(start_index, stop_index)) {
      SendRpcAndCount(&num_total, &num_ok, &num_failure, &num_drops);
    }
    while (num_total % num_requests_multiple_of != 0) {
      SendRpcAndCount(&num_total, &num_ok, &num_failure, &num_drops);
    }
    ResetBackendCounters();
    gpr_log(GPR_INFO,
            "Performed %d warm up requests (a multiple of %d) against the "
            "backends. %d succeeded, %d failed, %d dropped.",
            num_total, num_requests_multiple_of, num_ok, num_failure,
            num_drops);
    return std::make_tuple(num_ok, num_failure, num_drops);
  }

  void WaitForBackend(size_t backend_idx) {
    do {
      (void)SendRpc();
    } while (backends_[backend_idx]->service_.request_count() == 0);
    ResetBackendCounters();
  }

  struct AddressData {
    int port;
    bool is_balancer;
    grpc::string balancer_name;
  };

  grpc_core::ServerAddressList CreateLbAddressesFromAddressDataList(
      const std::vector<AddressData>& address_data) {
    grpc_core::ServerAddressList addresses;
    for (const auto& addr : address_data) {
      char* lb_uri_str;
      gpr_asprintf(&lb_uri_str, "ipv4:127.0.0.1:%d", addr.port);
      grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str, true);
      GPR_ASSERT(lb_uri != nullptr);
      grpc_resolved_address address;
      GPR_ASSERT(grpc_parse_uri(lb_uri, &address));
      std::vector<grpc_arg> args_to_add;
      if (addr.is_balancer) {
        args_to_add.emplace_back(grpc_channel_arg_integer_create(
            const_cast<char*>(GRPC_ARG_ADDRESS_IS_BALANCER), 1));
        args_to_add.emplace_back(grpc_channel_arg_string_create(
            const_cast<char*>(GRPC_ARG_ADDRESS_BALANCER_NAME),
            const_cast<char*>(addr.balancer_name.c_str())));
      }
      grpc_channel_args* args = grpc_channel_args_copy_and_add(
          nullptr, args_to_add.data(), args_to_add.size());
      addresses.emplace_back(address.addr, address.len, args);
      grpc_uri_destroy(lb_uri);
      gpr_free(lb_uri_str);
    }
    return addresses;
  }

  void SetNextResolutionAllBalancers(
      const char* service_config_json = nullptr) {
    std::vector<AddressData> addresses;
    for (size_t i = 0; i < balancers_.size(); ++i) {
      addresses.emplace_back(AddressData{balancers_[i]->port_, true, ""});
    }
    SetNextResolution(addresses, service_config_json);
  }

  void SetNextResolution(const std::vector<AddressData>& address_data,
                         const char* service_config_json = nullptr) {
    grpc_core::ExecCtx exec_ctx;
    grpc_core::Resolver::Result result;
    result.addresses = CreateLbAddressesFromAddressDataList(address_data);
    if (service_config_json != nullptr) {
      result.service_config =
          grpc_core::ServiceConfig::Create(service_config_json);
    }
    response_generator_->SetResponse(std::move(result));
  }

  void SetNextReresolutionResponse(
      const std::vector<AddressData>& address_data) {
    grpc_core::ExecCtx exec_ctx;
    grpc_core::Resolver::Result result;
    result.addresses = CreateLbAddressesFromAddressDataList(address_data);
    response_generator_->SetReresolutionResponse(std::move(result));
  }

  const std::vector<int> GetBackendPorts(size_t start_index = 0,
                                         size_t stop_index = 0) const {
    if (stop_index == 0) stop_index = backends_.size();
    std::vector<int> backend_ports;
    for (size_t i = start_index; i < stop_index; ++i) {
      backend_ports.push_back(backends_[i]->port_);
    }
    return backend_ports;
  }

  void ScheduleResponseForBalancer(size_t i,
                                   const LoadBalanceResponse& response,
                                   int delay_ms) {
    balancers_[i]->service_.add_response(response, delay_ms);
  }

  Status SendRpc(EchoResponse* response = nullptr, int timeout_ms = 1000,
                 bool wait_for_ready = false) {
    const bool local_response = (response == nullptr);
    if (local_response) response = new EchoResponse;
    EchoRequest request;
    request.set_message(kRequestMessage_);
    ClientContext context;
    context.set_deadline(grpc_timeout_milliseconds_to_deadline(timeout_ms));
    if (wait_for_ready) context.set_wait_for_ready(true);
    Status status = stub_->Echo(&context, request, response);
    if (local_response) delete response;
    return status;
  }

  void CheckRpcSendOk(const size_t times = 1, const int timeout_ms = 1000,
                      bool wait_for_ready = false) {
    for (size_t i = 0; i < times; ++i) {
      EchoResponse response;
      const Status status = SendRpc(&response, timeout_ms, wait_for_ready);
      EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
                               << " message=" << status.error_message();
      EXPECT_EQ(response.message(), kRequestMessage_);
    }
  }

  void CheckRpcSendFailure() {
    const Status status = SendRpc();
    EXPECT_FALSE(status.ok());
  }

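  // Runs a server (backend or balancer) on its own thread. Start() blocks on
  // a condition variable until Serve() has built and started the server, so
  // the port is live before the test proceeds.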
  template <typename T>
  struct ServerThread {
    template <typename... Args>
    explicit ServerThread(const grpc::string& type, Args&&... args)
        : port_(grpc_pick_unused_port_or_die()),
          type_(type),
          service_(std::forward<Args>(args)...) {}

    void Start(const grpc::string& server_host) {
      gpr_log(GPR_INFO, "starting %s server on port %d", type_.c_str(), port_);
      GPR_ASSERT(!running_);
      running_ = true;
      service_.Start();
      std::mutex mu;
      // We need to acquire the lock here in order to prevent the notify_one
      // by ServerThread::Serve from firing before the wait below is hit.
      std::unique_lock<std::mutex> lock(mu);
      std::condition_variable cond;
      thread_.reset(new std::thread(
          std::bind(&ServerThread::Serve, this, server_host, &mu, &cond)));
      cond.wait(lock);
      gpr_log(GPR_INFO, "%s server startup complete", type_.c_str());
    }

    void Serve(const grpc::string& server_host, std::mutex* mu,
               std::condition_variable* cond) {
      // We need to acquire the lock here in order to prevent the notify_one
      // below from firing before its corresponding wait is executed.
      std::lock_guard<std::mutex> lock(*mu);
      std::ostringstream server_address;
      server_address << server_host << ":" << port_;
      ServerBuilder builder;
      std::shared_ptr<ServerCredentials> creds(new SecureServerCredentials(
          grpc_fake_transport_security_server_credentials_create()));
      builder.AddListeningPort(server_address.str(), creds);
      builder.RegisterService(&service_);
      server_ = builder.BuildAndStart();
      cond->notify_one();
    }

    void Shutdown() {
      if (!running_) return;
      gpr_log(GPR_INFO, "%s about to shutdown", type_.c_str());
      service_.Shutdown();
      server_->Shutdown(grpc_timeout_milliseconds_to_deadline(0));
      thread_->join();
      gpr_log(GPR_INFO, "%s shutdown completed", type_.c_str());
      running_ = false;
    }

    const int port_;
    grpc::string type_;
    T service_;
    std::unique_ptr<Server> server_;
    std::unique_ptr<std::thread> thread_;
    bool running_ = false;
  };

  const grpc::string server_host_;
  const size_t num_backends_;
  const size_t num_balancers_;
  const int client_load_reporting_interval_seconds_;
  std::shared_ptr<Channel> channel_;
  std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
  std::vector<std::unique_ptr<ServerThread<BackendServiceImpl>>> backends_;
  std::vector<std::unique_ptr<ServerThread<BalancerServiceImpl>>> balancers_;
  grpc_core::RefCountedPtr<grpc_core::FakeResolverResponseGenerator>
      response_generator_;
  const grpc::string kRequestMessage_ = "Live long and prosper.";
  const grpc::string kApplicationTargetName_ = "application_target_name";
};

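// Single-balancer suite: four backends, one balancer, load reporting off.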
class SingleBalancerTest : public GrpclbEnd2endTest {
 public:
  SingleBalancerTest() : GrpclbEnd2endTest(4, 1, 0) {}
};

TEST_F(SingleBalancerTest, Vanilla) {
  SetNextResolutionAllBalancers();
  const size_t kNumRpcsPerAddress = 100;
  ScheduleResponseForBalancer(
      0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
      0);
  // Make sure that trying to connect works without a call.
  channel_->GetState(true /* try_to_connect */);
  // We need to wait for all backends to come online.
  WaitForAllBackends();
  // Send kNumRpcsPerAddress RPCs per server.
  CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);

  // Each backend should have gotten 100 requests.
  for (size_t i = 0; i < backends_.size(); ++i) {
    EXPECT_EQ(kNumRpcsPerAddress, backends_[i]->service_.request_count());
  }
  balancers_[0]->service_.NotifyDoneWithServerlists();
  // The balancer got a single request.
  EXPECT_EQ(1U, balancers_[0]->service_.request_count());
  // and sent a single response.
  EXPECT_EQ(1U, balancers_[0]->service_.response_count());

  // Check LB policy name for the channel.
  EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
}

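// The service config's loadBalancingConfig list is tried in order; entries
// naming unknown policies (here "does_not_exist") are skipped, so the client
// should end up selecting grpclb.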
TEST_F(SingleBalancerTest, SelectGrpclbWithMigrationServiceConfig) {
  SetNextResolutionAllBalancers(
      "{\n"
      "  \"loadBalancingConfig\":[\n"
      "    { \"does_not_exist\":{} },\n"
      "    { \"grpclb\":{} }\n"
      "  ]\n"
      "}");
  ScheduleResponseForBalancer(
      0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
      0);
  CheckRpcSendOk(1, 1000 /* timeout_ms */, true /* wait_for_ready */);
  balancers_[0]->service_.NotifyDoneWithServerlists();
  // The balancer got a single request.
  EXPECT_EQ(1U, balancers_[0]->service_.request_count());
  // and sent a single response.
  EXPECT_EQ(1U, balancers_[0]->service_.response_count());
  // Check LB policy name for the channel.
  EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
}

TEST_F(SingleBalancerTest,
       SelectGrpclbWithMigrationServiceConfigAndNoAddresses) {
  const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor();
  ResetStub(kFallbackTimeoutMs);
  SetNextResolution({},
                    "{\n"
                    "  \"loadBalancingConfig\":[\n"
                    "    { \"does_not_exist\":{} },\n"
                    "    { \"grpclb\":{} }\n"
                    "  ]\n"
                    "}");
  // Try to connect.
  EXPECT_EQ(GRPC_CHANNEL_IDLE, channel_->GetState(true));
  // Should go into state TRANSIENT_FAILURE when we enter fallback mode.
  const gpr_timespec deadline = grpc_timeout_seconds_to_deadline(1);
  grpc_connectivity_state state;
  while ((state = channel_->GetState(false)) !=
         GRPC_CHANNEL_TRANSIENT_FAILURE) {
    ASSERT_TRUE(channel_->WaitForStateChange(state, deadline));
  }
  // Check LB policy name for the channel.
  EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
}

TEST_F(SingleBalancerTest,
       SelectGrpclbWithMigrationServiceConfigAndNoBalancerAddresses) {
  const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor();
  ResetStub(kFallbackTimeoutMs);
  // Resolution includes fallback address but no balancers.
  SetNextResolution({AddressData{backends_[0]->port_, false, ""}},
                    "{\n"
                    "  \"loadBalancingConfig\":[\n"
                    "    { \"does_not_exist\":{} },\n"
                    "    { \"grpclb\":{} }\n"
                    "  ]\n"
                    "}");
  CheckRpcSendOk(1, 1000 /* timeout_ms */, true /* wait_for_ready */);
  // Check LB policy name for the channel.
  EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
}

TEST_F(SingleBalancerTest, UsePickFirstChildPolicy) {
  SetNextResolutionAllBalancers(
      "{\n"
      "  \"loadBalancingConfig\":[\n"
      "    { \"grpclb\":{\n"
      "      \"childPolicy\":[\n"
      "        { \"pick_first\":{} }\n"
      "      ]\n"
      "    } }\n"
      "  ]\n"
      "}");
  ScheduleResponseForBalancer(
      0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
      0);
  const size_t kNumRpcs = num_backends_ * 2;
  CheckRpcSendOk(kNumRpcs, 1000 /* timeout_ms */, true /* wait_for_ready */);
  balancers_[0]->service_.NotifyDoneWithServerlists();
  // Check that all requests went to the first backend.  This verifies
  // that we used pick_first instead of round_robin as the child policy.
  EXPECT_EQ(backends_[0]->service_.request_count(), kNumRpcs);
  for (size_t i = 1; i < backends_.size(); ++i) {
    EXPECT_EQ(backends_[i]->service_.request_count(), 0UL);
  }
  // The balancer got a single request.
  EXPECT_EQ(1U, balancers_[0]->service_.request_count());
  // and sent a single response.
  EXPECT_EQ(1U, balancers_[0]->service_.response_count());
  // Check LB policy name for the channel.
  EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
}

TEST_F(SingleBalancerTest, SwapChildPolicy) {
  SetNextResolutionAllBalancers(
      "{\n"
      "  \"loadBalancingConfig\":[\n"
      "    { \"grpclb\":{\n"
      "      \"childPolicy\":[\n"
      "        { \"pick_first\":{} }\n"
      "      ]\n"
      "    } }\n"
      "  ]\n"
      "}");
  ScheduleResponseForBalancer(
      0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
      0);
  const size_t kNumRpcs = num_backends_ * 2;
  CheckRpcSendOk(kNumRpcs, 1000 /* timeout_ms */, true /* wait_for_ready */);
  // Check that all requests went to the first backend.  This verifies
  // that we used pick_first instead of round_robin as the child policy.
  EXPECT_EQ(backends_[0]->service_.request_count(), kNumRpcs);
  for (size_t i = 1; i < backends_.size(); ++i) {
    EXPECT_EQ(backends_[i]->service_.request_count(), 0UL);
  }
  // Send new resolution that removes child policy from service config.
  SetNextResolutionAllBalancers("{}");
  WaitForAllBackends();
  CheckRpcSendOk(kNumRpcs, 1000 /* timeout_ms */, true /* wait_for_ready */);
  // Check that every backend saw the same number of requests.  This verifies
  // that we used round_robin.
  for (size_t i = 0; i < backends_.size(); ++i) {
    EXPECT_EQ(backends_[i]->service_.request_count(), 2UL);
  }
  // Done.
  balancers_[0]->service_.NotifyDoneWithServerlists();
  // The balancer got a single request.
  EXPECT_EQ(1U, balancers_[0]->service_.request_count());
  // and sent a single response.
  EXPECT_EQ(1U, balancers_[0]->service_.response_count());
  // Check LB policy name for the channel.
  EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
}

TEST_F(SingleBalancerTest, UpdatesGoToMostRecentChildPolicy) {
  const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor();
  ResetStub(kFallbackTimeoutMs);
  int unreachable_balancer_port = grpc_pick_unused_port_or_die();
  int unreachable_backend_port = grpc_pick_unused_port_or_die();
  // Phase 1: Start with RR pointing to first backend.
  gpr_log(GPR_INFO, "PHASE 1: Initial setup with RR with first backend");
  SetNextResolution(
      {
          // Unreachable balancer.
          {unreachable_balancer_port, true, ""},
          // Fallback address: first backend.
          {backends_[0]->port_, false, ""},
      },
      "{\n"
      "  \"loadBalancingConfig\":[\n"
      "    { \"grpclb\":{\n"
      "      \"childPolicy\":[\n"
      "        { \"round_robin\":{} }\n"
      "      ]\n"
      "    } }\n"
      "  ]\n"
      "}");
  // RPCs should go to first backend.
  WaitForBackend(0);
  // Phase 2: Switch to PF pointing to unreachable backend.
  gpr_log(GPR_INFO, "PHASE 2: Update to use PF with unreachable backend");
  SetNextResolution(
      {
          // Unreachable balancer.
          {unreachable_balancer_port, true, ""},
          // Fallback address: unreachable backend.
          {unreachable_backend_port, false, ""},
      },
      "{\n"
      "  \"loadBalancingConfig\":[\n"
      "    { \"grpclb\":{\n"
      "      \"childPolicy\":[\n"
      "        { \"pick_first\":{} }\n"
      "      ]\n"
      "    } }\n"
      "  ]\n"
      "}");
  // RPCs should continue to go to the first backend, because the new
  // PF child policy will never go into state READY.
  WaitForBackend(0);
  // Phase 3: Switch back to RR pointing to second and third backends.
  // This ensures that we create a new policy rather than updating the
  // pending PF policy.
  gpr_log(GPR_INFO, "PHASE 3: Update to use RR again with two backends");
  SetNextResolution(
      {
          // Unreachable balancer.
          {unreachable_balancer_port, true, ""},
          // Fallback address: second and third backends.
          {backends_[1]->port_, false, ""},
          {backends_[2]->port_, false, ""},
      },
      "{\n"
      "  \"loadBalancingConfig\":[\n"
      "    { \"grpclb\":{\n"
      "      \"childPolicy\":[\n"
      "        { \"round_robin\":{} }\n"
      "      ]\n"
      "    } }\n"
      "  ]\n"
      "}");
  // RPCs should go to the second and third backends.
  WaitForBackend(1);
  WaitForBackend(2);
}

TEST_F(SingleBalancerTest, SameBackendListedMultipleTimes) {
  SetNextResolutionAllBalancers();
  // Same backend listed twice.
  std::vector<int> ports;
  ports.push_back(backends_[0]->port_);
  ports.push_back(backends_[0]->port_);
  const size_t kNumRpcsPerAddress = 10;
  ScheduleResponseForBalancer(
      0, BalancerServiceImpl::BuildResponseForBackends(ports, {}), 0);
  // We need to wait for the backend to come online.
  WaitForBackend(0);
  // Send kNumRpcsPerAddress RPCs per server.
  CheckRpcSendOk(kNumRpcsPerAddress * ports.size());
  // Backend should have gotten 20 requests.
  EXPECT_EQ(kNumRpcsPerAddress * 2, backends_[0]->service_.request_count());
  // And they should have come from a single client port, because of
  // subchannel sharing.
  EXPECT_EQ(1UL, backends_[0]->service_.clients().size());
  balancers_[0]->service_.NotifyDoneWithServerlists();
}

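// Secure naming: ResetStub passes kApplicationTargetName_ + ";lb" as the fake
// security connector's expected targets, and the resolver labels the balancer
// address with the matching name "lb", so the connection should be accepted.
// The death test below uses a non-matching balancer name instead.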
TEST_F(SingleBalancerTest, SecureNaming) {
  ResetStub(0, kApplicationTargetName_ + ";lb");
  SetNextResolution({AddressData{balancers_[0]->port_, true, "lb"}});
  const size_t kNumRpcsPerAddress = 100;
  ScheduleResponseForBalancer(
      0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
      0);
  // Make sure that trying to connect works without a call.
  channel_->GetState(true /* try_to_connect */);
  // We need to wait for all backends to come online.
  WaitForAllBackends();
  // Send kNumRpcsPerAddress RPCs per server.
  CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);

  // Each backend should have gotten 100 requests.
  for (size_t i = 0; i < backends_.size(); ++i) {
    EXPECT_EQ(kNumRpcsPerAddress, backends_[i]->service_.request_count());
  }
  balancers_[0]->service_.NotifyDoneWithServerlists();
  // The balancer got a single request.
  EXPECT_EQ(1U, balancers_[0]->service_.request_count());
  // and sent a single response.
  EXPECT_EQ(1U, balancers_[0]->service_.response_count());
  // Check LB policy name for the channel.
  EXPECT_EQ("grpclb", channel_->GetLoadBalancingPolicyName());
}

TEST_F(SingleBalancerTest, SecureNamingDeathTest) {
  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
  // Make sure that we blow up (via abort() from the security connector) when
  // the name from the balancer doesn't match expectations.
  ASSERT_DEATH(
      {
        ResetStub(0, kApplicationTargetName_ + ";lb");
        SetNextResolution({AddressData{balancers_[0]->port_, true, "woops"}});
        channel_->WaitForConnected(grpc_timeout_seconds_to_deadline(1));
      },
      "");
}

TEST_F(SingleBalancerTest, InitiallyEmptyServerlist) {
  SetNextResolutionAllBalancers();
  const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor();
  const int kCallDeadlineMs = kServerlistDelayMs * 2;
  // First response is an empty serverlist, sent right away.
  ScheduleResponseForBalancer(0, LoadBalanceResponse(), 0);
  // Send non-empty serverlist only after kServerlistDelayMs.
  ScheduleResponseForBalancer(
      0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
      kServerlistDelayMs);
  const auto t0 = system_clock::now();
  // Client will block: LB will initially send empty serverlist.
  CheckRpcSendOk(1, kCallDeadlineMs, true /* wait_for_ready */);
  const auto elapsed_ms =
      std::chrono::duration_cast<std::chrono::milliseconds>(
          system_clock::now() - t0);
  // but eventually, the LB sends a serverlist update that allows the call to
  // proceed. The call must have taken longer than the delay in sending the
  // populated serverlist, yet still completed within its deadline (which
  // CheckRpcSendOk enforces by expecting the call to succeed).
  EXPECT_GT(elapsed_ms.count(), kServerlistDelayMs);
  balancers_[0]->service_.NotifyDoneWithServerlists();
  // The balancer got a single request.
  EXPECT_EQ(1U, balancers_[0]->service_.request_count());
  // and sent two responses.
  EXPECT_EQ(2U, balancers_[0]->service_.response_count());
}

TEST_F(SingleBalancerTest, AllServersUnreachableFailFast) {
  SetNextResolutionAllBalancers();
  const size_t kNumUnreachableServers = 5;
  std::vector<int> ports;
  for (size_t i = 0; i < kNumUnreachableServers; ++i) {
    ports.push_back(grpc_pick_unused_port_or_die());
  }
  ScheduleResponseForBalancer(
      0, BalancerServiceImpl::BuildResponseForBackends(ports, {}), 0);
  const Status status = SendRpc();
  // The error shouldn't be DEADLINE_EXCEEDED.
  EXPECT_EQ(StatusCode::UNAVAILABLE, status.error_code());
  balancers_[0]->service_.NotifyDoneWithServerlists();
  // The balancer got a single request.
  EXPECT_EQ(1U, balancers_[0]->service_.request_count());
  // and sent a single response.
  EXPECT_EQ(1U, balancers_[0]->service_.response_count());
}

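// Fallback: until the balancer delivers a serverlist (or after contact with
// it is lost), the client should use the non-balancer addresses from the
// resolver as its backends.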
TEST_F(SingleBalancerTest, Fallback) {
  SetNextResolutionAllBalancers();
  const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor();
  const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor();
  const size_t kNumBackendInResolution = backends_.size() / 2;

  ResetStub(kFallbackTimeoutMs);
  std::vector<AddressData> addresses;
  addresses.emplace_back(AddressData{balancers_[0]->port_, true, ""});
  for (size_t i = 0; i < kNumBackendInResolution; ++i) {
    addresses.emplace_back(AddressData{backends_[i]->port_, false, ""});
  }
  SetNextResolution(addresses);

  // Send non-empty serverlist only after kServerlistDelayMs.
  ScheduleResponseForBalancer(
      0,
      BalancerServiceImpl::BuildResponseForBackends(
          GetBackendPorts(kNumBackendInResolution /* start_index */), {}),
      kServerlistDelayMs);

  // Wait until all the fallback backends are reachable.
  for (size_t i = 0; i < kNumBackendInResolution; ++i) {
    WaitForBackend(i);
  }

  // The first request.
  gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
  CheckRpcSendOk(kNumBackendInResolution);
  gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");

  // Fallback is used: each backend returned by the resolver should have
  // gotten one request.
  for (size_t i = 0; i < kNumBackendInResolution; ++i) {
    EXPECT_EQ(1U, backends_[i]->service_.request_count());
  }
  for (size_t i = kNumBackendInResolution; i < backends_.size(); ++i) {
    EXPECT_EQ(0U, backends_[i]->service_.request_count());
  }

  // Wait until the serverlist reception has been processed and all backends
  // in the serverlist are reachable.
  for (size_t i = kNumBackendInResolution; i < backends_.size(); ++i) {
    WaitForBackend(i);
  }

  // Send out the second request.
  gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
  CheckRpcSendOk(backends_.size() - kNumBackendInResolution);
  gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");

  // Serverlist is used: each backend returned by the balancer should
  // have gotten one request.
  for (size_t i = 0; i < kNumBackendInResolution; ++i) {
    EXPECT_EQ(0U, backends_[i]->service_.request_count());
  }
  for (size_t i = kNumBackendInResolution; i < backends_.size(); ++i) {
    EXPECT_EQ(1U, backends_[i]->service_.request_count());
  }

  balancers_[0]->service_.NotifyDoneWithServerlists();
  // The balancer got a single request.
  EXPECT_EQ(1U, balancers_[0]->service_.request_count());
  // and sent a single response.
  EXPECT_EQ(1U, balancers_[0]->service_.response_count());
}

TEST_F(SingleBalancerTest, FallbackUpdate) {
  SetNextResolutionAllBalancers();
  const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor();
  const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor();
  const size_t kNumBackendInResolution = backends_.size() / 3;
  const size_t kNumBackendInResolutionUpdate = backends_.size() / 3;

  ResetStub(kFallbackTimeoutMs);
  std::vector<AddressData> addresses;
  addresses.emplace_back(AddressData{balancers_[0]->port_, true, ""});
  for (size_t i = 0; i < kNumBackendInResolution; ++i) {
    addresses.emplace_back(AddressData{backends_[i]->port_, false, ""});
  }
  SetNextResolution(addresses);

  // Send non-empty serverlist only after kServerlistDelayMs.
  ScheduleResponseForBalancer(
      0,
      BalancerServiceImpl::BuildResponseForBackends(
          GetBackendPorts(kNumBackendInResolution +
                          kNumBackendInResolutionUpdate /* start_index */),
          {}),
      kServerlistDelayMs);

  // Wait until all the fallback backends are reachable.
  for (size_t i = 0; i < kNumBackendInResolution; ++i) {
    WaitForBackend(i);
  }

  // The first request.
  gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
  CheckRpcSendOk(kNumBackendInResolution);
  gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");

  // Fallback is used: each backend returned by the resolver should have
  // gotten one request.
  for (size_t i = 0; i < kNumBackendInResolution; ++i) {
    EXPECT_EQ(1U, backends_[i]->service_.request_count());
  }
  for (size_t i = kNumBackendInResolution; i < backends_.size(); ++i) {
    EXPECT_EQ(0U, backends_[i]->service_.request_count());
  }

  addresses.clear();
  addresses.emplace_back(AddressData{balancers_[0]->port_, true, ""});
  for (size_t i = kNumBackendInResolution;
       i < kNumBackendInResolution + kNumBackendInResolutionUpdate; ++i) {
    addresses.emplace_back(AddressData{backends_[i]->port_, false, ""});
  }
  SetNextResolution(addresses);

  // Wait until the resolution update has been processed and all the new
  // fallback backends are reachable.
  for (size_t i = kNumBackendInResolution;
       i < kNumBackendInResolution + kNumBackendInResolutionUpdate; ++i) {
    WaitForBackend(i);
  }

  // Send out the second request.
  gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
  CheckRpcSendOk(kNumBackendInResolutionUpdate);
  gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");

  // The resolution update is used: each backend in the resolution update
  // should have gotten one request.
  for (size_t i = 0; i < kNumBackendInResolution; ++i) {
    EXPECT_EQ(0U, backends_[i]->service_.request_count());
  }
  for (size_t i = kNumBackendInResolution;
       i < kNumBackendInResolution + kNumBackendInResolutionUpdate; ++i) {
    EXPECT_EQ(1U, backends_[i]->service_.request_count());
  }
  for (size_t i = kNumBackendInResolution + kNumBackendInResolutionUpdate;
       i < backends_.size(); ++i) {
    EXPECT_EQ(0U, backends_[i]->service_.request_count());
  }

  // Wait until the serverlist reception has been processed and all backends
  // in the serverlist are reachable.
  for (size_t i = kNumBackendInResolution + kNumBackendInResolutionUpdate;
       i < backends_.size(); ++i) {
    WaitForBackend(i);
  }

  // Send out the third request.
  gpr_log(GPR_INFO, "========= BEFORE THIRD BATCH ==========");
  CheckRpcSendOk(backends_.size() - kNumBackendInResolution -
                 kNumBackendInResolutionUpdate);
  gpr_log(GPR_INFO, "========= DONE WITH THIRD BATCH ==========");

  // Serverlist is used: each backend returned by the balancer should
  // have gotten one request.
  for (size_t i = 0;
       i < kNumBackendInResolution + kNumBackendInResolutionUpdate; ++i) {
    EXPECT_EQ(0U, backends_[i]->service_.request_count());
  }
  for (size_t i = kNumBackendInResolution + kNumBackendInResolutionUpdate;
       i < backends_.size(); ++i) {
    EXPECT_EQ(1U, backends_[i]->service_.request_count());
  }

  balancers_[0]->service_.NotifyDoneWithServerlists();
  // The balancer got a single request.
  EXPECT_EQ(1U, balancers_[0]->service_.request_count());
  // and sent a single response.
  EXPECT_EQ(1U, balancers_[0]->service_.response_count());
}

TEST_F(SingleBalancerTest,
       FallbackAfterStartup_LoseContactWithBalancerThenBackends) {
  // First two backends are fallback, last two are pointed to by balancer.
  const size_t kNumFallbackBackends = 2;
  const size_t kNumBalancerBackends = backends_.size() - kNumFallbackBackends;
  std::vector<AddressData> addresses;
  for (size_t i = 0; i < kNumFallbackBackends; ++i) {
    addresses.emplace_back(AddressData{backends_[i]->port_, false, ""});
  }
  for (size_t i = 0; i < balancers_.size(); ++i) {
    addresses.emplace_back(AddressData{balancers_[i]->port_, true, ""});
  }
  SetNextResolution(addresses);
  ScheduleResponseForBalancer(0,
                              BalancerServiceImpl::BuildResponseForBackends(
                                  GetBackendPorts(kNumFallbackBackends), {}),
                              0);
  // Try to connect.
  channel_->GetState(true /* try_to_connect */);
  WaitForAllBackends(1 /* num_requests_multiple_of */,
                     kNumFallbackBackends /* start_index */);
  // Stop balancer.  RPCs should continue going to backends from balancer.
  balancers_[0]->Shutdown();
  CheckRpcSendOk(100 * kNumBalancerBackends);
  for (size_t i = kNumFallbackBackends; i < backends_.size(); ++i) {
    EXPECT_EQ(100UL, backends_[i]->service_.request_count());
  }
  // Stop backends from balancer.  This should put us in fallback mode.
  for (size_t i = kNumFallbackBackends; i < backends_.size(); ++i) {
    ShutdownBackend(i);
  }
  WaitForAllBackends(1 /* num_requests_multiple_of */, 0 /* start_index */,
                     kNumFallbackBackends /* stop_index */);
  // Restart the backends from the balancer.  We should *not* start
  // sending traffic back to them at this point (although the behavior
  // in xds may be different).
  for (size_t i = kNumFallbackBackends; i < backends_.size(); ++i) {
    StartBackend(i);
  }
  CheckRpcSendOk(100 * kNumBalancerBackends);
  for (size_t i = 0; i < kNumFallbackBackends; ++i) {
    EXPECT_EQ(100UL, backends_[i]->service_.request_count());
  }
  // Now start the balancer again.  This should cause us to exit
  // fallback mode.
  balancers_[0]->Start(server_host_);
  ScheduleResponseForBalancer(0,
                              BalancerServiceImpl::BuildResponseForBackends(
                                  GetBackendPorts(kNumFallbackBackends), {}),
                              0);
  WaitForAllBackends(1 /* num_requests_multiple_of */,
                     kNumFallbackBackends /* start_index */);
}

TEST_F(SingleBalancerTest,
       FallbackAfterStartup_LoseContactWithBackendsThenBalancer) {
  // First two backends are fallback, last two are pointed to by balancer.
  const size_t kNumFallbackBackends = 2;
  const size_t kNumBalancerBackends = backends_.size() - kNumFallbackBackends;
  std::vector<AddressData> addresses;
  for (size_t i = 0; i < kNumFallbackBackends; ++i) {
    addresses.emplace_back(AddressData{backends_[i]->port_, false, ""});
  }
  for (size_t i = 0; i < balancers_.size(); ++i) {
    addresses.emplace_back(AddressData{balancers_[i]->port_, true, ""});
  }
  SetNextResolution(addresses);
  ScheduleResponseForBalancer(0,
                              BalancerServiceImpl::BuildResponseForBackends(
                                  GetBackendPorts(kNumFallbackBackends), {}),
                              0);
  // Try to connect.
  channel_->GetState(true /* try_to_connect */);
  WaitForAllBackends(1 /* num_requests_multiple_of */,
                     kNumFallbackBackends /* start_index */);
  // Stop backends from balancer.  Since we are still in contact with
  // the balancer at this point, RPCs should be failing.
  for (size_t i = kNumFallbackBackends; i < backends_.size(); ++i) {
    ShutdownBackend(i);
  }
  CheckRpcSendFailure();
  // Stop balancer.  This should put us in fallback mode.
  balancers_[0]->Shutdown();
  WaitForAllBackends(1 /* num_requests_multiple_of */, 0 /* start_index */,
                     kNumFallbackBackends /* stop_index */);
  // Restart the backends from the balancer.  We should *not* start
  // sending traffic back to them at this point (although the behavior
  // in xds may be different).
  for (size_t i = kNumFallbackBackends; i < backends_.size(); ++i) {
    StartBackend(i);
  }
  CheckRpcSendOk(100 * kNumBalancerBackends);
  for (size_t i = 0; i < kNumFallbackBackends; ++i) {
    EXPECT_EQ(100UL, backends_[i]->service_.request_count());
  }
  // Now start the balancer again.  This should cause us to exit
  // fallback mode.
  balancers_[0]->Start(server_host_);
  ScheduleResponseForBalancer(0,
                              BalancerServiceImpl::BuildResponseForBackends(
                                  GetBackendPorts(kNumFallbackBackends), {}),
                              0);
  WaitForAllBackends(1 /* num_requests_multiple_of */,
                     kNumFallbackBackends /* start_index */);
}

TEST_F(SingleBalancerTest, FallbackEarlyWhenBalancerChannelFails) {
  const int kFallbackTimeoutMs = 10000 * grpc_test_slowdown_factor();
  ResetStub(kFallbackTimeoutMs);
  // Return an unreachable balancer and one fallback backend.
  std::vector<AddressData> addresses;
  addresses.emplace_back(AddressData{grpc_pick_unused_port_or_die(), true, ""});
  addresses.emplace_back(AddressData{backends_[0]->port_, false, ""});
  SetNextResolution(addresses);
  // Send RPC with deadline less than the fallback timeout and make sure it
  // succeeds.
  CheckRpcSendOk(/* times */ 1, /* timeout_ms */ 1000,
                 /* wait_for_ready */ false);
}

TEST_F(SingleBalancerTest, FallbackEarlyWhenBalancerCallFails) {
  const int kFallbackTimeoutMs = 10000 * grpc_test_slowdown_factor();
  ResetStub(kFallbackTimeoutMs);
  // Return a reachable balancer and one fallback backend.
  std::vector<AddressData> addresses;
  addresses.emplace_back(AddressData{balancers_[0]->port_, true, ""});
  addresses.emplace_back(AddressData{backends_[0]->port_, false, ""});
  SetNextResolution(addresses);
  // Balancer drops call without sending a serverlist.
  balancers_[0]->service_.NotifyDoneWithServerlists();
  // Send RPC with deadline less than the fallback timeout and make sure it
  // succeeds.
  CheckRpcSendOk(/* times */ 1, /* timeout_ms */ 1000,
                 /* wait_for_ready */ false);
}

TEST_F(SingleBalancerTest, BackendsRestart) {
  SetNextResolutionAllBalancers();
  const size_t kNumRpcsPerAddress = 100;
  ScheduleResponseForBalancer(
      0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
      0);
  // Make sure that trying to connect works without a call.
  channel_->GetState(true /* try_to_connect */);
  // Send kNumRpcsPerAddress RPCs per server.
  CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);
  // Stop backends.  RPCs should fail.
  ShutdownAllBackends();
  CheckRpcSendFailure();
  // Restart backends.  RPCs should start succeeding again.
  StartAllBackends();
  CheckRpcSendOk(1 /* times */, 2000 /* timeout_ms */,
                 true /* wait_for_ready */);
  // The balancer got a single request.
  EXPECT_EQ(1U, balancers_[0]->service_.request_count());
  // and sent a single response.
  EXPECT_EQ(1U, balancers_[0]->service_.response_count());
}

1357 class UpdatesTest : public GrpclbEnd2endTest {
1358  public:
1359   UpdatesTest() : GrpclbEnd2endTest(4, 3, 0) {}
1360 };
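
// Judging by the fixtures in this file, GrpclbEnd2endTest's constructor
// arguments appear to be (num_backends, num_balancers,
// client_load_reporting_interval): the UpdatesTest cases below use
// backends_[0..3] and balancers_[0..2], with load reporting disabled.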

TEST_F(UpdatesTest, UpdateBalancers) {
  SetNextResolutionAllBalancers();
  const std::vector<int> first_backend{GetBackendPorts()[0]};
  const std::vector<int> second_backend{GetBackendPorts()[1]};
  ScheduleResponseForBalancer(
      0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0);
  ScheduleResponseForBalancer(
      1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0);

  // Wait until the first backend is ready.
  WaitForBackend(0);

  // Send 10 requests.
  gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
  CheckRpcSendOk(10);
  gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");

  // All 10 requests should have gone to the first backend.
  EXPECT_EQ(10U, backends_[0]->service_.request_count());

  balancers_[0]->service_.NotifyDoneWithServerlists();
  balancers_[1]->service_.NotifyDoneWithServerlists();
  balancers_[2]->service_.NotifyDoneWithServerlists();
  // Balancer 0 got a single request.
  EXPECT_EQ(1U, balancers_[0]->service_.request_count());
  // and sent a single response.
  EXPECT_EQ(1U, balancers_[0]->service_.response_count());
  EXPECT_EQ(0U, balancers_[1]->service_.request_count());
  EXPECT_EQ(0U, balancers_[1]->service_.response_count());
  EXPECT_EQ(0U, balancers_[2]->service_.request_count());
  EXPECT_EQ(0U, balancers_[2]->service_.response_count());

  std::vector<AddressData> addresses;
  addresses.emplace_back(AddressData{balancers_[1]->port_, true, ""});
  gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 ==========");
  SetNextResolution(addresses);
  gpr_log(GPR_INFO, "========= UPDATE 1 DONE ==========");

  // Wait until the update has been processed, as signaled by the second
  // backend receiving a request.
  EXPECT_EQ(0U, backends_[1]->service_.request_count());
  WaitForBackend(1);

  backends_[1]->service_.ResetCounters();
  gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
  CheckRpcSendOk(10);
  gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
  // All 10 requests should have gone to the second backend.
  EXPECT_EQ(10U, backends_[1]->service_.request_count());

  balancers_[0]->service_.NotifyDoneWithServerlists();
  balancers_[1]->service_.NotifyDoneWithServerlists();
  balancers_[2]->service_.NotifyDoneWithServerlists();
  EXPECT_EQ(1U, balancers_[0]->service_.request_count());
  EXPECT_EQ(1U, balancers_[0]->service_.response_count());
  EXPECT_EQ(1U, balancers_[1]->service_.request_count());
  EXPECT_EQ(1U, balancers_[1]->service_.response_count());
  EXPECT_EQ(0U, balancers_[2]->service_.request_count());
  EXPECT_EQ(0U, balancers_[2]->service_.response_count());
}

// Send an update with the same set of LBs as the one in SetUp() in order to
// verify that the LB channel inside grpclb keeps the initial connection (which
// by definition is also present in the update).
TEST_F(UpdatesTest, UpdateBalancersRepeated) {
  SetNextResolutionAllBalancers();
  const std::vector<int> first_backend{GetBackendPorts()[0]};
  const std::vector<int> second_backend{GetBackendPorts()[1]};
  // Note: the second balancer must be given a backend distinct from the
  // first's; otherwise the backends_[1] checks below could not detect an
  // (incorrect) switch to the second balancer.

  ScheduleResponseForBalancer(
      0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0);
  ScheduleResponseForBalancer(
      1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0);

  // Wait until the first backend is ready.
  WaitForBackend(0);

  // Send 10 requests.
  gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
  CheckRpcSendOk(10);
  gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");

  // All 10 requests should have gone to the first backend.
  EXPECT_EQ(10U, backends_[0]->service_.request_count());

  balancers_[0]->service_.NotifyDoneWithServerlists();
  // Balancer 0 got a single request.
  EXPECT_EQ(1U, balancers_[0]->service_.request_count());
  // and sent a single response.
  EXPECT_EQ(1U, balancers_[0]->service_.response_count());
  EXPECT_EQ(0U, balancers_[1]->service_.request_count());
  EXPECT_EQ(0U, balancers_[1]->service_.response_count());
  EXPECT_EQ(0U, balancers_[2]->service_.request_count());
  EXPECT_EQ(0U, balancers_[2]->service_.response_count());

  std::vector<AddressData> addresses;
  addresses.emplace_back(AddressData{balancers_[0]->port_, true, ""});
  addresses.emplace_back(AddressData{balancers_[1]->port_, true, ""});
  addresses.emplace_back(AddressData{balancers_[2]->port_, true, ""});
  gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 ==========");
  SetNextResolution(addresses);
  gpr_log(GPR_INFO, "========= UPDATE 1 DONE ==========");

  EXPECT_EQ(0U, backends_[1]->service_.request_count());
  gpr_timespec deadline = gpr_time_add(
      gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(10000, GPR_TIMESPAN));
  // Send 10 seconds' worth of RPCs.
  do {
    CheckRpcSendOk();
  } while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0);
  // grpclb continued using the original LB call to the first balancer, which
  // doesn't assign the second backend.
  EXPECT_EQ(0U, backends_[1]->service_.request_count());
  balancers_[0]->service_.NotifyDoneWithServerlists();

  addresses.clear();
  addresses.emplace_back(AddressData{balancers_[0]->port_, true, ""});
  addresses.emplace_back(AddressData{balancers_[1]->port_, true, ""});
  gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 2 ==========");
  SetNextResolution(addresses);
  gpr_log(GPR_INFO, "========= UPDATE 2 DONE ==========");

  EXPECT_EQ(0U, backends_[1]->service_.request_count());
  deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                          gpr_time_from_millis(10000, GPR_TIMESPAN));
  // Send 10 seconds' worth of RPCs.
  do {
    CheckRpcSendOk();
  } while (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), deadline) < 0);
  // grpclb continued using the original LB call to the first balancer, which
  // doesn't assign the second backend.
  EXPECT_EQ(0U, backends_[1]->service_.request_count());
  balancers_[0]->service_.NotifyDoneWithServerlists();
}

TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
  std::vector<AddressData> addresses;
  addresses.emplace_back(AddressData{balancers_[0]->port_, true, ""});
  SetNextResolution(addresses);
  const std::vector<int> first_backend{GetBackendPorts()[0]};
  const std::vector<int> second_backend{GetBackendPorts()[1]};

  ScheduleResponseForBalancer(
      0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0);
  ScheduleResponseForBalancer(
      1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0);

  // Start servers and send 10 RPCs per server.
  gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
  CheckRpcSendOk(10);
  gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
  // All 10 requests should have gone to the first backend.
  EXPECT_EQ(10U, backends_[0]->service_.request_count());

  // Kill balancer 0.
  gpr_log(GPR_INFO, "********** ABOUT TO KILL BALANCER 0 *************");
  balancers_[0]->Shutdown();
  gpr_log(GPR_INFO, "********** KILLED BALANCER 0 *************");

  // This is serviced by the existing RR policy.
  gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
  CheckRpcSendOk(10);
  gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
  // All 10 requests should again have gone to the first backend.
  EXPECT_EQ(20U, backends_[0]->service_.request_count());
  EXPECT_EQ(0U, backends_[1]->service_.request_count());

  balancers_[0]->service_.NotifyDoneWithServerlists();
  balancers_[1]->service_.NotifyDoneWithServerlists();
  balancers_[2]->service_.NotifyDoneWithServerlists();
  // Balancer 0 got a single request.
  EXPECT_EQ(1U, balancers_[0]->service_.request_count());
  // and sent a single response.
  EXPECT_EQ(1U, balancers_[0]->service_.response_count());
  EXPECT_EQ(0U, balancers_[1]->service_.request_count());
  EXPECT_EQ(0U, balancers_[1]->service_.response_count());
  EXPECT_EQ(0U, balancers_[2]->service_.request_count());
  EXPECT_EQ(0U, balancers_[2]->service_.response_count());

  addresses.clear();
  addresses.emplace_back(AddressData{balancers_[1]->port_, true, ""});
  gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 ==========");
  SetNextResolution(addresses);
  gpr_log(GPR_INFO, "========= UPDATE 1 DONE ==========");

  // Wait until the update has been processed, as signaled by the second
  // backend receiving a request. In the meantime, the client continues to be
  // serviced (by the first backend) without interruption.
  EXPECT_EQ(0U, backends_[1]->service_.request_count());
  WaitForBackend(1);

  // This is serviced by the updated RR policy.
  backends_[1]->service_.ResetCounters();
  gpr_log(GPR_INFO, "========= BEFORE THIRD BATCH ==========");
  CheckRpcSendOk(10);
  gpr_log(GPR_INFO, "========= DONE WITH THIRD BATCH ==========");
  // All 10 requests should have gone to the second backend.
  EXPECT_EQ(10U, backends_[1]->service_.request_count());

  balancers_[0]->service_.NotifyDoneWithServerlists();
  balancers_[1]->service_.NotifyDoneWithServerlists();
  balancers_[2]->service_.NotifyDoneWithServerlists();
  EXPECT_EQ(1U, balancers_[0]->service_.request_count());
  EXPECT_EQ(1U, balancers_[0]->service_.response_count());
  // The second balancer, published as part of the first update, may end up
  // getting two requests (that is, 1 <= #req <= 2) if the LB call retry timer
  // firing races with the arrival of the update containing the second
  // balancer.
  EXPECT_GE(balancers_[1]->service_.request_count(), 1U);
  EXPECT_GE(balancers_[1]->service_.response_count(), 1U);
  EXPECT_LE(balancers_[1]->service_.request_count(), 2U);
  EXPECT_LE(balancers_[1]->service_.response_count(), 2U);
  EXPECT_EQ(0U, balancers_[2]->service_.request_count());
  EXPECT_EQ(0U, balancers_[2]->service_.response_count());
}

TEST_F(UpdatesTest, ReresolveDeadBackend) {
  ResetStub(500);
  // The first resolution contains the addresses of a balancer that never
  // responds, and a fallback backend.
  std::vector<AddressData> addresses;
  addresses.emplace_back(AddressData{balancers_[0]->port_, true, ""});
  addresses.emplace_back(AddressData{backends_[0]->port_, false, ""});
  SetNextResolution(addresses);
  // The re-resolution result will contain the addresses of the same balancer
  // and a new fallback backend.
  addresses.clear();
  addresses.emplace_back(AddressData{balancers_[0]->port_, true, ""});
  addresses.emplace_back(AddressData{backends_[1]->port_, false, ""});
  SetNextReresolutionResponse(addresses);

  // Start servers and send 10 RPCs per server.
  gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
  CheckRpcSendOk(10);
  gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
  // All 10 requests should have gone to the fallback backend.
  EXPECT_EQ(10U, backends_[0]->service_.request_count());

  // Kill backend 0.
  gpr_log(GPR_INFO, "********** ABOUT TO KILL BACKEND 0 *************");
  backends_[0]->Shutdown();
  gpr_log(GPR_INFO, "********** KILLED BACKEND 0 *************");

  // Wait until re-resolution has finished, as signaled by the second backend
  // receiving a request.
  WaitForBackend(1);

  gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
  CheckRpcSendOk(10);
  gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
  // All 10 requests should have gone to the second backend.
  EXPECT_EQ(10U, backends_[1]->service_.request_count());

  balancers_[0]->service_.NotifyDoneWithServerlists();
  balancers_[1]->service_.NotifyDoneWithServerlists();
  balancers_[2]->service_.NotifyDoneWithServerlists();
  EXPECT_EQ(1U, balancers_[0]->service_.request_count());
  EXPECT_EQ(0U, balancers_[0]->service_.response_count());
  EXPECT_EQ(0U, balancers_[1]->service_.request_count());
  EXPECT_EQ(0U, balancers_[1]->service_.response_count());
  EXPECT_EQ(0U, balancers_[2]->service_.request_count());
  EXPECT_EQ(0U, balancers_[2]->service_.response_count());
}

// TODO(juanlishen): Should be removed when the first response is always the
// initial response. Currently, if client load reporting is not enabled, the
// balancer doesn't send an initial response. When the backend shuts down, an
// unexpected re-resolution will happen. This test configuration is a
// workaround for test ReresolveDeadBalancer.
class UpdatesWithClientLoadReportingTest : public GrpclbEnd2endTest {
 public:
  UpdatesWithClientLoadReportingTest() : GrpclbEnd2endTest(4, 3, 2) {}
};

TEST_F(UpdatesWithClientLoadReportingTest, ReresolveDeadBalancer) {
  std::vector<AddressData> addresses;
  addresses.emplace_back(AddressData{balancers_[0]->port_, true, ""});
  SetNextResolution(addresses);
  addresses.clear();
  addresses.emplace_back(AddressData{balancers_[1]->port_, true, ""});
  SetNextReresolutionResponse(addresses);
  const std::vector<int> first_backend{GetBackendPorts()[0]};
  const std::vector<int> second_backend{GetBackendPorts()[1]};

  ScheduleResponseForBalancer(
      0, BalancerServiceImpl::BuildResponseForBackends(first_backend, {}), 0);
  ScheduleResponseForBalancer(
      1, BalancerServiceImpl::BuildResponseForBackends(second_backend, {}), 0);

  // Start servers and send 10 RPCs per server.
  gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH ==========");
  CheckRpcSendOk(10);
  gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH ==========");
  // All 10 requests should have gone to the first backend.
  EXPECT_EQ(10U, backends_[0]->service_.request_count());

  // Kill backend 0.
  gpr_log(GPR_INFO, "********** ABOUT TO KILL BACKEND 0 *************");
  backends_[0]->Shutdown();
  gpr_log(GPR_INFO, "********** KILLED BACKEND 0 *************");

  CheckRpcSendFailure();

  // Balancer 0 got a single request.
  EXPECT_EQ(1U, balancers_[0]->service_.request_count());
  // and sent a single response.
  EXPECT_EQ(1U, balancers_[0]->service_.response_count());
  EXPECT_EQ(0U, balancers_[1]->service_.request_count());
  EXPECT_EQ(0U, balancers_[1]->service_.response_count());
  EXPECT_EQ(0U, balancers_[2]->service_.request_count());
  EXPECT_EQ(0U, balancers_[2]->service_.response_count());

  // Kill balancer 0.
  gpr_log(GPR_INFO, "********** ABOUT TO KILL BALANCER 0 *************");
  balancers_[0]->Shutdown();
  gpr_log(GPR_INFO, "********** KILLED BALANCER 0 *************");

  // Wait until re-resolution has finished, as signaled by the second backend
  // receiving a request.
  WaitForBackend(1);

  // This is serviced by the new serverlist.
  gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH ==========");
  CheckRpcSendOk(10);
  gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH ==========");
  // All 10 requests should have gone to the second backend.
  EXPECT_EQ(10U, backends_[1]->service_.request_count());

  EXPECT_EQ(1U, balancers_[0]->service_.request_count());
  EXPECT_EQ(1U, balancers_[0]->service_.response_count());
  // After balancer 0 is killed, we restart an LB call immediately (because we
  // lost the connection to a previously connected balancer). Although we will
  // cancel this call when the re-resolution update is done and another LB call
  // restart is needed, this old call may still succeed in reaching the LB
  // server if re-resolution is slow. So balancer 1 may have received 2
  // requests and sent 2 responses.
  EXPECT_GE(balancers_[1]->service_.request_count(), 1U);
  EXPECT_GE(balancers_[1]->service_.response_count(), 1U);
  EXPECT_LE(balancers_[1]->service_.request_count(), 2U);
  EXPECT_LE(balancers_[1]->service_.response_count(), 2U);
  EXPECT_EQ(0U, balancers_[2]->service_.request_count());
  EXPECT_EQ(0U, balancers_[2]->service_.response_count());
}

TEST_F(SingleBalancerTest, Drop) {
  SetNextResolutionAllBalancers();
  const size_t kNumRpcsPerAddress = 100;
  const int num_of_drop_by_rate_limiting_addresses = 1;
  const int num_of_drop_by_load_balancing_addresses = 2;
  const int num_of_drop_addresses = num_of_drop_by_rate_limiting_addresses +
                                    num_of_drop_by_load_balancing_addresses;
  const int num_total_addresses = num_backends_ + num_of_drop_addresses;
  ScheduleResponseForBalancer(
      0,
      BalancerServiceImpl::BuildResponseForBackends(
          GetBackendPorts(),
          {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
           {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
      0);
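  // Note: each ("token", count) pair above asks the balancer stub to add that
  // many drop entries to the serverlist, tagged with the given load-balance
  // token, so the client should drop a deterministic share of calls as it
  // round-robins over the list. (Description inferred from how the counts are
  // used below; see BalancerServiceImpl for the exact encoding.)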
  // Wait until all backends are ready.
  WaitForAllBackends();
  // Send kNumRpcsPerAddress RPCs for each server and drop address.
  size_t num_drops = 0;
  for (size_t i = 0; i < kNumRpcsPerAddress * num_total_addresses; ++i) {
    EchoResponse response;
    const Status status = SendRpc(&response);
    if (!status.ok() &&
        status.error_message() == "Call dropped by load balancing policy") {
      ++num_drops;
    } else {
      EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
                               << " message=" << status.error_message();
      EXPECT_EQ(response.message(), kRequestMessage_);
    }
  }
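  // Round-robin over num_total_addresses serverlist entries sends exactly
  // kNumRpcsPerAddress RPCs to each entry, so each drop entry should account
  // for kNumRpcsPerAddress of the observed drops.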
  EXPECT_EQ(kNumRpcsPerAddress * num_of_drop_addresses, num_drops);
  // Each backend should have gotten 100 requests.
  for (size_t i = 0; i < backends_.size(); ++i) {
    EXPECT_EQ(kNumRpcsPerAddress, backends_[i]->service_.request_count());
  }
  // The balancer got a single request.
  EXPECT_EQ(1U, balancers_[0]->service_.request_count());
  // and sent a single response.
  EXPECT_EQ(1U, balancers_[0]->service_.response_count());
}

TEST_F(SingleBalancerTest, DropAllFirst) {
  SetNextResolutionAllBalancers();
  // All registered addresses are marked as "drop".
  const int num_of_drop_by_rate_limiting_addresses = 1;
  const int num_of_drop_by_load_balancing_addresses = 1;
  ScheduleResponseForBalancer(
      0,
      BalancerServiceImpl::BuildResponseForBackends(
          {}, {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
               {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
      0);
  const Status status =
      SendRpc(/* response */ nullptr, /* timeout_ms */ 1000,
              /* wait_for_ready */ true);
  EXPECT_FALSE(status.ok());
  EXPECT_EQ(status.error_message(), "Call dropped by load balancing policy");
}

TEST_F(SingleBalancerTest, DropAll) {
  SetNextResolutionAllBalancers();
  ScheduleResponseForBalancer(
      0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
      0);
  const int num_of_drop_by_rate_limiting_addresses = 1;
  const int num_of_drop_by_load_balancing_addresses = 1;
  ScheduleResponseForBalancer(
      0,
      BalancerServiceImpl::BuildResponseForBackends(
          {}, {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
               {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
      1000);

  // First call succeeds.
  CheckRpcSendOk();
  // But eventually, the update with only dropped servers is processed, and
  // calls fail.
  Status status;
  do {
    status = SendRpc(/* response */ nullptr, /* timeout_ms */ 1000,
                     /* wait_for_ready */ true);
  } while (status.ok());
  EXPECT_FALSE(status.ok());
  EXPECT_EQ(status.error_message(), "Call dropped by load balancing policy");
}

class SingleBalancerWithClientLoadReportingTest : public GrpclbEnd2endTest {
 public:
  SingleBalancerWithClientLoadReportingTest() : GrpclbEnd2endTest(4, 1, 3) {}
};
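
// Judging by the fixtures above, the nonzero third constructor argument
// enables client load reporting for these tests (and, per the TODO above
// UpdatesWithClientLoadReportingTest, makes the balancer send an initial
// response).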

TEST_F(SingleBalancerWithClientLoadReportingTest, Vanilla) {
  SetNextResolutionAllBalancers();
  const size_t kNumRpcsPerAddress = 100;
  ScheduleResponseForBalancer(
      0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
      0);
  // Wait until all backends are ready.
  int num_ok = 0;
  int num_failure = 0;
  int num_drops = 0;
  std::tie(num_ok, num_failure, num_drops) = WaitForAllBackends();
  // Send kNumRpcsPerAddress RPCs per server.
  CheckRpcSendOk(kNumRpcsPerAddress * num_backends_);
  // Each backend should have gotten 100 requests.
  for (size_t i = 0; i < backends_.size(); ++i) {
    EXPECT_EQ(kNumRpcsPerAddress, backends_[i]->service_.request_count());
  }
  balancers_[0]->service_.NotifyDoneWithServerlists();
  // The balancer got a single request.
  EXPECT_EQ(1U, balancers_[0]->service_.request_count());
  // and sent a single response.
  EXPECT_EQ(1U, balancers_[0]->service_.response_count());

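  // The load report also covers the warm-up RPCs issued by
  // WaitForAllBackends(), which is why num_ok appears in the expected counts
  // below; with no drop entries in this serverlist, num_drops should be 0.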
  const ClientStats client_stats = WaitForLoadReports();
  EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok,
            client_stats.num_calls_started);
  EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_ok,
            client_stats.num_calls_finished);
  EXPECT_EQ(0U, client_stats.num_calls_finished_with_client_failed_to_send);
  EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + (num_ok + num_drops),
            client_stats.num_calls_finished_known_received);
  EXPECT_THAT(client_stats.drop_token_counts, ::testing::ElementsAre());
}

TEST_F(SingleBalancerWithClientLoadReportingTest, BalancerRestart) {
  SetNextResolutionAllBalancers();
  const size_t kNumBackendsFirstPass = 2;
  const size_t kNumBackendsSecondPass =
      backends_.size() - kNumBackendsFirstPass;
  // Balancer returns only the first kNumBackendsFirstPass backends.
  ScheduleResponseForBalancer(
      0,
      BalancerServiceImpl::BuildResponseForBackends(
          GetBackendPorts(0, kNumBackendsFirstPass), {}),
      0);
  // Wait until all backends returned by the balancer are ready.
  int num_ok = 0;
  int num_failure = 0;
  int num_drops = 0;
  std::tie(num_ok, num_failure, num_drops) =
      WaitForAllBackends(/* num_requests_multiple_of */ 1, /* start_index */ 0,
                         /* stop_index */ kNumBackendsFirstPass);
  balancers_[0]->service_.NotifyDoneWithServerlists();
  ClientStats client_stats = WaitForLoadReports();
  EXPECT_EQ(static_cast<size_t>(num_ok), client_stats.num_calls_started);
  EXPECT_EQ(static_cast<size_t>(num_ok), client_stats.num_calls_finished);
  EXPECT_EQ(0U, client_stats.num_calls_finished_with_client_failed_to_send);
  EXPECT_EQ(static_cast<size_t>(num_ok),
            client_stats.num_calls_finished_known_received);
  EXPECT_THAT(client_stats.drop_token_counts, ::testing::ElementsAre());
  // Shut down the balancer.
  balancers_[0]->Shutdown();
  // Send one more request per backend.  This will continue using the
  // last serverlist we received from the balancer before it was shut down.
  ResetBackendCounters();
  CheckRpcSendOk(kNumBackendsFirstPass);
  // Each backend should have gotten 1 request.
  for (size_t i = 0; i < kNumBackendsFirstPass; ++i) {
    EXPECT_EQ(1UL, backends_[i]->service_.request_count());
  }
  // Now restart the balancer, this time pointing to the remaining backends.
  balancers_[0]->Start(server_host_);
  ScheduleResponseForBalancer(0,
                              BalancerServiceImpl::BuildResponseForBackends(
                                  GetBackendPorts(kNumBackendsFirstPass), {}),
                              0);
  // Wait for queries to start going to one of the new backends.
  // This tells us that we're now using the new serverlist.
  do {
    CheckRpcSendOk();
  } while (backends_[2]->service_.request_count() == 0 &&
           backends_[3]->service_.request_count() == 0);
  // Send one RPC per backend.
  CheckRpcSendOk(kNumBackendsSecondPass);
  balancers_[0]->service_.NotifyDoneWithServerlists();
  // Check client stats.
  client_stats = WaitForLoadReports();
  EXPECT_EQ(kNumBackendsSecondPass + 1, client_stats.num_calls_started);
  EXPECT_EQ(kNumBackendsSecondPass + 1, client_stats.num_calls_finished);
  EXPECT_EQ(0U, client_stats.num_calls_finished_with_client_failed_to_send);
  EXPECT_EQ(kNumBackendsSecondPass + 1,
            client_stats.num_calls_finished_known_received);
  EXPECT_THAT(client_stats.drop_token_counts, ::testing::ElementsAre());
}

TEST_F(SingleBalancerWithClientLoadReportingTest, Drop) {
  SetNextResolutionAllBalancers();
  const size_t kNumRpcsPerAddress = 3;
  const int num_of_drop_by_rate_limiting_addresses = 2;
  const int num_of_drop_by_load_balancing_addresses = 1;
  const int num_of_drop_addresses = num_of_drop_by_rate_limiting_addresses +
                                    num_of_drop_by_load_balancing_addresses;
  const int num_total_addresses = num_backends_ + num_of_drop_addresses;
  ScheduleResponseForBalancer(
      0,
      BalancerServiceImpl::BuildResponseForBackends(
          GetBackendPorts(),
          {{"rate_limiting", num_of_drop_by_rate_limiting_addresses},
           {"load_balancing", num_of_drop_by_load_balancing_addresses}}),
      0);
  // Wait until all backends are ready.
  int num_warmup_ok = 0;
  int num_warmup_failure = 0;
  int num_warmup_drops = 0;
  std::tie(num_warmup_ok, num_warmup_failure, num_warmup_drops) =
      WaitForAllBackends(num_total_addresses /* num_requests_multiple_of */);
  const int num_total_warmup_requests =
      num_warmup_ok + num_warmup_failure + num_warmup_drops;
  size_t num_drops = 0;
  for (size_t i = 0; i < kNumRpcsPerAddress * num_total_addresses; ++i) {
    EchoResponse response;
    const Status status = SendRpc(&response);
    if (!status.ok() &&
        status.error_message() == "Call dropped by load balancing policy") {
      ++num_drops;
    } else {
      EXPECT_TRUE(status.ok()) << "code=" << status.error_code()
                               << " message=" << status.error_message();
      EXPECT_EQ(response.message(), kRequestMessage_);
    }
  }
  EXPECT_EQ(kNumRpcsPerAddress * num_of_drop_addresses, num_drops);
  // Each backend should have gotten kNumRpcsPerAddress requests.
  for (size_t i = 0; i < backends_.size(); ++i) {
    EXPECT_EQ(kNumRpcsPerAddress, backends_[i]->service_.request_count());
  }
  balancers_[0]->service_.NotifyDoneWithServerlists();
  // The balancer got a single request.
  EXPECT_EQ(1U, balancers_[0]->service_.request_count());
  // and sent a single response.
  EXPECT_EQ(1U, balancers_[0]->service_.response_count());

  const ClientStats client_stats = WaitForLoadReports();
  EXPECT_EQ(
      kNumRpcsPerAddress * num_total_addresses + num_total_warmup_requests,
      client_stats.num_calls_started);
  EXPECT_EQ(
      kNumRpcsPerAddress * num_total_addresses + num_total_warmup_requests,
      client_stats.num_calls_finished);
  EXPECT_EQ(0U, client_stats.num_calls_finished_with_client_failed_to_send);
  EXPECT_EQ(kNumRpcsPerAddress * num_backends_ + num_warmup_ok,
            client_stats.num_calls_finished_known_received);
  // The number of warmup requests is a multiple of the number of addresses.
  // Therefore, all addresses in the scheduled balancer response are hit the
  // same number of times.
  const int num_times_drop_addresses_hit =
      num_warmup_drops / num_of_drop_addresses;
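  // Each drop address is hit kNumRpcsPerAddress times by the loop above plus
  // num_times_drop_addresses_hit times during warmup. "load_balancing" maps
  // to one drop address and "rate_limiting" to two, hence the factor of 2.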
  EXPECT_THAT(
      client_stats.drop_token_counts,
      ::testing::ElementsAre(
          ::testing::Pair("load_balancing",
                          (kNumRpcsPerAddress + num_times_drop_addresses_hit)),
          ::testing::Pair(
              "rate_limiting",
              (kNumRpcsPerAddress + num_times_drop_addresses_hit) * 2)));
}

}  // namespace
}  // namespace testing
}  // namespace grpc

int main(int argc, char** argv) {
  grpc_init();
  grpc::testing::TestEnvironment env(argc, argv);
  ::testing::InitGoogleTest(&argc, argv);
  const auto result = RUN_ALL_TESTS();
  grpc_shutdown();
  return result;
}