test/core/transport/chttp2/too_many_pings_test.cc (platform/upstream/grpc.git, imported upstream version 1.32.0)
/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <grpc/support/port_platform.h>

#include <gmock/gmock.h>
#include <stdlib.h>
#include <string.h>
#include <functional>
#include <map>
#include <set>
#include <string>
#include <thread>
#include <vector>

#include "absl/strings/str_cat.h"

#include <grpc/grpc.h>
#include <grpc/grpc_security.h>
#include <grpc/impl/codegen/grpc_types.h>
#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/time.h>

#include <grpcpp/impl/codegen/service_type.h>
#include <grpcpp/server_builder.h>

#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/host_port.h"
#include "src/core/lib/gprpp/thd.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/parse_address.h"
#include "src/core/lib/security/credentials/alts/alts_credentials.h"
#include "src/core/lib/security/credentials/credentials.h"
#include "src/core/lib/security/security_connector/alts/alts_security_connector.h"
#include "src/core/lib/slice/slice_string_helpers.h"

#include "test/core/util/memory_counters.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"

#include "test/core/end2end/cq_verifier.h"

namespace {

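// Converts a small integer into the void* tag used to identify completion
// queue events in these tests.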
void* tag(int i) { return (void*)static_cast<intptr_t>(i); }

// Perform a simple RPC where the server cancels the request with
// grpc_call_cancel_with_status
grpc_status_code PerformCall(grpc_channel* channel, grpc_server* server,
                             grpc_completion_queue* cq) {
  grpc_call* c;
  grpc_call* s;
  cq_verifier* cqv = cq_verifier_create(cq);
  grpc_op ops[6];
  grpc_op* op;
  grpc_metadata_array trailing_metadata_recv;
  grpc_metadata_array request_metadata_recv;
  grpc_call_details call_details;
  grpc_status_code status;
  grpc_call_error error;
  grpc_slice details;
  gpr_timespec deadline = grpc_timeout_seconds_to_deadline(5);
  // Start a call
  c = grpc_channel_create_call(channel, nullptr, GRPC_PROPAGATE_DEFAULTS, cq,
                               grpc_slice_from_static_string("/foo"), nullptr,
                               deadline, nullptr);
  GPR_ASSERT(c);
  grpc_metadata_array_init(&trailing_metadata_recv);
  grpc_metadata_array_init(&request_metadata_recv);
  grpc_call_details_init(&call_details);
  memset(ops, 0, sizeof(ops));
  op = ops;
  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 0;
  op->flags = 0;
  op->reserved = nullptr;
  op++;
  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
  op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
  op->data.recv_status_on_client.status = &status;
  op->data.recv_status_on_client.status_details = &details;
  op->flags = 0;
  op->reserved = nullptr;
  op++;
  error = grpc_call_start_batch(c, ops, static_cast<size_t>(op - ops), tag(1),
                                nullptr);
  GPR_ASSERT(GRPC_CALL_OK == error);
  // Request a call on the server
  error = grpc_server_request_call(server, &s, &call_details,
                                   &request_metadata_recv, cq, cq, tag(101));
  GPR_ASSERT(GRPC_CALL_OK == error);
  CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
  cq_verify(cqv);
  grpc_call_cancel_with_status(s, GRPC_STATUS_PERMISSION_DENIED, "test status",
                               nullptr);
  CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
  cq_verify(cqv);
  // cleanup
  grpc_slice_unref(details);
  grpc_metadata_array_destroy(&trailing_metadata_recv);
  grpc_metadata_array_destroy(&request_metadata_recv);
  grpc_call_details_destroy(&call_details);
  grpc_call_unref(c);
  grpc_call_unref(s);
  cq_verifier_destroy(cqv);
  return status;
}

// Test that sending a lot of RPCs that are cancelled by the server doesn't
// result in too many pings due to the pings sent by BDP.
TEST(TooManyPings, TestLotsOfServerCancelledRpcsDoesntGiveTooManyPings) {
  grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
  // create the server
  grpc_server* server = grpc_server_create(nullptr, nullptr);
  std::string server_address =
      grpc_core::JoinHostPort("localhost", grpc_pick_unused_port_or_die());
  grpc_server_register_completion_queue(server, cq, nullptr);
  GPR_ASSERT(
      grpc_server_add_insecure_http2_port(server, server_address.c_str()));
  grpc_server_start(server);
  // create the channel (bdp pings are enabled by default)
  grpc_channel* channel = grpc_insecure_channel_create(
      server_address.c_str(), nullptr /* channel args */, nullptr);
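  // Count how many RPCs finish with each status code.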
  std::map<grpc_status_code, int> statuses_and_counts;
  const int kNumTotalRpcs = 1e5;
  // perform the RPCs
  gpr_log(GPR_INFO,
          "Performing %d total RPCs and expecting them all to receive status "
          "PERMISSION_DENIED (%d)",
          kNumTotalRpcs, GRPC_STATUS_PERMISSION_DENIED);
  for (int i = 0; i < kNumTotalRpcs; i++) {
    grpc_status_code status = PerformCall(channel, server, cq);
    statuses_and_counts[status] += 1;
  }
  int num_not_cancelled = 0;
  for (auto itr = statuses_and_counts.begin(); itr != statuses_and_counts.end();
       itr++) {
    if (itr->first != GRPC_STATUS_PERMISSION_DENIED) {
      num_not_cancelled += itr->second;
    }
    gpr_log(GPR_INFO, "%d / %d RPCs received status code: %d", itr->second,
            kNumTotalRpcs, itr->first);
  }
  if (num_not_cancelled > 0) {
    gpr_log(GPR_ERROR,
            "Expected all RPCs to receive status PERMISSION_DENIED (%d) but %d "
            "received other status codes",
            GRPC_STATUS_PERMISSION_DENIED, num_not_cancelled);
    FAIL();
  }
  // shutdown and destroy the client and server
  grpc_channel_destroy(channel);
  grpc_server_shutdown_and_notify(server, cq, nullptr);
  grpc_completion_queue_shutdown(cq);
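  // Drain the completion queue until it reports shutdown.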
  while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
                                    nullptr)
             .type != GRPC_QUEUE_SHUTDOWN)
    ;
  grpc_server_destroy(server);
  grpc_completion_queue_destroy(cq);
}

// Perform a simple RPC where the client makes a request, and both the client
// and server continue reading so that gRPC can send and receive keepalive
// pings.
grpc_status_code PerformWaitingCall(grpc_channel* channel, grpc_server* server,
                                    grpc_completion_queue* cq) {
  grpc_call* c;
  grpc_call* s;
  cq_verifier* cqv = cq_verifier_create(cq);
  grpc_op ops[6];
  grpc_op* op;
  grpc_metadata_array trailing_metadata_recv;
  grpc_metadata_array request_metadata_recv;
  grpc_call_details call_details;
  grpc_status_code status;
  grpc_call_error error;
  grpc_slice details;
  gpr_timespec deadline = grpc_timeout_seconds_to_deadline(15);
  // Start a call
  c = grpc_channel_create_call(channel, nullptr, GRPC_PROPAGATE_DEFAULTS, cq,
                               grpc_slice_from_static_string("/foo"), nullptr,
                               deadline, nullptr);
  GPR_ASSERT(c);
  grpc_metadata_array_init(&trailing_metadata_recv);
  grpc_metadata_array_init(&request_metadata_recv);
  grpc_call_details_init(&call_details);
  memset(ops, 0, sizeof(ops));
  op = ops;
  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 0;
  op->flags = 0;
  op->reserved = nullptr;
  op++;
  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
  op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
  op->data.recv_status_on_client.status = &status;
  op->data.recv_status_on_client.status_details = &details;
  op->flags = 0;
  op->reserved = nullptr;
  op++;
  error = grpc_call_start_batch(c, ops, static_cast<size_t>(op - ops), tag(1),
                                nullptr);
  GPR_ASSERT(GRPC_CALL_OK == error);
  // Request a call on the server
  error = grpc_server_request_call(server, &s, &call_details,
                                   &request_metadata_recv, cq, cq, tag(101));
  GPR_ASSERT(GRPC_CALL_OK == error);
  CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
  cq_verify(cqv);
  // Since the server is configured to allow only a single ping strike, it
  // takes 3 pings to trigger a GOAWAY frame with "too_many_pings" from the
  // server. (The second ping from the client is the first one sent too
  // quickly, which earns a ping strike, and the third ping triggers the
  // GOAWAY.) If the client's settings match the server's settings, there
  // won't be a bad ping, and the call will end due to the deadline expiring
  // instead.
  CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
  // The call ends after this; allow up to 60 seconds for it to complete.
  cq_verify(cqv, 60);
  // cleanup
  grpc_slice_unref(details);
  grpc_metadata_array_destroy(&trailing_metadata_recv);
  grpc_metadata_array_destroy(&request_metadata_recv);
  grpc_call_details_destroy(&call_details);
  grpc_call_unref(c);
  grpc_call_unref(s);
  cq_verifier_destroy(cqv);
  return status;
}

class KeepaliveThrottlingTest : public ::testing::Test {
 protected:
  // Creates and starts a server listening on `addr`, registered with `cq`.
  grpc_server* ServerStart(const char* addr, grpc_completion_queue* cq) {
    // Set up server channel args to expect pings at an interval of 5 seconds
    // and use a single ping strike
    grpc_arg server_args[] = {
        grpc_channel_arg_integer_create(
            const_cast<char*>(
                GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS),
            5 * 1000),
        grpc_channel_arg_integer_create(
            const_cast<char*>(GRPC_ARG_HTTP2_MAX_PING_STRIKES), 1)};
    grpc_channel_args server_channel_args = {GPR_ARRAY_SIZE(server_args),
                                             server_args};
    // Create server
    grpc_server* server = grpc_server_create(&server_channel_args, nullptr);
    grpc_server_register_completion_queue(server, cq, nullptr);
    GPR_ASSERT(grpc_server_add_insecure_http2_port(server, addr));
    grpc_server_start(server);
    return server;
  }

  // Shuts down and destroys the server, draining the completion queue until
  // the shutdown notification is received.
  void ServerShutdownAndDestroy(grpc_server* server,
                                grpc_completion_queue* cq) {
    // Shutdown and destroy server
    grpc_server_shutdown_and_notify(server, cq, (void*)(1000));
    while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
                                      nullptr)
               .tag != (void*)(1000))
      ;
    grpc_server_destroy(server);
  }

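  // Polls the channel's connectivity state until it reports READY.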
  void VerifyChannelReady(grpc_channel* channel, grpc_completion_queue* cq) {
    grpc_connectivity_state state =
        grpc_channel_check_connectivity_state(channel, 1 /* try_to_connect */);
    while (state != GRPC_CHANNEL_READY) {
      grpc_channel_watch_connectivity_state(
          channel, state, grpc_timeout_seconds_to_deadline(5), cq, nullptr);
      grpc_completion_queue_next(cq, grpc_timeout_seconds_to_deadline(5),
                                 nullptr);
      state = grpc_channel_check_connectivity_state(channel, 0);
    }
  }

  void VerifyChannelDisconnected(grpc_channel* channel,
                                 grpc_completion_queue* cq) {
    // Verify that the channel is disconnected. Use a ping to make sure that
    // the client tries sending/receiving bytes if the channel is connected.
    grpc_channel_ping(channel, cq, (void*)(2000), nullptr);
    grpc_event ev = grpc_completion_queue_next(
        cq, grpc_timeout_seconds_to_deadline(5), nullptr);
    GPR_ASSERT(ev.type == GRPC_OP_COMPLETE);
    GPR_ASSERT(ev.tag == (void*)(2000));
    GPR_ASSERT(ev.success == 0);
    GPR_ASSERT(grpc_channel_check_connectivity_state(channel, 0) !=
               GRPC_CHANNEL_READY);
  }
};

TEST_F(KeepaliveThrottlingTest, KeepaliveThrottlingMultipleChannels) {
  grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
  std::string server_address =
      grpc_core::JoinHostPort("127.0.0.1", grpc_pick_unused_port_or_die());
  grpc_server* server = ServerStart(server_address.c_str(), cq);
  // create two channels with a keepalive ping interval of 1 second.
  grpc_arg client_args[] = {
      grpc_channel_arg_integer_create(
          const_cast<char*>(GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA), 0),
      grpc_channel_arg_integer_create(
          const_cast<char*>(
              GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS),
          0),
      grpc_channel_arg_integer_create(
          const_cast<char*>(GRPC_ARG_KEEPALIVE_TIME_MS), 1 * 1000),
      grpc_channel_arg_integer_create(
          const_cast<char*>(GRPC_ARG_HTTP2_BDP_PROBE), 0)};
  grpc_channel_args client_channel_args = {GPR_ARRAY_SIZE(client_args),
                                           client_args};
  grpc_channel* channel = grpc_insecure_channel_create(
      server_address.c_str(), &client_channel_args, nullptr);
  grpc_channel* channel_dup = grpc_insecure_channel_create(
      server_address.c_str(), &client_channel_args, nullptr);
  int expected_keepalive_time_sec = 1;
  // We need 3 GOAWAY frames to throttle the keepalive time from 1 second to 8
  // seconds (> 5 seconds).
  for (int i = 0; i < 3; i++) {
    gpr_log(GPR_INFO, "Expected keepalive time : %d",
            expected_keepalive_time_sec);
    EXPECT_EQ(PerformWaitingCall(channel, server, cq), GRPC_STATUS_UNAVAILABLE);
    expected_keepalive_time_sec *= 2;
  }
  gpr_log(
      GPR_INFO,
      "Client keepalive time %d should now be in sync with the server settings",
      expected_keepalive_time_sec);
  EXPECT_EQ(PerformWaitingCall(channel, server, cq),
            GRPC_STATUS_DEADLINE_EXCEEDED);
  // Since the subchannel is shared, the second channel should also have
  // keepalive settings in sync with the server.
  gpr_log(GPR_INFO, "Now testing second channel sharing the same subchannel");
  EXPECT_EQ(PerformWaitingCall(channel_dup, server, cq),
            GRPC_STATUS_DEADLINE_EXCEEDED);
  // shutdown and destroy the client and server
  grpc_channel_destroy(channel);
  grpc_channel_destroy(channel_dup);
  ServerShutdownAndDestroy(server, cq);
  grpc_completion_queue_shutdown(cq);
  while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
                                    nullptr)
             .type != GRPC_QUEUE_SHUTDOWN)
    ;
  grpc_completion_queue_destroy(cq);
}

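// Builds a resolver result from a list of URI strings
// (e.g. "ipv4:127.0.0.1:1234").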
grpc_core::Resolver::Result BuildResolverResult(
    const std::vector<std::string>& addresses) {
  grpc_core::Resolver::Result result;
  for (const auto& address_str : addresses) {
    grpc_uri* uri = grpc_uri_parse(address_str.c_str(), true);
    if (uri == nullptr) {
      gpr_log(GPR_ERROR, "Failed to parse uri:%s", address_str.c_str());
      GPR_ASSERT(0);
    }
    grpc_resolved_address address;
    GPR_ASSERT(grpc_parse_uri(uri, &address));
    result.addresses.emplace_back(address.addr, address.len, nullptr);
    grpc_uri_destroy(uri);
  }
  return result;
}

// Tests that when new subchannels are created due to a change in resolved
// addresses, the new subchannels use the updated keepalive time.
TEST_F(KeepaliveThrottlingTest, NewSubchannelsUseUpdatedKeepaliveTime) {
  grpc_core::ExecCtx exec_ctx;
  grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
  std::string server_address1 =
      grpc_core::JoinHostPort("127.0.0.1", grpc_pick_unused_port_or_die());
  std::string server_address2 =
      grpc_core::JoinHostPort("127.0.0.1", grpc_pick_unused_port_or_die());
  grpc_server* server1 = ServerStart(server_address1.c_str(), cq);
  grpc_server* server2 = ServerStart(server_address2.c_str(), cq);
  // create a single channel whose subchannels use a keepalive ping interval of
  // 1 second. To get finer control over subchannel connection times, we use
  // pick_first instead of round_robin and use the fake resolver response
  // generator to switch between the two addresses.
  auto response_generator =
      grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
  grpc_arg client_args[] = {
      grpc_channel_arg_integer_create(
          const_cast<char*>(GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA), 0),
      grpc_channel_arg_integer_create(
          const_cast<char*>(
              GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS),
          0),
      grpc_channel_arg_integer_create(
          const_cast<char*>(GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS), 0),
      grpc_channel_arg_integer_create(
          const_cast<char*>(GRPC_ARG_KEEPALIVE_TIME_MS), 1 * 1000),
      grpc_channel_arg_integer_create(
          const_cast<char*>(GRPC_ARG_HTTP2_BDP_PROBE), 0),
      grpc_core::FakeResolverResponseGenerator::MakeChannelArg(
          response_generator.get())};
  grpc_channel_args client_channel_args = {GPR_ARRAY_SIZE(client_args),
                                           client_args};
  grpc_channel* channel =
      grpc_insecure_channel_create("fake:///", &client_channel_args, nullptr);
  // For a single subchannel, 3 GOAWAYs would be sufficient to increase the
  // keepalive time from 1 second to beyond 5 seconds. Even though we are
  // alternating between two subchannels, 3 GOAWAYs should still be enough since
  // the channel should start all new transports with the new keepalive value
  // (even those from a different subchannel).
  int expected_keepalive_time_sec = 1;
  for (int i = 0; i < 3; i++) {
    gpr_log(GPR_INFO, "Expected keepalive time : %d",
            expected_keepalive_time_sec);
    response_generator->SetResponse(BuildResolverResult({absl::StrCat(
        "ipv4:", i % 2 == 0 ? server_address1 : server_address2)}));
    // ExecCtx::Flush() might not be enough to make sure that the resolver
    // result has been propagated, so sleep for a bit.
    grpc_core::ExecCtx::Get()->Flush();
    gpr_sleep_until(grpc_timeout_seconds_to_deadline(1));
    EXPECT_EQ(PerformWaitingCall(channel, i % 2 == 0 ? server1 : server2, cq),
              GRPC_STATUS_UNAVAILABLE);
    expected_keepalive_time_sec *= 2;
  }
  gpr_log(
      GPR_INFO,
      "Client keepalive time %d should now be in sync with the server settings",
      expected_keepalive_time_sec);
  response_generator->SetResponse(
      BuildResolverResult({absl::StrCat("ipv4:", server_address2)}));
  grpc_core::ExecCtx::Get()->Flush();
  gpr_sleep_until(grpc_timeout_seconds_to_deadline(1));
  EXPECT_EQ(PerformWaitingCall(channel, server2, cq),
            GRPC_STATUS_DEADLINE_EXCEEDED);
  // shutdown and destroy the client and server
  grpc_channel_destroy(channel);
  ServerShutdownAndDestroy(server1, cq);
  ServerShutdownAndDestroy(server2, cq);
  grpc_completion_queue_shutdown(cq);
  while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
                                    nullptr)
             .type != GRPC_QUEUE_SHUTDOWN)
    ;
  grpc_completion_queue_destroy(cq);
}

// Tests that when a channel has multiple subchannels and receives a GOAWAY with
// "too_many_pings" on one of them, all subchannels start any new transports
// with an updated keepalive time.
TEST_F(KeepaliveThrottlingTest,
       ExistingSubchannelsUseNewKeepaliveTimeWhenReconnecting) {
  grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
  std::string server_address1 =
      grpc_core::JoinHostPort("127.0.0.1", grpc_pick_unused_port_or_die());
  std::string server_address2 =
      grpc_core::JoinHostPort("127.0.0.1", grpc_pick_unused_port_or_die());
  // create a single channel with a round robin load balancing policy.
  auto response_generator =
      grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
  grpc_arg client_args[] = {
      grpc_channel_arg_integer_create(
          const_cast<char*>(GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA), 0),
      grpc_channel_arg_integer_create(
          const_cast<char*>(
              GRPC_ARG_HTTP2_MIN_SENT_PING_INTERVAL_WITHOUT_DATA_MS),
          0),
      grpc_channel_arg_integer_create(
          const_cast<char*>(GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS), 0),
      grpc_channel_arg_integer_create(
          const_cast<char*>(GRPC_ARG_KEEPALIVE_TIME_MS), 1 * 1000),
      grpc_channel_arg_integer_create(
          const_cast<char*>(GRPC_ARG_HTTP2_BDP_PROBE), 0),
      grpc_core::FakeResolverResponseGenerator::MakeChannelArg(
          response_generator.get())};
  grpc_channel_args client_channel_args = {GPR_ARRAY_SIZE(client_args),
                                           client_args};
  grpc_channel* channel =
      grpc_insecure_channel_create("fake:///", &client_channel_args, nullptr);
  response_generator->SetResponse(
      BuildResolverResult({absl::StrCat("ipv4:", server_address1),
                           absl::StrCat("ipv4:", server_address2)}));
  // For a single subchannel, 3 GOAWAYs would be sufficient to increase the
  // keepalive time from 1 second to beyond 5 seconds. Even though we are
  // alternating between two subchannels, 3 GOAWAYs should still be enough since
  // the channel should start all new transports with the new keepalive value
  // (even those from a different subchannel).
  int expected_keepalive_time_sec = 1;
  for (int i = 0; i < 3; i++) {
    gpr_log(GPR_ERROR, "Expected keepalive time : %d",
            expected_keepalive_time_sec);
    grpc_server* server = ServerStart(
        i % 2 == 0 ? server_address1.c_str() : server_address2.c_str(), cq);
    VerifyChannelReady(channel, cq);
    EXPECT_EQ(PerformWaitingCall(channel, server, cq), GRPC_STATUS_UNAVAILABLE);
    ServerShutdownAndDestroy(server, cq);
    VerifyChannelDisconnected(channel, cq);
    expected_keepalive_time_sec *= 2;
  }
  gpr_log(
      GPR_INFO,
      "Client keepalive time %d should now be in sync with the server settings",
      expected_keepalive_time_sec);
  grpc_server* server = ServerStart(server_address1.c_str(), cq);
  VerifyChannelReady(channel, cq);
  EXPECT_EQ(PerformWaitingCall(channel, server, cq),
            GRPC_STATUS_DEADLINE_EXCEEDED);
  ServerShutdownAndDestroy(server, cq);
  // shutdown and destroy the client
  grpc_channel_destroy(channel);
  grpc_completion_queue_shutdown(cq);
  while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
                                    nullptr)
             .type != GRPC_QUEUE_SHUTDOWN)
    ;
  grpc_completion_queue_destroy(cq);
}

}  // namespace

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  grpc::testing::TestEnvironment env(argc, argv);
  grpc_init();
  auto result = RUN_ALL_TESTS();
  grpc_shutdown();
  return result;
}