Imported Upstream version 1.34.0
[platform/upstream/grpc.git] / test / core / transport / chttp2 / too_many_pings_test.cc
index 708ddd1..2356bb5 100644 (file)
@@ -25,6 +25,8 @@
 #include <set>
 #include <thread>
 
+#include "absl/strings/str_cat.h"
+
 #include <grpc/grpc.h>
 #include <grpc/grpc_security.h>
 #include <grpc/impl/codegen/grpc_types.h>
 #include <grpcpp/impl/codegen/service_type.h>
 #include <grpcpp/server_builder.h>
 
+#include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
 #include "src/core/lib/gpr/useful.h"
 #include "src/core/lib/gprpp/host_port.h"
 #include "src/core/lib/gprpp/thd.h"
 #include "src/core/lib/iomgr/error.h"
+#include "src/core/lib/iomgr/parse_address.h"
 #include "src/core/lib/security/credentials/alts/alts_credentials.h"
 #include "src/core/lib/security/credentials/credentials.h"
 #include "src/core/lib/security/security_connector/alts/alts_security_connector.h"
 #include "src/core/lib/slice/slice_string_helpers.h"
+#include "src/core/lib/surface/channel.h"
 
 #include "test/core/util/memory_counters.h"
 #include "test/core/util/port.h"
@@ -65,7 +70,6 @@ grpc_status_code PerformCall(grpc_channel* channel, grpc_server* server,
   cq_verifier* cqv = cq_verifier_create(cq);
   grpc_op ops[6];
   grpc_op* op;
-  grpc_metadata_array initial_metadata_recv;
   grpc_metadata_array trailing_metadata_recv;
   grpc_metadata_array request_metadata_recv;
   grpc_call_details call_details;
@@ -78,7 +82,6 @@ grpc_status_code PerformCall(grpc_channel* channel, grpc_server* server,
                                grpc_slice_from_static_string("/foo"), nullptr,
                                deadline, nullptr);
   GPR_ASSERT(c);
-  grpc_metadata_array_init(&initial_metadata_recv);
   grpc_metadata_array_init(&trailing_metadata_recv);
   grpc_metadata_array_init(&request_metadata_recv);
   grpc_call_details_init(&call_details);
@@ -111,7 +114,6 @@ grpc_status_code PerformCall(grpc_channel* channel, grpc_server* server,
   cq_verify(cqv);
   // cleanup
   grpc_slice_unref(details);
-  grpc_metadata_array_destroy(&initial_metadata_recv);
   grpc_metadata_array_destroy(&trailing_metadata_recv);
   grpc_metadata_array_destroy(&request_metadata_recv);
   grpc_call_details_destroy(&call_details);
@@ -169,9 +171,554 @@ TEST(TooManyPings, TestLotsOfServerCancelledRpcsDoesntGiveTooManyPings) {
   grpc_completion_queue_shutdown(cq);
   while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
                                     nullptr)
-             .type != GRPC_QUEUE_SHUTDOWN)
-    ;
+             .type != GRPC_QUEUE_SHUTDOWN) {
+  }
+  grpc_server_destroy(server);
+  grpc_completion_queue_destroy(cq);
+}
+
+// Perform a simple RPC where the client makes a request, and both the client
+// and server continue reading so that gRPC can send and receive keepalive
+// pings.
+grpc_status_code PerformWaitingCall(grpc_channel* channel, grpc_server* server,
+                                    grpc_completion_queue* cq) {
+  grpc_call* c;
+  grpc_call* s;
+  cq_verifier* cqv = cq_verifier_create(cq);
+  grpc_op ops[6];
+  grpc_op* op;
+  grpc_metadata_array trailing_metadata_recv;
+  grpc_metadata_array request_metadata_recv;
+  grpc_call_details call_details;
+  grpc_status_code status;
+  grpc_call_error error;
+  grpc_slice details;
+  gpr_timespec deadline = grpc_timeout_seconds_to_deadline(15);
+  // Start a call
+  c = grpc_channel_create_call(channel, nullptr, GRPC_PROPAGATE_DEFAULTS, cq,
+                               grpc_slice_from_static_string("/foo"), nullptr,
+                               deadline, nullptr);
+  GPR_ASSERT(c);
+  grpc_metadata_array_init(&trailing_metadata_recv);
+  grpc_metadata_array_init(&request_metadata_recv);
+  grpc_call_details_init(&call_details);
+  memset(ops, 0, sizeof(ops));
+  op = ops;
+  op->op = GRPC_OP_SEND_INITIAL_METADATA;
+  op->data.send_initial_metadata.count = 0;
+  op->flags = 0;
+  op->reserved = nullptr;
+  op++;
+  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
+  op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
+  op->data.recv_status_on_client.status = &status;
+  op->data.recv_status_on_client.status_details = &details;
+  op->flags = 0;
+  op->reserved = nullptr;
+  op++;
+  error = grpc_call_start_batch(c, ops, static_cast<size_t>(op - ops), tag(1),
+                                nullptr);
+  GPR_ASSERT(GRPC_CALL_OK == error);
+  // Request a call on the server
+  error = grpc_server_request_call(server, &s, &call_details,
+                                   &request_metadata_recv, cq, cq, tag(101));
+  GPR_ASSERT(GRPC_CALL_OK == error);
+  CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
+  cq_verify(cqv);
+  // Since the server is configured to allow only a single ping strike, it
+  // would take 3 pings to trigger the GOAWAY frame with "too_many_pings" from
+  // the server. (The second ping from the client would be the first bad ping
+  // sent too quickly, leading to a ping strike, and the third ping would lead
+  // to the GOAWAY.) If the client settings match the server's settings, there
+  // won't be a bad ping, and the call will instead end when the deadline
+  // expires.
+  CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
+  // The call will end after this
+  cq_verify(cqv, 60);
+  // cleanup
+  grpc_slice_unref(details);
+  grpc_metadata_array_destroy(&trailing_metadata_recv);
+  grpc_metadata_array_destroy(&request_metadata_recv);
+  grpc_call_details_destroy(&call_details);
+  grpc_call_unref(c);
+  grpc_call_unref(s);
+  cq_verifier_destroy(cqv);
+  return status;
+}
+
+// Shuts down and destroys the server.
+void ServerShutdownAndDestroy(grpc_server* server, grpc_completion_queue* cq) {
+  // Shutdown and destroy server
+  grpc_server_shutdown_and_notify(server, cq, (void*)(1000));
+  while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
+                                    nullptr)
+             .tag != (void*)(1000)) {
+  }
   grpc_server_destroy(server);
+}
+
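+// Waits (reconnecting as needed) until the channel reports READY.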
+void VerifyChannelReady(grpc_channel* channel, grpc_completion_queue* cq) {
+  grpc_connectivity_state state =
+      grpc_channel_check_connectivity_state(channel, 1 /* try_to_connect */);
+  while (state != GRPC_CHANNEL_READY) {
+    grpc_channel_watch_connectivity_state(
+        channel, state, grpc_timeout_seconds_to_deadline(5), cq, nullptr);
+    grpc_completion_queue_next(cq, grpc_timeout_seconds_to_deadline(5),
+                               nullptr);
+    state = grpc_channel_check_connectivity_state(channel, 0);
+  }
+}
+
+void VerifyChannelDisconnected(grpc_channel* channel,
+                               grpc_completion_queue* cq) {
+  // Verify that the channel gets disconnected. Use a ping to make sure that
+  // the client tries sending/receiving bytes if the channel is connected.
+  grpc_channel_ping(channel, cq, (void*)(2000), nullptr);
+  grpc_event ev = grpc_completion_queue_next(
+      cq, grpc_timeout_seconds_to_deadline(5), nullptr);
+  GPR_ASSERT(ev.type == GRPC_OP_COMPLETE);
+  GPR_ASSERT(ev.tag == (void*)(2000));
+  GPR_ASSERT(ev.success == 0);
+  GPR_ASSERT(grpc_channel_check_connectivity_state(channel, 0) !=
+             GRPC_CHANNEL_READY);
+}
+
+class KeepaliveThrottlingTest : public ::testing::Test {
+ protected:
+  // Creates and starts a server at the given address, configured to allow
+  // only a single ping strike.
+  grpc_server* ServerStart(const char* addr, grpc_completion_queue* cq) {
+    // Set up server channel args to expect pings at an interval of 5 seconds
+    // and use a single ping strike
+    grpc_arg server_args[] = {
+        grpc_channel_arg_integer_create(
+            const_cast<char*>(
+                GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS),
+            5 * 1000),
+        grpc_channel_arg_integer_create(
+            const_cast<char*>(GRPC_ARG_HTTP2_MAX_PING_STRIKES), 1)};
+    grpc_channel_args server_channel_args = {GPR_ARRAY_SIZE(server_args),
+                                             server_args};
+    // Create server
+    grpc_server* server = grpc_server_create(&server_channel_args, nullptr);
+    grpc_server_register_completion_queue(server, cq, nullptr);
+    GPR_ASSERT(grpc_server_add_insecure_http2_port(server, addr));
+    grpc_server_start(server);
+    return server;
+  }
+};
+
+TEST_F(KeepaliveThrottlingTest, KeepaliveThrottlingMultipleChannels) {
+  grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
+  std::string server_address =
+      grpc_core::JoinHostPort("127.0.0.1", grpc_pick_unused_port_or_die());
+  grpc_server* server = ServerStart(server_address.c_str(), cq);
+  // create two channels with a keepalive ping interval of 1 second.
+  grpc_arg client_args[] = {
+      grpc_channel_arg_integer_create(
+          const_cast<char*>(GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA), 0),
+      grpc_channel_arg_integer_create(
+          const_cast<char*>(GRPC_ARG_KEEPALIVE_TIME_MS), 1 * 1000),
+      grpc_channel_arg_integer_create(
+          const_cast<char*>(GRPC_ARG_HTTP2_BDP_PROBE), 0)};
+  grpc_channel_args client_channel_args = {GPR_ARRAY_SIZE(client_args),
+                                           client_args};
+  grpc_channel* channel = grpc_insecure_channel_create(
+      server_address.c_str(), &client_channel_args, nullptr);
+  grpc_channel* channel_dup = grpc_insecure_channel_create(
+      server_address.c_str(), &client_channel_args, nullptr);
+  int expected_keepalive_time_sec = 1;
+  // We need 3 GOAWAY frames to throttle the keepalive time from 1 second to 8
+  // seconds (> 5 seconds).
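+  // Each GOAWAY doubles the client's keepalive time: 1s -> 2s -> 4s -> 8s.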
+  for (int i = 0; i < 3; i++) {
+    gpr_log(GPR_INFO, "Expected keepalive time : %d",
+            expected_keepalive_time_sec);
+    EXPECT_EQ(PerformWaitingCall(channel, server, cq), GRPC_STATUS_UNAVAILABLE);
+    expected_keepalive_time_sec *= 2;
+  }
+  gpr_log(
+      GPR_INFO,
+      "Client keepalive time %d should now be in sync with the server settings",
+      expected_keepalive_time_sec);
+  EXPECT_EQ(PerformWaitingCall(channel, server, cq),
+            GRPC_STATUS_DEADLINE_EXCEEDED);
+  // Since the subchannel is shared, the second channel should also have
+  // keepalive settings in sync with the server.
+  gpr_log(GPR_INFO, "Now testing second channel sharing the same subchannel");
+  EXPECT_EQ(PerformWaitingCall(channel_dup, server, cq),
+            GRPC_STATUS_DEADLINE_EXCEEDED);
+  // shutdown and destroy the client and server
+  grpc_channel_destroy(channel);
+  grpc_channel_destroy(channel_dup);
+  ServerShutdownAndDestroy(server, cq);
+  grpc_completion_queue_shutdown(cq);
+  while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
+                                    nullptr)
+             .type != GRPC_QUEUE_SHUTDOWN) {
+  }
+  grpc_completion_queue_destroy(cq);
+}
+
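+// Builds a resolver result from a list of URI strings, e.g.
+// "ipv4:127.0.0.1:1234".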
+grpc_core::Resolver::Result BuildResolverResult(
+    const std::vector<std::string>& addresses) {
+  grpc_core::Resolver::Result result;
+  for (const auto& address_str : addresses) {
+    grpc_uri* uri = grpc_uri_parse(address_str.c_str(), true);
+    if (uri == nullptr) {
+      gpr_log(GPR_ERROR, "Failed to parse uri:%s", address_str.c_str());
+      GPR_ASSERT(0);
+    }
+    grpc_resolved_address address;
+    GPR_ASSERT(grpc_parse_uri(uri, &address));
+    result.addresses.emplace_back(address.addr, address.len, nullptr);
+    grpc_uri_destroy(uri);
+  }
+  return result;
+}
+
+// Tests that when new subchannels are created due to a change in resolved
+// addresses, the new subchannels use the updated keepalive time.
+TEST_F(KeepaliveThrottlingTest, NewSubchannelsUseUpdatedKeepaliveTime) {
+  grpc_core::ExecCtx exec_ctx;
+  grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
+  std::string server_address1 =
+      grpc_core::JoinHostPort("127.0.0.1", grpc_pick_unused_port_or_die());
+  std::string server_address2 =
+      grpc_core::JoinHostPort("127.0.0.1", grpc_pick_unused_port_or_die());
+  grpc_server* server1 = ServerStart(server_address1.c_str(), cq);
+  grpc_server* server2 = ServerStart(server_address2.c_str(), cq);
+  // create a single channel with multiple subchannels with a keepalive ping
+  // interval of 1 second. To get finer control over subchannel connection
+  // times, we use pick_first instead of round_robin, and use the fake
+  // resolver response generator to switch between the two servers.
+  auto response_generator =
+      grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
+  grpc_arg client_args[] = {
+      grpc_channel_arg_integer_create(
+          const_cast<char*>(GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA), 0),
+      grpc_channel_arg_integer_create(
+          const_cast<char*>(GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS), 0),
+      grpc_channel_arg_integer_create(
+          const_cast<char*>(GRPC_ARG_KEEPALIVE_TIME_MS), 1 * 1000),
+      grpc_channel_arg_integer_create(
+          const_cast<char*>(GRPC_ARG_HTTP2_BDP_PROBE), 0),
+      grpc_core::FakeResolverResponseGenerator::MakeChannelArg(
+          response_generator.get())};
+  grpc_channel_args client_channel_args = {GPR_ARRAY_SIZE(client_args),
+                                           client_args};
+  grpc_channel* channel =
+      grpc_insecure_channel_create("fake:///", &client_channel_args, nullptr);
+  // For a single subchannel 3 GOAWAYs would be sufficient to increase the
+  // keepalive time from 1 second to beyond 5 seconds. Even though we are
+  // alternating between two subchannels, 3 GOAWAYs should still be enough since
+  // the channel should start all new transports with the new keepalive value
+  // (even those from a different subchannel).
+  int expected_keepalive_time_sec = 1;
+  for (int i = 0; i < 3; i++) {
+    gpr_log(GPR_INFO, "Expected keepalive time : %d",
+            expected_keepalive_time_sec);
+    response_generator->SetResponse(BuildResolverResult({absl::StrCat(
+        "ipv4:", i % 2 == 0 ? server_address1 : server_address2)}));
+    // ExecCtx::Flush() might not be enough to make sure that the resolver
+    // result has been propagated, so sleep for a bit.
+    grpc_core::ExecCtx::Get()->Flush();
+    gpr_sleep_until(grpc_timeout_seconds_to_deadline(1));
+    EXPECT_EQ(PerformWaitingCall(channel, i % 2 == 0 ? server1 : server2, cq),
+              GRPC_STATUS_UNAVAILABLE);
+    expected_keepalive_time_sec *= 2;
+  }
+  gpr_log(
+      GPR_INFO,
+      "Client keepalive time %d should now be in sync with the server settings",
+      expected_keepalive_time_sec);
+  response_generator->SetResponse(
+      BuildResolverResult({absl::StrCat("ipv4:", server_address2)}));
+  grpc_core::ExecCtx::Get()->Flush();
+  gpr_sleep_until(grpc_timeout_seconds_to_deadline(1));
+  EXPECT_EQ(PerformWaitingCall(channel, server2, cq),
+            GRPC_STATUS_DEADLINE_EXCEEDED);
+  // shutdown and destroy the client and server
+  grpc_channel_destroy(channel);
+  ServerShutdownAndDestroy(server1, cq);
+  ServerShutdownAndDestroy(server2, cq);
+  grpc_completion_queue_shutdown(cq);
+  while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
+                                    nullptr)
+             .type != GRPC_QUEUE_SHUTDOWN) {
+  }
+  grpc_completion_queue_destroy(cq);
+}
+
+// Tests that when a channel has multiple subchannels and receives a GOAWAY with
+// "too_many_pings" on one of them, all subchannels start any new transports
+// with an updated keepalive time.
+TEST_F(KeepaliveThrottlingTest,
+       ExistingSubchannelsUseNewKeepaliveTimeWhenReconnecting) {
+  grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
+  std::string server_address1 =
+      grpc_core::JoinHostPort("127.0.0.1", grpc_pick_unused_port_or_die());
+  std::string server_address2 =
+      grpc_core::JoinHostPort("127.0.0.1", grpc_pick_unused_port_or_die());
+  // create a single channel with a round robin load balancing policy.
+  auto response_generator =
+      grpc_core::MakeRefCounted<grpc_core::FakeResolverResponseGenerator>();
+  grpc_arg client_args[] = {
+      grpc_channel_arg_integer_create(
+          const_cast<char*>(GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA), 0),
+      grpc_channel_arg_integer_create(
+          const_cast<char*>(GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS), 0),
+      grpc_channel_arg_integer_create(
+          const_cast<char*>(GRPC_ARG_KEEPALIVE_TIME_MS), 1 * 1000),
+      grpc_channel_arg_integer_create(
+          const_cast<char*>(GRPC_ARG_HTTP2_BDP_PROBE), 0),
+      grpc_core::FakeResolverResponseGenerator::MakeChannelArg(
+          response_generator.get())};
+  grpc_channel_args client_channel_args = {GPR_ARRAY_SIZE(client_args),
+                                           client_args};
+  grpc_channel* channel =
+      grpc_insecure_channel_create("fake:///", &client_channel_args, nullptr);
+  response_generator->SetResponse(
+      BuildResolverResult({absl::StrCat("ipv4:", server_address1),
+                           absl::StrCat("ipv4:", server_address2)}));
+  // For a single subchannel 3 GOAWAYs would be sufficient to increase the
+  // keepalive time from 1 second to beyond 5 seconds. Even though we are
+  // alternating between two subchannels, 3 GOAWAYs should still be enough since
+  // the channel should start all new transports with the new keepalive value
+  // (even those from a different subchannel).
+  int expected_keepalive_time_sec = 1;
+  for (int i = 0; i < 3; i++) {
+    gpr_log(GPR_INFO, "Expected keepalive time : %d",
+            expected_keepalive_time_sec);
+    grpc_server* server = ServerStart(
+        i % 2 == 0 ? server_address1.c_str() : server_address2.c_str(), cq);
+    VerifyChannelReady(channel, cq);
+    EXPECT_EQ(PerformWaitingCall(channel, server, cq), GRPC_STATUS_UNAVAILABLE);
+    ServerShutdownAndDestroy(server, cq);
+    VerifyChannelDisconnected(channel, cq);
+    expected_keepalive_time_sec *= 2;
+  }
+  gpr_log(
+      GPR_INFO,
+      "Client keepalive time %d should now be in sync with the server settings",
+      expected_keepalive_time_sec);
+  grpc_server* server = ServerStart(server_address1.c_str(), cq);
+  VerifyChannelReady(channel, cq);
+  EXPECT_EQ(PerformWaitingCall(channel, server, cq),
+            GRPC_STATUS_DEADLINE_EXCEEDED);
+  ServerShutdownAndDestroy(server, cq);
+  // shutdown and destroy the client
+  grpc_channel_destroy(channel);
+  grpc_completion_queue_shutdown(cq);
+  while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
+                                    nullptr)
+             .type != GRPC_QUEUE_SHUTDOWN) {
+  }
+  grpc_completion_queue_destroy(cq);
+}
+
+// Perform a simple RPC where the client makes a request expecting a response
+// with a payload.
+void PerformCallWithResponsePayload(grpc_channel* channel, grpc_server* server,
+                                    grpc_completion_queue* cq) {
+  grpc_slice response_payload_slice = grpc_slice_from_static_string("hello");
+
+  grpc_call* c;
+  grpc_call* s;
+  grpc_byte_buffer* response_payload =
+      grpc_raw_byte_buffer_create(&response_payload_slice, 1);
+  cq_verifier* cqv = cq_verifier_create(cq);
+  grpc_op ops[6];
+  grpc_op* op;
+  grpc_metadata_array initial_metadata_recv;
+  grpc_metadata_array trailing_metadata_recv;
+  grpc_metadata_array request_metadata_recv;
+  grpc_byte_buffer* response_payload_recv = nullptr;
+  grpc_call_details call_details;
+  grpc_status_code status;
+  grpc_call_error error;
+  grpc_slice details;
+  int was_cancelled = 2;
+
+  gpr_timespec deadline = grpc_timeout_seconds_to_deadline(60);
+  c = grpc_channel_create_call(channel, nullptr, GRPC_PROPAGATE_DEFAULTS, cq,
+                               grpc_slice_from_static_string("/foo"), nullptr,
+                               deadline, nullptr);
+  GPR_ASSERT(c);
+
+  grpc_metadata_array_init(&initial_metadata_recv);
+  grpc_metadata_array_init(&trailing_metadata_recv);
+  grpc_metadata_array_init(&request_metadata_recv);
+  grpc_call_details_init(&call_details);
+
+  memset(ops, 0, sizeof(ops));
+  op = ops;
+  op->op = GRPC_OP_SEND_INITIAL_METADATA;
+  op->data.send_initial_metadata.count = 0;
+  op->flags = 0;
+  op->reserved = nullptr;
+  op++;
+  op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
+  op->flags = 0;
+  op->reserved = nullptr;
+  op++;
+  op->op = GRPC_OP_RECV_INITIAL_METADATA;
+  op->data.recv_initial_metadata.recv_initial_metadata = &initial_metadata_recv;
+  op->flags = 0;
+  op->reserved = nullptr;
+  op++;
+  op->op = GRPC_OP_RECV_MESSAGE;
+  op->data.recv_message.recv_message = &response_payload_recv;
+  op->flags = 0;
+  op->reserved = nullptr;
+  op++;
+  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
+  op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
+  op->data.recv_status_on_client.status = &status;
+  op->data.recv_status_on_client.status_details = &details;
+  op->flags = 0;
+  op->reserved = nullptr;
+  op++;
+  error = grpc_call_start_batch(c, ops, static_cast<size_t>(op - ops), tag(1),
+                                nullptr);
+  GPR_ASSERT(GRPC_CALL_OK == error);
+
+  error = grpc_server_request_call(server, &s, &call_details,
+                                   &request_metadata_recv, cq, cq, tag(101));
+  GPR_ASSERT(GRPC_CALL_OK == error);
+  CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
+  cq_verify(cqv);
+
+  memset(ops, 0, sizeof(ops));
+  op = ops;
+  op->op = GRPC_OP_SEND_INITIAL_METADATA;
+  op->data.send_initial_metadata.count = 0;
+  op->flags = 0;
+  op->reserved = nullptr;
+  op++;
+  error = grpc_call_start_batch(s, ops, static_cast<size_t>(op - ops), tag(102),
+                                nullptr);
+  GPR_ASSERT(GRPC_CALL_OK == error);
+
+  CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
+  cq_verify(cqv);
+
+  memset(ops, 0, sizeof(ops));
+  op = ops;
+  op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
+  op->data.recv_close_on_server.cancelled = &was_cancelled;
+  op->flags = 0;
+  op->reserved = nullptr;
+  op++;
+  op->op = GRPC_OP_SEND_MESSAGE;
+  op->data.send_message.send_message = response_payload;
+  op->flags = 0;
+  op->reserved = nullptr;
+  op++;
+  op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
+  op->data.send_status_from_server.trailing_metadata_count = 0;
+  op->data.send_status_from_server.status = GRPC_STATUS_OK;
+  grpc_slice status_details = grpc_slice_from_static_string("xyz");
+  op->data.send_status_from_server.status_details = &status_details;
+  op->flags = 0;
+  op->reserved = nullptr;
+  op++;
+  error = grpc_call_start_batch(s, ops, static_cast<size_t>(op - ops), tag(103),
+                                nullptr);
+  GPR_ASSERT(GRPC_CALL_OK == error);
+
+  CQ_EXPECT_COMPLETION(cqv, tag(103), 1);
+  CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
+  cq_verify(cqv);
+
+  GPR_ASSERT(status == GRPC_STATUS_OK);
+  GPR_ASSERT(0 == grpc_slice_str_cmp(details, "xyz"));
+  GPR_ASSERT(0 == grpc_slice_str_cmp(call_details.method, "/foo"));
+  GPR_ASSERT(was_cancelled == 0);
+  GPR_ASSERT(
+      byte_buffer_eq_slice(response_payload_recv, response_payload_slice));
+
+  grpc_slice_unref(details);
+  grpc_metadata_array_destroy(&initial_metadata_recv);
+  grpc_metadata_array_destroy(&trailing_metadata_recv);
+  grpc_metadata_array_destroy(&request_metadata_recv);
+  grpc_call_details_destroy(&call_details);
+
+  grpc_call_unref(c);
+  grpc_call_unref(s);
+
+  cq_verifier_destroy(cqv);
+
+  grpc_byte_buffer_destroy(response_payload);
+  grpc_byte_buffer_destroy(response_payload_recv);
+}
+
+TEST(TooManyPings, BdpPingNotSentWithoutReceiveSideActivity) {
+  grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
+  // create the server
+  std::string server_address =
+      grpc_core::JoinHostPort("localhost", grpc_pick_unused_port_or_die());
+  grpc_arg server_args[] = {
+      grpc_channel_arg_integer_create(
+          const_cast<char*>(
+              GRPC_ARG_HTTP2_MIN_RECV_PING_INTERVAL_WITHOUT_DATA_MS),
+          60 * 1000),
+      grpc_channel_arg_integer_create(
+          const_cast<char*>(GRPC_ARG_HTTP2_MAX_PING_STRIKES), 1)};
+  grpc_channel_args server_channel_args = {GPR_ARRAY_SIZE(server_args),
+                                           server_args};
+  grpc_server* server = grpc_server_create(&server_channel_args, nullptr);
+  grpc_server_register_completion_queue(server, cq, nullptr);
+  GPR_ASSERT(
+      grpc_server_add_insecure_http2_port(server, server_address.c_str()));
+  grpc_server_start(server);
+  // create the channel (bdp pings are enabled by default)
+  grpc_arg client_args[] = {
+      grpc_channel_arg_integer_create(
+          const_cast<char*>(GRPC_ARG_HTTP2_MAX_PINGS_WITHOUT_DATA), 0),
+      grpc_channel_arg_integer_create(
+          const_cast<char*>(GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS), 1)};
+  grpc_channel_args client_channel_args = {GPR_ARRAY_SIZE(client_args),
+                                           client_args};
+  grpc_channel* channel = grpc_insecure_channel_create(
+      server_address.c_str(), &client_channel_args, nullptr);
+  VerifyChannelReady(channel, cq);
+  cq_verifier* cqv = cq_verifier_create(cq);
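+  // Check that the completion queue stays quiet for a second.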
+  cq_verify_empty_timeout(cqv, 1);
+  // The channel should be able to send two pings without a disconnect if no
+  // BDP ping was sent.
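+  // (The first ping is allowed; the second lands within the 60-second minimum
+  // interval and uses up the single allowed strike; a third would trigger the
+  // GOAWAY.)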
+  grpc_channel_ping(channel, cq, tag(1), nullptr);
+  CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
+  cq_verify(cqv, 5);
+  // Second ping
+  grpc_channel_ping(channel, cq, tag(2), nullptr);
+  CQ_EXPECT_COMPLETION(cqv, tag(2), 1);
+  cq_verify(cqv, 5);
+  ASSERT_EQ(grpc_channel_check_connectivity_state(channel, 0),
+            GRPC_CHANNEL_READY);
+  PerformCallWithResponsePayload(channel, server, cq);
+  // Wait a bit to make sure that the BDP ping goes out.
+  cq_verify_empty_timeout(cqv, 1);
+  // The call with a response payload should have triggered a BDP ping.
+  // Send two more pings to verify. The second ping should cause a disconnect;
+  // if no BDP ping had been sent, it would not.
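+  // (With the BDP ping already counted, the next ping becomes the first
+  // strike and the one after it exceeds the single allowed strike, triggering
+  // the GOAWAY.)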
+  grpc_channel_ping(channel, cq, tag(3), nullptr);
+  CQ_EXPECT_COMPLETION(cqv, tag(3), 1);
+  cq_verify(cqv, 5);
+  // Second ping
+  grpc_channel_ping(channel, cq, tag(4), nullptr);
+  CQ_EXPECT_COMPLETION(cqv, tag(4), 1);
+  cq_verify(cqv, 5);
+  cq_verify_empty_timeout(cqv, 1);
+  ASSERT_NE(grpc_channel_check_connectivity_state(channel, 0),
+            GRPC_CHANNEL_READY);
+  cq_verifier_destroy(cqv);
+  // shutdown and destroy the client and server
+  ServerShutdownAndDestroy(server, cq);
+  grpc_channel_destroy(channel);
+  grpc_completion_queue_shutdown(cq);
+  while (grpc_completion_queue_next(cq, gpr_inf_future(GPR_CLOCK_REALTIME),
+                                    nullptr)
+             .type != GRPC_QUEUE_SHUTDOWN) {
+  }
   grpc_completion_queue_destroy(cq);
 }