3 * Copyright 2015 gRPC authors.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
19 /* Microbenchmarks around CHTTP2 transport operations */
21 #include <benchmark/benchmark.h>
22 #include <grpc/support/alloc.h>
23 #include <grpc/support/log.h>
24 #include <grpc/support/string_util.h>
25 #include <grpcpp/support/channel_arguments.h>
30 #include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
31 #include "src/core/ext/transport/chttp2/transport/internal.h"
32 #include "src/core/lib/iomgr/closure.h"
33 #include "src/core/lib/iomgr/resource_quota.h"
34 #include "src/core/lib/slice/slice_internal.h"
35 #include "src/core/lib/transport/static_metadata.h"
36 #include "test/cpp/microbenchmarks/helpers.h"
37 #include "test/cpp/util/test_config.h"
39 ////////////////////////////////////////////////////////////////////////////////
// A fake grpc_endpoint used to drive the chttp2 transport in benchmarks with
// no real network I/O: reads are satisfied from slices pushed in via
// PushInput(), writes complete immediately, and pollset hooks are no-ops.
// NOTE(review): interior lines of this class are missing from the visible
// chunk; comments cover only the code shown here.
43 class DummyEndpoint : public grpc_endpoint {
// Static vtable routing the C-style grpc_endpoint interface to the static
// thunks defined below (initializer truncated in this view).
46 static const grpc_endpoint_vtable my_vtable = {read,
50 delete_from_pollset_set,
// Constructor body (header not visible): install the vtable and create a
// resource user against the library-wide resource quota.
57 grpc_endpoint::vtable = &my_vtable;
58 ru_ = grpc_resource_user_create(LibraryInitializer::get().rq(),
// Deliver one slice of "incoming data".  With no read pending, buffer the
// slice (at most one — asserted) until QueueRead(); otherwise append it to
// the pending read's buffer and schedule the read callback with success.
62 void PushInput(grpc_slice slice) {
63 if (read_cb_ == nullptr) {
64 GPR_ASSERT(!have_slice_);
65 buffered_slice_ = slice;
69 grpc_slice_buffer_add(slices_, slice);
70 GRPC_CLOSURE_SCHED(read_cb_, GRPC_ERROR_NONE);
// State: resource accounting handle, the pending read (callback + target
// slice buffer), and a single-slice buffer for data pushed before any read.
75 grpc_resource_user* ru_;
76 grpc_closure* read_cb_ = nullptr;
77 grpc_slice_buffer* slices_ = nullptr;
78 bool have_slice_ = false;
79 grpc_slice buffered_slice_;
// Record a pending read; visible code completes it immediately from
// buffered_slice_ (the not-buffered path is elided from this view).
81 void QueueRead(grpc_slice_buffer* slices, grpc_closure* cb) {
82 GPR_ASSERT(read_cb_ == nullptr);
85 grpc_slice_buffer_add(slices, buffered_slice_);
86 GRPC_CLOSURE_SCHED(cb, GRPC_ERROR_NONE);
// vtable thunks: downcast the endpoint pointer and forward.
93 static void read(grpc_endpoint* ep, grpc_slice_buffer* slices,
94 grpc_closure* cb, bool urgent) {
95 static_cast<DummyEndpoint*>(ep)->QueueRead(slices, cb);
// Writes "succeed" instantly; the outgoing bytes are simply dropped.
98 static void write(grpc_endpoint* ep, grpc_slice_buffer* slices,
99 grpc_closure* cb, void* arg) {
100 GRPC_CLOSURE_SCHED(cb, GRPC_ERROR_NONE);
// Pollset registration is meaningless for an in-process endpoint.
103 static void add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) {}
105 static void add_to_pollset_set(grpc_endpoint* ep, grpc_pollset_set* pollset) {
108 static void delete_from_pollset_set(grpc_endpoint* ep,
109 grpc_pollset_set* pollset) {}
// Shutdown tears down the resource user and fails any pending read with
// `why` (ownership of `why` passes to the scheduled closure).
111 static void shutdown(grpc_endpoint* ep, grpc_error* why) {
112 grpc_resource_user_shutdown(static_cast<DummyEndpoint*>(ep)->ru_);
113 GRPC_CLOSURE_SCHED(static_cast<DummyEndpoint*>(ep)->read_cb_, why);
// Destroy drops the resource-user ref and frees the endpoint itself.
116 static void destroy(grpc_endpoint* ep) {
117 grpc_resource_user_unref(static_cast<DummyEndpoint*>(ep)->ru_);
118 delete static_cast<DummyEndpoint*>(ep);
121 static grpc_resource_user* get_resource_user(grpc_endpoint* ep) {
122 return static_cast<DummyEndpoint*>(ep)->ru_;
// Caller owns the returned copy of the peer name (gpr_strdup).
124 static char* get_peer(grpc_endpoint* ep) { return gpr_strdup("test"); }
125 static int get_fd(grpc_endpoint* ep) { return 0; }
126 static bool can_track_err(grpc_endpoint* ep) { return false; }
// Benchmark fixture (enclosing class header not visible in this chunk):
// owns a DummyEndpoint and a chttp2 transport created over it.
// Constructor: convert the C++ channel args, build the endpoint and the
// client/server chttp2 transport, and start the transport's read loop.
131 Fixture(const grpc::ChannelArguments& args, bool client) {
132 grpc_channel_args c_args = args.c_channel_args();
133 ep_ = new DummyEndpoint;
134 t_ = grpc_create_chttp2_transport(&c_args, ep_, client);
135 grpc_chttp2_transport_start_reading(t_, nullptr, nullptr);
// Drain all closures queued on the current ExecCtx.
139 void FlushExecCtx() { grpc_core::ExecCtx::Get()->Flush(); }
// Destroying the transport also tears down the endpoint it owns.
141 ~Fixture() { grpc_transport_destroy(t_); }
// Access the transport as its concrete chttp2 type (benchmarks poke at
// internals such as flow control).
143 grpc_chttp2_transport* chttp2_transport() {
144 return reinterpret_cast<grpc_chttp2_transport*>(t_);
146 grpc_transport* transport() { return t_; }
// Forward incoming "network" bytes to the dummy endpoint.
148 void PushInput(grpc_slice slice) { ep_->PushInput(slice); }
// Polymorphic wrapper over grpc_closure so heap-allocated closures can be
// owned by std::unique_ptr; the virtual destructor makes deletion through
// the base pointer safe.
155 class Closure : public grpc_closure {
157 virtual ~Closure() {}
// Build a reusable heap-allocated Closure that invokes `f(error)` every time
// it is scheduled/run (the template header declaring F is not visible in
// this chunk).  Caller owns the returned closure via unique_ptr.
161 std::unique_ptr<Closure> MakeClosure(
162 F f, grpc_closure_scheduler* sched = grpc_schedule_on_exec_ctx) {
163 struct C : public Closure {
// Initialize the embedded grpc_closure to dispatch to Execute with
// `this` as the argument.
164 C(const F& f, grpc_closure_scheduler* sched) : f_(f) {
165 GRPC_CLOSURE_INIT(this, Execute, this, sched);
168 static void Execute(void* arg, grpc_error* error) {
169 static_cast<C*>(arg)->f_(error);
172 return std::unique_ptr<Closure>(new C(f, sched));
// Build a fire-once closure: after invoking `f(error)` it deletes itself,
// so the returned raw pointer must be scheduled exactly once and never
// reused (template header declaring F not visible in this chunk).
176 grpc_closure* MakeOnceClosure(
177 F f, grpc_closure_scheduler* sched = grpc_schedule_on_exec_ctx) {
178 struct C : public grpc_closure {
179 C(const F& f) : f_(f) {}
181 static void Execute(void* arg, grpc_error* error) {
182 static_cast<C*>(arg)->f_(error);
// Self-delete: this closure is single-shot by construction.
183 delete static_cast<C*>(arg);
187 return GRPC_CLOSURE_INIT(c, C::Execute, c, sched);
// Benchmark wrapper around a transport stream (enclosing class header not
// visible in this chunk).  Owns raw stream storage sized by the transport
// plus an arena, and supports repeated Init/Destroy cycles.
// Constructor: allocate storage of the transport-reported stream size and a
// small arena for per-stream allocations.
192 Stream(Fixture* f) : f_(f) {
193 stream_size_ = grpc_transport_stream_size(f->transport());
194 stream_ = gpr_malloc(stream_size_);
195 arena_ = grpc_core::Arena::Create(4096);
// Block until FinishDestroy has signalled that stream teardown completed.
199 gpr_event_wait(&done_, gpr_inf_future(GPR_CLOCK_REALTIME));
// (Re)initialize the stream for one benchmark iteration: reset the
// refcount (FinishDestroy runs when it hits zero), zero the raw storage,
// and initialize the transport stream in it.
204 void Init(benchmark::State& state) {
205 GRPC_STREAM_REF_INIT(&refcount_, 1, &Stream::FinishDestroy, this,
207 gpr_event_init(&done_);
208 memset(stream_, 0, stream_size_);
// Periodically (every 64k iterations) recreate the arena so it does not
// grow without bound across iterations.
209 if ((state.iterations() & 0xffff) == 0) {
211 arena_ = grpc_core::Arena::Create(4096);
213 grpc_transport_init_stream(f_->transport(),
214 static_cast<grpc_stream*>(stream_), &refcount_,
// Arrange for `closure` to be passed to grpc_transport_destroy_stream once
// the last stream ref is dropped, then drop ref(s).
218 void DestroyThen(grpc_closure* closure) {
219 destroy_closure_ = closure;
221 grpc_stream_unref(&refcount_, "DestroyThen");
223 grpc_stream_unref(&refcount_);
// Submit a stream op batch on this stream.
227 void Op(grpc_transport_stream_op_batch* op) {
228 grpc_transport_perform_stream_op(f_->transport(),
229 static_cast<grpc_stream*>(stream_), op);
// View the raw storage as the concrete chttp2 stream type.
232 grpc_chttp2_stream* chttp2_stream() {
233 return static_cast<grpc_chttp2_stream*>(stream_);
// Refcount destruction callback: destroy the transport stream (handing it
// the deferred closure) and signal done_ for the destructor's wait.
237 static void FinishDestroy(void* arg, grpc_error* error) {
238 auto stream = static_cast<Stream*>(arg);
239 grpc_transport_destroy_stream(stream->f_->transport(),
240 static_cast<grpc_stream*>(stream->stream_),
241 stream->destroy_closure_);
242 gpr_event_set(&stream->done_, (void*)1);
246 grpc_stream_refcount refcount_;
247 grpc_core::Arena* arena_;
250 grpc_closure* destroy_closure_ = nullptr;
254 ////////////////////////////////////////////////////////////////////////////////
// Benchmark: cost of one stream init + cancel + destroy cycle on a client
// chttp2 transport.  The `next` closure re-runs itself via DestroyThen until
// the benchmark loop is done.
258 static void BM_StreamCreateDestroy(benchmark::State& state) {
259 TrackCounters track_counters;
260 grpc_core::ExecCtx exec_ctx;
261 Fixture f(grpc::ChannelArguments(), true);
262 auto* s = new Stream(&f);
// A single reusable cancel-stream batch is submitted each iteration.
263 grpc_transport_stream_op_batch op;
264 grpc_transport_stream_op_batch_payload op_payload(nullptr);
265 memset(&op, 0, sizeof(op));
266 op.cancel_stream = true;
267 op.payload = &op_payload;
268 op_payload.cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
// Each invocation performs one iteration; elided lines (not visible here)
// presumably init the stream, submit the op, and chain DestroyThen(next).
269 std::unique_ptr<Closure> next = MakeClosure([&, s](grpc_error* error) {
270 if (!state.KeepRunning()) {
276 s->DestroyThen(next.get());
// Kick off the first iteration synchronously.
278 GRPC_CLOSURE_RUN(next.get(), GRPC_ERROR_NONE);
280 track_counters.Finish(state);
282 BENCHMARK(BM_StreamCreateDestroy);
// Produces a metadata element set representative of a typical client's
// initial metadata (scheme, method, path, authority, encodings, te,
// content-type, user-agent), for use as benchmark payload.
284 class RepresentativeClientInitialMetadata {
286 static std::vector<grpc_mdelem> GetElems() {
288 GRPC_MDELEM_SCHEME_HTTP,
289 GRPC_MDELEM_METHOD_POST,
// Interned values for :path and :authority mimic real channel traffic.
290 grpc_mdelem_from_slices(
292 grpc_slice_intern(grpc_slice_from_static_string("/foo/bar"))),
293 grpc_mdelem_from_slices(GRPC_MDSTR_AUTHORITY,
294 grpc_slice_intern(grpc_slice_from_static_string(
295 "foo.test.google.fr:1234"))),
296 GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_DEFLATE_COMMA_GZIP,
297 GRPC_MDELEM_TE_TRAILERS,
298 GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC,
299 grpc_mdelem_from_slices(
300 GRPC_MDSTR_USER_AGENT,
301 grpc_slice_intern(grpc_slice_from_static_string(
302 "grpc-c/3.0.0-dev (linux; chttp2; green)")))};
// Benchmark: stream init + send_initial_metadata + cancel + destroy cycle,
// parameterized on the metadata set supplied by `Metadata::GetElems()`.
306 template <class Metadata>
307 static void BM_StreamCreateSendInitialMetadataDestroy(benchmark::State& state) {
308 TrackCounters track_counters;
309 grpc_core::ExecCtx exec_ctx;
310 Fixture f(grpc::ChannelArguments(), true);
311 auto* s = new Stream(&f);
312 grpc_transport_stream_op_batch op;
313 grpc_transport_stream_op_batch_payload op_payload(nullptr);
// `start` and `done` alternate: start sends initial metadata, done cancels
// and chains back into start via DestroyThen.
314 std::unique_ptr<Closure> start;
315 std::unique_ptr<Closure> done;
317 auto reset_op = [&]() {
318 memset(&op, 0, sizeof(op));
319 op.payload = &op_payload;
// Build the metadata batch once up front; `storage` must outlive the batch
// since grpc_metadata_batch links (not copies) each element.
322 grpc_metadata_batch b;
323 grpc_metadata_batch_init(&b);
324 b.deadline = GRPC_MILLIS_INF_FUTURE;
325 std::vector<grpc_mdelem> elems = Metadata::GetElems();
326 std::vector<grpc_linked_mdelem> storage(elems.size());
327 for (size_t i = 0; i < elems.size(); i++) {
328 GPR_ASSERT(GRPC_LOG_IF_ERROR(
329 "addmd", grpc_metadata_batch_add_tail(&b, &storage[i], elems[i])));
// Per-iteration: submit a send_initial_metadata batch completing into
// `done` (stream init and op submission lines elided from this view).
333 start = MakeClosure([&, s](grpc_error* error) {
334 if (!state.KeepRunning()) {
340 op.on_complete = done.get();
341 op.send_initial_metadata = true;
342 op.payload->send_initial_metadata.send_initial_metadata = &b;
// On completion: cancel the stream, then destroy it and loop back to start.
345 done = MakeClosure([&](grpc_error* error) {
347 op.cancel_stream = true;
348 op.payload->cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
350 s->DestroyThen(start.get());
352 GRPC_CLOSURE_SCHED(start.get(), GRPC_ERROR_NONE);
354 grpc_metadata_batch_destroy(&b);
355 track_counters.Finish(state);
357 BENCHMARK_TEMPLATE(BM_StreamCreateSendInitialMetadataDestroy,
358 RepresentativeClientInitialMetadata);
// Benchmark: cost of performing a stream op batch with no flags set (only
// an on_complete), isolating transport batch-dispatch overhead.
360 static void BM_TransportEmptyOp(benchmark::State& state) {
361 TrackCounters track_counters;
362 grpc_core::ExecCtx exec_ctx;
363 Fixture f(grpc::ChannelArguments(), true);
364 auto* s = new Stream(&f);
366 grpc_transport_stream_op_batch op;
367 grpc_transport_stream_op_batch_payload op_payload(nullptr);
368 auto reset_op = [&]() {
369 memset(&op, 0, sizeof(op));
370 op.payload = &op_payload;
// Self-rescheduling loop: each run submits one empty batch whose
// on_complete is this same closure (submission line elided in this view).
372 std::unique_ptr<Closure> c = MakeClosure([&](grpc_error* error) {
373 if (!state.KeepRunning()) return;
375 op.on_complete = c.get();
378 GRPC_CLOSURE_SCHED(c.get(), GRPC_ERROR_NONE);
// Teardown: cancel the stream, then destroy it; the once-closure deletes
// the Stream wrapper after transport teardown completes.
381 op.cancel_stream = true;
382 op_payload.cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
384 s->DestroyThen(MakeOnceClosure([s](grpc_error* error) { delete s; }));
386 track_counters.Finish(state);
388 BENCHMARK(BM_TransportEmptyOp);
// Keeps completion events alive past each benchmark run (events may still
// be referenced by in-flight transport work at function exit).
390 std::vector<std::unique_ptr<gpr_event>> done_events;
// Benchmark: repeatedly send a message of state.range(0) bytes on a stream,
// measuring the transport's send path.
392 static void BM_TransportStreamSend(benchmark::State& state) {
393 TrackCounters track_counters;
394 grpc_core::ExecCtx exec_ctx;
395 Fixture f(grpc::ChannelArguments(), true);
396 auto* s = new Stream(&f);
398 grpc_transport_stream_op_batch op;
399 grpc_transport_stream_op_batch_payload op_payload(nullptr);
400 auto reset_op = [&]() {
401 memset(&op, 0, sizeof(op));
402 op.payload = &op_payload;
404 // Create the send_message payload slice.
405 // Note: We use grpc_slice_malloc_large() instead of grpc_slice_malloc()
406 // to force the slice to be refcounted, so that it remains alive when it
407 // is unreffed after each send_message op.
408 grpc_slice send_slice = grpc_slice_malloc_large(state.range(0));
409 memset(GRPC_SLICE_START_PTR(send_slice), 0, GRPC_SLICE_LENGTH(send_slice));
// ManualConstructor lets the byte stream be re-Init()ed every iteration
// without repeated heap allocation.
410 grpc_core::ManualConstructor<grpc_core::SliceBufferByteStream> send_stream;
411 grpc_metadata_batch b;
412 grpc_metadata_batch_init(&b);
413 b.deadline = GRPC_MILLIS_INF_FUTURE;
414 std::vector<grpc_mdelem> elems =
415 RepresentativeClientInitialMetadata::GetElems();
// `storage` must outlive `b`: the batch links elements rather than copying.
416 std::vector<grpc_linked_mdelem> storage(elems.size());
417 for (size_t i = 0; i < elems.size(); i++) {
418 GPR_ASSERT(GRPC_LOG_IF_ERROR(
419 "addmd", grpc_metadata_batch_add_tail(&b, &storage[i], elems[i])));
// Signalled once the benchmark loop has finished all iterations.
422 gpr_event* bm_done = new gpr_event;
423 gpr_event_init(bm_done);
// Per-iteration send: wrap a ref of send_slice in a fresh byte stream and
// submit a send_message batch completing back into this closure.
425 std::unique_ptr<Closure> c = MakeClosure([&](grpc_error* error) {
426 if (!state.KeepRunning()) {
427 gpr_event_set(bm_done, (void*)1);
430 grpc_slice_buffer send_buffer;
431 grpc_slice_buffer_init(&send_buffer);
432 grpc_slice_buffer_add(&send_buffer, grpc_slice_ref(send_slice));
// SliceBufferByteStream swallows the buffer's contents; destroying the
// local buffer afterwards is safe.
433 send_stream.Init(&send_buffer, 0);
434 grpc_slice_buffer_destroy(&send_buffer);
435 // force outgoing window to be yuge
436 s->chttp2_stream()->flow_control->TestOnlyForceHugeWindow();
437 f.chttp2_transport()->flow_control->TestOnlyForceHugeWindow();
439 op.on_complete = c.get();
440 op.send_message = true;
441 op.payload->send_message.send_message.reset(send_stream.get());
// Prime the stream with initial metadata; its completion starts the
// send-message loop above.
446 op.send_initial_metadata = true;
447 op.payload->send_initial_metadata.send_initial_metadata = &b;
448 op.on_complete = c.get();
// Wait for the loop to finish, then park the event in done_events so any
// late transport callbacks still find it alive.
452 gpr_event_wait(bm_done, gpr_inf_future(GPR_CLOCK_REALTIME));
453 done_events.emplace_back(bm_done);
456 op.cancel_stream = true;
457 op.payload->cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
459 s->DestroyThen(MakeOnceClosure([s](grpc_error* error) { delete s; }));
461 track_counters.Finish(state);
462 grpc_metadata_batch_destroy(&b);
463 grpc_slice_unref(send_slice);
465 BENCHMARK(BM_TransportStreamSend)->Range(0, 128 * 1024 * 1024);
// Static-buffer-to-slice helper; sizeof(s)-1 drops the trailing NUL.
467 #define SLICE_FROM_BUFFER(s) grpc_slice_from_static_buffer(s, sizeof(s) - 1)
// Build a slice containing one gRPC message of `length` payload bytes —
// 5-byte gRPC message header (compressed flag 0 + 4-byte big-endian length)
// followed by the payload — split into HTTP/2 DATA frames of at most
// `frame_size` bytes each.  NOTE(review): frame-header lines are partially
// elided in this view; only the visible length bytes are commented.
469 static grpc_slice CreateIncomingDataSlice(size_t length, size_t frame_size) {
470 std::queue<char> unframed;
// gRPC message header: compressed flag then 32-bit big-endian length.
472 unframed.push(static_cast<uint8_t>(0));
473 unframed.push(static_cast<uint8_t>(length >> 24));
474 unframed.push(static_cast<uint8_t>(length >> 16));
475 unframed.push(static_cast<uint8_t>(length >> 8));
476 unframed.push(static_cast<uint8_t>(length));
477 for (size_t i = 0; i < length; i++) {
481 std::vector<char> framed;
// Emit full frames while more than frame_size bytes remain: 24-bit frame
// length, then frame_size payload bytes moved from `unframed`.
482 while (unframed.size() > frame_size) {
484 framed.push_back(static_cast<uint8_t>(frame_size >> 16));
485 framed.push_back(static_cast<uint8_t>(frame_size >> 8));
486 framed.push_back(static_cast<uint8_t>(frame_size));
497 for (size_t i = 0; i < frame_size; i++) {
498 framed.push_back(unframed.front());
// Final (short) frame carries whatever remains.
504 framed.push_back(static_cast<uint8_t>(unframed.size() >> 16));
505 framed.push_back(static_cast<uint8_t>(unframed.size() >> 8));
506 framed.push_back(static_cast<uint8_t>(unframed.size()));
516 while (!unframed.empty()) {
517 framed.push_back(unframed.front());
521 return grpc_slice_from_copied_buffer(framed.data(), framed.size());
// Benchmark: receive a message of state.range(0) bytes through the chttp2
// transport, feeding pre-framed bytes in via the dummy endpoint and draining
// the resulting byte stream.
524 static void BM_TransportStreamRecv(benchmark::State& state) {
525 TrackCounters track_counters;
526 grpc_core::ExecCtx exec_ctx;
527 Fixture f(grpc::ChannelArguments(), true);
528 auto* s = new Stream(&f);
530 grpc_transport_stream_op_batch_payload op_payload(nullptr);
531 grpc_transport_stream_op_batch op;
532 grpc_core::OrphanablePtr<grpc_core::ByteStream> recv_stream;
// Pre-build the framed message once; each iteration re-pushes a ref of it.
533 grpc_slice incoming_data = CreateIncomingDataSlice(state.range(0), 16384);
535 auto reset_op = [&]() {
536 memset(&op, 0, sizeof(op));
537 op.payload = &op_payload;
// `b` carries outgoing initial metadata; `b_recv` receives the server's.
// `storage` must outlive `b` (the batch links, not copies, elements).
540 grpc_metadata_batch b;
541 grpc_metadata_batch_init(&b);
542 grpc_metadata_batch b_recv;
543 grpc_metadata_batch_init(&b_recv);
544 b.deadline = GRPC_MILLIS_INF_FUTURE;
545 std::vector<grpc_mdelem> elems =
546 RepresentativeClientInitialMetadata::GetElems();
547 std::vector<grpc_linked_mdelem> storage(elems.size());
548 for (size_t i = 0; i < elems.size(); i++) {
549 GPR_ASSERT(GRPC_LOG_IF_ERROR(
550 "addmd", grpc_metadata_batch_add_tail(&b, &storage[i], elems[i])));
553 std::unique_ptr<Closure> do_nothing = MakeClosure([](grpc_error* error) {});
// Closure chain: c submits recv_message and pushes data; drain_start checks
// the stream arrived; drain pulls synchronously while possible;
// drain_continue resumes after an async Next().
557 std::unique_ptr<Closure> drain_start;
558 std::unique_ptr<Closure> drain;
559 std::unique_ptr<Closure> drain_continue;
560 grpc_slice recv_slice;
562 std::unique_ptr<Closure> c = MakeClosure([&](grpc_error* error) {
563 if (!state.KeepRunning()) return;
564 // force outgoing window to be yuge
565 s->chttp2_stream()->flow_control->TestOnlyForceHugeWindow();
566 f.chttp2_transport()->flow_control->TestOnlyForceHugeWindow();
569 op.on_complete = do_nothing.get();
570 op.recv_message = true;
571 op.payload->recv_message.recv_message = &recv_stream;
572 op.payload->recv_message.recv_message_ready = drain_start.get();
574 f.PushInput(grpc_slice_ref(incoming_data));
// A null recv_stream means the stream ended; that is only expected once
// the benchmark loop has stopped.
577 drain_start = MakeClosure([&](grpc_error* error) {
578 if (recv_stream == nullptr) {
579 GPR_ASSERT(!state.KeepRunning());
582 GRPC_CLOSURE_RUN(drain.get(), GRPC_ERROR_NONE);
// Synchronous drain loop: while Next() completes immediately, Pull each
// slice, tally its length, and unref it; when all bytes are received,
// schedule `c` for the next iteration.  (`received` is declared on a line
// elided from this view.)
585 drain = MakeClosure([&](grpc_error* error) {
587 if (received == recv_stream->length()) {
589 GRPC_CLOSURE_SCHED(c.get(), GRPC_ERROR_NONE);
592 } while (recv_stream->Next(recv_stream->length() - received,
593 drain_continue.get()) &&
594 GRPC_ERROR_NONE == recv_stream->Pull(&recv_slice) &&
595 (received += GRPC_SLICE_LENGTH(recv_slice),
596 grpc_slice_unref_internal(recv_slice), true));
// Async continuation for Next(): consume the ready slice then re-enter the
// drain loop.
599 drain_continue = MakeClosure([&](grpc_error* error) {
600 recv_stream->Pull(&recv_slice);
601 received += GRPC_SLICE_LENGTH(recv_slice);
602 grpc_slice_unref_internal(recv_slice);
603 GRPC_CLOSURE_RUN(drain.get(), GRPC_ERROR_NONE);
// Prime the stream: send initial metadata and request the server's.
607 op.send_initial_metadata = true;
608 op.payload->send_initial_metadata.send_initial_metadata = &b;
609 op.recv_initial_metadata = true;
610 op.payload->recv_initial_metadata.recv_initial_metadata = &b_recv;
611 op.payload->recv_initial_metadata.recv_initial_metadata_ready =
613 op.on_complete = c.get();
// Feed a canned SETTINGS ack plus a representative server initial-metadata
// HEADERS frame (bytes generated by gen_header_frame.py, per the comment).
615 f.PushInput(SLICE_FROM_BUFFER(
616 "\x00\x00\x00\x04\x00\x00\x00\x00\x00"
618 // tools/codegen/core/gen_header_frame.py <
619 // test/cpp/microbenchmarks/representative_server_initial_metadata.headers
620 "\x00\x00X\x01\x04\x00\x00\x00\x01"
621 "\x10\x07:status\x03"
626 "\x10\x14grpc-accept-encoding\x15identity,deflate,gzip"));
// Teardown: cancel, destroy the stream, free metadata and the canned data.
630 op.cancel_stream = true;
631 op.payload->cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
633 s->DestroyThen(MakeOnceClosure([s](grpc_error* error) { delete s; }));
634 grpc_metadata_batch_destroy(&b);
635 grpc_metadata_batch_destroy(&b_recv);
637 track_counters.Finish(state);
638 grpc_slice_unref(incoming_data);
640 BENCHMARK(BM_TransportStreamRecv)->Range(0, 128 * 1024 * 1024);
642 // Some distros have RunSpecifiedBenchmarks under the benchmark namespace,
643 // and others do not. This allows us to support both modes.
644 namespace benchmark {
645 void RunTheBenchmarksNamespaced() { RunSpecifiedBenchmarks(); }
646 } // namespace benchmark
// Entry point: initialize the gRPC library (RAII), the benchmark framework,
// and the test config, then run all registered benchmarks.
// NOTE(review): the tail of main (return statement / closing brace) is not
// visible in this chunk.
648 int main(int argc, char** argv) {
649 LibraryInitializer libInit;
650 ::benchmark::Initialize(&argc, argv);
651 ::grpc::testing::InitTest(&argc, &argv, false);
652 benchmark::RunTheBenchmarksNamespaced();