Apply PIE to nghttpx
[platform/upstream/nghttp2.git] / src / shrpx_worker.cc
1 /*
2  * nghttp2 - HTTP/2 C Library
3  *
4  * Copyright (c) 2012 Tatsuhiro Tsujikawa
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining
7  * a copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sublicense, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be
15  * included in all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
21  * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22  * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24  */
25 #include "shrpx_worker.h"
26
27 #ifdef HAVE_UNISTD_H
28 #  include <unistd.h>
29 #endif // HAVE_UNISTD_H
30
31 #include <memory>
32
33 #include "shrpx_tls.h"
34 #include "shrpx_log.h"
35 #include "shrpx_client_handler.h"
36 #include "shrpx_http2_session.h"
37 #include "shrpx_log_config.h"
38 #include "shrpx_memcached_dispatcher.h"
39 #ifdef HAVE_MRUBY
40 #  include "shrpx_mruby.h"
41 #endif // HAVE_MRUBY
42 #include "util.h"
43 #include "template.h"
44
45 namespace shrpx {
46
47 namespace {
48 void eventcb(struct ev_loop *loop, ev_async *w, int revents) {
49   auto worker = static_cast<Worker *>(w->data);
50   worker->process_events();
51 }
52 } // namespace
53
54 namespace {
55 void mcpool_clear_cb(struct ev_loop *loop, ev_timer *w, int revents) {
56   auto worker = static_cast<Worker *>(w->data);
57   if (worker->get_worker_stat()->num_connections != 0) {
58     return;
59   }
60   worker->get_mcpool()->clear();
61 }
62 } // namespace
63
64 namespace {
65 void proc_wev_cb(struct ev_loop *loop, ev_timer *w, int revents) {
66   auto worker = static_cast<Worker *>(w->data);
67   worker->process_events();
68 }
69 } // namespace
70
// A freshly built group is live; replace_downstream_config() sets
// retired when a new backend configuration supersedes it.
DownstreamAddrGroup::DownstreamAddrGroup() : retired{false} {}

DownstreamAddrGroup::~DownstreamAddrGroup() {}
74
// DownstreamKey is used to index SharedDownstreamAddr in order to
// find the same configuration.
// Layout: a vector of per-address tuples (host, sni, group, fall,
// rise, proto, port, weight, group_weight, host_unix, tls, dns,
// upgrade_scheme), then redirect_if_not_tls, affinity type, cookie
// name, cookie path, cookie secure, read timeout, write timeout and
// the mruby script file.  create_downstream_key() below must fill
// these positions in exactly this order.
using DownstreamKey =
    std::tuple<std::vector<std::tuple<StringRef, StringRef, StringRef, size_t,
                                      size_t, Proto, uint32_t, uint32_t,
                                      uint32_t, bool, bool, bool, bool>>,
               bool, SessionAffinity, StringRef, StringRef,
               SessionAffinityCookieSecure, int64_t, int64_t, StringRef>;
83
namespace {
// Builds the DownstreamKey for |shared_addr| plus |mruby_file|.  Two
// groups with equal keys have an identical backend configuration and
// may share one SharedDownstreamAddr.  The std::get<N> positions
// below must stay in sync with the DownstreamKey alias.
DownstreamKey
create_downstream_key(const std::shared_ptr<SharedDownstreamAddr> &shared_addr,
                      const StringRef &mruby_file) {
  DownstreamKey dkey;

  auto &addrs = std::get<0>(dkey);
  addrs.resize(shared_addr->addrs.size());
  auto p = std::begin(addrs);
  for (auto &a : shared_addr->addrs) {
    std::get<0>(*p) = a.host;
    std::get<1>(*p) = a.sni;
    std::get<2>(*p) = a.group;
    std::get<3>(*p) = a.fall;
    std::get<4>(*p) = a.rise;
    std::get<5>(*p) = a.proto;
    std::get<6>(*p) = a.port;
    std::get<7>(*p) = a.weight;
    std::get<8>(*p) = a.group_weight;
    std::get<9>(*p) = a.host_unix;
    std::get<10>(*p) = a.tls;
    std::get<11>(*p) = a.dns;
    std::get<12>(*p) = a.upgrade_scheme;
    ++p;
  }
  // Sort so the key is independent of address ordering (addresses are
  // shuffled per group in replace_downstream_config()).
  std::sort(std::begin(addrs), std::end(addrs));

  std::get<1>(dkey) = shared_addr->redirect_if_not_tls;

  auto &affinity = shared_addr->affinity;
  std::get<2>(dkey) = affinity.type;
  std::get<3>(dkey) = affinity.cookie.name;
  std::get<4>(dkey) = affinity.cookie.path;
  std::get<5>(dkey) = affinity.cookie.secure;
  auto &timeout = shared_addr->timeout;
  std::get<6>(dkey) = timeout.read;
  std::get<7>(dkey) = timeout.write;
  std::get<8>(dkey) = mruby_file;

  return dkey;
}
} // namespace
126
// Constructs a worker bound to |loop|.  SSL contexts, the certificate
// lookup tree and the ticket keys are shared objects owned elsewhere;
// |downstreamconf| is installed via replace_downstream_config() at
// the end of construction.
Worker::Worker(struct ev_loop *loop, SSL_CTX *sv_ssl_ctx, SSL_CTX *cl_ssl_ctx,
               SSL_CTX *tls_session_cache_memcached_ssl_ctx,
               tls::CertLookupTree *cert_tree,
               const std::shared_ptr<TicketKeys> &ticket_keys,
               ConnectionHandler *conn_handler,
               std::shared_ptr<DownstreamConfig> downstreamconf)
    : randgen_(util::make_mt19937()),
      worker_stat_{},
      dns_tracker_(loop),
      loop_(loop),
      sv_ssl_ctx_(sv_ssl_ctx),
      cl_ssl_ctx_(cl_ssl_ctx),
      cert_tree_(cert_tree),
      conn_handler_(conn_handler),
      ticket_keys_(ticket_keys),
      connect_blocker_(
          std::make_unique<ConnectBlocker>(randgen_, loop_, nullptr, nullptr)),
      graceful_shutdown_(false) {
  // Async watcher through which other threads (via send()) wake this
  // worker; started immediately.
  ev_async_init(&w_, eventcb);
  w_.data = this;
  ev_async_start(loop_, &w_);

  // Started on demand by schedule_clear_mcpool().
  ev_timer_init(&mcpool_clear_timer_, mcpool_clear_cb, 0., 0.);
  mcpool_clear_timer_.data = this;

  // Zero-timeout timer used by process_events() to handle queued
  // events one per loop iteration.
  ev_timer_init(&proc_wev_timer_, proc_wev_cb, 0., 0.);
  proc_wev_timer_.data = this;

  auto &session_cacheconf = get_config()->tls.session_cache;

  // The memcached dispatcher exists only when an external TLS session
  // cache host is configured.
  if (!session_cacheconf.memcached.host.empty()) {
    session_cache_memcached_dispatcher_ = std::make_unique<MemcachedDispatcher>(
        &session_cacheconf.memcached.addr, loop,
        tls_session_cache_memcached_ssl_ctx,
        StringRef{session_cacheconf.memcached.host}, &mcpool_, randgen_);
  }

  replace_downstream_config(std::move(downstreamconf));
}
166
167 namespace {
168 void ensure_enqueue_addr(
169     std::priority_queue<WeightGroupEntry, std::vector<WeightGroupEntry>,
170                         WeightGroupEntryGreater> &wgpq,
171     WeightGroup *wg, DownstreamAddr *addr) {
172   uint32_t cycle;
173   if (!wg->pq.empty()) {
174     auto &top = wg->pq.top();
175     cycle = top.cycle;
176   } else {
177     cycle = 0;
178   }
179
180   addr->cycle = cycle;
181   addr->pending_penalty = 0;
182   wg->pq.push(DownstreamAddrEntry{addr, addr->seq, addr->cycle});
183   addr->queued = true;
184
185   if (!wg->queued) {
186     if (!wgpq.empty()) {
187       auto &top = wgpq.top();
188       cycle = top.cycle;
189     } else {
190       cycle = 0;
191     }
192
193     wg->cycle = cycle;
194     wg->pending_penalty = 0;
195     wgpq.push(WeightGroupEntry{wg, wg->seq, wg->cycle});
196     wg->queued = true;
197   }
198 }
199 } // namespace
200
// Installs |downstreamconf| as the active backend configuration.
// Current groups are retired and their pooled backend connections
// dropped; new groups with an identical configuration (compared via
// DownstreamKey) share a single SharedDownstreamAddr.
void Worker::replace_downstream_config(
    std::shared_ptr<DownstreamConfig> downstreamconf) {
  for (auto &g : downstream_addr_groups_) {
    g->retired = true;

    auto &shared_addr = g->shared_addr;
    for (auto &addr : shared_addr->addrs) {
      addr.dconn_pool->remove_all();
    }
  }

  downstreamconf_ = downstreamconf;

  // Making a copy is much faster with multiple thread on
  // backendconfig API call.
  auto groups = downstreamconf->addr_groups;

  downstream_addr_groups_ =
      std::vector<std::shared_ptr<DownstreamAddrGroup>>(groups.size());

  // Maps the configuration key of an already-built group to its index
  // so later identical groups can reuse its SharedDownstreamAddr.
  std::map<DownstreamKey, size_t> addr_groups_indexer;
#ifdef HAVE_MRUBY
  // TODO It is a bit less efficient because
  // mruby::create_mruby_context returns std::unique_ptr and we cannot
  // use std::make_shared.
  std::map<StringRef, std::shared_ptr<mruby::MRubyContext>> shared_mruby_ctxs;
#endif // HAVE_MRUBY

  for (size_t i = 0; i < groups.size(); ++i) {
    auto &src = groups[i];
    auto &dst = downstream_addr_groups_[i];

    dst = std::make_shared<DownstreamAddrGroup>();
    dst->pattern =
        ImmutableString{std::begin(src.pattern), std::end(src.pattern)};

    auto shared_addr = std::make_shared<SharedDownstreamAddr>();

    shared_addr->addrs.resize(src.addrs.size());
    shared_addr->affinity.type = src.affinity.type;
    if (src.affinity.type == SessionAffinity::COOKIE) {
      // Copy cookie strings into this SharedDownstreamAddr's own
      // allocator so they do not dangle when |src| goes away.
      shared_addr->affinity.cookie.name =
          make_string_ref(shared_addr->balloc, src.affinity.cookie.name);
      if (!src.affinity.cookie.path.empty()) {
        shared_addr->affinity.cookie.path =
            make_string_ref(shared_addr->balloc, src.affinity.cookie.path);
      }
      shared_addr->affinity.cookie.secure = src.affinity.cookie.secure;
    }
    shared_addr->affinity_hash = src.affinity_hash;
    shared_addr->redirect_if_not_tls = src.redirect_if_not_tls;
    shared_addr->timeout.read = src.timeout.read;
    shared_addr->timeout.write = src.timeout.write;

    for (size_t j = 0; j < src.addrs.size(); ++j) {
      auto &src_addr = src.addrs[j];
      auto &dst_addr = shared_addr->addrs[j];

      dst_addr.addr = src_addr.addr;
      dst_addr.host = make_string_ref(shared_addr->balloc, src_addr.host);
      dst_addr.hostport =
          make_string_ref(shared_addr->balloc, src_addr.hostport);
      dst_addr.port = src_addr.port;
      dst_addr.host_unix = src_addr.host_unix;
      dst_addr.weight = src_addr.weight;
      dst_addr.group = make_string_ref(shared_addr->balloc, src_addr.group);
      dst_addr.group_weight = src_addr.group_weight;
      dst_addr.proto = src_addr.proto;
      dst_addr.tls = src_addr.tls;
      dst_addr.sni = make_string_ref(shared_addr->balloc, src_addr.sni);
      dst_addr.fall = src_addr.fall;
      dst_addr.rise = src_addr.rise;
      dst_addr.dns = src_addr.dns;
      dst_addr.upgrade_scheme = src_addr.upgrade_scheme;

      // The callback captures a raw pointer (not the shared_ptr) so
      // it does not keep SharedDownstreamAddr alive; dst_addr is
      // owned by shared_addr, so the callback cannot outlive it.
      auto shared_addr_ptr = shared_addr.get();

      dst_addr.connect_blocker = std::make_unique<ConnectBlocker>(
          randgen_, loop_, nullptr, [shared_addr_ptr, &dst_addr]() {
            if (!dst_addr.queued) {
              if (!dst_addr.wg) {
                return;
              }
              ensure_enqueue_addr(shared_addr_ptr->pq, dst_addr.wg, &dst_addr);
            }
          });

      dst_addr.live_check = std::make_unique<LiveCheck>(
          loop_, cl_ssl_ctx_, this, &dst_addr, randgen_);
    }

#ifdef HAVE_MRUBY
    // Groups referring to the same mruby script share one context.
    auto mruby_ctx_it = shared_mruby_ctxs.find(src.mruby_file);
    if (mruby_ctx_it == std::end(shared_mruby_ctxs)) {
      shared_addr->mruby_ctx = mruby::create_mruby_context(src.mruby_file);
      assert(shared_addr->mruby_ctx);
      shared_mruby_ctxs.emplace(src.mruby_file, shared_addr->mruby_ctx);
    } else {
      shared_addr->mruby_ctx = (*mruby_ctx_it).second;
    }
#endif // HAVE_MRUBY

    // share the connection if patterns have the same set of backend
    // addresses.

    auto dkey = create_downstream_key(shared_addr, src.mruby_file);
    auto it = addr_groups_indexer.find(dkey);

    if (it == std::end(addr_groups_indexer)) {
      // First group with this configuration: randomize address order
      // and build the weighted scheduling structures.
      std::shuffle(std::begin(shared_addr->addrs), std::end(shared_addr->addrs),
                   randgen_);

      size_t seq = 0;
      for (auto &addr : shared_addr->addrs) {
        addr.dconn_pool = std::make_unique<DownstreamConnectionPool>();
        addr.seq = seq++;
      }

      if (shared_addr->affinity.type == SessionAffinity::NONE) {
        // Count distinct backend groups first so wgs can be sized in
        // one shot; pointers into it are stored in addr.wg and must
        // not be invalidated afterwards.
        std::map<StringRef, WeightGroup *> wgs;
        size_t num_wgs = 0;
        for (auto &addr : shared_addr->addrs) {
          if (wgs.find(addr.group) == std::end(wgs)) {
            ++num_wgs;
            wgs.emplace(addr.group, nullptr);
          }
        }

        shared_addr->wgs = std::vector<WeightGroup>(num_wgs);

        for (auto &addr : shared_addr->addrs) {
          auto &wg = wgs[addr.group];
          if (wg == nullptr) {
            wg = &shared_addr->wgs[--num_wgs];
            wg->seq = num_wgs;
          }

          wg->weight = addr.group_weight;
          wg->pq.push(DownstreamAddrEntry{&addr, addr.seq, addr.cycle});
          addr.queued = true;
          addr.wg = wg;
        }

        assert(num_wgs == 0);

        for (auto &kv : wgs) {
          shared_addr->pq.push(
              WeightGroupEntry{kv.second, kv.second->seq, kv.second->cycle});
          kv.second->queued = true;
        }
      }

      dst->shared_addr = shared_addr;

      addr_groups_indexer.emplace(std::move(dkey), i);
    } else {
      // Identical configuration already built: share its
      // SharedDownstreamAddr (and thus its connection pools).
      auto &g = *(std::begin(downstream_addr_groups_) + (*it).second);
      if (LOG_ENABLED(INFO)) {
        LOG(INFO) << dst->pattern << " shares the same backend group with "
                  << g->pattern;
      }
      dst->shared_addr = g->shared_addr;
    }
  }
}
366
// Stops every libev watcher registered in the constructor before the
// worker is destroyed.
Worker::~Worker() {
  ev_async_stop(loop_, &w_);
  ev_timer_stop(loop_, &mcpool_clear_timer_);
  ev_timer_stop(loop_, &proc_wev_timer_);
}
372
// Arms mcpool_clear_timer_ so that mcpool_clear_cb frees the memchunk
// pool once the worker is idle.
void Worker::schedule_clear_mcpool() {
  // libev manual says: "If the watcher is already active nothing will
  // happen."  Since we don't change any timeout here, we don't have
  // to worry about querying ev_is_active.
  ev_timer_start(loop_, &mcpool_clear_timer_);
}
379
// Blocks until the thread launched by run_async() finishes; no-op in
// NOTHREADS builds.
void Worker::wait() {
#ifndef NOTHREADS
  fut_.get();
#endif // !NOTHREADS
}
385
// Runs this worker's event loop on a dedicated async thread.  The
// thread reopens log files for itself first and deletes its log
// config when the loop exits.  No-op in NOTHREADS builds.
void Worker::run_async() {
#ifndef NOTHREADS
  fut_ = std::async(std::launch::async, [this] {
    (void)reopen_log_files(get_config()->logging);
    ev_run(loop_);
    delete_log_config();
  });
#endif // !NOTHREADS
}
395
// Queues |event| for this worker and wakes its event loop.  Intended
// to be callable from other threads: the queue is mutex-protected and
// ev_async_send is libev's thread-safe wakeup primitive.
void Worker::send(const WorkerEvent &event) {
  {
    std::lock_guard<std::mutex> g(m_);

    q_.push_back(event);
  }

  ev_async_send(loop_, &w_);
}
405
// Pops one queued WorkerEvent and dispatches it.  Only one event is
// handled per invocation; proc_wev_timer_ re-triggers this function
// until the queue is drained.
void Worker::process_events() {
  WorkerEvent wev;
  {
    std::lock_guard<std::mutex> g(m_);

    // Process event one at a time.  This is important for
    // WorkerEventType::NEW_CONNECTION event since accepting large
    // number of new connections at once may delay time to 1st byte
    // for existing connections.

    if (q_.empty()) {
      ev_timer_stop(loop_, &proc_wev_timer_);
      return;
    }

    wev = q_.front();
    q_.pop_front();
  }

  // Schedule another pass for any events still queued; stopped above
  // once the queue is empty.
  ev_timer_start(loop_, &proc_wev_timer_);

  auto config = get_config();

  auto worker_connections = config->conn.upstream.worker_connections;

  switch (wev.type) {
  case WorkerEventType::NEW_CONNECTION: {
    if (LOG_ENABLED(INFO)) {
      WLOG(INFO, this) << "WorkerEvent: client_fd=" << wev.client_fd
                       << ", addrlen=" << wev.client_addrlen;
    }

    // Enforce the per-worker connection limit before accepting.
    if (worker_stat_.num_connections >= worker_connections) {

      if (LOG_ENABLED(INFO)) {
        WLOG(INFO, this) << "Too many connections >= " << worker_connections;
      }

      close(wev.client_fd);

      break;
    }

    auto client_handler =
        tls::accept_connection(this, wev.client_fd, &wev.client_addr.sa,
                               wev.client_addrlen, wev.faddr);
    if (!client_handler) {
      // NOTE(review): this ERROR line is only emitted when INFO
      // logging is enabled — confirm whether that gating is intended.
      if (LOG_ENABLED(INFO)) {
        WLOG(ERROR, this) << "ClientHandler creation failed";
      }
      close(wev.client_fd);
      break;
    }

    if (LOG_ENABLED(INFO)) {
      WLOG(INFO, this) << "CLIENT_HANDLER:" << client_handler << " created ";
    }

    break;
  }
  case WorkerEventType::REOPEN_LOG:
    WLOG(NOTICE, this) << "Reopening log files: worker process (thread " << this
                       << ")";

    reopen_log_files(config->logging);

    break;
  case WorkerEventType::GRACEFUL_SHUTDOWN:
    WLOG(NOTICE, this) << "Graceful shutdown commencing";

    graceful_shutdown_ = true;

    // With no live connections the event loop can stop right away.
    if (worker_stat_.num_connections == 0) {
      ev_break(loop_);

      return;
    }

    break;
  case WorkerEventType::REPLACE_DOWNSTREAM:
    WLOG(NOTICE, this) << "Replace downstream";

    replace_downstream_config(wev.downstreamconf);

    break;
  default:
    if (LOG_ENABLED(INFO)) {
      WLOG(INFO, this) << "unknown event type " << static_cast<int>(wev.type);
    }
  }
}
497
// Certificate lookup tree supplied at construction.
tls::CertLookupTree *Worker::get_cert_lookup_tree() const { return cert_tree_; }
499
// Returns the current TLS session ticket keys.  The read is
// synchronized against the single writer (set_ticket_keys) either via
// an atomic shared_ptr load or via ticket_keys_m_.
std::shared_ptr<TicketKeys> Worker::get_ticket_keys() {
#ifdef HAVE_ATOMIC_STD_SHARED_PTR
  return std::atomic_load_explicit(&ticket_keys_, std::memory_order_acquire);
#else  // !HAVE_ATOMIC_STD_SHARED_PTR
  std::lock_guard<std::mutex> g(ticket_keys_m_);
  return ticket_keys_;
#endif // !HAVE_ATOMIC_STD_SHARED_PTR
}
508
// Installs new TLS session ticket keys; pairs with get_ticket_keys().
void Worker::set_ticket_keys(std::shared_ptr<TicketKeys> ticket_keys) {
#ifdef HAVE_ATOMIC_STD_SHARED_PTR
  // This is single writer
  std::atomic_store_explicit(&ticket_keys_, std::move(ticket_keys),
                             std::memory_order_release);
#else  // !HAVE_ATOMIC_STD_SHARED_PTR
  std::lock_guard<std::mutex> g(ticket_keys_m_);
  ticket_keys_ = std::move(ticket_keys);
#endif // !HAVE_ATOMIC_STD_SHARED_PTR
}
519
// Per-worker statistics (e.g. num_connections).
WorkerStat *Worker::get_worker_stat() { return &worker_stat_; }

// Event loop this worker runs on.
struct ev_loop *Worker::get_loop() const {
  return loop_;
}

// TLS context for the server (sv) side.
SSL_CTX *Worker::get_sv_ssl_ctx() const { return sv_ssl_ctx_; }

// TLS context for the client (cl) side.
SSL_CTX *Worker::get_cl_ssl_ctx() const { return cl_ssl_ctx_; }

void Worker::set_graceful_shutdown(bool f) { graceful_shutdown_ = f; }

bool Worker::get_graceful_shutdown() const { return graceful_shutdown_; }

// Memory chunk pool; cleared when idle via schedule_clear_mcpool().
MemchunkPool *Worker::get_mcpool() { return &mcpool_; }

// May return nullptr when no memcached TLS session cache is
// configured (see constructor).
MemcachedDispatcher *Worker::get_session_cache_memcached_dispatcher() {
  return session_cache_memcached_dispatcher_.get();
}

// Per-worker Mersenne Twister RNG seeded in the constructor.
std::mt19937 &Worker::get_randgen() { return randgen_; }
541
#ifdef HAVE_MRUBY
// Creates this worker's mruby context from the globally configured
// script file.  Returns 0 on success, or -1 if the context could not
// be created.
int Worker::create_mruby_context() {
  mruby_ctx_ = mruby::create_mruby_context(StringRef{get_config()->mruby_file});
  if (!mruby_ctx_) {
    return -1;
  }

  return 0;
}

mruby::MRubyContext *Worker::get_mruby_context() const {
  return mruby_ctx_.get();
}
#endif // HAVE_MRUBY
556
// Active backend address groups built by replace_downstream_config().
std::vector<std::shared_ptr<DownstreamAddrGroup>> &
Worker::get_downstream_addr_groups() {
  return downstream_addr_groups_;
}

// Worker-wide connect blocker (distinct from the per-address blockers
// created in replace_downstream_config()).
ConnectBlocker *Worker::get_connect_blocker() const {
  return connect_blocker_.get();
}

const DownstreamConfig *Worker::get_downstream_config() const {
  return downstreamconf_.get();
}

ConnectionHandler *Worker::get_connection_handler() const {
  return conn_handler_;
}

DNSTracker *Worker::get_dns_tracker() { return &dns_tracker_; }
575
namespace {
// Returns the index in |groups| whose pattern matches host+path,
// trying exact host routes, then wildcard host patterns, then
// host-less path-only routes; falls back to |catch_all| when nothing
// matches.
size_t match_downstream_addr_group_host(
    const RouterConfig &routerconf, const StringRef &host,
    const StringRef &path,
    const std::vector<std::shared_ptr<DownstreamAddrGroup>> &groups,
    size_t catch_all, BlockAllocator &balloc) {

  const auto &router = routerconf.router;
  const auto &rev_wildcard_router = routerconf.rev_wildcard_router;
  const auto &wildcard_patterns = routerconf.wildcard_patterns;

  if (LOG_ENABLED(INFO)) {
    LOG(INFO) << "Perform mapping selection, using host=" << host
              << ", path=" << path;
  }

  auto group = router.match(host, path);
  if (group != -1) {
    if (LOG_ENABLED(INFO)) {
      LOG(INFO) << "Found pattern with query " << host << path
                << ", matched pattern=" << groups[group]->pattern;
    }
    return group;
  }

  if (!wildcard_patterns.empty() && !host.empty()) {
    // Wildcard matching works on the host reversed with its first
    // character dropped, fed to a prefix-match router — presumably
    // because a wildcard needs at least one character before the
    // suffix; confirm against the wildcard_patterns construction.
    auto rev_host_src = make_byte_ref(balloc, host.size() - 1);
    auto ep =
        std::copy(std::begin(host) + 1, std::end(host), rev_host_src.base);
    std::reverse(rev_host_src.base, ep);
    auto rev_host = StringRef{rev_host_src.base, ep};

    ssize_t best_group = -1;
    const RNode *last_node = nullptr;

    // Walk successively longer reversed-host prefixes; the last
    // path-matching hit wins.
    for (;;) {
      size_t nread = 0;
      auto wcidx =
          rev_wildcard_router.match_prefix(&nread, &last_node, rev_host);
      if (wcidx == -1) {
        break;
      }

      rev_host = StringRef{std::begin(rev_host) + nread, std::end(rev_host)};

      auto &wc = wildcard_patterns[wcidx];
      auto group = wc.router.match(StringRef{}, path);
      if (group != -1) {
        // We sorted wildcard_patterns in a way that first match is the
        // longest host pattern.
        if (LOG_ENABLED(INFO)) {
          LOG(INFO) << "Found wildcard pattern with query " << host << path
                    << ", matched pattern=" << groups[group]->pattern;
        }

        best_group = group;
      }
    }

    if (best_group != -1) {
      return best_group;
    }
  }

  // Fall back to patterns which specify no host at all.
  group = router.match(StringRef::from_lit(""), path);
  if (group != -1) {
    if (LOG_ENABLED(INFO)) {
      LOG(INFO) << "Found pattern with query " << path
                << ", matched pattern=" << groups[group]->pattern;
    }
    return group;
  }

  if (LOG_ENABLED(INFO)) {
    LOG(INFO) << "None match.  Use catch-all pattern";
  }
  return catch_all;
}
} // namespace
655
656 size_t match_downstream_addr_group(
657     const RouterConfig &routerconf, const StringRef &hostport,
658     const StringRef &raw_path,
659     const std::vector<std::shared_ptr<DownstreamAddrGroup>> &groups,
660     size_t catch_all, BlockAllocator &balloc) {
661   if (std::find(std::begin(hostport), std::end(hostport), '/') !=
662       std::end(hostport)) {
663     // We use '/' specially, and if '/' is included in host, it breaks
664     // our code.  Select catch-all case.
665     return catch_all;
666   }
667
668   auto fragment = std::find(std::begin(raw_path), std::end(raw_path), '#');
669   auto query = std::find(std::begin(raw_path), fragment, '?');
670   auto path = StringRef{std::begin(raw_path), query};
671
672   if (path.empty() || path[0] != '/') {
673     path = StringRef::from_lit("/");
674   }
675
676   if (hostport.empty()) {
677     return match_downstream_addr_group_host(routerconf, hostport, path, groups,
678                                             catch_all, balloc);
679   }
680
681   StringRef host;
682   if (hostport[0] == '[') {
683     // assume this is IPv6 numeric address
684     auto p = std::find(std::begin(hostport), std::end(hostport), ']');
685     if (p == std::end(hostport)) {
686       return catch_all;
687     }
688     if (p + 1 < std::end(hostport) && *(p + 1) != ':') {
689       return catch_all;
690     }
691     host = StringRef{std::begin(hostport), p + 1};
692   } else {
693     auto p = std::find(std::begin(hostport), std::end(hostport), ':');
694     if (p == std::begin(hostport)) {
695       return catch_all;
696     }
697     host = StringRef{std::begin(hostport), p};
698   }
699
700   if (std::find_if(std::begin(host), std::end(host), [](char c) {
701         return 'A' <= c || c <= 'Z';
702       }) != std::end(host)) {
703     auto low_host = make_byte_ref(balloc, host.size() + 1);
704     auto ep = std::copy(std::begin(host), std::end(host), low_host.base);
705     *ep = '\0';
706     util::inp_strlower(low_host.base, ep);
707     host = StringRef{low_host.base, ep};
708   }
709   return match_downstream_addr_group_host(routerconf, host, path, groups,
710                                           catch_all, balloc);
711 }
712
713 void downstream_failure(DownstreamAddr *addr, const Address *raddr) {
714   const auto &connect_blocker = addr->connect_blocker;
715
716   if (connect_blocker->in_offline()) {
717     return;
718   }
719
720   connect_blocker->on_failure();
721
722   if (addr->fall == 0) {
723     return;
724   }
725
726   auto fail_count = connect_blocker->get_fail_count();
727
728   if (fail_count >= addr->fall) {
729     if (raddr) {
730       LOG(WARN) << "Could not connect to " << util::to_numeric_addr(raddr)
731                 << " " << fail_count
732                 << " times in a row; considered as offline";
733     } else {
734       LOG(WARN) << "Could not connect to " << addr->host << ":" << addr->port
735                 << " " << fail_count
736                 << " times in a row; considered as offline";
737     }
738
739     connect_blocker->offline();
740
741     if (addr->rise) {
742       addr->live_check->schedule();
743     }
744   }
745 }
746
747 } // namespace shrpx