/*
 * nghttp2 - HTTP/2 C Library
 *
 * Copyright (c) 2012 Tatsuhiro Tsujikawa
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "shrpx_worker.h"

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif // HAVE_UNISTD_H

#include <memory>

#include "shrpx_ssl.h"
#include "shrpx_log.h"
#include "shrpx_client_handler.h"
#include "shrpx_http2_session.h"
#include "shrpx_log_config.h"
#include "shrpx_connect_blocker.h"
#include "shrpx_live_check.h"
#include "shrpx_memcached_dispatcher.h"
#ifdef HAVE_MRUBY
#include "shrpx_mruby.h"
#endif // HAVE_MRUBY
#include "util.h"
#include "template.h"

namespace shrpx {

namespace {
void eventcb(struct ev_loop *loop, ev_async *w, int revents) {
  auto worker = static_cast<Worker *>(w->data);
  worker->process_events();
}
} // namespace
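
// Timer callback which clears the memchunk pool, but only once this
// worker no longer handles any connection; otherwise it returns
// without touching the pool.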
namespace {
void mcpool_clear_cb(struct ev_loop *loop, ev_timer *w, int revents) {
  auto worker = static_cast<Worker *>(w->data);
  if (worker->get_worker_stat()->num_connections != 0) {
    return;
  }
  worker->get_mcpool()->clear();
}
} // namespace
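
// Returns true if |lhs| and |rhs| contain the same set of backend
// addresses (compared by host, port, UNIX domain flag, protocol, TLS
// settings, and fall/rise thresholds), regardless of the order in
// which the addresses appear.  Session affinity settings must also
// match.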
namespace {
bool match_shared_downstream_addr(
    const std::shared_ptr<SharedDownstreamAddr> &lhs,
    const std::shared_ptr<SharedDownstreamAddr> &rhs) {
  if (lhs->addrs.size() != rhs->addrs.size()) {
    return false;
  }

  if (lhs->affinity != rhs->affinity) {
    return false;
  }

  auto used = std::vector<bool>(lhs->addrs.size());

  for (auto &a : lhs->addrs) {
    size_t i;
    for (i = 0; i < rhs->addrs.size(); ++i) {
      if (used[i]) {
        continue;
      }

      auto &b = rhs->addrs[i];
      if (a.host == b.host && a.port == b.port && a.host_unix == b.host_unix &&
          a.proto == b.proto && a.tls == b.tls && a.sni == b.sni &&
          a.fall == b.fall && a.rise == b.rise) {
        break;
      }
    }

    if (i == rhs->addrs.size()) {
      return false;
    }

    used[i] = true;
  }

  return true;
}
} // namespace

namespace {
std::random_device rd;
} // namespace
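
// A Worker drives its own event loop.  Other threads communicate with
// it by queueing WorkerEvent objects through Worker::send(), which
// wakes the loop up via the ev_async watcher initialized below.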
Worker::Worker(struct ev_loop *loop, SSL_CTX *sv_ssl_ctx, SSL_CTX *cl_ssl_ctx,
               SSL_CTX *tls_session_cache_memcached_ssl_ctx,
               ssl::CertLookupTree *cert_tree,
               const std::shared_ptr<TicketKeys> &ticket_keys,
               ConnectionHandler *conn_handler,
               std::shared_ptr<DownstreamConfig> downstreamconf)
    : randgen_(rd()),
      worker_stat_{},
      loop_(loop),
      sv_ssl_ctx_(sv_ssl_ctx),
      cl_ssl_ctx_(cl_ssl_ctx),
      cert_tree_(cert_tree),
      conn_handler_(conn_handler),
      ticket_keys_(ticket_keys),
      connect_blocker_(
          make_unique<ConnectBlocker>(randgen_, loop_, []() {}, []() {})),
      graceful_shutdown_(false) {
  ev_async_init(&w_, eventcb);
  w_.data = this;
  ev_async_start(loop_, &w_);

  ev_timer_init(&mcpool_clear_timer_, mcpool_clear_cb, 0., 0.);
  mcpool_clear_timer_.data = this;

  auto &session_cacheconf = get_config()->tls.session_cache;

  if (!session_cacheconf.memcached.host.empty()) {
    session_cache_memcached_dispatcher_ = make_unique<MemcachedDispatcher>(
        &session_cacheconf.memcached.addr, loop,
        tls_session_cache_memcached_ssl_ctx,
        StringRef{session_cacheconf.memcached.host}, &mcpool_);
  }

  replace_downstream_config(std::move(downstreamconf));
}
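
// Replaces the current set of backend (downstream) address groups
// with the ones described in |downstreamconf|.  Existing groups are
// marked as retired and their pooled connections are dropped.  Groups
// whose backend address sets compare equal end up sharing a single
// SharedDownstreamAddr.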
void Worker::replace_downstream_config(
    std::shared_ptr<DownstreamConfig> downstreamconf) {
  for (auto &g : downstream_addr_groups_) {
    g->retired = true;

    auto &shared_addr = g->shared_addr;

    if (shared_addr->affinity == AFFINITY_NONE) {
      shared_addr->dconn_pool.remove_all();
      continue;
    }

    for (auto &addr : shared_addr->addrs) {
      addr.dconn_pool->remove_all();
    }
  }

  downstreamconf_ = downstreamconf;

  downstream_addr_groups_ = std::vector<std::shared_ptr<DownstreamAddrGroup>>(
      downstreamconf->addr_groups.size());

  for (size_t i = 0; i < downstreamconf->addr_groups.size(); ++i) {
    auto &src = downstreamconf->addr_groups[i];
    auto &dst = downstream_addr_groups_[i];

    dst = std::make_shared<DownstreamAddrGroup>();
    dst->pattern = src.pattern;

    // TODO for some reason, clang-3.6 which comes with Ubuntu 15.10
    // does not value initialize with std::make_shared.
    auto shared_addr = std::make_shared<SharedDownstreamAddr>();

    shared_addr->addrs.resize(src.addrs.size());
    shared_addr->affinity = src.affinity;

    size_t num_http1 = 0;
    size_t num_http2 = 0;

    for (size_t j = 0; j < src.addrs.size(); ++j) {
      auto &src_addr = src.addrs[j];
      auto &dst_addr = shared_addr->addrs[j];

      dst_addr.addr = src_addr.addr;
      dst_addr.host = src_addr.host;
      dst_addr.hostport = src_addr.hostport;
      dst_addr.port = src_addr.port;
      dst_addr.host_unix = src_addr.host_unix;
      dst_addr.proto = src_addr.proto;
      dst_addr.tls = src_addr.tls;
      dst_addr.sni = src_addr.sni;
      dst_addr.fall = src_addr.fall;
      dst_addr.rise = src_addr.rise;

      auto shared_addr_ptr = shared_addr.get();

      dst_addr.connect_blocker =
          make_unique<ConnectBlocker>(randgen_, loop_,
                                      [shared_addr_ptr, &dst_addr]() {
                                        switch (dst_addr.proto) {
                                        case PROTO_HTTP1:
                                          --shared_addr_ptr->http1_pri.weight;
                                          break;
                                        case PROTO_HTTP2:
                                          --shared_addr_ptr->http2_pri.weight;
                                          break;
                                        default:
                                          assert(0);
                                        }
                                      },
                                      [shared_addr_ptr, &dst_addr]() {
                                        switch (dst_addr.proto) {
                                        case PROTO_HTTP1:
                                          ++shared_addr_ptr->http1_pri.weight;
                                          break;
                                        case PROTO_HTTP2:
                                          ++shared_addr_ptr->http2_pri.weight;
                                          break;
                                        default:
                                          assert(0);
                                        }
                                      });

      dst_addr.live_check =
          make_unique<LiveCheck>(loop_, cl_ssl_ctx_, this, &dst_addr, randgen_);

      if (dst_addr.proto == PROTO_HTTP2) {
        ++num_http2;
      } else {
        assert(dst_addr.proto == PROTO_HTTP1);
        ++num_http1;
      }
    }
|
2016-03-22 15:51:00 +01:00
|
|
|
|
|
|
|
// share the connection if patterns have the same set of backend
|
|
|
|
// addresses.
|
|
|
|
auto end = std::begin(downstream_addr_groups_) + i;
|
2016-06-02 18:20:49 +02:00
|
|
|
auto it = std::find_if(
|
|
|
|
std::begin(downstream_addr_groups_), end,
|
|
|
|
[&shared_addr](const std::shared_ptr<DownstreamAddrGroup> &group) {
|
|
|
|
return match_shared_downstream_addr(group->shared_addr, shared_addr);
|
|
|
|
});
|
2016-03-22 15:51:00 +01:00
|
|
|
|
|
|
|
if (it == end) {
|
2016-05-25 16:07:04 +02:00
|
|
|
if (LOG_ENABLED(INFO)) {
|
|
|
|
LOG(INFO) << "number of http/1.1 backend: " << num_http1
|
|
|
|
<< ", number of h2 backend: " << num_http2;
|
|
|
|
}
|
|
|
|
|
2016-05-26 06:47:01 +02:00
|
|
|
shared_addr->http1_pri.weight = num_http1;
|
|
|
|
shared_addr->http2_pri.weight = num_http2;
|
2016-05-24 16:36:43 +02:00
|
|
|
|
2016-06-09 15:35:59 +02:00
|
|
|
if (shared_addr->affinity != AFFINITY_NONE) {
|
|
|
|
for (auto &addr : shared_addr->addrs) {
|
|
|
|
addr.dconn_pool = make_unique<DownstreamConnectionPool>();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-06-02 18:20:49 +02:00
|
|
|
dst->shared_addr = shared_addr;
|
2016-03-22 15:51:00 +01:00
|
|
|
} else {
|
2016-04-16 15:04:35 +02:00
|
|
|
if (LOG_ENABLED(INFO)) {
|
2016-06-02 18:20:49 +02:00
|
|
|
LOG(INFO) << dst->pattern << " shares the same backend group with "
|
|
|
|
<< (*it)->pattern;
|
2016-04-16 15:04:35 +02:00
|
|
|
}
|
2016-06-02 18:20:49 +02:00
|
|
|
dst->shared_addr = (*it)->shared_addr;
|
2016-03-22 15:51:00 +01:00
|
|
|
}
|
2016-02-21 06:53:06 +01:00
|
|
|
}
|
2014-12-27 18:59:06 +01:00
|
|
|
}

Worker::~Worker() {
  ev_async_stop(loop_, &w_);
  ev_timer_stop(loop_, &mcpool_clear_timer_);
}

void Worker::schedule_clear_mcpool() {
  // libev manual says: "If the watcher is already active nothing will
  // happen." Since we don't change any timeout here, we don't have
  // to worry about querying ev_is_active.
  ev_timer_start(loop_, &mcpool_clear_timer_);
}

void Worker::wait() {
#ifndef NOTHREADS
  fut_.get();
#endif // !NOTHREADS
}

void Worker::run_async() {
#ifndef NOTHREADS
  fut_ = std::async(std::launch::async, [this] {
    (void)reopen_log_files();
    ev_run(loop_);
    delete log_config();
  });
#endif // !NOTHREADS
}
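
// send() and process_events() form the cross-thread event queue:
// producers append to q_ while holding m_ and signal the ev_async
// watcher, and this worker drains the queue on its own loop.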
void Worker::send(const WorkerEvent &event) {
  {
    std::lock_guard<std::mutex> g(m_);

    q_.push_back(event);
  }

  ev_async_send(loop_, &w_);
}

void Worker::process_events() {
  std::vector<WorkerEvent> q;
  {
    std::lock_guard<std::mutex> g(m_);
    q.swap(q_);
  }

  auto worker_connections = get_config()->conn.upstream.worker_connections;

  for (auto &wev : q) {
    switch (wev.type) {
    case NEW_CONNECTION: {
      if (LOG_ENABLED(INFO)) {
        WLOG(INFO, this) << "WorkerEvent: client_fd=" << wev.client_fd
                         << ", addrlen=" << wev.client_addrlen;
      }

      if (worker_stat_.num_connections >= worker_connections) {

        if (LOG_ENABLED(INFO)) {
          WLOG(INFO, this) << "Too many connections >= " << worker_connections;
        }

        close(wev.client_fd);

        break;
      }

      auto client_handler =
          ssl::accept_connection(this, wev.client_fd, &wev.client_addr.sa,
                                 wev.client_addrlen, wev.faddr);
      if (!client_handler) {
        if (LOG_ENABLED(INFO)) {
          WLOG(ERROR, this) << "ClientHandler creation failed";
        }
        close(wev.client_fd);
        break;
      }

      if (LOG_ENABLED(INFO)) {
        WLOG(INFO, this) << "CLIENT_HANDLER:" << client_handler << " created ";
      }

      break;
    }
    case REOPEN_LOG:
      WLOG(NOTICE, this) << "Reopening log files: worker process (thread "
                         << this << ")";

      reopen_log_files();

      break;
    case GRACEFUL_SHUTDOWN:
      WLOG(NOTICE, this) << "Graceful shutdown commencing";

      graceful_shutdown_ = true;

      if (worker_stat_.num_connections == 0) {
        ev_break(loop_);

        return;
      }

      break;
    case REPLACE_DOWNSTREAM:
      WLOG(NOTICE, this) << "Replace downstream";

      replace_downstream_config(wev.downstreamconf);

      break;
    default:
      if (LOG_ENABLED(INFO)) {
        WLOG(INFO, this) << "unknown event type " << wev.type;
      }
    }
  }
}

ssl::CertLookupTree *Worker::get_cert_lookup_tree() const { return cert_tree_; }

std::shared_ptr<TicketKeys> Worker::get_ticket_keys() {
  std::lock_guard<std::mutex> g(m_);
  return ticket_keys_;
}

void Worker::set_ticket_keys(std::shared_ptr<TicketKeys> ticket_keys) {
  std::lock_guard<std::mutex> g(m_);
  ticket_keys_ = std::move(ticket_keys);
}

WorkerStat *Worker::get_worker_stat() { return &worker_stat_; }

struct ev_loop *Worker::get_loop() const {
  return loop_;
}

SSL_CTX *Worker::get_sv_ssl_ctx() const { return sv_ssl_ctx_; }

SSL_CTX *Worker::get_cl_ssl_ctx() const { return cl_ssl_ctx_; }

void Worker::set_graceful_shutdown(bool f) { graceful_shutdown_ = f; }

bool Worker::get_graceful_shutdown() const { return graceful_shutdown_; }

MemchunkPool *Worker::get_mcpool() { return &mcpool_; }

MemcachedDispatcher *Worker::get_session_cache_memcached_dispatcher() {
  return session_cache_memcached_dispatcher_.get();
}

std::mt19937 &Worker::get_randgen() { return randgen_; }

#ifdef HAVE_MRUBY
int Worker::create_mruby_context() {
  mruby_ctx_ = mruby::create_mruby_context(StringRef{get_config()->mruby_file});
  if (!mruby_ctx_) {
    return -1;
  }

  return 0;
}

mruby::MRubyContext *Worker::get_mruby_context() const {
  return mruby_ctx_.get();
}
#endif // HAVE_MRUBY

std::vector<std::shared_ptr<DownstreamAddrGroup>> &
Worker::get_downstream_addr_groups() {
  return downstream_addr_groups_;
}

ConnectBlocker *Worker::get_connect_blocker() const {
  return connect_blocker_.get();
}

const DownstreamConfig *Worker::get_downstream_config() const {
  return downstreamconf_.get();
}

ConnectionHandler *Worker::get_connection_handler() const {
  return conn_handler_;
}
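
// Selects a backend group for |host| and |path|.  Patterns registered
// in |router| are tried first with the given host, then wildcard host
// patterns (sorted so that the first match has the longest host
// suffix), then patterns without a host component.  If nothing
// matches, |catch_all| is returned.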
namespace {
size_t match_downstream_addr_group_host(
    const Router &router, const std::vector<WildcardPattern> &wildcard_patterns,
    const StringRef &host, const StringRef &path,
    const std::vector<std::shared_ptr<DownstreamAddrGroup>> &groups,
    size_t catch_all) {
  if (path.empty() || path[0] != '/') {
    auto group = router.match(host, StringRef::from_lit("/"));
    if (group != -1) {
      if (LOG_ENABLED(INFO)) {
        LOG(INFO) << "Found pattern with query " << host
                  << ", matched pattern=" << groups[group]->pattern;
      }
      return group;
    }
    return catch_all;
  }

  if (LOG_ENABLED(INFO)) {
    LOG(INFO) << "Perform mapping selection, using host=" << host
              << ", path=" << path;
  }

  auto group = router.match(host, path);
  if (group != -1) {
    if (LOG_ENABLED(INFO)) {
      LOG(INFO) << "Found pattern with query " << host << path
                << ", matched pattern=" << groups[group]->pattern;
    }
    return group;
  }

  for (auto it = std::begin(wildcard_patterns);
       it != std::end(wildcard_patterns); ++it) {
    /* left most '*' must match at least one character */
    if (host.size() <= (*it).host.size() ||
        !util::ends_with(std::begin(host), std::end(host),
                         std::begin((*it).host), std::end((*it).host))) {
      continue;
    }
    auto group = (*it).router.match(StringRef{}, path);
    if (group != -1) {
      // We sorted wildcard_patterns in a way that first match is the
      // longest host pattern.
      if (LOG_ENABLED(INFO)) {
        LOG(INFO) << "Found wildcard pattern with query " << host << path
                  << ", matched pattern=" << groups[group]->pattern;
      }
      return group;
    }
  }

  group = router.match(StringRef::from_lit(""), path);
  if (group != -1) {
    if (LOG_ENABLED(INFO)) {
      LOG(INFO) << "Found pattern with query " << path
                << ", matched pattern=" << groups[group]->pattern;
    }
    return group;
  }

  if (LOG_ENABLED(INFO)) {
    LOG(INFO) << "None match. Use catch-all pattern";
  }
  return catch_all;
}
} // namespace
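
// Public entry point for backend selection.  It strips the query and
// fragment from |raw_path|, extracts the host part from |hostport|
// (including bracketed IPv6 literals), lowercases it if necessary,
// and delegates to match_downstream_addr_group_host().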
size_t match_downstream_addr_group(
    const Router &router, const std::vector<WildcardPattern> &wildcard_patterns,
    const StringRef &hostport, const StringRef &raw_path,
    const std::vector<std::shared_ptr<DownstreamAddrGroup>> &groups,
    size_t catch_all) {
  if (std::find(std::begin(hostport), std::end(hostport), '/') !=
      std::end(hostport)) {
    // We use '/' specially, and if '/' is included in host, it breaks
    // our code. Select catch-all case.
    return catch_all;
  }

  auto fragment = std::find(std::begin(raw_path), std::end(raw_path), '#');
  auto query = std::find(std::begin(raw_path), fragment, '?');
  auto path = StringRef{std::begin(raw_path), query};

  if (hostport.empty()) {
    return match_downstream_addr_group_host(router, wildcard_patterns, hostport,
                                            path, groups, catch_all);
  }

  StringRef host;
  if (hostport[0] == '[') {
    // assume this is IPv6 numeric address
    auto p = std::find(std::begin(hostport), std::end(hostport), ']');
    if (p == std::end(hostport)) {
      return catch_all;
    }
    if (p + 1 < std::end(hostport) && *(p + 1) != ':') {
      return catch_all;
    }
    host = StringRef{std::begin(hostport), p + 1};
  } else {
    auto p = std::find(std::begin(hostport), std::end(hostport), ':');
    if (p == std::begin(hostport)) {
      return catch_all;
    }
    host = StringRef{std::begin(hostport), p};
  }

  std::string low_host;
  // Make a lowercase copy only when host actually contains an
  // uppercase letter.
  if (std::find_if(std::begin(host), std::end(host), [](char c) {
        return 'A' <= c && c <= 'Z';
      }) != std::end(host)) {
    low_host = host.str();
    util::inp_strlower(low_host);
    host = StringRef{low_host};
  }
  return match_downstream_addr_group_host(router, wildcard_patterns, host, path,
                                          groups, catch_all);
}
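
// Records a connection failure for |addr| on its ConnectBlocker.  If
// the address has failed addr->fall times in a row, it is taken
// offline, and a live check is scheduled when addr->rise is
// configured.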
void downstream_failure(DownstreamAddr *addr) {
  const auto &connect_blocker = addr->connect_blocker;

  if (connect_blocker->in_offline()) {
    return;
  }

  connect_blocker->on_failure();

  if (addr->fall == 0) {
    return;
  }

  auto fail_count = connect_blocker->get_fail_count();

  if (fail_count >= addr->fall) {
    LOG(WARN) << "Could not connect to " << util::to_numeric_addr(&addr->addr)
              << " " << fail_count << " times in a row; considered as offline";

    connect_blocker->offline();

    if (addr->rise) {
      addr->live_check->schedule();
    }
  }
}

} // namespace shrpx