/*
2014-03-30 12:09:21 +02:00
* nghttp2 - HTTP/2 C Library
*
* Copyright (c) 2012 Tatsuhiro Tsujikawa
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "shrpx_connection_handler.h"

#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>

#include <cerrno>
#include <thread>

#ifndef NOTHREADS
#include <spawn.h>
#endif // !NOTHREADS

#include "shrpx_client_handler.h"
#include "shrpx_ssl.h"
#include "shrpx_worker.h"
#include "shrpx_config.h"
#include "shrpx_http2_session.h"
#include "shrpx_connect_blocker.h"
#include "shrpx_downstream_connection.h"
#include "shrpx_accept_handler.h"
#include "util.h"
#include "template.h"
using namespace nghttp2;
namespace shrpx {
namespace {
// Timer callback which re-enables the frontend acceptors once the
// temporary disable period set by disable_acceptor_temporary() expires.
void acceptor_disable_cb(struct ev_loop *loop, ev_timer *w, int revent) {
  auto handler = static_cast<ConnectionHandler *>(w->data);

  // During graceful shutdown the acceptors must stay disabled; no new
  // connections are admitted.
  if (handler->get_graceful_shutdown()) {
    return;
  }

  handler->enable_acceptor();
}
} // namespace
2015-03-30 16:20:40 +02:00
namespace {
void ocsp_cb(struct ev_loop *loop, ev_timer *w, int revent) {
auto h = static_cast<ConnectionHandler *>(w->data);
// If we are in graceful shutdown period, we won't do ocsp query.
if (h->get_graceful_shutdown()) {
return;
}
h->proceed_next_cert_ocsp();
}
} // namespace
namespace {
// I/O callback fired when the ocsp query child process has written data
// to its stdout pipe.
void ocsp_read_cb(struct ev_loop *loop, ev_io *w, int revent) {
  static_cast<ConnectionHandler *>(w->data)->read_ocsp_chunk();
}
} // namespace
namespace {
// Child watcher callback fired when the ocsp query child process exits.
void ocsp_chld_cb(struct ev_loop *loop, ev_child *w, int revent) {
  static_cast<ConnectionHandler *>(w->data)->handle_ocsp_complete();
}
} // namespace
ConnectionHandler::ConnectionHandler(struct ev_loop *loop)
    : single_worker_(nullptr), loop_(loop), worker_round_robin_cnt_(0),
      graceful_shutdown_(false) {
  // Timer which re-enables the acceptors after a temporary disable.
  ev_timer_init(&disable_acceptor_timer_, acceptor_disable_cb, 0., 0.);
  disable_acceptor_timer_.data = this;

  // ocsp machinery: periodic update trigger, pipe reader and child
  // process watcher.  The fd-based watchers start out unarmed.
  ev_timer_init(&ocsp_timer_, ocsp_cb, 0., 0.);
  ocsp_timer_.data = this;

  ev_io_init(&ocsp_.rev, ocsp_read_cb, -1, EV_READ);
  ocsp_.rev.data = this;

  ev_child_init(&ocsp_.chldev, ocsp_chld_cb, 0, 0);
  ocsp_.chldev.data = this;

  ocsp_.next = 0;
  ocsp_.fd = -1;

  reset_ocsp();
}
2014-11-27 15:39:04 +01:00
ConnectionHandler::~ConnectionHandler() {
ev_timer_stop(loop_, &disable_acceptor_timer_);
2015-03-30 16:20:40 +02:00
ev_timer_stop(loop_, &ocsp_timer_);
}
void ConnectionHandler::worker_reopen_log_files() {
WorkerEvent wev;
memset(&wev, 0, sizeof(wev));
wev.type = REOPEN_LOG;
for (auto &worker : workers_) {
worker->send(wev);
}
}
void ConnectionHandler::worker_renew_ticket_keys(
const std::shared_ptr<TicketKeys> &ticket_keys) {
WorkerEvent wev;
memset(&wev, 0, sizeof(wev));
wev.type = RENEW_TICKET_KEYS;
wev.ticket_keys = ticket_keys;
for (auto &worker : workers_) {
worker->send(wev);
}
}
2015-02-11 11:18:41 +01:00
void ConnectionHandler::create_single_worker() {
auto cert_tree = ssl::create_cert_lookup_tree();
2015-03-30 16:20:40 +02:00
auto sv_ssl_ctx = ssl::setup_server_ssl_context(all_ssl_ctx_, cert_tree);
2015-02-11 11:18:41 +01:00
auto cl_ssl_ctx = ssl::setup_client_ssl_context();
single_worker_ = make_unique<Worker>(loop_, sv_ssl_ctx, cl_ssl_ctx, cert_tree,
ticket_keys_);
}
// Spawns |num| worker threads.  All workers share the same SSL contexts
// and certificate lookup tree, but each runs its own event loop.  No-op
// when built without thread support.
void ConnectionHandler::create_worker_thread(size_t num) {
#ifndef NOTHREADS
  assert(workers_.size() == 0);

  auto cert_tree = ssl::create_cert_lookup_tree();
  auto sv_ssl_ctx = ssl::setup_server_ssl_context(all_ssl_ctx_, cert_tree);
  auto cl_ssl_ctx = ssl::setup_client_ssl_context();

  for (size_t i = 0; i < num; ++i) {
    auto loop = ev_loop_new(0);
    auto worker = make_unique<Worker>(loop, sv_ssl_ctx, cl_ssl_ctx, cert_tree,
                                      ticket_keys_);
    worker->run_async();
    workers_.push_back(std::move(worker));

    if (LOG_ENABLED(INFO)) {
      LLOG(INFO, this) << "Created thread #" << workers_.size() - 1;
    }
  }
#endif // NOTHREADS
}
void ConnectionHandler::join_worker() {
#ifndef NOTHREADS
int n = 0;
2014-11-27 15:39:04 +01:00
if (LOG_ENABLED(INFO)) {
LLOG(INFO, this) << "Waiting for worker thread to join: n="
<< workers_.size();
}
2014-11-27 15:39:04 +01:00
for (auto &worker : workers_) {
worker->wait();
2014-11-27 15:39:04 +01:00
if (LOG_ENABLED(INFO)) {
LLOG(INFO, this) << "Thread #" << n << " joined";
}
++n;
}
#endif // NOTHREADS
}
void ConnectionHandler::graceful_shutdown_worker() {
2014-11-27 15:39:04 +01:00
if (get_config()->num_worker == 1) {
return;
}
WorkerEvent wev;
memset(&wev, 0, sizeof(wev));
wev.type = GRACEFUL_SHUTDOWN;
if (LOG_ENABLED(INFO)) {
LLOG(INFO, this) << "Sending graceful shutdown signal to worker";
}
for (auto &worker : workers_) {
worker->send(wev);
}
}
int ConnectionHandler::handle_connection(int fd, sockaddr *addr, int addrlen) {
2014-11-27 15:39:04 +01:00
if (LOG_ENABLED(INFO)) {
LLOG(INFO, this) << "Accepted connection. fd=" << fd;
}
2014-11-27 15:39:04 +01:00
if (get_config()->num_worker == 1) {
2015-02-11 11:18:41 +01:00
if (single_worker_->get_worker_stat()->num_connections >=
2014-11-27 15:39:04 +01:00
get_config()->worker_frontend_connections) {
2014-11-27 15:39:04 +01:00
if (LOG_ENABLED(INFO)) {
LLOG(INFO, this) << "Too many connections >="
<< get_config()->worker_frontend_connections;
}
close(fd);
return -1;
}
2015-02-11 11:18:41 +01:00
auto client =
ssl::accept_connection(single_worker_.get(), fd, addr, addrlen);
2014-11-27 15:39:04 +01:00
if (!client) {
LLOG(ERROR, this) << "ClientHandler creation failed";
close(fd);
return -1;
}
return 0;
}
size_t idx = worker_round_robin_cnt_ % workers_.size();
2015-02-11 11:18:41 +01:00
if (LOG_ENABLED(INFO)) {
LOG(INFO) << "Dispatch connection to worker #" << idx;
}
++worker_round_robin_cnt_;
WorkerEvent wev;
memset(&wev, 0, sizeof(wev));
wev.type = NEW_CONNECTION;
wev.client_fd = fd;
memcpy(&wev.client_addr, addr, addrlen);
wev.client_addrlen = addrlen;
workers_[idx]->send(wev);
return 0;
}
// Returns the event loop this handler (and its single worker) runs on.
struct ev_loop *ConnectionHandler::get_loop() const { return loop_; }
2015-02-11 11:18:41 +01:00
Worker *ConnectionHandler::get_single_worker() const {
return single_worker_.get();
}
// Takes ownership of a frontend accept handler.
void ConnectionHandler::set_acceptor(std::unique_ptr<AcceptHandler> h) {
  acceptor_ = std::move(h);
}
// Borrowed pointer to the accept handler; may be nullptr.
AcceptHandler *ConnectionHandler::get_acceptor() const {
  return acceptor_.get();
}
// Takes ownership of the second frontend accept handler (named for
// IPv6, per the "6" suffix).
void ConnectionHandler::set_acceptor6(std::unique_ptr<AcceptHandler> h) {
  acceptor6_ = std::move(h);
}
// Borrowed pointer to the second accept handler; may be nullptr.
AcceptHandler *ConnectionHandler::get_acceptor6() const {
  return acceptor6_.get();
}
// Re-arms whichever frontend acceptors exist.
void ConnectionHandler::enable_acceptor() {
  if (acceptor_) {
    acceptor_->enable();
  }

  if (acceptor6_) {
    acceptor6_->enable();
  }
}
// Stops accepting new connections on whichever frontend acceptors
// exist.
void ConnectionHandler::disable_acceptor() {
  if (acceptor_) {
    acceptor_->disable();
  }

  if (acceptor6_) {
    acceptor6_->disable();
  }
}
// Disables the acceptors for |t| seconds; they are re-enabled by
// acceptor_disable_cb.  A zero period is a no-op, as is a call made
// while a disable timer is already pending.
void ConnectionHandler::disable_acceptor_temporary(ev_tstamp t) {
  if (t == 0. || ev_is_active(&disable_acceptor_timer_)) {
    return;
  }

  disable_acceptor();

  ev_timer_set(&disable_acceptor_timer_, t, 0.);
  ev_timer_start(loop_, &disable_acceptor_timer_);
}
// Asks each existing acceptor to accept any connection currently
// pending on its listening socket.
void ConnectionHandler::accept_pending_connection() {
  if (acceptor_) {
    acceptor_->accept_connection();
  }

  if (acceptor6_) {
    acceptor6_->accept_connection();
  }
}
2015-02-11 11:18:41 +01:00
void
ConnectionHandler::set_ticket_keys(std::shared_ptr<TicketKeys> ticket_keys) {
ticket_keys_ = std::move(ticket_keys);
if (single_worker_) {
single_worker_->set_ticket_keys(ticket_keys_);
}
}
// Current TLS ticket keys; returned by reference to avoid a refcount
// bump.
const std::shared_ptr<TicketKeys> &ConnectionHandler::get_ticket_keys() const {
  return ticket_keys_;
}
// Records the graceful shutdown flag and mirrors it into the single
// worker when one exists.
void ConnectionHandler::set_graceful_shutdown(bool f) {
  graceful_shutdown_ = f;

  if (single_worker_) {
    single_worker_->set_graceful_shutdown(f);
  }
}
// True once graceful shutdown has been initiated.
bool ConnectionHandler::get_graceful_shutdown() const {
  return graceful_shutdown_;
}
// Terminates a running ocsp query child process, if any.  pid == 0
// means no query is currently in flight.
void ConnectionHandler::cancel_ocsp_update() {
  if (ocsp_.pid == 0) {
    return;
  }

  kill(ocsp_.pid, SIGTERM);
}
// Spawns the external ocsp query command for |cert_file|, wiring its
// stdout into a pipe we read asynchronously.  Returns 0 on success,
// -1 on failure.  Inspired by the h2o_read_command function from the
// h2o project: https://github.com/h2o/h2o
int ConnectionHandler::start_ocsp_update(const char *cert_file) {
#ifndef NOTHREADS
  int rv;
  int pipefd[2];

  // A previous query must have fully completed before a new one starts.
  assert(!ev_is_active(&ocsp_.rev));
  assert(!ev_is_active(&ocsp_.chldev));

  char *const argv[] = {
      const_cast<char *>(get_config()->fetch_ocsp_response_file.get()),
      const_cast<char *>(cert_file), nullptr};
  char *const envp[] = {nullptr};

#ifdef O_CLOEXEC
  if (pipe2(pipefd, O_CLOEXEC) == -1) {
    return -1;
  }
#else  // !O_CLOEXEC
  if (pipe(pipefd) == -1) {
    return -1;
  }
  util::make_socket_closeonexec(pipefd[0]);
  util::make_socket_closeonexec(pipefd[1]);
#endif // !O_CLOEXEC

  // Close whichever pipe ends are still owned here on any early return.
  auto closer = defer([&pipefd]() {
    if (pipefd[0] != -1) {
      close(pipefd[0]);
    }
    if (pipefd[1] != -1) {
      close(pipefd[1]);
    }
  });

  // posix_spawn family functions make the fork + dup2 + execve pattern
  // easier: the child gets the pipe's write end as stdout and never
  // sees the read end.
  posix_spawn_file_actions_t file_actions;
  if (posix_spawn_file_actions_init(&file_actions) != 0) {
    return -1;
  }

  auto file_actions_del =
      defer(posix_spawn_file_actions_destroy, &file_actions);

  if (posix_spawn_file_actions_adddup2(&file_actions, pipefd[1], 1) != 0) {
    return -1;
  }

  if (posix_spawn_file_actions_addclose(&file_actions, pipefd[0]) != 0) {
    return -1;
  }

  rv = posix_spawn(&ocsp_.pid, argv[0], &file_actions, nullptr, argv, envp);
  if (rv != 0) {
    LOG(WARN) << "Cannot execute ocsp query command: " << argv[0]
              << ", errno=" << rv;
    return -1;
  }

  // Parent keeps only the read end; ownership of it moves to ocsp_.fd
  // so the deferred closer must not touch either end any more.
  close(pipefd[1]);
  pipefd[1] = -1;

  ocsp_.fd = pipefd[0];
  pipefd[0] = -1;

  util::make_socket_nonblocking(ocsp_.fd);

  ev_io_set(&ocsp_.rev, ocsp_.fd, EV_READ);
  ev_io_start(loop_, &ocsp_.rev);

  ev_child_set(&ocsp_.chldev, ocsp_.pid, 0);
  ev_child_start(loop_, &ocsp_.chldev);
#endif // !NOTHREADS
  return 0;
}
// Drains available data from the ocsp query command's stdout pipe into
// ocsp_.resp.  The pipe is non-blocking: EAGAIN means "come back
// later", EOF or a hard error stops the read watcher.
void ConnectionHandler::read_ocsp_chunk() {
  std::array<uint8_t, 4096> buf;

  for (;;) {
    ssize_t nread;
    // Retry reads interrupted by signals.
    while ((nread = read(ocsp_.fd, buf.data(), buf.size())) == -1 &&
           errno == EINTR)
      ;

    if (nread == -1) {
      if (errno == EAGAIN || errno == EWOULDBLOCK) {
        // Everything currently available has been consumed; keep the
        // watcher armed and wait for more output.
        return;
      }

      auto error = errno;
      LOG(WARN) << "Reading from ocsp query command failed: errno=" << error;
      ocsp_.error = error;
      break;
    }

    if (nread == 0) {
      // EOF: the child closed its stdout.
      break;
    }

    ocsp_.resp.insert(std::end(ocsp_.resp), std::begin(buf),
                      std::begin(buf) + nread);
  }

  ev_io_stop(loop_, &ocsp_.rev);
}
// Called when the ocsp query child process exits.  On success the
// collected response is published into the certificate's TLS context
// data; on failure a warning is logged.  Either way, processing moves
// on to the next certificate.
void ConnectionHandler::handle_ocsp_complete() {
  ev_io_stop(loop_, &ocsp_.rev);
  ev_child_stop(loop_, &ocsp_.chldev);

  auto rstatus = ocsp_.chldev.rstatus;
  auto status = WEXITSTATUS(rstatus);

  if (ocsp_.error || !WIFEXITED(rstatus) || status != 0) {
    LOG(WARN) << "ocsp query command failed: error=" << ocsp_.error
              << ", rstatus=" << rstatus << ", status=" << status;
    ++ocsp_.next;
    proceed_next_cert_ocsp();
    return;
  }

  assert(ocsp_.next < all_ssl_ctx_.size());

  auto ssl_ctx = all_ssl_ctx_[ocsp_.next];
  auto tls_ctx_data =
      static_cast<ssl::TLSContextData *>(SSL_CTX_get_app_data(ssl_ctx));

  if (LOG_ENABLED(INFO)) {
    LOG(INFO) << "ocsp update for " << tls_ctx_data->cert_file
              << " finished successfully";
  }

  {
    // ocsp_data is guarded by tls_ctx_data->mu; publish under the lock.
    std::lock_guard<std::mutex> g(tls_ctx_data->mu);
    tls_ctx_data->ocsp_data = std::move(ocsp_.resp);
  }

  ++ocsp_.next;
  proceed_next_cert_ocsp();
}
// Returns the ocsp state to its idle configuration, closing the pipe fd
// if one is still open.
void ConnectionHandler::reset_ocsp() {
  if (ocsp_.fd != -1) {
    close(ocsp_.fd);
  }

  ocsp_.fd = -1;
  ocsp_.pid = 0;
  ocsp_.error = 0;
  // Assign a fresh vector rather than clear() so the previous
  // response's capacity is released too.
  ocsp_.resp = std::vector<uint8_t>();
}
// Advances the ocsp update cycle: starts a query for the certificate at
// ocsp_.next, skipping ones whose query fails to launch.  Once all
// certificates are done, schedules the next update round via
// ocsp_timer_.
void ConnectionHandler::proceed_next_cert_ocsp() {
  for (;;) {
    reset_ocsp();

    if (ocsp_.next == all_ssl_ctx_.size()) {
      ocsp_.next = 0;
      // Every ocsp response has been refreshed; arm the timer for the
      // next full pass.
      ev_timer_set(&ocsp_timer_, get_config()->ocsp_update_interval, 0.);
      ev_timer_start(loop_, &ocsp_timer_);
      return;
    }

    auto ssl_ctx = all_ssl_ctx_[ocsp_.next];
    auto tls_ctx_data =
        static_cast<ssl::TLSContextData *>(SSL_CTX_get_app_data(ssl_ctx));

    if (start_ocsp_update(tls_ctx_data->cert_file) != 0) {
      // Could not launch the query for this certificate; move on.
      ++ocsp_.next;
      continue;
    }

    break;
  }
}
} // namespace shrpx