nghttpx: Reduce TTFB with large number of incoming connections

To reduce TTFB with a large number of incoming connections, we now
intentionally accept one connection at a time, so that accepting new
connections does not delay the TTFB of existing connections.  This is
especially significant for TLS connections.
Tatsuhiro Tsujikawa 2016-06-25 11:50:33 +09:00
parent 3c1efeff55
commit 2a4733857f
3 changed files with 113 additions and 91 deletions
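The idea behind the change: with libev's level-triggered I/O watchers, accepting a single connection per readiness callback still drains the listen backlog over successive loop iterations, while already-accepted connections (in particular those still in the middle of a TLS handshake) get serviced in between. Below is a minimal standalone sketch of that accept pattern; listen_fd, on_accept and the port are invented for the example, and this is not the nghttpx code.

#include <ev.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

namespace {
void on_accept(struct ev_loop *loop, ev_io *w, int revents) {
  // Accept exactly one connection per readiness callback.  If more
  // connections are pending, the fd stays readable and this callback
  // fires again on a later loop iteration, after existing connections
  // have had a chance to run their own handlers (TLS handshake, I/O, ...).
  sockaddr_storage ss;
  socklen_t len = sizeof(ss);
  // accept4() is Linux-specific; plain accept() plus fcntl() otherwise.
  int cfd = accept4(w->fd, reinterpret_cast<sockaddr *>(&ss), &len,
                    SOCK_NONBLOCK | SOCK_CLOEXEC);
  if (cfd == -1) {
    return; // EAGAIN and transient errors: just wait for the next event
  }
  // A real server would hand cfd off to a connection handler here.
  close(cfd); // placeholder for the sketch
}
} // namespace

int main() {
  int listen_fd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
  sockaddr_in addr{};
  addr.sin_family = AF_INET;
  addr.sin_port = htons(8080); // arbitrary port for the example
  bind(listen_fd, reinterpret_cast<sockaddr *>(&addr), sizeof(addr));
  listen(listen_fd, SOMAXCONN);

  struct ev_loop *loop = EV_DEFAULT;
  ev_io accept_watcher;
  ev_io_init(&accept_watcher, on_accept, listen_fd, EV_READ);
  ev_io_start(loop, &accept_watcher);
  ev_run(loop, 0);
  return 0;
}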


@@ -58,13 +58,12 @@ AcceptHandler::~AcceptHandler() {
 }
 
 void AcceptHandler::accept_connection() {
-  for (;;) {
   sockaddr_union sockaddr;
   socklen_t addrlen = sizeof(sockaddr);
 
 #ifdef HAVE_ACCEPT4
-  auto cfd = accept4(faddr_->fd, &sockaddr.sa, &addrlen,
-                     SOCK_NONBLOCK | SOCK_CLOEXEC);
+  auto cfd =
+      accept4(faddr_->fd, &sockaddr.sa, &addrlen, SOCK_NONBLOCK | SOCK_CLOEXEC);
 #else  // !HAVE_ACCEPT4
   auto cfd = accept(faddr_->fd, &sockaddr.sa, &addrlen);
 #endif // !HAVE_ACCEPT4
@@ -82,16 +81,16 @@ void AcceptHandler::accept_connection() {
     case EHOSTUNREACH:
     case EOPNOTSUPP:
     case ENETUNREACH:
-      continue;
+      return;
     case EMFILE:
     case ENFILE:
       LOG(WARN) << "acceptor: running out file descriptor; disable acceptor "
                    "temporarily";
       conn_hnr_->sleep_acceptor(get_config()->conn.listener.timeout.sleep);
-      break;
+      return;
+    default:
+      return;
     }
-    break;
   }
 
 #ifndef HAVE_ACCEPT4
@@ -103,7 +102,6 @@ void AcceptHandler::accept_connection() {
   conn_hnr_->handle_connection(cfd, &sockaddr.sa, addrlen, faddr_);
-  }
 }
 
 void AcceptHandler::enable() { ev_io_start(conn_hnr_->get_loop(), &wev_); }
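The EMFILE/ENFILE branch keeps its existing behaviour: rather than spinning on accept(), the acceptor is disabled and woken up again after the configured listener sleep timeout (the conn_hnr_->sleep_acceptor call above). A rough sketch of that technique with invented names (Acceptor, sleep_acceptor, wakeup_cb), not the nghttpx ConnectionHandler implementation:

#include <ev.h>

struct Acceptor {
  ev_io accept_watcher;  // watches the listening socket
  ev_timer sleep_timer;  // re-enables accepting after a pause
  struct ev_loop *loop;
};

namespace {
void wakeup_cb(struct ev_loop *loop, ev_timer *w, int revents) {
  auto a = static_cast<Acceptor *>(w->data);
  // The out-of-descriptors condition has hopefully cleared; resume accepting.
  ev_io_start(loop, &a->accept_watcher);
}
} // namespace

// Called when accept()/accept4() fails with EMFILE or ENFILE.
void sleep_acceptor(Acceptor *a, double seconds) {
  ev_io_stop(a->loop, &a->accept_watcher);  // stop accepting for now
  ev_timer_init(&a->sleep_timer, wakeup_cb, seconds, 0.);
  a->sleep_timer.data = a;
  ev_timer_start(a->loop, &a->sleep_timer); // one-shot wake-up
}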


@@ -63,6 +63,13 @@ void mcpool_clear_cb(struct ev_loop *loop, ev_timer *w, int revents) {
 }
 } // namespace
 
+namespace {
+void proc_wev_cb(struct ev_loop *loop, ev_timer *w, int revents) {
+  auto worker = static_cast<Worker *>(w->data);
+
+  worker->process_events();
+}
+} // namespace
+
 namespace {
 bool match_shared_downstream_addr(
     const std::shared_ptr<SharedDownstreamAddr> &lhs,
@@ -131,6 +138,9 @@ Worker::Worker(struct ev_loop *loop, SSL_CTX *sv_ssl_ctx, SSL_CTX *cl_ssl_ctx,
   ev_timer_init(&mcpool_clear_timer_, mcpool_clear_cb, 0., 0.);
   mcpool_clear_timer_.data = this;
 
+  ev_timer_init(&proc_wev_timer_, proc_wev_cb, 0., 0.);
+  proc_wev_timer_.data = this;
+
   auto &session_cacheconf = get_config()->tls.session_cache;
 
   if (!session_cacheconf.memcached.host.empty()) {
@@ -279,6 +289,7 @@ void Worker::replace_downstream_config(
 Worker::~Worker() {
   ev_async_stop(loop_, &w_);
   ev_timer_stop(loop_, &mcpool_clear_timer_);
+  ev_timer_stop(loop_, &proc_wev_timer_);
 }
 
 void Worker::schedule_clear_mcpool() {
@@ -315,15 +326,28 @@ void Worker::send(const WorkerEvent &event) {
 }
 
 void Worker::process_events() {
-  std::vector<WorkerEvent> q;
+  WorkerEvent wev;
   {
     std::lock_guard<std::mutex> g(m_);
-    q.swap(q_);
+
+    // Process event one at a time.  This is important for
+    // NEW_CONNECTION event since accepting large number of new
+    // connections at once may delay time to 1st byte for existing
+    // connections.
+    if (q_.empty()) {
+      ev_timer_stop(loop_, &proc_wev_timer_);
+      return;
+    }
+
+    wev = q_.front();
+    q_.pop_front();
   }
 
+  ev_timer_start(loop_, &proc_wev_timer_);
+
   auto worker_connections = get_config()->conn.upstream.worker_connections;
 
-  for (auto &wev : q) {
   switch (wev.type) {
   case NEW_CONNECTION: {
     if (LOG_ENABLED(INFO)) {
@@ -360,8 +384,8 @@ void Worker::process_events() {
     break;
   }
   case REOPEN_LOG:
-    WLOG(NOTICE, this) << "Reopening log files: worker process (thread "
-                       << this << ")";
+    WLOG(NOTICE, this) << "Reopening log files: worker process (thread " << this
+                       << ")";
 
     reopen_log_files();
@@ -390,7 +414,6 @@ void Worker::process_events() {
     }
   }
-  }
 }
 
 ssl::CertLookupTree *Worker::get_cert_lookup_tree() const { return cert_tree_; }
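The Worker-side counterpart above queues incoming events in a std::deque and drains exactly one per event-loop iteration by re-arming a zero-delay ev_timer (proc_wev_timer_), so I/O that became ready on existing connections is serviced between two queued events. Below is a minimal standalone sketch of that pattern under invented names (EventQueue, process_one, plain int events); the real code uses WorkerEvent and Worker::process_events.

#include <ev.h>
#include <cstdio>
#include <deque>
#include <mutex>

struct EventQueue {
  std::mutex m;
  std::deque<int> q; // stand-in for the WorkerEvent queue
  ev_timer timer;    // zero-delay timer: "run me again on a later iteration"
  struct ev_loop *loop;
};

namespace {
void process_one(struct ev_loop *loop, ev_timer *w, int revents) {
  auto eq = static_cast<EventQueue *>(w->data);
  int ev;
  {
    std::lock_guard<std::mutex> g(eq->m);
    if (eq->q.empty()) {
      ev_timer_stop(loop, w); // nothing left; go idle until a producer restarts us
      return;
    }
    ev = eq->q.front();
    eq->q.pop_front();
  }
  // Re-arm the zero-delay timer so the next queued event is handled on a
  // later loop iteration; pending I/O on existing connections runs in between.
  ev_timer_start(loop, w);
  std::printf("handling event %d\n", ev); // e.g. set up one new connection
}
} // namespace

int main() {
  EventQueue eq;
  eq.loop = EV_DEFAULT;
  ev_timer_init(&eq.timer, process_one, 0., 0.);
  eq.timer.data = &eq;

  for (int i = 0; i < 5; ++i) {
    eq.q.push_back(i); // a real producer thread would lock eq.m here
  }
  ev_timer_start(eq.loop, &eq.timer);
  ev_run(eq.loop, 0); // processes one event per iteration, then exits
  return 0;
}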


@@ -243,10 +243,11 @@ private:
   std::future<void> fut_;
 #endif // NOTHREADS
   std::mutex m_;
-  std::vector<WorkerEvent> q_;
+  std::deque<WorkerEvent> q_;
   std::mt19937 randgen_;
   ev_async w_;
   ev_timer mcpool_clear_timer_;
+  ev_timer proc_wev_timer_;
   MemchunkPool mcpool_;
   WorkerStat worker_stat_;