More graceful stop of nghttp2::asio_http2::server::http2
An explicit io_service::stop() prevents running streams from finishing their work. That means that responses on which end(std::string) has already been called, but which have not yet finished sending their data, are closed with NGHTTP2_INTERNAL_ERROR. Instead, we can stop accepting new connections and destroy all io_service::work objects to signal the end of work.
parent 8f888b29bd
commit 1c31213aef
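For context, the commit relies on the standard Boost.Asio idiom the message describes: destroying an io_service::work object lets io_service::run() return once the already-queued handlers have completed, whereas io_service::stop() abandons whatever is still queued. A minimal, self-contained sketch of that idiom (not code from this commit; the thread and handler setup are illustrative):

#include <memory>
#include <thread>
#include <boost/asio.hpp>

int main() {
  boost::asio::io_service io_service;
  // The work object keeps run() from returning while the service is idle.
  auto work = std::make_unique<boost::asio::io_service::work>(io_service);

  std::thread worker([&io_service]() { io_service.run(); });

  // Pretend this is a response that still has data to send.
  io_service.post([]() { /* write remaining DATA frames */ });

  // Graceful stop: drop the work object; queued handlers still run and
  // run() returns by itself afterwards.
  work.reset();
  // io_service.stop();  // forceful stop: queued handlers would be abandoned

  worker.join();
  return 0;
}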
@@ -75,13 +75,18 @@ void io_service_pool::join() {
   }
 }
 
-void io_service_pool::stop() {
+void io_service_pool::force_stop() {
   // Explicitly stop all io_services.
   for (auto &iosv : io_services_) {
     iosv->stop();
   }
 }
 
+void io_service_pool::stop() {
+  // Destroy all work objects to signal the end of work.
+  work_.clear();
+}
+
 boost::asio::io_service &io_service_pool::get_io_service() {
   // Use a round-robin scheme to choose the next io_service to use.
   auto &io_service = *io_services_[next_io_service_];
@@ -62,6 +62,9 @@ public:
   void run(bool asynchronous = false);
 
   /// Stop all io_service objects in the pool.
+  void force_stop();
+
+  /// Destroy all work objects to signal the end of work.
   void stop();
 
   /// Join on all io_service objects in the pool.
@@ -124,6 +124,11 @@ boost::system::error_code server::bind_and_listen(boost::system::error_code &ec,
 
 void server::start_accept(boost::asio::ssl::context &tls_context,
                           tcp::acceptor &acceptor, serve_mux &mux) {
+
+  if (!acceptor.is_open()) {
+    return;
+  }
+
   auto new_connection = std::make_shared<connection<ssl_socket>>(
       mux, tls_handshake_timeout_, read_timeout_,
       io_service_pool_.get_io_service(), tls_context);
@@ -158,6 +163,11 @@ void server::start_accept(boost::asio::ssl::context &tls_context,
 }
 
 void server::start_accept(tcp::acceptor &acceptor, serve_mux &mux) {
+
+  if (!acceptor.is_open()) {
+    return;
+  }
+
   auto new_connection = std::make_shared<connection<tcp::socket>>(
       mux, tls_handshake_timeout_, read_timeout_,
       io_service_pool_.get_io_service());
@@ -177,10 +187,10 @@ void server::start_accept(tcp::acceptor &acceptor, serve_mux &mux) {
 }
 
 void server::stop() {
-  io_service_pool_.stop();
   for (auto &acceptor : acceptors_) {
     acceptor.close();
   }
+  io_service_pool_.stop();
 }
 
 void server::join() { io_service_pool_.join(); }