nghttpx: Fix request stall
Fix request stall if backend connection is reused and buffer is full.
commit 319d5ab1c6 (parent 448bbbc38c)
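Illustration (not part of the commit): a minimal standalone sketch of the scenario the new integration test exercises, assuming nghttpx listens on 127.0.0.1:3000 in front of an HTTP/1.1 backend; the address and body sizes are placeholders. Two POSTs are sent over one keep-alive connection, and with the pre-fix proxy the second, larger request could stall once the reused backend connection's request buffer was full.

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// The default transport keeps the connection to the proxy alive, so
	// both requests travel over the same frontend connection, mirroring
	// the new TestH1H1POSTRequests integration test below.
	client := &http.Client{}

	for _, size := range []int{1, 65536} {
		res, err := client.Post("http://127.0.0.1:3000/", "application/octet-stream",
			bytes.NewReader(make([]byte, size)))
		if err != nil {
			log.Fatalf("POST with %d-byte body failed: %v", size, err)
		}
		io.Copy(io.Discard, res.Body) // drain so the connection can be reused
		res.Body.Close()
		fmt.Printf("body=%d status=%d\n", size, res.StatusCode)
	}
}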
@@ -625,6 +625,35 @@ func TestH1H1HTTPSRedirectPort(t *testing.T) {
 	}
 }
 
+// TestH1H1POSTRequests tests that server can handle 2 requests with
+// request body.
+func TestH1H1POSTRequests(t *testing.T) {
+	st := newServerTester(nil, t, noopHandler)
+	defer st.Close()
+
+	res, err := st.http1(requestParam{
+		name: "TestH1H1POSTRequestsNo1",
+		body: make([]byte, 1),
+	})
+	if err != nil {
+		t.Fatalf("Error st.http1() = %v", err)
+	}
+	if got, want := res.status, 200; got != want {
+		t.Errorf("res.status: %v; want %v", got, want)
+	}
+
+	res, err = st.http1(requestParam{
+		name: "TestH1H1POSTRequestsNo2",
+		body: make([]byte, 65536),
+	})
+	if err != nil {
+		t.Fatalf("Error st.http1() = %v", err)
+	}
+	if got, want := res.status, 200; got != want {
+		t.Errorf("res.status: %v; want %v", got, want)
+	}
+}
+
 // // TestH1H2ConnectFailure tests that server handles the situation that
 // // connection attempt to HTTP/2 backend failed.
 // func TestH1H2ConnectFailure(t *testing.T) {

@@ -662,7 +662,9 @@ func cloneHeader(h http.Header) http.Header {
 	return h2
 }
 
-func noopHandler(w http.ResponseWriter, r *http.Request) {}
+func noopHandler(w http.ResponseWriter, r *http.Request) {
+	ioutil.ReadAll(r.Body)
+}
 
 type APIResponse struct {
 	Status string `json:"status,omitempty"`

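Aside (illustrative, not part of the commit): a self-contained backend with the same behaviour as the updated noopHandler; the listen address is a placeholder for whatever origin nghttpx proxies to. It reads the request body to EOF before answering, just as the test handler now does, and Go's HTTP/1 server also emits the interim "HTTP/1.1 100 Continue" on the first body read when the client sent "Expect: 100-continue".

package main

import (
	"io"
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// Consume the whole request body before replying, like the
		// updated noopHandler does with ioutil.ReadAll(r.Body).
		io.Copy(io.Discard, r.Body)
		w.WriteHeader(http.StatusOK)
	})

	// 127.0.0.1:8080 is a placeholder backend address.
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}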
@@ -144,7 +144,8 @@ Downstream::Downstream(Upstream *upstream, MemchunkPool *mcpool,
       request_header_sent_(false),
       accesslog_written_(false),
       new_affinity_cookie_(false),
-      blocked_request_data_eof_(false) {
+      blocked_request_data_eof_(false),
+      expect_100_continue_(false) {
 
   auto &timeoutconf = get_config()->http2.timeout;
 

@@ -857,6 +858,11 @@ void Downstream::inspect_http1_request() {
       chunked_request_ = true;
     }
   }
+
+  auto expect = req_.fs.header(http2::HD_EXPECT);
+  expect_100_continue_ =
+      expect &&
+      util::strieq(expect->value, StringRef::from_lit("100-continue"));
 }
 
 void Downstream::inspect_http1_response() {

@@ -1159,4 +1165,8 @@ void Downstream::set_blocked_request_data_eof(bool f) {
 
 void Downstream::set_ws_key(const StringRef &key) { ws_key_ = key; }
 
+bool Downstream::get_expect_100_continue() const {
+  return expect_100_continue_;
+}
+
 } // namespace shrpx

@@ -511,6 +511,8 @@ public:
 
   void set_ws_key(const StringRef &key);
 
+  bool get_expect_100_continue() const;
+
   enum {
     EVENT_ERROR = 0x1,
     EVENT_TIMEOUT = 0x2,

@@ -602,6 +604,8 @@ private:
   // true if eof is received from client before sending header fields
   // to backend.
   bool blocked_request_data_eof_;
+  // true if request contains "expect: 100-continue" header field.
+  bool expect_100_continue_;
 };
 
 } // namespace shrpx

@@ -694,7 +694,8 @@ int HttpDownstreamConnection::push_request_headers() {
   // enables us to send headers and data in one writev system call.
   if (req.method == HTTP_CONNECT ||
       downstream_->get_blocked_request_buf()->rleft() ||
-      (!req.http2_expect_body && req.fs.content_length == 0)) {
+      (!req.http2_expect_body && req.fs.content_length == 0) ||
+      downstream_->get_expect_100_continue()) {
     signal_write();
   }
 

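Illustration (not part of the commit): a client that exercises the new get_expect_100_continue() condition, with a placeholder proxy address. A client that sends "Expect: 100-continue" holds the body back until it sees the interim response, so the proxy has to flush the request headers to the backend right away; the extra condition in the hunk above triggers that signal_write().

package main

import (
	"bytes"
	"fmt"
	"log"
	"net/http"
	"time"
)

func main() {
	// With "Expect: 100-continue" set, Go's transport delays sending the
	// body until the server answers 100 Continue or the timeout expires.
	client := &http.Client{
		Transport: &http.Transport{ExpectContinueTimeout: time.Second},
	}

	req, err := http.NewRequest("POST", "http://127.0.0.1:3000/",
		bytes.NewReader(make([]byte, 65536)))
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Expect", "100-continue")

	res, err := client.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	fmt.Println(res.Status)
}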
@@ -1177,6 +1178,19 @@ int HttpDownstreamConnection::write_first() {
   auto buf = downstream_->get_blocked_request_buf();
   buf->reset();
 
+  // upstream->resume_read() might be called in
+  // write_tls()/write_clear(), but before blocked_request_buf_ is
+  // reset. So upstream read might still be blocked. Let's do it
+  // again here.
+  auto input = downstream_->get_request_buf();
+  if (input->rleft() == 0) {
+    auto upstream = downstream_->get_upstream();
+    auto &req = downstream_->request();
+
+    upstream->resume_read(SHRPX_NO_BUFFER, downstream_,
+                          req.unconsumed_body_length);
+  }
+
   return 0;
 }
 

@@ -505,9 +505,7 @@ int htp_hdrs_completecb(llhttp_t *htp) {
     // and let them decide whether responds with 100 Continue or not.
     // For alternative mode, we have no backend, so just send 100
     // Continue here to make the client happy.
-    auto expect = req.fs.header(http2::HD_EXPECT);
-    if (expect &&
-        util::strieq(expect->value, StringRef::from_lit("100-continue"))) {
+    if (downstream->get_expect_100_continue()) {
       auto output = downstream->get_response_buf();
       constexpr auto res = StringRef::from_lit("HTTP/1.1 100 Continue\r\n\r\n");
       output->append(res);