Merge branch 'nghttpx-headers-size-option'

Tatsuhiro Tsujikawa 2015-04-29 22:55:30 +09:00
commit ea612a2dce
14 changed files with 336 additions and 41 deletions

@@ -246,6 +246,72 @@ func TestH1H1RequestTrailer(t *testing.T) {
}
}
// TestH1H1HeaderFieldBufferPath tests that a request with a request path
// larger than the configured buffer size is rejected.
func TestH1H1HeaderFieldBufferPath(t *testing.T) {
// The value 100 is chosen so that the sum of the header field bytes
// does not exceed it.  A URI of more than 100 bytes is then used to
// exceed this limit.
st := newServerTester([]string{"--header-field-buffer=100"}, t, func(w http.ResponseWriter, r *http.Request) {
t.Fatal("execution path should not be here")
})
defer st.Close()
res, err := st.http1(requestParam{
name: "TestH1H1HeaderFieldBufferPath",
path: "/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
})
if err != nil {
t.Fatalf("Error st.http1() = %v", err)
}
if got, want := res.status, 431; got != want {
t.Errorf("status: %v; want %v", got, want)
}
}
// TestH1H1HeaderFieldBuffer tests that a request with header fields
// larger than the configured buffer size is rejected.
func TestH1H1HeaderFieldBuffer(t *testing.T) {
st := newServerTester([]string{"--header-field-buffer=10"}, t, func(w http.ResponseWriter, r *http.Request) {
t.Fatal("execution path should not be here")
})
defer st.Close()
res, err := st.http1(requestParam{
name: "TestH1H1HeaderFieldBuffer",
})
if err != nil {
t.Fatalf("Error st.http1() = %v", err)
}
if got, want := res.status, 431; got != want {
t.Errorf("status: %v; want %v", got, want)
}
}
// TestH1H1HeaderFields tests that a request with more header fields
// than the configured number is rejected.
func TestH1H1HeaderFields(t *testing.T) {
st := newServerTester([]string{"--max-header-fields=1"}, t, func(w http.ResponseWriter, r *http.Request) {
t.Fatal("execution path should not be here")
})
defer st.Close()
res, err := st.http1(requestParam{
name: "TestH1H1HeaderFields",
header: []hpack.HeaderField{
// Add an extra header field to ensure that the
// header field limit is exceeded.
pair("Connection", "close"),
},
})
if err != nil {
t.Fatalf("Error st.http1() = %v", err)
}
if got, want := res.status, 431; got != want {
t.Errorf("status: %v; want %v", got, want)
}
}
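The three tests above only cover the rejection path. As a sketch (not part of this commit), a positive control could be written with the same serverTester helpers; the test name, the option values, and the expectation that an empty backend handler answers 200 are assumptions:

func TestH1H1HeaderFieldBufferWithinLimit(t *testing.T) {
	// Hypothetical test: limits generous enough that an ordinary GET
	// request stays within both the buffer and the field count.
	st := newServerTester([]string{"--header-field-buffer=4096", "--max-header-fields=100"}, t, func(w http.ResponseWriter, r *http.Request) {
		// Reaching the backend is the expected path here; writing
		// nothing lets net/http reply with 200 by default.
	})
	defer st.Close()
	res, err := st.http1(requestParam{
		name: "TestH1H1HeaderFieldBufferWithinLimit",
	})
	if err != nil {
		t.Fatalf("Error st.http1() = %v", err)
	}
	if got, want := res.status, 200; got != want {
		t.Errorf("status: %v; want %v", got, want)
	}
}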
// TestH1H2ConnectFailure tests that the server handles the situation
// where a connection attempt to the HTTP/2 backend fails.
func TestH1H2ConnectFailure(t *testing.T) {

@@ -558,6 +558,46 @@ func TestH2H1RequestTrailer(t *testing.T) {
}
}
// TestH2H1HeaderFieldBuffer tests that a request with header fields
// larger than the configured buffer size is rejected.
func TestH2H1HeaderFieldBuffer(t *testing.T) {
st := newServerTester([]string{"--header-field-buffer=10"}, t, func(w http.ResponseWriter, r *http.Request) {
t.Fatal("execution path should not be here")
})
defer st.Close()
res, err := st.http2(requestParam{
name: "TestH2H1HeaderFieldBuffer",
})
if err != nil {
t.Fatalf("Error st.http2() = %v", err)
}
if got, want := res.status, 431; got != want {
t.Errorf("status: %v; want %v", got, want)
}
}
// TestH2H1HeaderFields tests that a request with more header fields
// than the configured number is rejected.
func TestH2H1HeaderFields(t *testing.T) {
st := newServerTester([]string{"--max-header-fields=1"}, t, func(w http.ResponseWriter, r *http.Request) {
t.Fatal("execution path should not be here")
})
defer st.Close()
res, err := st.http2(requestParam{
name: "TestH2H1HeaderFields",
// At least 4 pseudo-header fields are sent, which
// ensures that the header field count limit is exceeded.
})
if err != nil {
t.Fatalf("Error st.http2() = %v", err)
}
if got, want := res.status, 431; got != want {
t.Errorf("status: %v; want %v", got, want)
}
}
// TestH2H1Upgrade tests HTTP Upgrade to HTTP/2
func TestH2H1Upgrade(t *testing.T) {
st := newServerTester(nil, t, func(w http.ResponseWriter, r *http.Request) {})

@@ -170,6 +170,46 @@ func TestS3H1NoVia(t *testing.T) {
}
}
// TestS3H1HeaderFieldBuffer tests that a request with header fields
// larger than the configured buffer size is rejected.
func TestS3H1HeaderFieldBuffer(t *testing.T) {
st := newServerTesterTLS([]string{"--npn-list=spdy/3.1", "--header-field-buffer=10"}, t, func(w http.ResponseWriter, r *http.Request) {
t.Fatal("execution path should not be here")
})
defer st.Close()
res, err := st.spdy(requestParam{
name: "TestS3H1HeaderFieldBuffer",
})
if err != nil {
t.Fatalf("Error st.spdy() = %v", err)
}
if got, want := res.spdyRstErrCode, spdy.InternalError; got != want {
t.Errorf("res.spdyRstErrCode: %v; want %v", got, want)
}
}
// TestS3H1HeaderFields tests that a request with more header fields
// than the configured number is rejected.
func TestS3H1HeaderFields(t *testing.T) {
st := newServerTesterTLS([]string{"--npn-list=spdy/3.1", "--max-header-fields=1"}, t, func(w http.ResponseWriter, r *http.Request) {
t.Fatal("execution path should not be here")
})
defer st.Close()
res, err := st.spdy(requestParam{
name: "TestS3H1HeaderFields",
// At least 5 pseudo-header fields are sent, which
// ensures that the header field count limit is exceeded.
})
if err != nil {
t.Fatalf("Error st.spdy() = %v", err)
}
if got, want := res.spdyRstErrCode, spdy.InternalError; got != want {
t.Errorf("res.spdyRstErrCode: %v; want %v", got, want)
}
}
// TestS3H2ConnectFailure tests that the server handles the situation
// where a connection attempt to the HTTP/2 backend fails.
func TestS3H2ConnectFailure(t *testing.T) {

@@ -297,7 +297,19 @@ func (st *serverTester) http1(rp requestParam) (*serverResponse, error) {
body = cbr
}
}
req, err := http.NewRequest(method, st.url, body)
reqURL := st.url
if rp.path != "" {
u, err := url.Parse(st.url)
if err != nil {
st.t.Fatalf("Error parsing URL from st.url %v: %v", st.url, err)
}
u.Path = rp.path
reqURL = u.String()
}
req, err := http.NewRequest(method, reqURL, body)
if err != nil {
return nil, err
}

@@ -912,6 +912,8 @@ void fill_default_config() {
mod_config()->fetch_ocsp_response_file =
strcopy(PKGDATADIR "/fetch-ocsp-response");
mod_config()->no_ocsp = false;
mod_config()->header_field_buffer = 64 * 1024;
mod_config()->max_header_fields = 100;
}
} // namespace
@@ -1336,6 +1338,16 @@ HTTP:
won't replace anything already set. This option can be
used several times to specify multiple header fields.
Example: --add-response-header="foo: bar"
--header-field-buffer=<SIZE>
Set the maximum buffer size for an incoming HTTP header
field list. This is the sum of the header field names
and values, in bytes.
Default: )"
<< util::utos_with_unit(get_config()->header_field_buffer) << R"(
--max-header-fields=<N>
Set the maximum number of incoming HTTP header fields
that may appear in one request or response header field
list.
Default: )" << get_config()->max_header_fields << R"(
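As an illustration only (not part of this diff; the values are arbitrary), the two limits would normally be tuned together, either on the command line,

nghttpx --header-field-buffer=32768 --max-header-fields=50

or, assuming the usual mapping of long options to configuration-file entries, as

header-field-buffer=32768
max-header-fields=50

The integration tests earlier in this commit pass exactly the command-line form through newServerTester.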
Debug:
--frontend-http2-dump-request-header=<PATH>
@@ -1496,6 +1508,8 @@ int main(int argc, char **argv) {
{"fetch-ocsp-response-file", required_argument, &flag, 77},
{"ocsp-update-interval", required_argument, &flag, 78},
{"no-ocsp", no_argument, &flag, 79},
{"header-field-buffer", required_argument, &flag, 80},
{"max-header-fields", required_argument, &flag, 81},
{nullptr, 0, nullptr, 0}};
int option_index = 0;
@@ -1846,6 +1860,14 @@ int main(int argc, char **argv) {
// --no-ocsp
cmdcfgs.emplace_back(SHRPX_OPT_NO_OCSP, "yes");
break;
case 80:
// --header-field-buffer
cmdcfgs.emplace_back(SHRPX_OPT_HEADER_FIELD_BUFFER, optarg);
break;
case 81:
// --max-header-fields
cmdcfgs.emplace_back(SHRPX_OPT_MAX_HEADER_FIELDS, optarg);
break;
default:
break;
}

@@ -150,6 +150,8 @@ const char SHRPX_OPT_BACKEND_HTTP2_CONNECTIONS_PER_WORKER[] =
const char SHRPX_OPT_FETCH_OCSP_RESPONSE_FILE[] = "fetch-ocsp-response-file";
const char SHRPX_OPT_OCSP_UPDATE_INTERVAL[] = "ocsp-update-interval";
const char SHRPX_OPT_NO_OCSP[] = "no-ocsp";
const char SHRPX_OPT_HEADER_FIELD_BUFFER[] = "header-field-buffer";
const char SHRPX_OPT_MAX_HEADER_FIELDS[] = "max-header-fields";
namespace {
Config *config = nullptr;
@@ -1212,6 +1214,15 @@ int parse_config(const char *opt, const char *optarg) {
return 0;
}
if (util::strieq(opt, SHRPX_OPT_HEADER_FIELD_BUFFER)) {
return parse_uint_with_unit(&mod_config()->header_field_buffer, opt,
optarg);
}
if (util::strieq(opt, SHRPX_OPT_MAX_HEADER_FIELDS)) {
return parse_uint(&mod_config()->max_header_fields, opt, optarg);
}
if (util::strieq(opt, "conf")) {
LOG(WARN) << "conf: ignored";

@@ -139,6 +139,8 @@ extern const char SHRPX_OPT_BACKEND_HTTP2_CONNECTIONS_PER_WORKER[];
extern const char SHRPX_OPT_FETCH_OCSP_RESPONSE_FILE[];
extern const char SHRPX_OPT_OCSP_UPDATE_INTERVAL[];
extern const char SHRPX_OPT_NO_OCSP[];
extern const char SHRPX_OPT_HEADER_FIELD_BUFFER[];
extern const char SHRPX_OPT_MAX_HEADER_FIELDS[];
union sockaddr_union {
sockaddr_storage storage;
@@ -282,6 +284,8 @@ struct Config {
size_t rlimit_nofile;
size_t downstream_request_buffer_size;
size_t downstream_response_buffer_size;
size_t header_field_buffer;
size_t max_header_fields;
// Bit mask to disable SSL/TLS protocol versions. This will be
// passed to SSL_CTX_set_options().
long int tls_proto_mask;

@@ -1211,4 +1211,8 @@ void Downstream::detach_blocked_link(BlockedLink *l) {
blocked_link_ = nullptr;
}
void Downstream::add_request_headers_sum(size_t amount) {
request_headers_sum_ += amount;
}
} // namespace shrpx

@@ -143,6 +143,7 @@ public:
void set_request_method(std::string method);
const std::string &get_request_method() const;
void set_request_path(std::string path);
void add_request_headers_sum(size_t amount);
void
set_request_start_time(std::chrono::high_resolution_clock::time_point time);
const std::chrono::high_resolution_clock::time_point &
@@ -193,6 +194,10 @@ public:
// header contains invalid header field. We can safely send error
// response (502) to a client.
MSG_BAD_HEADER,
// Header fields in an HTTP/1 request exceeded the configured limit.
// This state is entered only from the INITIAL state and is used
// solely to signal a 431 status code to the client.
HTTP1_REQUEST_HEADER_TOO_LARGE,
};
void set_request_state(int state);
int get_request_state() const;
@@ -286,9 +291,6 @@ public:
// Change the priority of downstream
int change_priority(int32_t pri);
// Maximum buffer size for header name/value pairs.
static constexpr size_t MAX_HEADERS_SUM = 128 * 1024;
bool get_rst_stream_after_end_stream() const;
void set_rst_stream_after_end_stream(bool f);

@@ -733,10 +733,15 @@ int on_header_callback(nghttp2_session *session, const nghttp2_frame *frame,
auto trailer = frame->headers.cat != NGHTTP2_HCAT_RESPONSE &&
!downstream->get_expect_final_response();
if (downstream->get_response_headers_sum() > Downstream::MAX_HEADERS_SUM) {
if (downstream->get_response_headers_sum() + namelen + valuelen >
get_config()->header_field_buffer ||
downstream->get_response_headers().size() >=
get_config()->max_header_fields) {
if (LOG_ENABLED(INFO)) {
DLOG(INFO, downstream) << "Too large header block size="
<< downstream->get_response_headers_sum();
DLOG(INFO, downstream)
<< "Too large or many header field size="
<< downstream->get_response_headers_sum() + namelen + valuelen
<< ", num=" << downstream->get_response_headers().size() + 1;
}
if (trailer) {

@@ -202,14 +202,19 @@ int on_header_callback(nghttp2_session *session, const nghttp2_frame *frame,
return 0;
}
if (downstream->get_request_headers_sum() > Downstream::MAX_HEADERS_SUM) {
if (downstream->get_request_headers_sum() + namelen + valuelen >
get_config()->header_field_buffer ||
downstream->get_request_headers().size() >=
get_config()->max_header_fields) {
if (downstream->get_response_state() == Downstream::MSG_COMPLETE) {
return 0;
}
if (LOG_ENABLED(INFO)) {
ULOG(INFO, upstream) << "Too large header block size="
<< downstream->get_request_headers_sum();
ULOG(INFO, upstream) << "Too large or many header field size="
<< downstream->get_request_headers_sum() + namelen +
valuelen << ", num="
<< downstream->get_request_headers().size() + 1;
}
// Just ignore header fields if this is the trailer part.

@@ -582,10 +582,29 @@ int htp_hdrs_completecb(http_parser *htp) {
namespace {
int htp_hdr_keycb(http_parser *htp, const char *data, size_t len) {
auto downstream = static_cast<Downstream *>(htp->data);
if (downstream->get_response_headers_sum() + len >
get_config()->header_field_buffer) {
if (LOG_ENABLED(INFO)) {
DLOG(INFO, downstream) << "Too large header block size="
<< downstream->get_response_headers_sum() + len;
}
return -1;
}
if (downstream->get_response_state() == Downstream::INITIAL) {
if (downstream->get_response_header_key_prev()) {
downstream->append_last_response_header_key(data, len);
} else {
if (downstream->get_response_headers().size() >=
get_config()->max_header_fields) {
if (LOG_ENABLED(INFO)) {
DLOG(INFO, downstream)
<< "Too many header field num="
<< downstream->get_response_headers().size() + 1;
}
return -1;
}
downstream->add_response_header(std::string(data, len), "");
}
} else {
@@ -593,16 +612,18 @@ int htp_hdr_keycb(http_parser *htp, const char *data, size_t len) {
if (downstream->get_response_trailer_key_prev()) {
downstream->append_last_response_trailer_key(data, len);
} else {
if (downstream->get_response_headers().size() >=
get_config()->max_header_fields) {
if (LOG_ENABLED(INFO)) {
DLOG(INFO, downstream)
<< "Too many header field num="
<< downstream->get_response_headers().size() + 1;
}
return -1;
}
downstream->add_response_trailer(std::string(data, len), "");
}
}
if (downstream->get_response_headers_sum() > Downstream::MAX_HEADERS_SUM) {
if (LOG_ENABLED(INFO)) {
DLOG(INFO, downstream) << "Too large header block size="
<< downstream->get_response_headers_sum();
}
return -1;
}
return 0;
}
} // namespace
@@ -610,6 +631,14 @@ int htp_hdr_keycb(http_parser *htp, const char *data, size_t len) {
namespace {
int htp_hdr_valcb(http_parser *htp, const char *data, size_t len) {
auto downstream = static_cast<Downstream *>(htp->data);
if (downstream->get_response_headers_sum() + len >
get_config()->header_field_buffer) {
if (LOG_ENABLED(INFO)) {
DLOG(INFO, downstream) << "Too large header block size="
<< downstream->get_response_headers_sum() + len;
}
return -1;
}
if (downstream->get_response_state() == Downstream::INITIAL) {
if (downstream->get_response_header_key_prev()) {
downstream->set_last_response_header_value(data, len);
@@ -623,13 +652,6 @@ int htp_hdr_valcb(http_parser *htp, const char *data, size_t len) {
downstream->append_last_response_trailer_value(data, len);
}
}
if (downstream->get_response_headers_sum() > Downstream::MAX_HEADERS_SUM) {
if (LOG_ENABLED(INFO)) {
DLOG(INFO, downstream) << "Too large header block size="
<< downstream->get_response_headers_sum();
}
return -1;
}
return 0;
}
} // namespace

@@ -78,6 +78,17 @@ namespace {
int htp_uricb(http_parser *htp, const char *data, size_t len) {
auto upstream = static_cast<HttpsUpstream *>(htp->data);
auto downstream = upstream->get_downstream();
if (downstream->get_request_headers_sum() + len >
get_config()->header_field_buffer) {
if (LOG_ENABLED(INFO)) {
ULOG(INFO, upstream) << "Too large URI size="
<< downstream->get_request_headers_sum() + len;
}
assert(downstream->get_request_state() == Downstream::INITIAL);
downstream->set_request_state(Downstream::HTTP1_REQUEST_HEADER_TOO_LARGE);
return -1;
}
downstream->add_request_headers_sum(len);
downstream->append_request_path(data, len);
return 0;
}
@@ -87,10 +98,31 @@ namespace {
int htp_hdr_keycb(http_parser *htp, const char *data, size_t len) {
auto upstream = static_cast<HttpsUpstream *>(htp->data);
auto downstream = upstream->get_downstream();
if (downstream->get_request_headers_sum() + len >
get_config()->header_field_buffer) {
if (LOG_ENABLED(INFO)) {
ULOG(INFO, upstream) << "Too large header block size="
<< downstream->get_request_headers_sum() + len;
}
if (downstream->get_request_state() == Downstream::INITIAL) {
downstream->set_request_state(Downstream::HTTP1_REQUEST_HEADER_TOO_LARGE);
}
return -1;
}
if (downstream->get_request_state() == Downstream::INITIAL) {
if (downstream->get_request_header_key_prev()) {
downstream->append_last_request_header_key(data, len);
} else {
if (downstream->get_request_headers().size() >=
get_config()->max_header_fields) {
if (LOG_ENABLED(INFO)) {
ULOG(INFO, upstream) << "Too many header field num="
<< downstream->get_request_headers().size() + 1;
}
downstream->set_request_state(
Downstream::HTTP1_REQUEST_HEADER_TOO_LARGE);
return -1;
}
downstream->add_request_header(std::string(data, len), "");
}
} else {
@@ -98,16 +130,17 @@ int htp_hdr_keycb(http_parser *htp, const char *data, size_t len) {
if (downstream->get_request_trailer_key_prev()) {
downstream->append_last_request_trailer_key(data, len);
} else {
if (downstream->get_request_headers().size() >=
get_config()->max_header_fields) {
if (LOG_ENABLED(INFO)) {
ULOG(INFO, upstream) << "Too many header field num="
<< downstream->get_request_headers().size() + 1;
}
return -1;
}
downstream->add_request_trailer(std::string(data, len), "");
}
}
if (downstream->get_request_headers_sum() > Downstream::MAX_HEADERS_SUM) {
if (LOG_ENABLED(INFO)) {
ULOG(INFO, upstream) << "Too large header block size="
<< downstream->get_request_headers_sum();
}
return -1;
}
return 0;
}
} // namespace
@@ -116,6 +149,17 @@ namespace {
int htp_hdr_valcb(http_parser *htp, const char *data, size_t len) {
auto upstream = static_cast<HttpsUpstream *>(htp->data);
auto downstream = upstream->get_downstream();
if (downstream->get_request_headers_sum() + len >
get_config()->header_field_buffer) {
if (LOG_ENABLED(INFO)) {
ULOG(INFO, upstream) << "Too large header block size="
<< downstream->get_request_headers_sum() + len;
}
if (downstream->get_request_state() == Downstream::INITIAL) {
downstream->set_request_state(Downstream::HTTP1_REQUEST_HEADER_TOO_LARGE);
}
return -1;
}
if (downstream->get_request_state() == Downstream::INITIAL) {
if (downstream->get_request_header_key_prev()) {
downstream->set_last_request_header_value(data, len);
@@ -129,13 +173,6 @@ int htp_hdr_valcb(http_parser *htp, const char *data, size_t len) {
downstream->append_last_request_trailer_value(data, len);
}
}
if (downstream->get_request_headers_sum() > Downstream::MAX_HEADERS_SUM) {
if (LOG_ENABLED(INFO)) {
ULOG(INFO, upstream) << "Too large header block size="
<< downstream->get_request_headers_sum();
}
return -1;
}
return 0;
}
} // namespace
@@ -407,9 +444,15 @@ int HttpsUpstream::on_read() {
unsigned int status_code;
if (downstream &&
downstream->get_request_state() == Downstream::CONNECT_FAIL) {
status_code = 503;
if (downstream) {
if (downstream->get_request_state() == Downstream::CONNECT_FAIL) {
status_code = 503;
} else if (downstream->get_request_state() ==
Downstream::HTTP1_REQUEST_HEADER_TOO_LARGE) {
status_code = 431;
} else {
status_code = 400;
}
} else {
status_code = 400;
}

@@ -163,6 +163,19 @@ void on_ctrl_recv_callback(spdylay_session *session, spdylay_frame_type type,
<< downstream->get_stream_id() << "\n" << ss.str();
}
size_t num_headers = 0;
size_t header_buffer = 0;
for (size_t i = 0; nv[i]; i += 2) {
++num_headers;
header_buffer += strlen(nv[i]) + strlen(nv[i + 1]);
}
if (header_buffer > get_config()->header_field_buffer ||
num_headers > get_config()->max_header_fields) {
upstream->rst_stream(downstream, SPDYLAY_INTERNAL_ERROR);
return;
}
for (size_t i = 0; nv[i]; i += 2) {
downstream->add_request_header(nv[i], nv[i + 1]);
}
@@ -428,6 +441,12 @@ SpdyUpstream::SpdyUpstream(uint16_t version, ClientHandler *handler)
rv = spdylay_session_server_new(&session_, version, &callbacks, this);
assert(rv == 0);
uint32_t max_buffer = 65536;
rv = spdylay_session_set_option(session_,
SPDYLAY_OPT_MAX_RECV_CTRL_FRAME_BUFFER,
&max_buffer, sizeof(max_buffer));
assert(rv == 0);
if (version >= SPDYLAY_PROTO_SPDY3) {
int val = 1;
flow_control_ = true;