Do not try to set TCP_NODELAY when the frontend is a UNIX socket
This silences a warning that would otherwise spam the log on every accepted connection.
commit 84f96a2fd5
parent 87a38bdf8d
@@ -721,11 +721,11 @@ ClientHandler *accept_connection(Worker *worker, int fd, sockaddr *addr,
     return nullptr;
   }
 
-  int val = 1;
-  rv = setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, reinterpret_cast<char *>(&val),
-                  sizeof(val));
-  if (rv == -1) {
-    LOG(WARN) << "Setting option TCP_NODELAY failed: errno=" << errno;
+  if (addr->sa_family != AF_UNIX) {
+    rv = util::make_socket_nodelay(fd);
+    if (rv == -1) {
+      LOG(WARN) << "Setting option TCP_NODELAY failed: errno=" << errno;
+    }
   }
   SSL *ssl = nullptr;
   auto ssl_ctx = worker->get_sv_ssl_ctx();
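For illustration only, here is a minimal standalone sketch of the idea behind the change; it is not nghttpx code, and the helper name set_nodelay_if_tcp is made up for this example. TCP_NODELAY is a TCP-level option, so setting it on an AF_UNIX socket fails (typically with EOPNOTSUPP), which is what produced the warning on every accepted frontend connection; guarding on sa_family skips the call entirely.

#include <cerrno>
#include <cstdio>
#include <cstring>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <unistd.h>

// Hypothetical helper mirroring the idea of the patch: request TCP_NODELAY
// only when the accepted address is not a UNIX domain socket.
static int set_nodelay_if_tcp(int fd, const sockaddr *addr) {
  if (addr->sa_family == AF_UNIX) {
    return 0; // nothing to do; the option does not apply to AF_UNIX
  }
  int val = 1;
  return setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val));
}

int main() {
  // On an AF_UNIX socket the raw call fails (typically EOPNOTSUPP),
  // which is what triggered the per-connection warning.
  int ufd = socket(AF_UNIX, SOCK_STREAM, 0);
  int val = 1;
  if (setsockopt(ufd, IPPROTO_TCP, TCP_NODELAY, &val, sizeof(val)) == -1) {
    std::printf("AF_UNIX: TCP_NODELAY failed: %s\n", std::strerror(errno));
  }

  // On a TCP socket the same option is accepted.
  int tfd = socket(AF_INET, SOCK_STREAM, 0);
  sockaddr_in sin{};
  sin.sin_family = AF_INET;
  if (set_nodelay_if_tcp(tfd, reinterpret_cast<sockaddr *>(&sin)) == 0) {
    std::printf("AF_INET: TCP_NODELAY set\n");
  }

  close(ufd);
  close(tfd);
  return 0;
}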