diff --git a/iocore/net/NetEvent.h b/iocore/net/NetEvent.h
index 94e96fedda9..534f3093984 100644
--- a/iocore/net/NetEvent.h
+++ b/iocore/net/NetEvent.h
@@ -55,6 +55,9 @@ class NetEvent
   // Close when EventIO close;
   virtual int close() = 0;
 
+  bool has_error() const;
+  void set_error_from_socket();
+
   // get fd
   virtual int get_fd()                 = 0;
   virtual Ptr<ProxyMutex> &get_mutex() = 0;
@@ -65,6 +68,7 @@ class NetEvent
   NetState write{};
 
   int closed = 0;
+  int error  = 0;
   NetHandler *nh = nullptr;
 
   ink_hrtime inactivity_timeout_in = 0;
@@ -94,3 +98,16 @@ class NetEvent
     } f;
   };
 };
+
+inline bool
+NetEvent::has_error() const
+{
+  return error != 0;
+}
+
+inline void
+NetEvent::set_error_from_socket()
+{
+  socklen_t errlen = sizeof(error);
+  getsockopt(this->get_fd(), SOL_SOCKET, SO_ERROR, (void *)&error, &errlen);
+}
diff --git a/iocore/net/UnixNet.cc b/iocore/net/UnixNet.cc
index 19ea5aee750..ca803fff3c0 100644
--- a/iocore/net/UnixNet.cc
+++ b/iocore/net/UnixNet.cc
@@ -507,27 +507,24 @@ NetHandler::waitForActivity(ink_hrtime timeout)
       if (cop_list.in(ne)) {
         cop_list.remove(ne);
       }
-      if (get_ev_events(pd, x) & (EVENTIO_READ | EVENTIO_ERROR)) {
+      int flags = get_ev_events(pd, x);
+      if (flags & (EVENTIO_ERROR)) {
+        ne->set_error_from_socket();
+      }
+      if (flags & (EVENTIO_READ)) {
         ne->read.triggered = 1;
         if (!read_ready_list.in(ne)) {
           read_ready_list.enqueue(ne);
-        } else if (get_ev_events(pd, x) & EVENTIO_ERROR) {
-          // check for unhandled epoll events that should be handled
-          Debug("iocore_net_main", "Unhandled epoll event on read: 0x%04x read.enabled=%d closed=%d read.netready_queue=%d",
-                get_ev_events(pd, x), ne->read.enabled, ne->closed, read_ready_list.in(ne));
         }
       }
-      if (get_ev_events(pd, x) & (EVENTIO_WRITE | EVENTIO_ERROR)) {
+      if (flags & (EVENTIO_WRITE)) {
         ne->write.triggered = 1;
         if (!write_ready_list.in(ne)) {
           write_ready_list.enqueue(ne);
-        } else if (get_ev_events(pd, x) & EVENTIO_ERROR) {
-          // check for unhandled epoll events that should be handled
-          Debug("iocore_net_main", "Unhandled epoll event on write: 0x%04x write.enabled=%d closed=%d write.netready_queue=%d",
-                get_ev_events(pd, x), ne->write.enabled, ne->closed, write_ready_list.in(ne));
         }
-      } else if (!(get_ev_events(pd, x) & EVENTIO_READ)) {
+      } else if (!(flags & (EVENTIO_READ))) {
         Debug("iocore_net_main", "Unhandled epoll event: 0x%04x", get_ev_events(pd, x));
+        ink_release_assert(false);
       }
     } else if (epd->type == EVENTIO_DNS_CONNECTION) {
       if (epd->data.dnscon != nullptr) {
diff --git a/iocore/net/UnixNetVConnection.cc b/iocore/net/UnixNetVConnection.cc
index d73119d63e4..d99e6624544 100644
--- a/iocore/net/UnixNetVConnection.cc
+++ b/iocore/net/UnixNetVConnection.cc
@@ -199,6 +199,12 @@ read_from_net(NetHandler *nh, UnixNetVConnection *vc, EThread *thread)
     return;
   }
 
+  if (vc->has_error()) {
+    vc->lerrno = vc->error;
+    vc->readSignalAndUpdate(VC_EVENT_ERROR);
+    return;
+  }
+
   // It is possible that the closed flag got set from HttpSessionManager in the
   // global session pool case. If so, the closed flag should be stable once we get the
   // s->vio.mutex (the global session pool mutex).
@@ -368,6 +374,12 @@ write_to_net_io(NetHandler *nh, UnixNetVConnection *vc, EThread *thread)
     return;
   }
 
+  if (vc->has_error()) {
+    vc->lerrno = vc->error;
+    write_signal_and_update(VC_EVENT_ERROR, vc);
+    return;
+  }
+
   // This function will always return true unless
   // vc is an SSLNetVConnection.
   if (!vc->getSSLHandShakeComplete()) {