INKVConnInternal::do_io_*: handle null buffer (apache#11789)
It's common for users of VCs to cancel I/O via a 0-byte, nullptr read
or write on the VC. INKVConnInternal::do_io_read and
INKVConnInternal::do_io_write were not prepared to handle such
cancellations and crashed on a nullptr dereference. This updates them
to handle these calls gracefully. The change was found to be needed by
the multiplexer plugin for handling HttpTunnel aborts.

For reference, see UnixNetVConnection::do_io_read, which handles a
nullptr MIOBuffer. This change copies that logic into INKVConnInternal
so that it handles cancellation gracefully.
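
For illustration, here is a minimal caller-side sketch of the cancellation
pattern described above. The helper name and the VConnection pointer are
hypothetical; the do_io_* signatures are the ones shown in the diff below.

// Hypothetical helper: cancel pending I/O on a VC by issuing the
// 0-byte, nullptr do_io_* calls described in the commit message.
// (Sketch only; header paths and surrounding code depend on the tree.)
static void
cancel_vc_io(VConnection *vc)
{
  // nbytes == 0 and a nullptr buffer signal that no further read or
  // write is wanted on this VC.
  vc->do_io_read(nullptr, 0, nullptr);
  vc->do_io_write(nullptr, 0, nullptr, false);
}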
bneradt authored Oct 1, 2024
1 parent 53adece commit b848a2e
Showing 1 changed file with 17 additions and 9 deletions.
src/api/InkVConnInternal.cc: 26 changes (17 additions & 9 deletions)
@@ -70,17 +70,21 @@ INKVConnInternal::destroy()
 VIO *
 INKVConnInternal::do_io_read(Continuation *c, int64_t nbytes, MIOBuffer *buf)
 {
-  m_read_vio.set_writer(buf);
   m_read_vio.op = VIO::READ;
   m_read_vio.set_continuation(c);
   m_read_vio.nbytes = nbytes;
   m_read_vio.ndone = 0;
   m_read_vio.vc_server = this;
 
-  if (ink_atomic_increment((int *)&m_event_count, 1) < 0) {
-    ink_assert(!"not reached");
+  if (buf) {
+    m_read_vio.set_writer(buf);
+    if (ink_atomic_increment((int *)&m_event_count, 1) < 0) {
+      ink_assert(!"not reached");
+    }
+    eventProcessor.schedule_imm(this, ET_NET);
+  } else {
+    m_read_vio.buffer.clear();
   }
-  eventProcessor.schedule_imm(this, ET_NET);
 
   return &m_read_vio;
 }
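
Applying the hunk above, do_io_read ends up as follows (a reconstruction
for readability, with comments added; the diff itself is authoritative):

VIO *
INKVConnInternal::do_io_read(Continuation *c, int64_t nbytes, MIOBuffer *buf)
{
  m_read_vio.op = VIO::READ;
  m_read_vio.set_continuation(c);
  m_read_vio.nbytes = nbytes;
  m_read_vio.ndone = 0;
  m_read_vio.vc_server = this;

  if (buf) {
    // Normal read: attach the destination buffer and schedule the VC.
    m_read_vio.set_writer(buf);
    if (ink_atomic_increment((int *)&m_event_count, 1) < 0) {
      ink_assert(!"not reached");
    }
    eventProcessor.schedule_imm(this, ET_NET);
  } else {
    // Cancellation: no buffer was supplied, so drop any previously set
    // buffer instead of dereferencing the nullptr, and schedule nothing.
    m_read_vio.buffer.clear();
  }

  return &m_read_vio;
}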
@@ -89,18 +93,22 @@ VIO *
 INKVConnInternal::do_io_write(Continuation *c, int64_t nbytes, IOBufferReader *buf, bool owner)
 {
   ink_assert(!owner);
-  m_write_vio.set_reader(buf);
   m_write_vio.op = VIO::WRITE;
   m_write_vio.set_continuation(c);
   m_write_vio.nbytes = nbytes;
   m_write_vio.ndone = 0;
   m_write_vio.vc_server = this;
 
-  if (m_write_vio.get_reader()->read_avail() > 0) {
-    if (ink_atomic_increment((int *)&m_event_count, 1) < 0) {
-      ink_assert(!"not reached");
+  if (buf) {
+    m_write_vio.set_reader(buf);
+    if (m_write_vio.get_reader()->read_avail() > 0) {
+      if (ink_atomic_increment((int *)&m_event_count, 1) < 0) {
+        ink_assert(!"not reached");
+      }
+      eventProcessor.schedule_imm(this, ET_NET);
     }
-    eventProcessor.schedule_imm(this, ET_NET);
+  } else {
+    m_write_vio.buffer.clear();
   }
 
   return &m_write_vio;
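
Likewise, do_io_write after the hunk above (reconstructed with comments;
only the diff is authoritative). The existing read_avail() check is kept,
but it now runs only when a reader was actually supplied:

VIO *
INKVConnInternal::do_io_write(Continuation *c, int64_t nbytes, IOBufferReader *buf, bool owner)
{
  ink_assert(!owner);
  m_write_vio.op = VIO::WRITE;
  m_write_vio.set_continuation(c);
  m_write_vio.nbytes = nbytes;
  m_write_vio.ndone = 0;
  m_write_vio.vc_server = this;

  if (buf) {
    // Normal write: attach the reader and schedule the VC only if there
    // is data available to write.
    m_write_vio.set_reader(buf);
    if (m_write_vio.get_reader()->read_avail() > 0) {
      if (ink_atomic_increment((int *)&m_event_count, 1) < 0) {
        ink_assert(!"not reached");
      }
      eventProcessor.schedule_imm(this, ET_NET);
    }
  } else {
    // Cancellation: no reader was supplied, so clear any previously set
    // buffer rather than dereferencing the nullptr.
    m_write_vio.buffer.clear();
  }

  return &m_write_vio;
}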
