diff --git a/bin/varnishd/cache/cache_backend.c b/bin/varnishd/cache/cache_backend.c
index d3a98ac56f..f09eae4237 100644
--- a/bin/varnishd/cache/cache_backend.c
+++ b/bin/varnishd/cache/cache_backend.c
@@ -416,6 +416,10 @@ vbe_dir_gethdrs(VRT_CTX, VCL_BACKEND d)
 	http_PrintfHeader(bo->bereq, "Host: %s", bp->hosthdr);
 
 	do {
+		if (WS_Overflowed(bo->ws)) {
+			VSLb(bo->vsl, SLT_FetchError, "workspace_backend overflow");
+			return (-1);
+		}
 		if (bo->htc != NULL)
 			CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);
 		pfd = vbe_dir_getfd(ctx, wrk, d, bp, extrachance == 0 ? 1 : 0);
diff --git a/bin/varnishd/cache/cache_req_fsm.c b/bin/varnishd/cache/cache_req_fsm.c
index bbcb3824f7..7d203c9667 100644
--- a/bin/varnishd/cache/cache_req_fsm.c
+++ b/bin/varnishd/cache/cache_req_fsm.c
@@ -238,6 +238,8 @@ cnt_deliver(struct worker *wrk, struct req *req)
 
 	assert(req->restarts <= cache_param->max_restarts);
 
+	if (WS_Overflowed(req->ws))
+		wrk->vpi->handling = VCL_RET_FAIL;
 	if (wrk->vpi->handling != VCL_RET_DELIVER) {
 		HSH_Cancel(wrk, req->objcore, NULL);
 		(void)HSH_DerefObjCore(wrk, &req->objcore, HSH_RUSH_POLICY);
@@ -648,6 +650,8 @@ cnt_lookup(struct worker *wrk, struct req *req)
 
 	VCL_hit_method(req->vcl, wrk, req, NULL, NULL);
 
+	if (WS_Overflowed(req->ws))
+		wrk->vpi->handling = VCL_RET_FAIL;
 	switch (wrk->vpi->handling) {
 	case VCL_RET_DELIVER:
 		if (busy != NULL) {
@@ -710,6 +714,8 @@ cnt_miss(struct worker *wrk, struct req *req)
 	CHECK_OBJ_ORNULL(req->stale_oc, OBJCORE_MAGIC);
 
 	VCL_miss_method(req->vcl, wrk, req, NULL, NULL);
+	if (WS_Overflowed(req->ws))
+		wrk->vpi->handling = VCL_RET_FAIL;
 	switch (wrk->vpi->handling) {
 	case VCL_RET_FETCH:
 		wrk->stats->cache_miss++;
@@ -755,6 +761,8 @@ cnt_pass(struct worker *wrk, struct req *req)
 	AZ(req->stale_oc);
 
 	VCL_pass_method(req->vcl, wrk, req, NULL, NULL);
+	if (WS_Overflowed(req->ws))
+		wrk->vpi->handling = VCL_RET_FAIL;
 	switch (wrk->vpi->handling) {
 	case VCL_RET_FAIL:
 		req->req_step = R_STP_VCLFAIL;
@@ -1029,6 +1037,8 @@ cnt_recv(struct worker *wrk, struct req *req)
 	assert(wrk->vpi->handling == VCL_RET_LOOKUP);
 	VSHA256_Final(req->digest, &sha256ctx);
 
+	if (WS_Overflowed(req->ws))
+		recv_handling = VCL_RET_FAIL;
 	switch (recv_handling) {
 	case VCL_RET_VCL:
 		VSLb(req->vsl, SLT_VCL_Error,
@@ -1111,6 +1121,8 @@ cnt_purge(struct worker *wrk, struct req *req)
 	AZ(HSH_DerefObjCore(wrk, &boc, 1));
 
 	VCL_purge_method(req->vcl, wrk, req, NULL, NULL);
+	if (WS_Overflowed(req->ws))
+		wrk->vpi->handling = VCL_RET_FAIL;
 	switch (wrk->vpi->handling) {
 	case VCL_RET_RESTART:
 		req->req_step = R_STP_RESTART;
diff --git a/bin/varnishd/http1/cache_http1_fetch.c b/bin/varnishd/http1/cache_http1_fetch.c
index e0c53b96ba..51ae6204f0 100644
--- a/bin/varnishd/http1/cache_http1_fetch.c
+++ b/bin/varnishd/http1/cache_http1_fetch.c
@@ -98,7 +98,9 @@ V1F_SendReq(struct worker *wrk, struct busyobj *bo, uint64_t *ctr_hdrbytes,
 		cl = 0;
 
 	VDP_Init(vdc, wrk, bo->vsl, NULL, bo, &cl);
-	if (bo->vdp_filter_list != NULL &&
+	if (WS_Overflowed(bo->ws))
+		err = "workspace_backend overflow";
+	else if (bo->vdp_filter_list != NULL &&
 	    VCL_StackVDP(vdc, bo->vcl, bo->vdp_filter_list, NULL, bo))
 		err = "Failure to push processors";
 	else if ((v1l = V1L_Open(wrk->aws, htc->rfd, bo->vsl, nan(""), 0)) == NULL) {
diff --git a/bin/varnishtest/tests/r04232.vtc b/bin/varnishtest/tests/r04232.vtc
new file mode 100644
index 0000000000..ea22bc5e1b
--- /dev/null
+++ b/bin/varnishtest/tests/r04232.vtc
@@ -0,0 +1,61 @@
+varnishtest "Workspace overflow through LostHeader"
+
+server s1 {} -start
+
+varnish v1 -cliok "param.set http_max_hdr 32"
+
+varnish v1 -vcl+backend {
+	sub vcl_recv {
+		if (req.url == "/1") {
+			set req.http.make-overflow = "some value";
+		}
+	}
+} -start
+
+logexpect l1 -v v1 {
+# expect * 1001 LostHeader "make-overflow: some value"
+# expect * 1001 Error "out of workspace"
+# expect * 1001 RespStatus "200"
+# expect * 1001 VCL_return "deliver"
+# expect * 1001 Error "workspace_client overflow"
+
+# expect * 1005 Error "out of workspace"
+# expect * 1005 LostHeader "Accept-Encoding"
+# expect * 1005 Error "out of workspace"
+# expect * 1005 LostHeader "X-Varnish: 1005"
+# expect * 1005 BerespStatus "200"
+} -start
+
+# 1. Trigger client workspace overflow through LostHeader in vcl_recv
+# 2. Go to vcl_backend_fetch and cause a backend workspace overflow and
+#    still return a 200
+# 3. Notice a workspace overflow after vcl_deliver and give client 503 response
+client c1 {
+	txreq -url "/1" -hdr "h1: 1" -hdr "h2: 2" \
+	    -hdr "h3: 3" -hdr "h4: 4" -hdr "h5: 5" \
+	    -hdr "h6: 6" -hdr "h7: 7" -hdr "h8: 8" \
+	    -hdr "h9: 9" -hdr "h10: 10" -hdr "h11: 11" \
+	    -hdr "h12: 12" -hdr "h13: 13" -hdr "h14: 14" \
+	    -hdr "h15: 15" -hdr "h16: 16" -hdr "h17: 17" \
+	    -hdr "h18: 18" -hdr "h19: 19" -hdr "h20: 20" \
+	    -hdr "h21: 21" -hdr "h22: 22" -hdr "h23: 23"
+	rxresp
+	expect resp.status == 503
+} -run
+
+# 1. Trigger backend workspace overflow through LostHeader in vcl_backend_fetch
+# 2. Get and deliver 200 from backend, send 200 to client
+client c2 {
+	txreq -url "/2" -hdr "h1: 1" -hdr "h2: 2" \
+	    -hdr "h3: 3" -hdr "h4: 4" -hdr "h5: 5" \
+	    -hdr "h6: 6" -hdr "h7: 7" -hdr "h8: 8" \
+	    -hdr "h9: 9" -hdr "h10: 10" -hdr "h11: 11" \
+	    -hdr "h12: 12" -hdr "h13: 13" -hdr "h14: 14" \
+	    -hdr "h15: 15" -hdr "h16: 16" -hdr "h17: 17" \
+	    -hdr "h18: 18" -hdr "h19: 19" -hdr "h20: 20" \
+	    -hdr "h21: 21" -hdr "h22: 22" -hdr "h23: 23"
+	rxresp
+	expect resp.status == 503
+} -run
+
+logexpect l1 -wait