-
Notifications
You must be signed in to change notification settings - Fork 20
/
outboundSender.go
714 lines (621 loc) · 21 KB
/
outboundSender.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
/**
* Copyright 2020 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package main
import (
"bytes"
"container/ring"
"crypto/hmac"
"crypto/sha1"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/go-kit/kit/metrics"
"github.com/xmidt-org/webpa-common/device"
"github.com/xmidt-org/webpa-common/logging"
"github.com/xmidt-org/webpa-common/semaphore"
"github.com/xmidt-org/webpa-common/webhook"
"github.com/xmidt-org/webpa-common/xhttp"
"github.com/xmidt-org/wrp-go/v2"
"github.com/xmidt-org/wrp-go/v2/wrphttp"
)
// failureText is the human readable text placed in the FailureMessage JSON
// body that is POSTed to a listener's failure URL when it is cut off.
const failureText = `Unfortunately, your endpoint is not able to keep up with the ` +
	`traffic being sent to it. Due to this circumstance, all notification traffic ` +
	`is being cut off and dropped for a period of time. Please increase your ` +
	`capacity to handle notifications, or reduce the number of notifications ` +
	`you have requested.`
// outboundRequest stores the outgoing request and assorted data that has been
// collected so far (to save on processing the information again).
// NOTE(review): no use of this type is visible in this file — confirm it is
// still referenced elsewhere before removing.
type outboundRequest struct {
	msg         *wrp.Message // the parsed WRP message to deliver
	event       string       // the event "short name" extracted from the destination
	transID     string       // transaction UUID from the message
	deviceID    string       // originating device id
	contentType string       // content type of the message payload
}
// FailureMessage is a helper that lets us easily create a json struct to send
// when we have to cut an endpoint off.
type FailureMessage struct {
	Text string `json:"text"` // human readable explanation (failureText)
	// Original is the webhook registration being cut off; its secret is
	// masked before this struct is stored (see New/Update).
	Original     webhook.W `json:"webhook_registration"`
	CutOffPeriod string    `json:"cut_off_period"` // how long delivery is suspended
	QueueSize    int       `json:"queue_size"`     // configured queue depth that overflowed
	Workers      int       `json:"worker_count"`   // configured delivery worker count
}
// OutboundSenderFactory is a configurable factory for OutboundSender objects.
type OutboundSenderFactory struct {
	// The WebHookListener to service
	Listener webhook.W

	// The http client Do() function to use for outbound requests.
	Sender func(*http.Request) (*http.Response, error)

	// The number of delivery workers to create and use.
	NumWorkers int

	// The queue depth to buffer events before we declare overflow, shut
	// off the message delivery, and basically put the endpoint in "timeout."
	QueueSize int

	// The amount of time to cut off the consumer if they don't keep up.
	// Must be greater than 0 seconds.
	CutOffPeriod time.Duration

	// Number of delivery retries before giving up
	DeliveryRetries int

	// Time in between delivery retries
	DeliveryInterval time.Duration

	// Metrics registry.
	MetricsRegistry CaduceusMetricsRegistry

	// The logger to use.
	Logger log.Logger
}
// OutboundSender is the behavior required of an outbound event sender:
// applying an updated webhook registration, shutting down (gently or not),
// reporting when the registration expires, and accepting messages for
// possible delivery.
type OutboundSender interface {
	Update(webhook.W) error
	Shutdown(bool)
	RetiredSince() time.Time
	Queue(*wrp.Message)
}
// CaduceusOutboundSender is the outbound sender object. Configuration fields
// guarded by mutex are replaced on Update(); the queue field is an
// atomic.Value holding a chan *wrp.Message that is swapped out wholesale on
// cutoff and shutdown (see Empty and Shutdown).
type CaduceusOutboundSender struct {
	// id is the registered URL; used as a label on metrics and in logs.
	id       string
	// urls is a ring of delivery URLs (the registered URL or the
	// alternative URLs); the single dispatcher rotates through it.
	urls     *ring.Ring
	listener webhook.W
	// deliverUntil is when the registration expires; dropUntil is when the
	// current cut-off (from a queue overflow) ends.
	deliverUntil                     time.Time
	dropUntil                        time.Time
	sender                           func(*http.Request) (*http.Response, error)
	events                           []*regexp.Regexp
	// matcher is nil when the registration matches every device.
	matcher                          []*regexp.Regexp
	queueSize                        int
	deliveryRetries                  int
	deliveryInterval                 time.Duration
	deliveryCounter                  metrics.Counter
	deliveryRetryCounter             metrics.Counter
	droppedQueueFullCounter          metrics.Counter
	droppedCutoffCounter             metrics.Counter
	droppedExpiredCounter            metrics.Counter
	droppedExpiredBeforeQueueCounter metrics.Counter
	droppedNetworkErrCounter         metrics.Counter
	droppedInvalidConfig             metrics.Counter
	droppedPanic                     metrics.Counter
	cutOffCounter                    metrics.Counter
	contentTypeCounter               metrics.Counter
	queueDepthGauge                  metrics.Gauge
	renewalTimeGauge                 metrics.Gauge
	deliverUntilGauge                metrics.Gauge
	dropUntilGauge                   metrics.Gauge
	maxWorkersGauge                  metrics.Gauge
	currentWorkersGauge              metrics.Gauge
	deliveryRetryMaxGauge            metrics.Gauge
	eventType                        metrics.Counter
	// wg tracks the dispatcher goroutine so Shutdown can wait for it.
	wg           sync.WaitGroup
	cutOffPeriod time.Duration
	// workers bounds the number of concurrent send() goroutines.
	workers    semaphore.Interface
	maxWorkers int
	failureMsg FailureMessage
	logger     log.Logger
	mutex      sync.RWMutex
	queue      atomic.Value
}
// New creates a new OutboundSender object from the factory, or returns an
// error when the factory is misconfigured: an unparseable listener URL, a
// nil Sender, a non-positive CutOffPeriod, or a missing Logger. On success
// the returned sender already has its dispatcher goroutine running.
func (osf OutboundSenderFactory) New() (obs OutboundSender, err error) {
	if _, err = url.ParseRequestURI(osf.Listener.Config.URL); nil != err {
		return
	}
	if nil == osf.Sender {
		err = errors.New("nil Sender()")
		return
	}
	// CutOffPeriod must be strictly positive; checking only for zero (as
	// before) incorrectly accepted negative durations.
	if osf.CutOffPeriod.Nanoseconds() <= 0 {
		err = errors.New("invalid CutOffPeriod")
		return
	}
	if nil == osf.Logger {
		err = errors.New("logger required")
		return
	}
	caduceusOutboundSender := &CaduceusOutboundSender{
		id:               osf.Listener.Config.URL,
		listener:         osf.Listener,
		sender:           osf.Sender,
		queueSize:        osf.QueueSize,
		cutOffPeriod:     osf.CutOffPeriod,
		deliverUntil:     osf.Listener.Until,
		logger:           osf.Logger,
		deliveryRetries:  osf.DeliveryRetries,
		deliveryInterval: osf.DeliveryInterval,
		maxWorkers:       osf.NumWorkers,
		failureMsg: FailureMessage{
			Original:     osf.Listener,
			Text:         failureText,
			CutOffPeriod: osf.CutOffPeriod.String(),
			QueueSize:    osf.QueueSize,
			Workers:      osf.NumWorkers,
		},
	}

	// Don't share the secret with others when there is an error.
	caduceusOutboundSender.failureMsg.Original.Config.Secret = "XxxxxX"

	CreateOutbounderMetrics(osf.MetricsRegistry, caduceusOutboundSender)

	// update queue depth and current workers gauge to make sure they start at 0
	caduceusOutboundSender.queueDepthGauge.Set(0)
	caduceusOutboundSender.currentWorkersGauge.Set(0)

	caduceusOutboundSender.queue.Store(make(chan *wrp.Message, osf.QueueSize))

	if err = caduceusOutboundSender.Update(osf.Listener); nil != err {
		return
	}

	caduceusOutboundSender.workers = semaphore.New(caduceusOutboundSender.maxWorkers)
	caduceusOutboundSender.wg.Add(1)
	go caduceusOutboundSender.dispatcher()

	obs = caduceusOutboundSender
	return
}
// Update applies user configurable values for the outbound sender when a
// webhook is registered. All inputs (failure URL, event regexes, device-ID
// matchers, alternative URLs) are validated BEFORE the write lock is taken,
// so a bad registration leaves the sender's current configuration intact.
func (obs *CaduceusOutboundSender) Update(wh webhook.W) (err error) {
	// Validate the failure URL, if present
	if "" != wh.FailureURL {
		if _, err = url.ParseRequestURI(wh.FailureURL); nil != err {
			return
		}
	}

	// Create and validate the event regex objects
	var events []*regexp.Regexp
	for _, event := range wh.Events {
		var re *regexp.Regexp
		if re, err = regexp.Compile(event); nil != err {
			return
		}
		events = append(events, re)
	}
	if len(events) < 1 {
		err = errors.New("events must not be empty")
		return
	}

	// Create the matcher regex objects
	matcher := []*regexp.Regexp{}
	for _, item := range wh.Matcher.DeviceId {
		if ".*" == item {
			// Match everything - skip the filtering
			matcher = []*regexp.Regexp{}
			break
		}
		var re *regexp.Regexp
		if re, err = regexp.Compile(item); nil != err {
			err = fmt.Errorf("invalid matcher item: '%s'", item)
			return
		}
		matcher = append(matcher, re)
	}

	// Validate the various urls
	urlCount := len(wh.Config.AlternativeURLs)
	for i := 0; i < urlCount; i++ {
		_, err = url.Parse(wh.Config.AlternativeURLs[i])
		if err != nil {
			obs.logger.Log(level.Key(), level.ErrorValue(), logging.MessageKey(), "failed to update url",
				"url", wh.Config.AlternativeURLs[i], logging.ErrorKey(), err)
			return
		}
	}

	obs.renewalTimeGauge.Set(float64(time.Now().Unix()))

	// write/update obs
	obs.mutex.Lock()

	obs.listener = wh

	obs.failureMsg.Original = wh
	// Don't share the secret with others when there is an error.
	obs.failureMsg.Original.Config.Secret = "XxxxxX"

	// NOTE: obs.listener = wh above already copies FailureURL; the previous
	// redundant re-assignment has been removed.
	obs.deliverUntil = wh.Until
	obs.deliverUntilGauge.Set(float64(obs.deliverUntil.Unix()))

	obs.events = events

	obs.deliveryRetryMaxGauge.Set(float64(obs.deliveryRetries))

	// if matcher list is empty set it nil for Queue() logic
	obs.matcher = nil
	if 0 < len(matcher) {
		obs.matcher = matcher
	}

	if 0 == urlCount {
		obs.urls = ring.New(1)
		obs.urls.Value = obs.id
	} else {
		r := ring.New(urlCount)
		for i := 0; i < urlCount; i++ {
			r.Value = wh.Config.AlternativeURLs[i]
			r = r.Next()
		}
		obs.urls = r
	}

	// Randomize where we start so all the instances don't synchronize
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	offset := r.Intn(obs.urls.Len())
	for 0 < offset {
		obs.urls = obs.urls.Next()
		offset--
	}

	// Update this here in case we make this configurable later
	obs.maxWorkersGauge.Set(float64(obs.maxWorkers))

	obs.mutex.Unlock()

	return
}
// Shutdown causes the CaduceusOutboundSender to stop its activities either gently or
// abruptly based on the gentle parameter. If gentle is false, all queued
// messages will be dropped without an attempt to send made.
func (obs *CaduceusOutboundSender) Shutdown(gentle bool) {
	if false == gentle {
		// need to close the channel we're going to replace, in case it doesn't
		// have any events in it.
		close(obs.queue.Load().(chan *wrp.Message))
		// Empty() swaps in a fresh queue and counts everything in the old
		// (now closed) queue as expired drops.
		obs.Empty(obs.droppedExpiredCounter)
	}
	// Close the current queue (the original one when gentle; the fresh,
	// empty replacement when not), which signals the dispatcher to exit
	// after draining. See the dispatcher() comments for why this cannot
	// deadlock.
	close(obs.queue.Load().(chan *wrp.Message))
	// Wait for the dispatcher goroutine to drain/exit and gather workers.
	obs.wg.Wait()

	obs.mutex.Lock()
	// Zero the delivery window so RetiredSince() reports the sender retired.
	obs.deliverUntil = time.Time{}
	obs.deliverUntilGauge.Set(float64(obs.deliverUntil.Unix()))
	obs.queueDepthGauge.Set(0) //just in case
	obs.mutex.Unlock()
}
// RetiredSince returns the time the CaduceusOutboundSender retired (which could be in
// the future).
func (obs *CaduceusOutboundSender) RetiredSince() time.Time {
	obs.mutex.RLock()
	defer obs.mutex.RUnlock()
	return obs.deliverUntil
}
// Queue is given a request to evaluate and optionally enqueue in the list
// of messages to deliver. The request is checked to see if it matches the
// criteria before being accepted or silently dropped.
func (obs *CaduceusOutboundSender) Queue(msg *wrp.Message) {
	// Snapshot the mutable configuration under the read lock so the checks
	// below operate on a consistent view.
	obs.mutex.RLock()
	deliverUntil := obs.deliverUntil
	dropUntil := obs.dropUntil
	events := obs.events
	matcher := obs.matcher
	obs.mutex.RUnlock()

	now := time.Now()

	// Drop (and count) messages outside the delivery window or during a cut-off.
	if false == obs.isValidTimeWindow(now, dropUntil, deliverUntil) {
		return
	}

	// NOTE(review): if more than one event regex matches this destination,
	// the message is enqueued once per matching regex — confirm duplicate
	// delivery is intended.
	for _, eventRegex := range events {
		if false == eventRegex.MatchString(strings.TrimPrefix(msg.Destination, "event:")) {
			// regex didn't match; don't do anything
			continue
		}

		// A nil matcher means the registration matches every device
		// (see Update(), which sets matcher to nil when empty).
		matchDevice := (nil == matcher)
		if nil != matcher {
			for _, deviceRegex := range matcher {
				if deviceRegex.MatchString(msg.Source) {
					matchDevice = true
					break
				}
			}
		}

		/*
			// if the device id matches then we want to look through all the metadata
			// and make sure that the obs metadata matches the metadata provided
			if matchDevice {
				for key, val := range metaData {
					if matchers, ok := matcher[key]; ok {
						for _, deviceRegex := range matchers {
							matchDevice = false
							if deviceRegex.MatchString(val) {
								matchDevice = true
								break
							}
						}

						// metadata was provided but did not match our expectations,
						// so it is time to drop the message
						if !matchDevice {
							break
						}
					}
				}
			}
		*/

		if matchDevice {
			// Non-blocking send: a full queue triggers the cut-off
			// machinery instead of blocking the caller.
			select {
			case obs.queue.Load().(chan *wrp.Message) <- msg:
				obs.queueDepthGauge.Add(1.0)
			default:
				obs.queueOverflow()
				obs.droppedQueueFullCounter.Add(1.0)
			}
		}
	}
}
// isValidTimeWindow reports whether now falls inside the sender's delivery
// window: strictly after any cut-off (dropUntil) and strictly before the
// registration expiry (deliverUntil). The matching dropped-message counter
// is incremented when the check fails.
func (obs *CaduceusOutboundSender) isValidTimeWindow(now, dropUntil, deliverUntil time.Time) bool {
	switch {
	case !now.After(dropUntil):
		// the client is still cut off
		obs.droppedCutoffCounter.Add(1.0)
		return false
	case !now.Before(deliverUntil):
		// the registration has expired
		obs.droppedExpiredBeforeQueueCounter.Add(1.0)
		return false
	default:
		return true
	}
}
// Empty is called on cutoff or shutdown and swaps out the current queue for
// a fresh one, counting any messages still sitting in the old queue against
// droppedCounter. It never closes the abandoned queue: once nothing
// references it, the garbage collector reclaims it without a close.
func (obs *CaduceusOutboundSender) Empty(droppedCounter metrics.Counter) {
	abandoned := obs.queue.Load().(chan *wrp.Message)
	obs.queue.Store(make(chan *wrp.Message, obs.queueSize))
	droppedCounter.Add(float64(len(abandoned)))
	obs.queueDepthGauge.Set(0.0)
}
// dispatcher is the single goroutine that pulls messages off the queue,
// re-checks the delivery window, and hands each deliverable message to a
// send() worker (bounded by the workers semaphore). It exits when the queue
// is closed and drained, then re-acquires every worker slot so Shutdown's
// wg.Wait() only returns once all in-flight sends have finished.
func (obs *CaduceusOutboundSender) dispatcher() {
	defer obs.wg.Done()
	var (
		msg            *wrp.Message
		urls           *ring.Ring
		secret, accept string
		ok             bool
	)

Loop:
	for {
		// Always pull a new queue in case we have been cutoff or are shutting
		// down.
		msgQueue := obs.queue.Load().(chan *wrp.Message)
		select {
		// The dispatcher cannot get stuck blocking here forever (caused by an
		// empty queue that is replaced and then Queue() starts adding to the
		// new queue) because:
		// - queue is only replaced on cutoff and shutdown
		// - on cutoff, the first queue is always full so we will definitely
		//   get a message, drop it because we're cut off, then get the new
		//   queue and block until the cut off ends and Queue() starts queueing
		//   messages again.
		// - on graceful shutdown, the queue is closed and then the dispatcher
		//   will send all messages, then break the loop, gather workers, and
		//   exit.
		// - on non graceful shutdown, the queue is closed and then replaced
		//   with a new, empty queue that is also closed.
		//      - If the first queue is empty, we immediately break the loop,
		//        gather workers, and exit.
		//      - If the first queue has messages, we drop a message as expired
		//        pull in the new queue which is empty and closed, break the
		//        loop, gather workers, and exit.
		case msg, ok = <-msgQueue:
			// This is only true when a queue is empty and closed, which for us
			// only happens on Shutdown().
			if !ok {
				break Loop
			}
			obs.queueDepthGauge.Add(-1.0)
			obs.mutex.RLock()
			urls = obs.urls
			// Move to the next URL to try 1st the next time.
			// This is okay because we run a single dispatcher and it's the
			// only one updating this field.
			obs.urls = obs.urls.Next()
			deliverUntil := obs.deliverUntil
			dropUntil := obs.dropUntil
			secret = obs.listener.Config.Secret
			accept = obs.listener.Config.ContentType
			obs.mutex.RUnlock()

			// Re-check the delivery window at dispatch time; the message may
			// have been queued before a cut-off or expiry took effect.
			now := time.Now()

			if now.Before(dropUntil) {
				obs.droppedCutoffCounter.Add(1.0)
				continue
			}
			if now.After(deliverUntil) {
				obs.droppedExpiredCounter.Add(1.0)
				continue
			}

			obs.workers.Acquire()
			obs.currentWorkersGauge.Add(1.0)

			go obs.send(urls, secret, accept, msg)
		}
	}

	// Grab all the worker slots back: this blocks until every outstanding
	// send() goroutine has Released, i.e. all deliveries are done.
	for i := 0; i < obs.maxWorkers; i++ {
		obs.workers.Acquire()
	}
}
// send is the worker routine that actually takes a queued message and
// delivers it to the listener outside webpa. It POSTs the payload (re-encoded
// as msgpack when the listener accepts wrp), signing it with the shared
// secret when present, retrying across the alternative URLs on failure, and
// records delivery metrics. The deferred block releases the worker semaphore
// slot and converts any panic into a metric + error log so one bad delivery
// cannot crash the process.
func (obs *CaduceusOutboundSender) send(urls *ring.Ring, secret, acceptType string, msg *wrp.Message) {
	defer func() {
		if r := recover(); nil != r {
			obs.droppedPanic.Add(1.0)
			obs.logger.Log(level.Key(), level.ErrorValue(), logging.MessageKey(), "goroutine send() panicked",
				"id", obs.id, "panic", r)
		}
		obs.workers.Release()
		obs.currentWorkersGauge.Add(-1.0)
	}()

	payload := msg.Payload
	body := payload
	var payloadReader *bytes.Reader

	// Record the content type being delivered. TrimPrefix (not TrimLeft,
	// which treats "application/" as a character SET and would mangle other
	// types, e.g. "text/plain" -> "ext/plain") strips the MIME prefix.
	obs.contentTypeCounter.With("content_type", strings.TrimPrefix(msg.ContentType, "application/")).Add(1.0)

	// Use the internal content type unless the accept type is wrp
	contentType := msg.ContentType
	switch acceptType {
	case "wrp", "application/msgpack", "application/wrp":
		// WTS - We should pass the original, raw WRP event instead of
		// re-encoding it.
		contentType = "application/msgpack"
		buffer := bytes.NewBuffer([]byte{})
		encoder := wrp.NewEncoder(buffer, wrp.Msgpack)
		encoder.Encode(msg)
		body = buffer.Bytes()
	}
	payloadReader = bytes.NewReader(body)

	req, err := http.NewRequest("POST", urls.Value.(string), payloadReader)
	if nil != err {
		// Report drop
		obs.droppedInvalidConfig.Add(1.0)
		obs.logger.Log(level.Key(), level.ErrorValue(), logging.MessageKey(), "Invalid URL",
			"url", urls.Value.(string), "id", obs.id, logging.ErrorKey(), err)
		return
	}

	req.Header.Set("Content-Type", contentType)

	// Add x-Midt-* headers
	wrphttp.AddMessageHeaders(req.Header, msg)

	// Provide the old headers for now
	req.Header.Set("X-Webpa-Event", strings.TrimPrefix(msg.Destination, "event:"))
	req.Header.Set("X-Webpa-Transaction-Id", msg.TransactionUUID)

	// Add the device id without the trailing service
	id, _ := device.ParseID(msg.Source)
	req.Header.Set("X-Webpa-Device-Id", string(id))
	req.Header.Set("X-Webpa-Device-Name", string(id))

	// Apply the secret: HMAC-SHA1 of the body, hex encoded.
	if "" != secret {
		s := hmac.New(sha1.New, []byte(secret))
		s.Write(body)
		sig := fmt.Sprintf("sha1=%s", hex.EncodeToString(s.Sum(nil)))
		req.Header.Set("X-Webpa-Signature", sig)
	}

	// find the event "short name"
	event := msg.FindEventStringSubMatch()

	retryOptions := xhttp.RetryOptions{
		Logger:   obs.logger,
		Retries:  obs.deliveryRetries,
		Interval: obs.deliveryInterval,
		Counter:  obs.deliveryRetryCounter.With("url", obs.id, "event", event),
		// Always retry on failures up to the max count.
		ShouldRetry: func(error) bool { return true },
		ShouldRetryStatus: func(code int) bool {
			return code < 200 || code > 299
		},
	}

	// update subsequent requests with the next url in the list upon failure
	retryOptions.UpdateRequest = func(request *http.Request) {
		urls = urls.Next()
		tmp, err := url.Parse(urls.Value.(string))
		if err != nil {
			obs.logger.Log(level.Key(), level.ErrorValue(), logging.MessageKey(), "failed to update url",
				"url", urls.Value.(string), logging.ErrorKey(), err)
			return
		}
		request.URL = tmp
	}

	// Send it
	resp, err := xhttp.RetryTransactor(retryOptions, obs.sender)(req)
	code := "failure"
	if nil != err {
		// Report failure
		obs.droppedNetworkErrCounter.Add(1.0)
	} else {
		// Report Result
		code = strconv.Itoa(resp.StatusCode)

		// read until the response is complete before closing to allow
		// connection reuse
		if nil != resp.Body {
			io.Copy(ioutil.Discard, resp.Body)
			resp.Body.Close()
		}
	}
	obs.deliveryCounter.With("url", obs.id, "code", code, "event", event).Add(1.0)
}
// queueOverflow handles the logic of what to do when a queue overflows:
// cutting off the webhook for cutOffPeriod, emptying the queue (counting the
// contents as cut-off drops), and POSTing a signed cut-off notification to
// the failure URL if one is registered. Re-entrant calls during an active
// cut-off return immediately so the notification is sent at most once per
// cut-off window.
func (obs *CaduceusOutboundSender) queueOverflow() {
	obs.mutex.Lock()
	if time.Now().Before(obs.dropUntil) {
		// Already cut off; nothing more to do.
		obs.mutex.Unlock()
		return
	}
	obs.dropUntil = time.Now().Add(obs.cutOffPeriod)
	obs.dropUntilGauge.Set(float64(obs.dropUntil.Unix()))
	secret := obs.listener.Config.Secret
	failureMsg := obs.failureMsg
	failureURL := obs.listener.FailureURL
	obs.mutex.Unlock()

	var (
		errorLog = log.WithPrefix(obs.logger, level.Key(), level.ErrorValue())
	)

	obs.cutOffCounter.Add(1.0)

	// We empty the queue but don't close the channel, because we're not
	// shutting down.
	obs.Empty(obs.droppedCutoffCounter)

	msg, err := json.Marshal(failureMsg)
	if nil != err {
		errorLog.Log(logging.MessageKey(), "Cut-off notification json.Marshal failed", "failureMessage", obs.failureMsg,
			"for", obs.id, logging.ErrorKey(), err)
		return
	}

	// if no URL to send cut off notification to, do nothing
	if "" == failureURL {
		return
	}

	// Send a "you've been cut off" warning message
	payload := bytes.NewReader(msg)
	req, err := http.NewRequest("POST", failureURL, payload)
	if nil != err {
		// Failure
		errorLog.Log(logging.MessageKey(), "Unable to send cut-off notification", "notification",
			failureURL, "for", obs.id, logging.ErrorKey(), err)
		return
	}
	req.Header.Set("Content-Type", "application/json")

	// Sign the notification with the shared secret, matching send().
	if "" != secret {
		h := hmac.New(sha1.New, []byte(secret))
		h.Write(msg)
		sig := fmt.Sprintf("sha1=%s", hex.EncodeToString(h.Sum(nil)))
		req.Header.Set("X-Webpa-Signature", sig)
	}

	// record content type, json.
	obs.contentTypeCounter.With("content_type", "json").Add(1.0)
	resp, err := obs.sender(req)
	if nil != err {
		// Failure
		errorLog.Log(logging.MessageKey(), "Unable to send cut-off notification", "notification",
			failureURL, "for", obs.id, logging.ErrorKey(), err)
		return
	}

	if nil == resp {
		// Failure
		errorLog.Log(logging.MessageKey(), "Unable to send cut-off notification, nil response",
			"notification", failureURL)
		return
	}

	// Success. Drain and close the response body so the underlying
	// connection can be reused; previously the body was leaked.
	if nil != resp.Body {
		io.Copy(ioutil.Discard, resp.Body)
		resp.Body.Close()
	}
}