pubsub_push.go
package cloud_pubsub_push

import (
	"context"
	"crypto/subtle"
	"encoding/base64"
	"encoding/json"
	"io/ioutil"
	"net"
	"net/http"
	"sync"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/internal"
	tlsint "github.com/influxdata/telegraf/internal/tls"
	"github.com/influxdata/telegraf/plugins/inputs"
	"github.com/influxdata/telegraf/plugins/parsers"
)
// defaultMaxBodySize is the default maximum request body size, in bytes (500 MiB).
// If the request body exceeds this size, an HTTP 413 error is returned.
const defaultMaxBodySize = 500 * 1024 * 1024

// defaultMaxUndeliveredMessages is the default cap on metric groups that may be
// in flight (accepted but not yet delivered to an output) at any one time.
const defaultMaxUndeliveredMessages = 1000
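// PubSubPush is a Telegraf service input that exposes an HTTP(S) endpoint for
// Google Cloud Pub/Sub push subscriptions and forwards the decoded metrics to
// the accumulator.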
type PubSubPush struct {
	ServiceAddress string
	Token          string
	Path           string
	ReadTimeout    internal.Duration
	WriteTimeout   internal.Duration
	MaxBodySize    internal.Size
	AddMeta        bool
	Log            telegraf.Logger

	MaxUndeliveredMessages int `toml:"max_undelivered_messages"`

	tlsint.ServerConfig
	parsers.Parser

	listener    net.Listener
	server      *http.Server
	acc         telegraf.TrackingAccumulator
	ctx         context.Context
	cancel      context.CancelFunc
	wg          *sync.WaitGroup
	mu          *sync.Mutex
	undelivered map[telegraf.TrackingID]chan bool
	sem         chan struct{}
}
// Message defines the structure of a Google Pub/Sub message.
type Message struct {
	Atts map[string]string `json:"attributes"`
	Data string            `json:"data"` // Data is base64-encoded
}
// Payload is the received Google Pub/Sub data. (https://cloud.google.com/pubsub/docs/push)
type Payload struct {
	Msg          Message `json:"message"`
	Subscription string  `json:"subscription"`
}
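// For reference, a Pub/Sub push delivery carries a JSON body shaped roughly like
// the following. This is an illustrative sketch based only on the fields parsed
// above; real deliveries also carry fields such as messageId and publishTime,
// which this plugin ignores. Here "data" is the base64 encoding of a hypothetical
// line-protocol sample, "mymeasurement value=42".
//
//	{
//	  "message": {
//	    "attributes": {"key": "value"},
//	    "data": "bXltZWFzdXJlbWVudCB2YWx1ZT00Mg=="
//	  },
//	  "subscription": "projects/my-project/subscriptions/my-subscription"
//	}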
const sampleConfig = `
  ## Address and port to host HTTP listener on
  service_address = ":8080"

  ## Application secret to verify messages originate from Cloud Pub/Sub
  # token = ""

  ## Path to listen to.
  # path = "/"

  ## Maximum duration before timing out read of the request
  # read_timeout = "10s"
  ## Maximum duration before timing out write of the response. This should be set to a value
  ## large enough that you can send at least 'metric_batch_size' number of messages within the
  ## duration.
  # write_timeout = "10s"

  ## Maximum allowed http request body size in bytes.
  ## 0 means to use the default of 524,288,000 bytes (500 mebibytes)
  # max_body_size = "500MB"

  ## Whether to add the pubsub metadata, such as message attributes and
  ## subscription, as tags.
  # add_meta = false

  ## Optional. Maximum messages to read from PubSub that have not been written
  ## to an output. Defaults to 1000.
  ## For best throughput set based on the number of metrics within
  ## each message and the size of the output's metric_batch_size.
  ##
  ## For example, if each message contains 10 metrics and the output
  ## metric_batch_size is 1000, setting this to 100 will ensure that a
  ## full batch is collected and the write is triggered immediately without
  ## waiting until the next flush_interval.
  # max_undelivered_messages = 1000

  ## Set one or more allowed client CA certificate file names to
  ## enable mutually authenticated TLS connections
  # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"]

  ## Add service certificate and key
  # tls_cert = "/etc/telegraf/cert.pem"
  # tls_key = "/etc/telegraf/key.pem"

  ## Data format to consume.
  ## Each data format has its own unique set of configuration options, read
  ## more about them here:
  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
  data_format = "influx"
`
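// SampleConfig returns the sample TOML configuration for the plugin.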
func (p *PubSubPush) SampleConfig() string {
	return sampleConfig
}
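// Description returns a one-line description of the plugin.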
func (p *PubSubPush) Description() string {
	return "Google Cloud Pub/Sub Push HTTP listener"
}
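// Gather is a no-op for this plugin: metrics are pushed to the accumulator by
// the HTTP handler rather than collected on the gather interval.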
func (p *PubSubPush) Gather(_ telegraf.Accumulator) error {
	return nil
}
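// SetParser sets the parser used to convert decoded message payloads into metrics.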
func (p *PubSubPush) SetParser(parser parsers.Parser) {
	p.Parser = parser
}
// Start starts the http listener service.
func (p *PubSubPush) Start(acc telegraf.Accumulator) error {
	// Apply defaults for any limits and timeouts left unset.
	if p.MaxBodySize.Size == 0 {
		p.MaxBodySize.Size = defaultMaxBodySize
	}

	if p.ReadTimeout.Duration < time.Second {
		p.ReadTimeout.Duration = time.Second * 10
	}
	if p.WriteTimeout.Duration < time.Second {
		p.WriteTimeout.Duration = time.Second * 10
	}

	tlsConf, err := p.ServerConfig.TLSConfig()
	if err != nil {
		return err
	}

	p.server = &http.Server{
		Addr:        p.ServiceAddress,
		Handler:     http.TimeoutHandler(p, p.WriteTimeout.Duration, "timed out processing metric"),
		ReadTimeout: p.ReadTimeout.Duration,
		TLSConfig:   tlsConf,
	}

	p.ctx, p.cancel = context.WithCancel(context.Background())
	p.wg = &sync.WaitGroup{}
	p.acc = acc.WithTracking(p.MaxUndeliveredMessages)
	p.sem = make(chan struct{}, p.MaxUndeliveredMessages)
	p.undelivered = make(map[telegraf.TrackingID]chan bool)
	p.mu = &sync.Mutex{}

	// Watch for delivery notifications from the tracking accumulator.
	p.wg.Add(1)
	go func() {
		defer p.wg.Done()
		p.receiveDelivered()
	}()

	// Serve HTTP(S) until Stop is called.
	p.wg.Add(1)
	go func() {
		defer p.wg.Done()
		if tlsConf != nil {
			p.server.ListenAndServeTLS("", "")
		} else {
			p.server.ListenAndServe()
		}
	}()

	return nil
}
// Stop cleans up all resources
func (p *PubSubPush) Stop() {
	p.cancel()
	p.server.Shutdown(p.ctx)
	p.wg.Wait()
}
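// ServeHTTP dispatches incoming requests: pushes to the configured path are
// handled by serveWrite, anything else returns 404. Both branches go through
// the optional token check first.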
func (p *PubSubPush) ServeHTTP(res http.ResponseWriter, req *http.Request) {
	if req.URL.Path == p.Path {
		p.AuthenticateIfSet(p.serveWrite, res, req)
	} else {
		p.AuthenticateIfSet(http.NotFound, res, req)
	}
}
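// serveWrite decodes a Pub/Sub push request into metrics and only acknowledges
// it (HTTP 204) once the tracking accumulator reports the metric group as
// delivered, so Pub/Sub can redeliver messages that were not fully processed.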
func (p *PubSubPush) serveWrite(res http.ResponseWriter, req *http.Request) {
	// Block until a slot is free in the undelivered-message semaphore, or until
	// the request or the plugin is cancelled.
	select {
	case <-req.Context().Done():
		res.WriteHeader(http.StatusServiceUnavailable)
		return
	case <-p.ctx.Done():
		res.WriteHeader(http.StatusServiceUnavailable)
		return
	case p.sem <- struct{}{}:
		break
	}

	// Check that the content length is not too large for us to handle.
	if req.ContentLength > p.MaxBodySize.Size {
		res.WriteHeader(http.StatusRequestEntityTooLarge)
		return
	}

	if req.Method != http.MethodPost {
		res.WriteHeader(http.StatusMethodNotAllowed)
		return
	}

	body := http.MaxBytesReader(res, req.Body, p.MaxBodySize.Size)
	bytes, err := ioutil.ReadAll(body)
	if err != nil {
		res.WriteHeader(http.StatusRequestEntityTooLarge)
		return
	}

	var payload Payload
	if err = json.Unmarshal(bytes, &payload); err != nil {
		p.Log.Errorf("Error decoding payload %s", err.Error())
		res.WriteHeader(http.StatusBadRequest)
		return
	}

	// The message data is base64-encoded by Pub/Sub; decode it before parsing.
	sDec, err := base64.StdEncoding.DecodeString(payload.Msg.Data)
	if err != nil {
		p.Log.Errorf("Base64-decode failed %s", err.Error())
		res.WriteHeader(http.StatusBadRequest)
		return
	}

	metrics, err := p.Parse(sDec)
	if err != nil {
		p.Log.Debug(err.Error())
		res.WriteHeader(http.StatusBadRequest)
		return
	}

	if p.AddMeta {
		for i := range metrics {
			for k, v := range payload.Msg.Atts {
				metrics[i].AddTag(k, v)
			}
			metrics[i].AddTag("subscription", payload.Subscription)
		}
	}

	ch := make(chan bool, 1)
	p.mu.Lock()
	p.undelivered[p.acc.AddTrackingMetricGroup(metrics)] = ch
	p.mu.Unlock()

	// Wait for receiveDelivered to report the outcome before responding.
	select {
	case <-req.Context().Done():
		res.WriteHeader(http.StatusServiceUnavailable)
		return
	case success := <-ch:
		if success {
			res.WriteHeader(http.StatusNoContent)
		} else {
			res.WriteHeader(http.StatusInternalServerError)
		}
	}
}
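// receiveDelivered listens for delivery notifications from the tracking
// accumulator, releases the corresponding semaphore slot, and signals the
// waiting HTTP handler with the delivery result.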
func (p *PubSubPush) receiveDelivered() {
	for {
		select {
		case <-p.ctx.Done():
			return
		case info := <-p.acc.Delivered():
			<-p.sem

			p.mu.Lock()
			ch, ok := p.undelivered[info.ID()]
			if !ok {
				p.mu.Unlock()
				continue
			}

			delete(p.undelivered, info.ID())
			p.mu.Unlock()

			if info.Delivered() {
				ch <- true
			} else {
				ch <- false
				p.Log.Debug("Metric group failed to process")
			}
		}
	}
}
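// AuthenticateIfSet enforces the optional shared-secret check before invoking
// the wrapped handler. The token is read via req.FormValue("token"), so it can
// be supplied as a query parameter on the configured push endpoint URL, e.g.
// https://example.com:8080/?token=<secret> (illustrative URL only).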
func (p *PubSubPush) AuthenticateIfSet(handler http.HandlerFunc, res http.ResponseWriter, req *http.Request) {
	if p.Token != "" {
		if subtle.ConstantTimeCompare([]byte(req.FormValue("token")), []byte(p.Token)) != 1 {
			http.Error(res, "Unauthorized.", http.StatusUnauthorized)
			return
		}
	}

	handler(res, req)
}
func init() {
	inputs.Add("cloud_pubsub_push", func() telegraf.Input {
		return &PubSubPush{
			ServiceAddress:         ":8080",
			Path:                   "/",
			MaxUndeliveredMessages: defaultMaxUndeliveredMessages,
		}
	})
}
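// A minimal telegraf.conf snippet enabling this plugin might look like the
// following sketch (values mirror the defaults in sampleConfig above; adjust
// the address, TLS settings, and data_format to match your deployment):
//
//	[[inputs.cloud_pubsub_push]]
//	  service_address = ":8080"
//	  path = "/"
//	  data_format = "influx"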