#
# Note to self: I'm not actually using the Serverless integration on their site,
# it turns out that streaming logs is super expensive, relatively speaking.
# I was looking at about $6/mo, which is more than the rest of my AWS bill combined.
#
org: dmuth
app: peco
service: peco-api
frameworkVersion: '3'
provider:
  name: aws
  # This is a faster method, and will be the default in Serverless 4
  deploymentMethod: direct
  runtime: python3.10
  # Don't keep old versions of functions.
  versionFunctions: false
  # CloudWatch log retention for 30 days
  logRetentionInDays: 30
  environment:
    STAGE: ${sls:stage}
    DYNAMO_TABLE_NAME: !Ref pecoOutagesTable
    DYNAMO_TABLE_NAME_ARCHIVE: !Ref pecoOutagesTableArchive
  iam:
    role:
      statements:
        - Effect: "Allow"
          Action:
            - dynamodb:Query
            - dynamodb:Scan
            - dynamodb:GetItem
            - dynamodb:PutItem
            - dynamodb:UpdateItem
            - dynamodb:DeleteItem
          Resource: [
            "arn:aws:dynamodb:${aws:region}:${aws:accountId}:table/${self:custom.dynamoTableName}",
            "arn:aws:dynamodb:${aws:region}:${aws:accountId}:table/${self:custom.dynamoTableName}/index/Hour"
          ]
        #
        # Limit our Lambdas to just being able to write to the Archive table.
        #
        - Effect: "Allow"
          Action:
            - dynamodb:PutItem
          Resource: [
            "arn:aws:dynamodb:${aws:region}:${aws:accountId}:table/${self:custom.dynamoTableNameArchive}"
          ]
  stackTags:
    project: "peco"
    stage: "${sls:stage}"
  httpApi:
    cors: true

functions:
  test:
    handler: api/testing.main
    #url: true
    #events:
    #  - httpApi:
    #      path: /test
    #      method: get

  #
  # This function fetches the latest PECO statuses from DynamoDB.
  #
  peco:
    handler: api/peco.get_status
    events:
      - httpApi:
          path: /peco
          method: get

  #
  # Get recent status. At this time, the default is 12 (1 hour).
  #
  peco_recent:
    handler: api/peco.get_status_recent
    events:
      - httpApi:
          path: /peco/recent
          method: get
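  #
  # For reference, a hedged example of hitting these two read endpoints once
  # deployed. The hostname below is a placeholder; the real one comes from the
  # httpApi endpoint that "sls deploy" prints.
  #
  #   curl https://<api-id>.execute-api.<region>.amazonaws.com/peco
  #   curl https://<api-id>.execute-api.<region>.amazonaws.com/peco/recent
  #
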
  #
  # Live status from PECO (for testing/development purposes)
  #
  peco_live:
    handler: api/peco.get_status_live
    #events: # Debugging
    #  - httpApi:
    #      path: /peco/live
    #      method: get

  #
  # This function fetches PECO's status periodically and writes it to
  # a DynamoDB table.
  #
  cron:
    handler: api/cron.main
    events:
      - schedule:
          rate: rate(1 minute)
          enabled: true
      #- httpApi: # Debugging
      #    path: /cron
      #    method: get
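  #
  # A couple of handy (assumed) Serverless CLI invocations for poking at this
  # function: invoke it on demand, and tail its CloudWatch logs.
  #
  #   sls invoke -f cron --stage dev
  #   sls logs -f cron --tail
  #
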
#
# Our resources are in CloudFormation syntax
#
resources:
  Resources:

    #
    # Our "main" DynamoDB table. It stores a subset of data for what the website uses.
    # With less data stored here, reads will take up fewer read units.
    #
    # This is, BTW, raw CloudFormation config right here.
    #
    pecoOutagesTable:
      Type: AWS::DynamoDB::Table
      Properties:
        #
        # If I mess around with the indexes, I will have to briefly rename this table
        # to something else, deploy, then change the name back and deploy a second time.
        #
        # If I don't do that, Serverless (or CloudFormation) doesn't properly pick up
        # what I'm trying to do and will throw an error when it tries to delete
        # a non-existent table.
        #
        # What a stupid bug.
        #
        TableName: ${self:custom.dynamoTableName}
        #TableName: ${self:custom.dynamoTableName}-tmp-01 # Uncomment this when changing the table schema
        # Set to on-demand capacity mode
        BillingMode: PAY_PER_REQUEST
        # Prevent accidental deletion
        #DeletionProtectionEnabled: true
        #
        # Set up our attribute definitions for the Partition Key and Sort Key.
        #
        AttributeDefinitions:
          - AttributeName: Date
            AttributeType: S
          - AttributeName: DateTime
            AttributeType: S
          - AttributeName: Hour
            AttributeType: S
        KeySchema:
          - AttributeName: Date
            KeyType: HASH
          - AttributeName: DateTime
            KeyType: RANGE
        GlobalSecondaryIndexes:
          - IndexName: Hour
            KeySchema:
              - AttributeName: Date
                KeyType: HASH
              - AttributeName: Hour
                KeyType: RANGE
            Projection:
              ProjectionType: "ALL"
        PointInTimeRecoverySpecification:
          PointInTimeRecoveryEnabled: true
        TimeToLiveSpecification:
          AttributeName: "ttl"
          Enabled: true
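    #
    # For illustration only: a rough sketch of how a handler might query the
    # "Hour" GSI above with boto3. The table name and key value formats here
    # are assumptions, not lifted from the actual api/ code.
    #
    #   import boto3
    #   from boto3.dynamodb.conditions import Key
    #
    #   table = boto3.resource("dynamodb").Table("peco-outages-dev")
    #   rows = table.query(
    #       IndexName="Hour",
    #       KeyConditionExpression=Key("Date").eq("2024-01-15") & Key("Hour").eq("13"),
    #   )["Items"]
    #
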
    #
    # This table holds the raw data that we got back from PECO.
    # This table won't be used by the website, but rather for archival purposes.
    #
    pecoOutagesTableArchive:
      Type: AWS::DynamoDB::Table
      Properties:
        TableName: ${self:custom.dynamoTableNameArchive}
        # Set to on-demand capacity mode
        BillingMode: PAY_PER_REQUEST
        # Prevent accidental deletion
        #DeletionProtectionEnabled: true
        #
        # We're just using a single key for the archive.
        #
        AttributeDefinitions:
          - AttributeName: DateTime
            AttributeType: S
        KeySchema:
          - AttributeName: DateTime
            KeyType: HASH

    #
    # Create our bucket. Public access is blocked by default.
    #
    AssetsBucket:
      Type: AWS::S3::Bucket
      Properties:
        BucketName: dmuth-peco-${sls:stage}

plugins:
  - serverless-python-requirements
  #
  # https://www.serverless.com/plugins/serverless-plugin-resource-tagging
  #
  - serverless-plugin-resource-tagging
  #
  # Used to sync our static assets to S3.
  #
  # https://www.serverless.com/plugins/serverless-s3-sync
  #
  - serverless-s3-sync

custom:
  stage: "${sls:stage}"
  #
  # This is the source of truth for Dynamo table names.
  #
  dynamoTableName: "peco-outages-${sls:stage}"
  dynamoTableNameArchive: "peco-outages-archive-${sls:stage}"
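  #
  # For example, deploying with "--stage dev" resolves these to
  # "peco-outages-dev" and "peco-outages-archive-dev".
  #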
  s3Sync:
    #
    # Don't deploy when doing "sls deploy"; require "sls s3sync" instead.
    #
    noSync: true
    buckets:
      - bucketName: dmuth-peco-${sls:stage}
        localDir: hugo/public
        deleteRemoved: true
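  #
  # With noSync set, the assumed workflow is roughly:
  #
  #   sls deploy --stage dev    # deploy the stack, skipping the S3 sync
  #   sls s3sync --stage dev    # push hugo/public up to the assets bucket
  #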