
Commit b839b9c

Add opt-in varnish cache alongside standard tileserver
1 parent 24db45d commit b839b9c

File tree

7 files changed: +413 −11 lines

infra/acm.tf (+1)

@@ -19,6 +19,7 @@ resource "aws_acm_certificate" "tileserver" {
   # Always create the tile server certificate, no matter what the frontend domain
   count             = 1
   domain_name       = local.tileserver_dns_alias
+  subject_alternative_names = [local.tile_cache_dns_alias, local.mml_cache_dns_alias]
   validation_method = "DNS"
   tags              = merge(local.default_tags, { Name = "${var.prefix}-tileserver-acm" })
 }
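Once the updated certificate has been issued and validated, the extra names can be checked against the live endpoint. A minimal sketch with openssl, assuming the hostname below stands in for whatever local.tileserver_dns_alias points to:

openssl s_client -connect tileserver.example.com:443 -servername tileserver.example.com < /dev/null 2>/dev/null \
  | openssl x509 -noout -text | grep -A1 'Subject Alternative Name'
# both cache aliases should be listed alongside the tileserver name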

infra/cloudwatch.tf (+6)

@@ -34,6 +34,12 @@ resource "aws_cloudwatch_log_group" "tarmo_tileserver" {
   tags              = local.default_tags
 }
 
+resource "aws_cloudwatch_log_group" "tarmo_tilecache" {
+  name              = "/aws/ecs/${aws_ecs_task_definition.tileserv_cache.family}"
+  retention_in_days = 30
+  tags              = local.default_tags
+}
+
 resource "aws_cloudwatch_event_rule" "lambda_lipas" {
   name        = "${var.prefix}-lambda-lipas-update"
   description = "Run lipas import every night"
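With the log group in place, output from the cache containers can be followed from the command line. A sketch assuming AWS CLI v2 and a var.prefix value of "tarmo":

aws logs tail /aws/ecs/tarmo-tileserv_cache --follow --since 15m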

infra/domain.tf (+26)

@@ -71,6 +71,32 @@ resource "aws_route53_record" "tileserver" {
   ttl     = "60"
 }
 
+resource "aws_route53_record" "tilecache" {
+  count = 1
+
+  zone_id = data.aws_route53_zone.zone[0].id
+  name    = local.tile_cache_dns_alias
+  type    = "CNAME"
+
+  records = [
+    aws_lb.tileserver.dns_name
+  ]
+  ttl = "60"
+}
+
+resource "aws_route53_record" "mmlcache" {
+  count = 1
+
+  zone_id = data.aws_route53_zone.zone[0].id
+  name    = local.mml_cache_dns_alias
+  type    = "CNAME"
+
+  records = [
+    aws_lb.tileserver.dns_name
+  ]
+  ttl = "60"
+}
+
 resource "aws_route53_record" "tileserver_validation" {
 
   for_each = {
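Once the records propagate, both aliases should resolve through to the shared load balancer. A quick sanity check with dig, using placeholder hostnames for the two cache aliases:

dig +short CNAME tiles-cache.example.com
dig +short CNAME mml-cache.example.com
# both should return the aws_lb.tileserver DNS name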

infra/ecs.tf (+240)

@@ -63,6 +63,219 @@ resource "aws_ecs_task_definition" "pg_tileserv" {
   tags = merge(local.default_tags, {Name = "${var.prefix}-pg_tileserv-definition"})
 }
 
+# Now this is a handful. We need varnish, plus a varnish configuration container, plus
+# ssl tunnel containers from varnish to both the tarmo and mml tile servers, since
+# varnish doesn't know how to cache https requests.
+resource "aws_ecs_task_definition" "tileserv_cache" {
+  family                   = "${var.prefix}-tileserv_cache"
+  network_mode             = "awsvpc"
+  requires_compatibilities = ["FARGATE"]
+  # This is the IAM role that the docker daemon will use, e.g. for pulling the image from ECR (AWS's own docker repository)
+  execution_role_arn = aws_iam_role.backend-task-execution.arn
+  # If the containers in the task definition need to access AWS services, we'd specify a role via task_role_arn.
+  # task_role_arn = ...
+  cpu    = var.varnish_cpu
+  memory = var.varnish_memory
+  # https://kichik.com/2020/09/10/mounting-configuration-files-in-fargate/
+  volume {
+    name = "varnish_configuration"
+  }
+
+  container_definitions = jsonencode(
+    [
+      {
+        name  = "tarmo-stunnel-sidecar"
+        image = "tstrohmeier/stunnel-client"
+        logConfiguration = {
+          logDriver = "awslogs"
+          options = {
+            awslogs-group         = "/aws/ecs/${var.prefix}-tileserv_cache"
+            awslogs-region        = var.AWS_REGION_NAME
+            awslogs-stream-prefix = "ecs"
+          }
+        }
+        portMappings = [
+          {
+            hostPort = 8888
+            # This port is the same that the contained application also uses
+            containerPort = 8888
+            protocol      = "tcp"
+          }
+        ]
+        environment = [
+          {
+            name  = "ACCEPT"
+            value = "8888"
+          },
+          {
+            name  = "CONNECT"
+            value = "${local.tileserver_dns_alias}:443"
+          }
+        ]
+      },
+      {
+        name  = "mml-stunnel-sidecar"
+        image = "tstrohmeier/stunnel-client"
+        logConfiguration = {
+          logDriver = "awslogs"
+          options = {
+            awslogs-group         = "/aws/ecs/${var.prefix}-tileserv_cache"
+            awslogs-region        = var.AWS_REGION_NAME
+            awslogs-stream-prefix = "ecs"
+          }
+        }
+        portMappings = [
+          {
+            hostPort = 8889
+            # This port is the same that the contained application also uses
+            containerPort = 8889
+            protocol      = "tcp"
+          }
+        ]
+        environment = [
+          {
+            name  = "ACCEPT"
+            value = "8889"
+          },
+          {
+            name  = "CONNECT"
+            value = "avoin-karttakuva.maanmittauslaitos.fi:443"
+          }
+        ]
+      },
+      {
+        # https://kichik.com/2020/09/10/mounting-configuration-files-in-fargate/
+        name  = "varnish-configuration-sidecar"
+        image = "bash"
+        DependsOn = [
+          {
+            condition     = "START"
+            containerName = "tarmo-stunnel-sidecar"
+          },
+          {
+            condition     = "START"
+            containerName = "mml-stunnel-sidecar"
+          }
+        ]
+        mountPoints = [
+          {
+            containerPath = "/etc/varnish"
+            sourceVolume  = "varnish_configuration"
+          }
+        ]
+        essential = false
+        logConfiguration = {
+          logDriver = "awslogs"
+          options = {
+            awslogs-group         = "/aws/ecs/${var.prefix}-tileserv_cache"
+            awslogs-region        = var.AWS_REGION_NAME
+            awslogs-stream-prefix = "ecs"
+          }
+        }
+        command = ["-c", "echo $VARNISH_CONF | base64 -d - | tee /etc/varnish/default.vcl"]
+        environment = [
+          {
+            name = "VARNISH_CONF"
+            value = base64encode(
+              <<EOF
+# specify the VCL syntax version to use
+vcl 4.1;
+
+backend tarmo {
+  .host = "localhost";
+  .port = "8888";
+}
+
+backend mml {
+  .host = "localhost";
+  .port = "8889";
+}
+
+sub vcl_recv {
+  # ping endpoint for testing
+  if (req.url ~ "/ping$") {
+    return (synth(700, "Pong"));
+  }
+  if (req.http.host ~ "${local.tile_cache_dns_alias}") {
+    set req.backend_hint = tarmo;
+    set req.http.host = "${local.tileserver_dns_alias}";
+  }
+  if (req.http.host ~ "${local.mml_cache_dns_alias}") {
+    set req.backend_hint = mml;
+    set req.http.host = "avoin-karttakuva.maanmittauslaitos.fi";
+  }
+}
+
+sub vcl_synth {
+  # respond with HTTP 200
+  if (resp.status == 700) {
+    set resp.status = 200;
+    set resp.http.Content-Type = "text/plain";
+    synthetic({"Pong"});
+    return (deliver);
+  }
+}
+
+sub vcl_backend_response {
+  set beresp.ttl = 1h;
+  set beresp.grace = 2h;
+}
+
+EOF
+            )
+          }
+        ]
+      },
+      {
+        name   = "varnish-from-dockerhub"
+        image  = var.varnish_image
+        cpu    = var.varnish_cpu
+        memory = var.varnish_memory
+        DependsOn = [
+          {
+            condition     = "COMPLETE"
+            containerName = "varnish-configuration-sidecar"
+          }
+        ]
+        mountPoints = [
+          {
+            containerPath = "/etc/varnish"
+            sourceVolume  = "varnish_configuration"
+            readOnly      = true
+          }
+        ]
+        volumesFrom = []
+        logConfiguration = {
+          logDriver = "awslogs"
+          options = {
+            awslogs-group         = "/aws/ecs/${var.prefix}-tileserv_cache"
+            awslogs-region        = var.AWS_REGION_NAME
+            awslogs-stream-prefix = "ecs"
+          }
+        }
+        essential = true
+        portMappings = [
+          {
+            hostPort = var.varnish_port
+            # This port is the same that the contained application also uses
+            containerPort = var.varnish_port
+            protocol      = "tcp"
+          }
+        ]
+        # With Fargate, we use awsvpc networking, which will reserve an ENI (Elastic Network Interface) and attach it to
+        # our VPC
+        networkMode = "awsvpc"
+        environment = [
+          {
+            name  = "VARNISH_SIZE"
+            value = "4G"
+          }
+        ]
+      }
+  ])
+  tags = merge(local.default_tags, {Name = "${var.prefix}-tileserv_cache-definition"})
+}
+
 # Service can also be attached to a load balancer for HTTP, TCP or UDP traffic
 resource "aws_ecs_service" "pg_tileserv" {
   name            = "${var.prefix}_pg_tileserv"
@@ -90,3 +303,30 @@ resource "aws_ecs_service" "pg_tileserv" {
 
   tags = merge(local.default_tags, {Name = "${var.prefix}-pg_tileserv-service"})
 }
+
+resource "aws_ecs_service" "varnish" {
+  name            = "${var.prefix}_varnish"
+  cluster         = aws_ecs_cluster.pg_tileserv.id
+  task_definition = aws_ecs_task_definition.tileserv_cache.arn
+  desired_count   = 1
+
+  # We run containers with the Fargate launch type. The other alternative is EC2, in which case we'd provision EC2
+  # instances and attach them to the cluster.
+  launch_type = "FARGATE"
+
+  load_balancer {
+    target_group_arn = aws_lb_target_group.tilecache.arn
+    container_name   = "varnish-from-dockerhub"
+    container_port   = var.varnish_port
+  }
+
+  network_configuration {
+    # Fargate uses awsvpc networking, so we tell here into what subnets to attach the service
+    subnets = aws_subnet.public.*.id
+    # Ditto for security groups
+    security_groups  = [aws_security_group.backend.id]
+    assign_public_ip = true
+  }
+
+  tags = merge(local.default_tags, {Name = "${var.prefix}-varnish-service"})
+}
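A rough smoke test of the whole chain once the service is running; the hostname and tile path are placeholders, and the Age header comes from Varnish's default delivery behaviour rather than anything set in this VCL:

# synthetic ping, answered by vcl_synth without touching any backend
curl -i https://tiles-cache.example.com/ping

# fetch the same (placeholder) tile URL twice; on the second response
# Age should be greater than zero, i.e. the object came from the cache
TILE_URL="https://tiles-cache.example.com/some/tile/path"
curl -sI "$TILE_URL" | grep -i '^age'
curl -sI "$TILE_URL" | grep -i '^age'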

infra/load_balancer.tf (+48 −1)

@@ -42,7 +42,37 @@ resource "aws_lb_target_group" "tileserver" {
   ]
 
   tags = merge(local.default_tags, {
-    Name = "${var.prefix}-lb-target-group"
+    Name = "${var.prefix}-lb-tileserver-target-group"
+  })
+
+}
+
+resource "aws_lb_target_group" "tilecache" {
+  name                 = "${var.prefix}-tilecache"
+  port                 = var.varnish_port
+  protocol             = "HTTP"
+  deregistration_delay = 30
+  vpc_id               = aws_vpc.main.id
+  target_type          = "ip"
+
+  health_check {
+    enabled             = true
+    healthy_threshold   = 2
+    interval            = 30
+    matcher             = "200"
+    path                = "/ping"
+    port                = "traffic-port"
+    protocol            = "HTTP"
+    timeout             = 5
+    unhealthy_threshold = 10
+  }
+
+  depends_on = [
+    aws_lb.tileserver
+  ]
+
+  tags = merge(local.default_tags, {
+    Name = "${var.prefix}-lb-tilecache-target-group"
   })
 
 }
@@ -63,6 +93,23 @@ resource "aws_lb_listener" "tileserver" {
   tags = merge(local.default_tags, { Name = "${var.prefix}-lb-default-listener" })
 }
 
+# The extra rule will point cache requests to the cache target group
+resource "aws_lb_listener_rule" "tilecache" {
+  listener_arn = aws_lb_listener.tileserver.arn
+  priority     = 100
+
+  action {
+    type             = "forward"
+    target_group_arn = aws_lb_target_group.tilecache.arn
+  }
+
+  condition {
+    host_header {
+      values = [local.tile_cache_dns_alias, local.mml_cache_dns_alias]
+    }
+  }
+}
+
 resource "aws_lb_listener" "http" {
   # we want a listener even if we don't use route53
   count = 1
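Because routing to the cache target group is decided purely on the Host header of the existing HTTPS listener, the rule can be exercised even before the CNAME records exist by pinning the cache alias to the load balancer address in curl. The names below are placeholders:

LB_IP=$(dig +short my-tileserver-lb-1234567890.eu-central-1.elb.amazonaws.com | head -n1)
curl -i --resolve tiles-cache.example.com:443:$LB_IP https://tiles-cache.example.com/ping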
