package main

import (
	"bytes"
	"context"
	"os"
	"os/signal"
	"syscall"
	"text/template"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
	"github.com/pkg/errors"
	log "github.com/sirupsen/logrus"
)
// TODO LIST:
// * support generic templates (allows using any reverse proxy)
// * reconcile the proxy state when the manager comes up by checking that
// all the configs are in the right place
// * support updates
// * support configuring/changing default values
// * optimize by caching
// * auto-generate proxy service and network, instead of relying on them
// already existing.
// * allow non-standard ports; right now we default to port 80
// * batch updates to the proxy, so that creating a lot of services in a short
// time period is done more efficiently
// * handle more errors and edge cases
// * write Dockerfile
// I'm cheating and baking this template into the source file, because then
// it gets compiled into the binary. In a more complete and less
// proof-of-concept implementation, we would read the template from a file
// (which the user could customize in a child image!); a sketch of that
// follows the template below.
const nginxConfigTemplate = `server {
# This config file was generated by ProxyManager
server_name {{.ServerName}};
{{/* TODO(dperny): what to do w/ access_log directive? */}}
location / {
proxy_pass http://{{.ServiceName}};
}
}`
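
// A minimal sketch of reading the template from a file, as mentioned above.
// The function below is hypothetical and not wired into the program; it
// assumes os.ReadFile is available (Go 1.16+) and that a template path is
// supplied by the caller.
func loadConfigTemplate(path string) (*template.Template, error) {
	// read the whole template file into memory
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	// parse it the same way as the baked-in template above
	return template.New("config").Parse(string(data))
}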
const (
configServerNameLabel = "proxymanager_config_server_name"
defaultConfigFilePrefix = "proxymanager_config_"
defaultProxyService = "proxy"
defaultProxyNetwork = "proxynet"
nginxConfigLocation = "/etc/nginx/conf.d/"
configServiceIDLabel = "proxyServiceId"
)
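
// A user opts a service into proxying by attaching it to the proxy network,
// and can override the generated server_name with the label above. For
// example (the service name, image, and domain here are hypothetical):
//
//	docker service create --name web \
//	    --network proxynet \
//	    --label proxymanager_config_server_name=example.com \
//	    nginx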
type ServerConfig struct {
ServerName string // the domain name to respond as
ServiceName string // the name of the service responding
}
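
// As an example, executing nginxConfigTemplate with
// ServerConfig{ServerName: "example.com", ServiceName: "web"} (hypothetical
// values) renders, modulo whitespace left by the template comment:
//
//	server {
//	    # This config file was generated by ProxyManager
//	    server_name example.com;
//	    location / {
//	        proxy_pass http://web;
//	    }
//	}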
// ProxyManager is a struct that holds data for the proxy manager
type ProxyManager struct {
Cli *client.Client
ProxyService string // the name of the service acting as the proxy
ProxyNetwork string // the network that proxied services are connected to
ConfigTemplate *template.Template
proxyNwId string // the ID of the proxy network
}
func NewProxyManager() (*ProxyManager, error) {
t := template.Must(template.New("config").Parse(nginxConfigTemplate))
cli, err := client.NewEnvClient()
if err != nil {
return nil, err
}
return &ProxyManager{
Cli: cli,
ProxyService: defaultProxyService,
ProxyNetwork: defaultProxyNetwork,
ConfigTemplate: t,
}, nil
}
func (p *ProxyManager) IsServiceManaged(service swarm.Service) bool {
	log.Debugf("Checking if service has nw %v", p.ProxyNetwork)
	for _, network := range service.Spec.TaskTemplate.Networks {
		if network.Target == p.ProxyNetwork {
			log.Debugf("Service has nw %v", network.Target)
			return true
		}
	}
	return false
}
func (p *ProxyManager) CreateConfig(ctx context.Context, service swarm.Service) error {
log.Debug("Creating config for service")
// Generate the service config
// set the server name to be the service name
serverConfig := ServerConfig{
ServerName: service.Spec.Name,
ServiceName: service.Spec.Name,
}
// if the override is set, use it as the name instead of the server name
if name, ok := service.Spec.Labels[configServerNameLabel]; ok {
log.Debugf("Overriding service server name with %v", name)
serverConfig.ServerName = name
}
	// create a buffer to hold the generated config and render the template
	// into it
	buf := &bytes.Buffer{}
	if err := p.ConfigTemplate.Execute(buf, serverConfig); err != nil {
		return err
	}
	log.Debug(buf.String())
// then create a config spec with the newly created config
configSpec := swarm.ConfigSpec{
Annotations: swarm.Annotations{
Name: defaultConfigFilePrefix + serverConfig.ServiceName,
Labels: map[string]string{
// add the ID of this managed service to the config's labels
// helps with removals later
configServiceIDLabel: service.ID,
},
},
Data: buf.Bytes(),
}
// create the config
log.Debugf("Creating new config %v", configSpec.Name)
resp, err := p.Cli.ConfigCreate(ctx, configSpec)
if err != nil {
return err
}
	// create the config reference
	ref := &swarm.ConfigReference{
		File: &swarm.ConfigReferenceFileTarget{
			Name: nginxConfigLocation + configSpec.Name + ".conf",
			// UID and GID must be set explicitly; if they're left unset,
			// the service update will fail. It's unclear why sensible
			// defaults aren't assumed.
			UID: "0",
			GID: "0",
			// 0444 (octal, not decimal 444): world-readable
			Mode: 0444,
		},
		ConfigID:   resp.ID,
		ConfigName: configSpec.Name,
	}
log.Debug("Updating proxy service")
// now update the proxy service to include this config
proxy, _, err := p.Cli.ServiceInspectWithRaw(ctx, p.ProxyService, types.ServiceInspectOptions{})
if err != nil {
return err
}
// add the config to the service's configs
proxy.Spec.TaskTemplate.ContainerSpec.Configs = append(proxy.Spec.TaskTemplate.ContainerSpec.Configs, ref)
_, err = p.Cli.ServiceUpdate(ctx, proxy.ID, proxy.Version, proxy.Spec, types.ServiceUpdateOptions{})
if err != nil {
return err
}
log.Infof("Added config for %v", service.ID)
return nil
}
// RemoveConfig handles events where a proxied service is removed.
func (p *ProxyManager) RemoveConfig(ctx context.Context, actor events.Actor) error {
log.Debug("Checking for remove")
	// this function is kinda tricky: the service has already been removed,
	// so all we have left of it is its ID
// get the id of the service. this is just for programmer clarity, in case
// the implementation of this function changes later
serviceID := actor.ID
// First, list all configs
configs, err := p.Cli.ConfigList(ctx, types.ConfigListOptions{})
if err != nil {
return err
}
for _, config := range configs {
if id := config.Spec.Labels[configServiceIDLabel]; id == serviceID {
log.Debugf("Found config for %v", serviceID)
// we found a config for this service
// get the proxy service and remove this config from it
service, _, err := p.Cli.ServiceInspectWithRaw(ctx, p.ProxyService, types.ServiceInspectOptions{})
if err != nil {
return err
}
// `i` is the location of the config in the proxy service's
// config references. if it's still -1 after this loop, that means
// the proxy service doesn't have this config
i := -1
// take the configs array as a variable so we don't have to type the whole chain
refs := service.Spec.TaskTemplate.ContainerSpec.Configs
for j, configRef := range refs {
if configRef.ConfigID == config.ID {
i = j
}
}
if i >= 0 {
log.Debug("Removing config from proxy")
// from github.com/golang/go/wiki/SliceTricks
// delete without preserving order (memory safe)
// replace the target element with the last element
refs[i] = refs[len(refs)-1]
// nil the last element
refs[len(refs)-1] = nil
				// cut off the last element
refs = refs[:len(refs)-1]
// now update the service
service.Spec.TaskTemplate.ContainerSpec.Configs = refs
_, err = p.Cli.ServiceUpdate(ctx, service.ID, service.Version, service.Spec, types.ServiceUpdateOptions{})
if err != nil {
// TODO(dperny) handle case where the service has been updated
// in the meantime and the version is old
return err
}
			} else {
				log.Infof("A config for service %v exists, but is not in the proxy's configs", serviceID)
			}
// now delete the old config
// TODO(dperny) what happens if we try to remove the config while
// the service update removing the config is in progress?
log.Infof("Removing config %v", config.Spec.Name)
err = p.Cli.ConfigRemove(ctx, config.ID)
if err != nil {
return err
}
return nil
}
}
log.Infof("Service %v had no config to remove", serviceID)
return nil
}
func (p *ProxyManager) HandleEvent(ctx context.Context, event events.Message) {
// check that the service belongs to our network and is managed by us
var err error
switch event.Action {
case "create":
var service swarm.Service
service, _, err = p.Cli.ServiceInspectWithRaw(ctx, event.Actor.ID, types.ServiceInspectOptions{})
if err != nil {
break
}
		if !p.IsServiceManaged(service) {
			// this service isn't attached to the proxy network; skip it
			log.Infof("Service %v is not managed", service.Spec.Name)
			break
		}
err = p.CreateConfig(ctx, service)
case "update":
err = errors.New("service updates not supported")
case "remove":
err = p.RemoveConfig(ctx, event.Actor)
}
if err != nil {
log.Warnf("Event handling error: %v", err)
}
}
// Reconcile checks the cluster state to see if all services are wired up
// correctly. It's called once at the start of the program.
func (p *ProxyManager) Reconcile(ctx context.Context) error {
log.Info("Reconciling proxy configs with cluster state")
log.Error("Reconciling not implemented!")
// TODO(dperny) implement
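	// A sketch of one possible approach (not implemented): list services
	// attached to the proxy network, list existing proxymanager configs,
	// create configs for managed services that lack one, and remove
	// configs whose service no longer exists.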
return nil
}
func (p *ProxyManager) Run(ctx context.Context) error {
// replace the network name with id
nw, err := p.Cli.NetworkInspect(ctx, p.ProxyNetwork, false)
if err != nil {
return err
}
log.Debugf("Setting ProxyNetwork to %v", nw.ID)
p.ProxyNetwork = nw.ID
// Reconcile our current state
if err := p.Reconcile(ctx); err != nil {
return err
}
// subscribe to service events
filter := filters.NewArgs()
filter.Add("type", events.ServiceEventType)
opts := types.EventsOptions{Filters: filter}
log.Debug("Subscribing to Docker events")
	// name the channels eventC and errC so they don't shadow the imported
	// events and errors packages
	eventC, errC := p.Cli.Events(ctx, opts)
	for {
		select {
		case <-ctx.Done():
			// the context was canceled; shut down
			// TODO(dperny) check if I need to perform any cleanup
			log.Debug("Context done, returning")
			return nil
		case event := <-eventC:
			// handle incoming events
			log.Debug("Received Docker event, calling handler")
			// TODO(dperny) Can I safely do HandleEvent async?
			p.HandleEvent(ctx, event)
		case err := <-errC:
			// handle event stream errors
			log.Error("Received event stream error")
			// TODO(dperny) Restart event stream
			return err
		}
}
}
func main() {
log.SetLevel(log.DebugLevel)
log.Info("Running main function")
ctx, cancel := context.WithCancel(context.Background())
log.Info("Registering Signal Handlers")
ch := make(chan os.Signal, 1)
signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM, os.Interrupt)
defer func() {
signal.Stop(ch)
cancel()
}()
go func() {
select {
case <-ch:
log.Info("Recieved signal, canceling context")
cancel()
case <-ctx.Done():
}
}()
p, err := NewProxyManager()
if err != nil {
log.Fatal(err.Error())
}
if err := p.Run(ctx); err != nil {
log.Fatal(err.Error())
}
log.Info("Exited cleanly")
}