forked from ProtonMail/gluon
-
Notifications
You must be signed in to change notification settings - Fork 0
/
server.go
382 lines (292 loc) · 9.29 KB
/
server.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
// Package gluon implements an IMAP4rev1 (+ extensions) mailserver.
package gluon
import (
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"net"
"sync"
"time"
"github.com/ProtonMail/gluon/async"
"github.com/ProtonMail/gluon/connector"
"github.com/ProtonMail/gluon/events"
"github.com/ProtonMail/gluon/imap"
"github.com/ProtonMail/gluon/internal/backend"
"github.com/ProtonMail/gluon/internal/contexts"
"github.com/ProtonMail/gluon/internal/session"
"github.com/ProtonMail/gluon/logging"
"github.com/ProtonMail/gluon/profiling"
"github.com/ProtonMail/gluon/reporter"
"github.com/ProtonMail/gluon/store"
"github.com/ProtonMail/gluon/version"
"github.com/ProtonMail/gluon/watcher"
_ "github.com/mattn/go-sqlite3"
"github.com/sirupsen/logrus"
)
// Server is the gluon IMAP server.
type Server struct {
	// dataDir is the directory in which backend files should be stored.
	dataDir string

	// databaseDir is the directory in which database files should be stored.
	databaseDir string

	// backend provides the server with access to the IMAP backend.
	backend *backend.Backend

	// sessions holds all active IMAP sessions, keyed by session ID.
	sessions map[int]*session.Session
	// sessionsLock guards concurrent access to the sessions map.
	sessionsLock sync.RWMutex

	// serveErrCh collects errors encountered while serving.
	serveErrCh *async.QueuedChannel[error]

	// serveDoneCh is used to stop the server; closed by Close.
	serveDoneCh chan struct{}

	// serveWG keeps track of serving goroutines.
	serveWG async.WaitGroup

	// nextID holds the ID that will be given to the next session.
	nextID int
	// nextIDLock guards nextID.
	nextIDLock sync.Mutex

	// inLogger and outLogger are used to log incoming and outgoing IMAP communications.
	inLogger, outLogger io.Writer

	// tlsConfig is used to serve over TLS; nil means plain connections.
	tlsConfig *tls.Config

	// watchers holds streams of events.
	watchers []*watcher.Watcher[events.Event]
	// watchersLock guards the watchers slice.
	watchersLock sync.RWMutex

	// storeBuilder builds message stores.
	storeBuilder store.Builder

	// cmdExecProfBuilder builds command profiling collectors.
	cmdExecProfBuilder profiling.CmdProfilerBuilder

	// versionInfo holds info about the Gluon version.
	versionInfo version.Info

	// reporter is used to report errors to things like Sentry.
	reporter reporter.Reporter

	// idleBulkTime to control how often IDLE responses are sent. 0 means
	// immediate response with no response merging.
	idleBulkTime time.Duration

	// disableParallelism indicates whether the server is allowed to parallelize certain IMAP commands.
	disableParallelism bool

	// uidValidityGenerator generates UIDVALIDITY values for mailboxes.
	uidValidityGenerator imap.UIDValidityGenerator

	// panicHandler is invoked when a server goroutine panics.
	panicHandler async.PanicHandler
}
// New creates a new server with the given options.
func New(withOpt ...Option) (*Server, error) {
	b, err := newBuilder()
	if err != nil {
		return nil, err
	}

	// Apply each option to the builder in the order given.
	for _, o := range withOpt {
		o.config(b)
	}

	return b.build()
}
// AddUser creates a new user and generates new unique ID for this user.
// If the user already exists, an error is returned (use LoadUser instead).
func (s *Server) AddUser(ctx context.Context, conn connector.Connector, passphrase []byte) (string, error) {
	userID := s.backend.NewUserID()

	isNew, err := s.LoadUser(ctx, conn, userID, passphrase)
	if err != nil {
		return "", err
	}

	// A freshly generated ID must not collide with an existing user.
	if !isNew {
		return "", errors.New("user already exists")
	}

	return userID, nil
}
// LoadUser adds an existing user using a previously created unique user ID.
// It reports whether the user was newly created (true) or already existed (false).
func (s *Server) LoadUser(ctx context.Context, conn connector.Connector, userID string, passphrase []byte) (bool, error) {
	ctx = reporter.NewContextWithReporter(ctx, s.reporter)

	isNew, err := s.backend.AddUser(ctx, userID, conn, passphrase, s.uidValidityGenerator)
	if err != nil {
		return false, fmt.Errorf("failed to add user: %w", err)
	}

	counts, err := s.backend.GetMailboxMessageCounts(ctx, userID)
	if err != nil {
		return false, fmt.Errorf("failed to get counts: %w", err)
	}

	// Notify watchers that the user is now available.
	s.publish(events.UserAdded{
		UserID: userID,
		Counts: counts,
	})

	return isNew, nil
}
// RemoveUser removes a user from gluon.
func (s *Server) RemoveUser(ctx context.Context, userID string, removeFiles bool) error {
	ctx = reporter.NewContextWithReporter(ctx, s.reporter)

	err := s.backend.RemoveUser(ctx, userID, removeFiles)
	if err != nil {
		return err
	}

	// Notify watchers that the user is gone.
	s.publish(events.UserRemoved{UserID: userID})

	return nil
}
// AddWatcher adds a new watcher which watches events of the given types.
// If no types are specified, the watcher watches all events.
// The returned channel delivers matching events until the server is closed.
func (s *Server) AddWatcher(ofType ...events.Event) <-chan events.Event {
	s.watchersLock.Lock()
	defer s.watchersLock.Unlock()

	// Use a short local name so the imported watcher package is not shadowed.
	w := watcher.New(s.panicHandler, ofType...)

	s.watchers = append(s.watchers, w)

	return w.GetChannel()
}
// Serve serves connections accepted from the given listener.
// It stops serving when the context is canceled, the listener is closed, or the server is closed.
func (s *Server) Serve(ctx context.Context, l net.Listener) error {
	ctx = reporter.NewContextWithReporter(ctx, s.reporter)
	ctx = contexts.NewDisableParallelismCtx(ctx, s.disableParallelism)

	s.publish(events.ListenerAdded{Addr: l.Addr()})

	// Serving happens on a tracked goroutine; Serve itself returns immediately.
	s.serveWG.Go(func() {
		defer s.publish(events.ListenerRemoved{Addr: l.Addr()})

		s.serve(ctx, newConnCh(l, s.panicHandler))
	})

	return nil
}
// serve handles incoming connections and starts a new goroutine for each.
// It returns when the context is canceled, the server is stopped, or connCh
// is closed; before returning it waits for all connection goroutines.
func (s *Server) serve(ctx context.Context, connCh <-chan net.Conn) {
	connWG := async.MakeWaitGroup(s.panicHandler)

	// Don't return while connection handlers are still running; this lets
	// Close rely on serveWG.Wait() to cover every active session.
	defer connWG.Wait()

	for {
		select {
		case <-ctx.Done():
			logrus.Debug("Stopping serve, context canceled")
			return

		case <-s.serveDoneCh:
			logrus.Debug("Stopping serve, server stopped")
			return

		case conn, ok := <-connCh:
			if !ok {
				logrus.Debug("Stopping serve, listener closed")
				return
			}

			connWG.Go(func() {
				// Close the connection when its session ends. Deferring here
				// (not in the accept loop, where defers would only run when
				// serve returns) ties the conn's lifetime to its session.
				defer conn.Close()

				session, sessionID := s.addSession(ctx, conn)
				defer s.removeSession(sessionID)

				logging.DoAnnotated(ctx, func(ctx context.Context) {
					if err := session.Serve(ctx); err != nil {
						// A closed connection is the normal shutdown path;
						// only surface other errors.
						if !errors.Is(err, net.ErrClosed) {
							s.serveErrCh.Enqueue(err)
						}
					}
				}, logging.Labels{
					"Action":    "Serve",
					"SessionID": sessionID,
				})
			})
		}
	}
}
// GetErrorCh returns the channel on which serving errors are delivered.
func (s *Server) GetErrorCh() <-chan error {
	errCh := s.serveErrCh.GetChannel()

	return errCh
}
// GetVersionInfo returns information about the Gluon version in use.
func (s *Server) GetVersionInfo() version.Info {
	info := s.versionInfo

	return info
}
// GetDataPath returns the directory in which gluon stores its backend data.
func (s *Server) GetDataPath() string {
	dir := s.dataDir

	return dir
}
// GetDatabasePath returns the path in which gluon stores its database files.
func (s *Server) GetDatabasePath() string {
	return s.databaseDir
}
// Close closes the server: it stops serving, waits for the serving
// goroutines to finish, closes the backend, and tears down the error
// channel and all event watchers.
func (s *Server) Close(ctx context.Context) error {
	ctx = reporter.NewContextWithReporter(ctx, s.reporter)

	// Signal every serve loop to stop, then wait for them to drain.
	close(s.serveDoneCh)
	s.serveWG.Wait()

	if err := s.backend.Close(ctx); err != nil {
		return fmt.Errorf("failed to close backend: %w", err)
	}

	s.serveErrCh.Close()

	// Shut down and drop all watchers under the write lock.
	s.watchersLock.Lock()
	defer s.watchersLock.Unlock()

	for _, w := range s.watchers {
		w.Close()
	}

	s.watchers = nil

	return nil
}
// addSession creates a session for the given connection, registers it under a
// fresh session ID, publishes a SessionAdded event, and returns both.
func (s *Server) addSession(ctx context.Context, conn net.Conn) (*session.Session, int) {
	s.sessionsLock.Lock()
	defer s.sessionsLock.Unlock()

	sessionID := s.getNextID()

	// Build and configure the session via a local variable rather than
	// repeatedly looking it up in the sessions map.
	sess := session.New(conn, s.backend, sessionID, s.versionInfo, s.cmdExecProfBuilder, s.newEventCh(ctx), s.idleBulkTime, s.panicHandler)

	if s.tlsConfig != nil {
		sess.SetTLSConfig(s.tlsConfig)
	}

	if s.inLogger != nil {
		sess.SetIncomingLogger(s.inLogger)
	}

	if s.outLogger != nil {
		sess.SetOutgoingLogger(s.outLogger)
	}

	s.sessions[sessionID] = sess

	s.publish(events.SessionAdded{
		SessionID:  sessionID,
		LocalAddr:  conn.LocalAddr(),
		RemoteAddr: conn.RemoteAddr(),
	})

	return sess, sessionID
}
// removeSession drops the session with the given ID from the session map and
// publishes a SessionRemoved event.
func (s *Server) removeSession(sessionID int) {
	s.sessionsLock.Lock()
	defer s.sessionsLock.Unlock()

	delete(s.sessions, sessionID)

	s.publish(events.SessionRemoved{SessionID: sessionID})
}
// getNextID reserves and returns the next unused session ID.
func (s *Server) getNextID() int {
	s.nextIDLock.Lock()
	defer s.nextIDLock.Unlock()

	id := s.nextID + 1
	s.nextID = id

	return id
}
// newEventCh returns a channel whose events are forwarded to all watchers.
// The forwarding goroutine exits when the channel is closed.
func (s *Server) newEventCh(ctx context.Context) chan events.Event {
	eventCh := make(chan events.Event)

	async.GoAnnotated(ctx, s.panicHandler, func(ctx context.Context) {
		for {
			event, ok := <-eventCh
			if !ok {
				return
			}

			s.publish(event)
		}
	}, logging.Labels{
		"Action": "Publishing events",
	})

	return eventCh
}
// publish delivers the given event to every watcher that watches its type.
// Failures to deliver are logged but do not stop delivery to other watchers.
func (s *Server) publish(event events.Event) {
	s.watchersLock.RLock()
	defer s.watchersLock.RUnlock()

	for _, w := range s.watchers {
		if !w.IsWatching(event) {
			continue
		}

		if ok := w.Send(event); !ok {
			logrus.WithField("event", event).Warn("Failed to send event to watcher")
		}
	}
}
// newConnCh accepts connections from the given listener.
// It returns a channel of all accepted connections which is closed when the listener is closed.
func newConnCh(l net.Listener, panicHandler async.PanicHandler) <-chan net.Conn {
	connCh := make(chan net.Conn)

	go func() {
		defer async.HandlePanic(panicHandler)
		defer close(connCh)

		// Accept until the listener fails (typically because it was closed).
		for {
			conn, acceptErr := l.Accept()
			if acceptErr != nil {
				return
			}

			connCh <- conn
		}
	}()

	return connCh
}