-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy paths3.go
321 lines (282 loc) · 7.31 KB
/
s3.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
package cloudwatcher
import (
"context"
"encoding/json"
"fmt"
"strings"
"sync/atomic"
"time"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
)
// objectInfo aliases the minio object descriptor so the rest of this file
// can reference listed objects without repeating the minio package name.
type objectInfo = minio.ObjectInfo
// s3Configuration holds the settings that SetConfig decodes from the generic
// string map. Credential sources are selected in SetConfig with the priority:
// AWS shared credentials file, then IAM, then static access/secret keys.
// NOTE(review): `Bool` is a project-declared type (presumably a bool wrapper
// that accepts string JSON values) — confirm its declaration elsewhere.
type s3Configuration struct {
	BucketName string `json:"bucket_name"`
	Endpoint string `json:"endpoint"`
	AccessKey string `json:"access_key"`
	SecretAccessKey string `json:"secret_key"`
	SessionToken string `json:"token"`
	Region string `json:"region"`
	// SSLEnabled toggles HTTPS on the minio client (Options.Secure).
	SSLEnabled Bool `json:"ssl_enabled"`
	// UseAWSIAMCredentials selects IAM-based credentials (EC2/ECS metadata).
	UseAWSIAMCredentials Bool `json:"aws_iam_credentials"`
	AWSIAMEndpoint string `json:"aws_iam_endpoint"`
	// UseAWSFile selects the AWS shared-credentials-file provider.
	UseAWSFile Bool `json:"aws_file"`
	AWSFileName string `json:"aws_file_name"`
	AWSFileProfile string `json:"aws_file_profile"`
}
// S3Watcher is the specialized watcher for Amazon S3 service
type S3Watcher struct {
	WatcherBase
	// syncing is a CAS flag (0 idle, 1 busy) ensuring only one sync runs at a time.
	syncing uint32
	// ticker drives the periodic polling; created in Start.
	ticker *time.Ticker
	// stop signals the polling goroutine to shut down (buffered, capacity 1).
	stop chan bool
	// config is populated by SetConfig; Start refuses to run while it is nil.
	config *s3Configuration
	// client is the S3 client abstraction (implemented by the minio client).
	client IMinio
	// cache maps object keys to their last observed state, used to detect
	// created/changed/deleted objects between polls.
	cache map[string]*S3Object
}
// S3Object is the object that contains the info of the file
type S3Object struct {
	// Key is the object key inside the bucket.
	Key string
	// Etag is the object's ETag, lowercased and stripped of quotes.
	Etag string
	// Size is the object size in bytes.
	Size int64
	// Tags holds the object's S3 tag set.
	Tags map[string]string
	// LastModified is the object's last-modification timestamp as reported by S3.
	LastModified time.Time
}
// newS3Watcher builds an S3 watcher that will observe the given directory
// (used as the object-key prefix) at the given polling interval. The watcher
// still needs SetConfig before Start can succeed.
func newS3Watcher(dir string, interval time.Duration) (Watcher, error) {
	watcher := &S3Watcher{
		WatcherBase: WatcherBase{
			Events:      make(chan Event, 100),
			Errors:      make(chan error, 100),
			watchDir:    dir,
			pollingTime: interval,
		},
		cache:  make(map[string]*S3Object),
		config: nil,
		stop:   make(chan bool, 1),
	}
	return watcher, nil
}
// SetConfig is used to configure the S3Watcher. The string map is round-tripped
// through JSON into s3Configuration, then a minio client is built with the
// credential provider selected by the configuration (shared AWS file first,
// then IAM, then static keys).
func (u *S3Watcher) SetConfig(m map[string]string) error {
	raw, err := json.Marshal(m)
	if err != nil {
		return err
	}
	var cfg s3Configuration
	if err := json.Unmarshal(raw, &cfg); err != nil {
		return err
	}
	u.config = &cfg

	// Resolve the credential provider in priority order.
	var creds *credentials.Credentials
	switch {
	case bool(cfg.UseAWSFile):
		creds = credentials.NewFileAWSCredentials(cfg.AWSFileName, cfg.AWSFileProfile)
	case bool(cfg.UseAWSIAMCredentials):
		creds = credentials.NewIAM(cfg.AWSIAMEndpoint)
	default:
		creds = credentials.NewStaticV4(cfg.AccessKey, cfg.SecretAccessKey, cfg.SessionToken)
	}

	client, err := minio.New(cfg.Endpoint, &minio.Options{
		Creds:  creds,
		Secure: bool(cfg.SSLEnabled),
	})
	if err != nil {
		return err
	}
	u.client = client
	return nil
}
// Start launches the polling process. It verifies the configuration and the
// bucket, then spawns a goroutine that synchronizes on every tick until a
// value is received on the stop channel, at which point it releases the
// ticker and closes the Events and Errors channels.
func (u *S3Watcher) Start() error {
	if u.config == nil {
		return fmt.Errorf("configuration for S3 needed")
	}
	if ok, err := u.bucketExists(u.config.BucketName); err != nil {
		// Wrap with %w so callers can inspect the underlying cause.
		return fmt.Errorf("error on checking the bucket: %w", err)
	} else if !ok {
		return fmt.Errorf("error on checking the bucket: bucket %s does not exist", u.config.BucketName)
	}
	u.ticker = time.NewTicker(u.pollingTime)
	go func() {
		// launch synchronization also the first time
		u.sync(true)
		for {
			select {
			case <-u.ticker.C:
				u.sync(false)
			case <-u.stop:
				// Stop the ticker so its resources are released; previously
				// it was never stopped and leaked on Close.
				u.ticker.Stop()
				close(u.Events)
				close(u.Errors)
				return
			}
		}
	}()
	return nil
}
// Close stops the polling process by signalling the background goroutine
// started by Start. It is a no-op when the watcher was never initialized.
func (u *S3Watcher) Close() {
	if u.stop == nil {
		return
	}
	u.stop <- true
}
// getCachedObject returns the previously seen state of the object with the
// same Key, or nil when the object has not been cached yet.
func (u *S3Watcher) getCachedObject(o *S3Object) *S3Object {
	cached, ok := u.cache[o.Key]
	if !ok {
		return nil
	}
	return cached
}
// areTagsChanged reports whether the tag set of new differs from the
// receiver's tag set (different size, missing key, or different value).
func (u *S3Object) areTagsChanged(new *S3Object) bool {
	// Different cardinality: the sets cannot be equal.
	if len(u.Tags) != len(new.Tags) {
		return true
	}
	// Lengths are equal, so a single directional scan suffices: if every
	// key/value of u.Tags appears in new.Tags, the maps are identical.
	// (The original also scanned new.Tags, which was redundant.)
	for k, v := range u.Tags {
		if nv, ok := new.Tags[k]; !ok || v != nv {
			return true
		}
	}
	return false
}
// sync performs one reconciliation pass between the remote bucket and the
// local cache. On firstSync it only populates the cache; on later passes it
// emits FileCreated, FileChanged, TagsChanged and FileDeleted events for
// every difference detected. Only one sync may run at a time.
func (u *S3Watcher) sync(firstSync bool) {
	// allow only one sync at same time
	if !atomic.CompareAndSwapUint32(&u.syncing, 0, 1) {
		return
	}
	defer atomic.StoreUint32(&u.syncing, 0)
	found, err := u.bucketExists(u.config.BucketName)
	if err != nil {
		u.Errors <- fmt.Errorf("bucket '%s' not found: %w", u.config.BucketName, err)
		return
	}
	if !found {
		// Previously a nil error was formatted here with %s, which rendered
		// as "%!s(<nil>)"; report the missing bucket without the bogus suffix.
		u.Errors <- fmt.Errorf("bucket '%s' not found", u.config.BucketName)
		return
	}
	fileList := make(map[string]*S3Object)
	err = u.enumerateFiles(u.config.BucketName, u.watchDir, func(page int64, obj *objectInfo) bool {
		// Get Info from S3 object
		upd, err := u.getInfoFromObject(obj)
		if err != nil {
			return true // continue
		}
		// Store the files to check the deleted one
		fileList[upd.Key] = upd
		if !firstSync {
			// Check if the object is cached by Key
			cached := u.getCachedObject(upd)
			// Object has been cached previously by Key
			if cached != nil {
				// Check if the LastModified or the size has been changed
				if !cached.LastModified.Equal(upd.LastModified) || cached.Size != upd.Size {
					u.Events <- Event{
						Key:    upd.Key,
						Type:   FileChanged,
						Object: upd,
					}
				}
				// Check if the tags have been updated
				if cached.areTagsChanged(upd) {
					u.Events <- Event{
						Key:    upd.Key,
						Type:   TagsChanged,
						Object: upd,
					}
				}
			} else {
				u.Events <- Event{
					Key:    upd.Key,
					Type:   FileCreated,
					Object: upd,
				}
			}
		}
		u.cache[upd.Key] = upd
		return true
	})
	if err != nil {
		u.Errors <- err
		return
	}
	if !firstSync {
		// Anything still cached but absent from the fresh listing was deleted.
		for k, o := range u.cache {
			if _, present := fileList[k]; !present {
				delete(u.cache, k)
				u.Events <- Event{
					Key:    o.Key,
					Type:   FileDeleted,
					Object: o,
				}
			}
		}
	}
}
// bucketExists reports whether the named bucket is reachable on the
// configured endpoint; on error the boolean result is always false.
func (u *S3Watcher) bucketExists(bucket string) (bool, error) {
	exists, err := u.client.BucketExists(context.Background(), bucket)
	if err != nil {
		return false, err
	}
	return exists, nil
}
// getTags fetches the S3 tag set of the given object key from the given
// bucket and returns it as a plain string map.
func (u *S3Watcher) getTags(key string, bucket string) (map[string]string, error) {
	tagging, err := u.client.GetObjectTagging(context.Background(), bucket, key, minio.GetObjectTaggingOptions{})
	if err != nil {
		return nil, err
	}
	result := make(map[string]string)
	for name, value := range tagging.ToMap() {
		result[name] = value
	}
	return result, nil
}
// isConnected reports whether the configured bucket is currently reachable;
// any lookup error counts as not connected.
func (u *S3Watcher) isConnected() bool {
	ok, err := u.bucketExists(u.config.BucketName)
	return err == nil && ok
}
// getInfoFromObject converts a listed minio object into an S3Object,
// fetching its tag set from the configured bucket along the way.
func (u *S3Watcher) getInfoFromObject(obj *objectInfo) (*S3Object, error) {
	tags, err := u.getTags(obj.Key, u.config.BucketName)
	if err != nil {
		return nil, fmt.Errorf("getting tags from key '%s': %s", obj.Key, err)
	}
	//log.Debug("s3 watcher: get tags from key '%s': %v", obj.Key, tags)
	info := &S3Object{
		Key: obj.Key,
		// ETag arrives wrapped in double quotes; strip them and lowercase it.
		Etag:         strings.ToLower(strings.Trim(obj.ETag, "\"")),
		Size:         obj.Size,
		LastModified: obj.LastModified,
		Tags:         make(map[string]string, len(tags)),
	}
	for name, value := range tags {
		info.Tags[name] = value
	}
	return info, nil
}
// enumerateFiles walks every object of the bucket matching the given prefix
// and invokes callback once per object; returning false from the callback
// stops the walk early. Entries carrying a listing error are skipped. The
// page argument passed to the callback is always 0.
func (u *S3Watcher) enumerateFiles(bucket, prefix string, callback func(page int64, object *objectInfo) bool) error {
	listOpts := minio.ListObjectsOptions{
		WithVersions: false,
		WithMetadata: false,
		Prefix:       prefix,
		Recursive:    true,
		MaxKeys:      0,
		UseV1:        false,
	}
	// List all objects from a bucket-name with a matching prefix.
	for entry := range u.client.ListObjects(context.Background(), bucket, listOpts) {
		if entry.Err != nil {
			// skip entries the listing could not describe
			continue
		}
		if !callback(0, &entry) {
			break
		}
	}
	return nil
}
// init registers the S3 watcher factory under the "s3" service key so it
// can be selected through the package's generic watcher constructor.
func init() {
	supportedServices["s3"] = newS3Watcher
}