// pg_back
//
// Copyright 2011-2021 Nicolas Thauvin and contributors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package main
import (
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
)
var version = "2.5.0"
var binDir string
type dump struct {
// Name of the database to dump
Database string
// Per database pg_dump options to filter schema, tables, etc.
Options *dbOpts
// Path is the output file or directory of the dump;
// a directory is output with the directory format of pg_dump.
// It remains empty until after the dump is done
Path string
// Directory is the target directory where to create the dump
Directory string
// Time format for the filename
TimeFormat string
// Connection parameters
ConnString *ConnInfo
// Cipher passphrase; when not empty, encrypt the file
CipherPassphrase string
// AGE public key used for encryption; in Bech32 encoding starting with "age1"
CipherPublicKey string
// Keep original files after encryption
EncryptKeepSrc bool
// Result
When time.Time
ExitCode int
// Version of pg_dump
PgDumpVersion int
}
type dbOpts struct {
// Format of the dump
Format rune
// Algorithm of the checksum of the file, "none" is used to
// disable checksumming
SumAlgo string
// Number of parallel jobs for directory format
Jobs int
// Compression level for compressed formats, -1 means the default
CompressLevel int
// Purge configuration
PurgeInterval time.Duration
PurgeKeep int
// Limit schemas
Schemas []string
ExcludedSchemas []string
// Limit dumped tables
Tables []string
ExcludedTables []string
// Other pg_dump options to use
PgDumpOpts []string
// Whether to force the dump of large objects or not with pg_dump -b or
// -B, or let pg_dump use its default: 0 means default, 1 includes
// blobs, 2 excludes blobs.
WithBlobs int
}
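// A hypothetical per-database configuration, for illustration: dump only
// the "app" schema in directory format with 4 parallel jobs, checksum
// with SHA-256, and purge dumps older than a week (the purge limit is
// computed as now.Add(PurgeInterval), hence the negative interval):
//
//	o := dbOpts{
//		Format:        'd',
//		Jobs:          4,
//		SumAlgo:       "sha256",
//		CompressLevel: -1,
//		PurgeInterval: -7 * 24 * time.Hour,
//		PurgeKeep:     2,
//		Schemas:       []string{"app"},
//	}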
func main() {
// Use another function to allow the use of defer for cleanup, as
// os.Exit() does not run deferred functions
if err := run(); err != nil {
l.Fatalln(err)
os.Exit(1)
}
}
func run() (retVal error) {
// Parse command line arguments first so that we can quit after
// showing the usage or version string. We may have to load a
// non-default configuration file
cliOpts, cliOptList, err := parseCli(os.Args[1:])
var pce *parseCliResult
if err != nil {
if errors.As(err, &pce) {
// Convert the configuration file if a path has been
// passed in the result, then exit. Since the
// configuration file from pg_back v1 is a shell
// script, the conversion may fail, so we just
// output the result on stdout and exit to let the user
// check the result
if len(pce.LegacyConfig) > 0 {
if err := convertLegacyConfFile(pce.LegacyConfig); err != nil {
return err
}
}
return nil
}
return err
}
// Enable verbose mode or quiet mode as soon as possible
l.SetVerbosity(cliOpts.Verbose, cliOpts.Quiet)
var cliOptions options
if cliOpts.NoConfigFile {
l.Infoln("Skipping reading config file")
cliOptions = defaultOptions()
} else {
// Load configuration file and allow the default configuration
// file to be absent
cliOptions, err = loadConfigurationFile(cliOpts.CfgFile)
if err != nil {
return err
}
}
// override options from the configuration file with ones from
// the command line
opts := mergeCliAndConfigOptions(cliOpts, cliOptions, cliOptList)
err = ensureCipherParamsPresent(&opts)
if err != nil {
return fmt.Errorf("required cipher parameters not present: %w", err)
}
if (opts.Upload == "s3" || opts.Download == "s3" || opts.ListRemote == "s3") && opts.S3Bucket == "" {
return fmt.Errorf("a bucket is mandatory with s3")
}
if (opts.Upload == "b2" || opts.Download == "b2" || opts.ListRemote == "b2") && opts.B2Bucket == "" {
return fmt.Errorf("a bucket is mandatory with B2")
}
if (opts.Upload == "gcs" || opts.Download == "gcs" || opts.ListRemote == "gcs") && opts.GCSBucket == "" {
return fmt.Errorf("a bucket is mandatory with gcs")
}
if (opts.Upload == "azure" || opts.Download == "azure" || opts.ListRemote == "azure") && opts.AzureContainer == "" {
return fmt.Errorf("a container is mandatory with azure")
}
// Run the actions that won't dump databases first; in that case the
// list of databases becomes a list of file globs. Only take the globs
// from the remaining command line args, which are usually the list of
// databases to dump, to avoid getting wrong globs from the config file
globs := []string{}
for _, v := range cliOptList {
if v == "include-dbs" {
globs = opts.Dbnames
break
}
}
// Listing remote files takes priority over the other actions that won't dump databases
if opts.ListRemote != "none" {
if err := listRemoteFiles(opts.ListRemote, opts, globs); err != nil {
return err
}
return nil
}
// When asked to download or decrypt the backups, do it here and exit:
// we have all the required input (passphrase and backup directory)
if opts.Decrypt || opts.Download != "none" {
if opts.Download != "none" {
if err := downloadFiles(opts.Download, opts, opts.Directory, globs); err != nil {
return err
}
}
if opts.Decrypt {
params := decryptParams{PrivateKey: opts.CipherPrivateKey, Passphrase: opts.CipherPassphrase}
if err := decryptDirectory(opts.Directory, params, opts.Jobs, globs); err != nil {
return err
}
}
return nil
}
// Remember when we start so that a purge interval of 0s won't remove
// the dumps we are taking. We truncate the time to the second because
// the purge parses the date from the name of the file, whose
// resolution is the second
now := time.Now().Truncate(time.Second)
if opts.BinDirectory != "" {
binDir = opts.BinDirectory
}
// Ensure that pg_dump accepts the options we will give it
pgDumpVersion := pgToolVersion("pg_dump")
if pgDumpVersion < 80400 {
return fmt.Errorf("provided pg_dump is older than 8.4, unable use it.")
}
// Parse the connection information
l.Verboseln("processing input connection parameters")
conninfo, err := prepareConnInfo(opts.Host, opts.Port, opts.Username, opts.ConnDb)
if err != nil {
return fmt.Errorf("could not compute connection string: %w", err)
}
defer postBackupHook(opts.PostHook)
if err := preBackupHook(opts.PreHook); err != nil {
return err
}
// Use another goroutine to compute checksums and run other operations
// on produced files, sent to it over this channel
producedFiles := make(chan sumFileJob)
var wg sync.WaitGroup
postProcRet := postProcessFiles(producedFiles, &wg, opts)
// retVal allows us to return an error from the post processing
// goroutines, by changing it in a deferred function. Using a deferred
// function prevents us from forgetting any cleanup task. This
// is why retVal is named in the signature of run().
defer func() {
// Detect if the producedFiles channel has been closed twice:
// it is closed early to stop post processing and check for
// errors before purging old files
if err := recover(); err == nil {
l.Infoln("waiting for postprocessing to complete")
}
err := stopPostProcess(&wg, postProcRet)
if err != nil {
if retVal != nil {
// Do not overwrite the error
l.Errorln("failed to stop postprocessing:", err)
} else {
retVal = err
}
}
}()
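// The closure above relies on Go named return values: a deferred
// function can assign to retVal after the body of run() has returned,
// and the caller sees the new value. A minimal sketch of the pattern:
//
//	func f() (err error) {
//		defer func() { err = errors.New("set in defer") }()
//		return nil // the caller receives "set in defer"
//	}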
// Closing the input channel makes the postprocessing go routine stop,
// so it must be done before blocking on the WaitGroup in stopPostProcess()
defer close(producedFiles)
// Connect before running pg_dumpall so that we know if the user is a superuser
db, err := dbOpen(conninfo)
if err != nil {
return fmt.Errorf("connection to PostgreSQL failed: %w", err)
}
defer db.Close()
if !opts.DumpOnly {
if !db.superuser {
l.Infoln("connection user is not superuser, some information will not be dumped")
}
// Then we can implicitly avoid dumping role passwords when using a
// regular user
dumpRolePasswords := opts.WithRolePasswords && db.superuser
if dumpRolePasswords {
l.Infoln("dumping globals")
} else {
l.Infoln("dumping globals without role passwords")
}
if err := dumpGlobals(opts.Directory, opts.TimeFormat, dumpRolePasswords, conninfo, producedFiles); err != nil {
return fmt.Errorf("pg_dumpall of globals failed: %w", err)
}
l.Infoln("dumping instance configuration")
var (
verr *pgVersionError
perr *pgPrivError
)
if err := dumpSettings(opts.Directory, opts.TimeFormat, db, producedFiles); err != nil {
if errors.As(err, &verr) || errors.As(err, &perr) {
l.Warnln(err)
} else {
return fmt.Errorf("could not dump configuration parameters: %w", err)
}
}
if err := dumpConfigFiles(opts.Directory, opts.TimeFormat, db, producedFiles); err != nil {
return fmt.Errorf("could not dump configuration files: %w", err)
}
}
databases, err := listDatabases(db, opts.WithTemplates, opts.ExcludeDbs, opts.Dbnames)
if err != nil {
return err
}
l.Verboseln("databases to dump:", databases)
if err := pauseReplicationWithTimeout(db, opts.PauseTimeout); err != nil {
return err
}
exitCode := 0
maxWorkers := opts.Jobs
numJobs := len(databases)
jobs := make(chan *dump, numJobs)
results := make(chan *dump, numJobs)
// start workers - thanks gobyexample.com
l.Verbosef("launching %d workers", maxWorkers)
for w := 0; w < maxWorkers; w++ {
go dumper(w, jobs, results, producedFiles)
}
defDbOpts := defaultDbOpts(opts)
var passphrase, publicKey string
if opts.Encrypt {
passphrase = opts.CipherPassphrase
publicKey = opts.CipherPublicKey
}
// feed the databases to the worker pool
for _, dbname := range databases {
o, found := opts.PerDbOpts[dbname]
if !found {
o = defDbOpts
}
d := &dump{
Database: dbname,
Options: o,
Directory: opts.Directory,
TimeFormat: opts.TimeFormat,
ConnString: conninfo,
CipherPassphrase: passphrase,
CipherPublicKey: publicKey,
EncryptKeepSrc: opts.EncryptKeepSrc,
ExitCode: -1,
PgDumpVersion: pgDumpVersion,
}
l.Verbosef("sending dump job for database %s to worker pool", dbname)
jobs <- d
}
canDumpACL := true
canDumpConfig := true
// When asked to only dump databases, exclude ACL and config even if
// this can lead to missing info on restore when pg_dump is older than
// 11
if opts.DumpOnly {
canDumpACL = false
canDumpConfig = false
}
// collect the result of the jobs
for j := 0; j < numJobs; j++ {
var b, c string
var err error
l.Verboseln("waiting for worker to send job back")
d := <-results
dbname := d.Database
l.Verboseln("received job result of", dbname)
if d.ExitCode > 0 {
exitCode = 1
}
// Dump the ACL and configuration of the
// database. Since the information comes from the catalog,
// if it fails once it will fail every time.
if canDumpACL {
l.Verboseln("dumping create database query and ACL of", dbname)
force := false
if d.Options.Format == 'p' {
force = true
}
b, err = dumpCreateDBAndACL(db, dbname, force)
var verr *pgVersionError
if err != nil {
if !errors.As(err, &verr) {
l.Errorln(err)
exitCode = 1
} else {
l.Warnln(err)
canDumpACL = false
}
}
}
if canDumpConfig {
l.Verboseln("dumping configuration of", dbname)
c, err = dumpDBConfig(db, dbname)
if err != nil {
var verr *pgVersionError
if !errors.As(err, &verr) {
l.Errorln(err)
exitCode = 1
} else {
l.Warnln(err)
canDumpConfig = false
}
}
}
// Write ACL and configuration to an SQL file
if len(b) > 0 || len(c) > 0 {
aclpath := formatDumpPath(d.Directory, d.TimeFormat, "createdb.sql", dbname, d.When, 0)
if err := os.MkdirAll(filepath.Dir(aclpath), 0700); err != nil {
l.Errorln(err)
exitCode = 1
continue
}
f, err := os.Create(aclpath)
if err != nil {
l.Errorln(err)
exitCode = 1
continue
}
fmt.Fprintf(f, "%s", b)
fmt.Fprintf(f, "%s", c)
f.Close()
if err := os.Chmod(aclpath, 0600); err != nil {
return fmt.Errorf("could not chmod to more secure permission for ACL %s: %s", dbname, err)
}
// Have its checksum computed
producedFiles <- sumFileJob{
Path: aclpath,
SumAlgo: d.Options.SumAlgo,
}
l.Infoln("dump of ACL and configuration of", dbname, "to", aclpath, "done")
}
}
if err := resumeReplication(db); err != nil {
l.Errorln(err)
}
db.Close()
if exitCode != 0 {
return fmt.Errorf("some operation failed")
}
// Closing the input channel makes the postprocessing go routine stop,
// so it must be done before blocking on the WaitGroup in
// stopPostProcess()
close(producedFiles)
l.Infoln("waiting for postprocessing to complete")
if err := stopPostProcess(&wg, postProcRet); err != nil {
return err
}
// purge old dumps per database and treat special files
// (globals and settings) like databases
l.Infoln("purging old dumps")
var repo Repo
switch opts.Upload {
case "s3":
repo, err = NewS3Repo(opts)
if err != nil {
return fmt.Errorf("failed to prepare upload to S3: %w", err)
}
case "b2":
repo, err = NewB2Repo(opts)
if err != nil {
return fmt.Errorf("failed to prepare upload to B2: %w", err)
}
case "sftp":
repo, err = NewSFTPRepo(opts)
if err != nil {
return fmt.Errorf("failed to prepare upload over SFTP: %w", err)
}
case "gcs":
repo, err = NewGCSRepo(opts)
if err != nil {
return fmt.Errorf("failed to prepare upload to GCS: %w", err)
}
case "azure":
repo, err = NewAzRepo(opts)
if err != nil {
return fmt.Errorf("failed to prepare upload to Azure: %w", err)
}
}
for _, dbname := range databases {
o, found := opts.PerDbOpts[dbname]
if !found {
o = defDbOpts
}
limit := now.Add(o.PurgeInterval)
if err := purgeDumps(opts.Directory, dbname, o.PurgeKeep, limit); err != nil {
retVal = err
}
if opts.PurgeRemote && repo != nil {
if err := purgeRemoteDumps(repo, opts.UploadPrefix, opts.Directory, dbname, o.PurgeKeep, limit); err != nil {
retVal = err
}
}
}
if !opts.DumpOnly {
for _, other := range []string{"pg_globals", "pg_settings", "hba_file", "ident_file"} {
limit := now.Add(defDbOpts.PurgeInterval)
if err := purgeDumps(opts.Directory, other, defDbOpts.PurgeKeep, limit); err != nil {
retVal = err
}
if opts.PurgeRemote && repo != nil {
if err := purgeRemoteDumps(repo, opts.UploadPrefix, opts.Directory, other, defDbOpts.PurgeKeep, limit); err != nil {
retVal = err
}
}
}
}
return
}
func defaultDbOpts(opts options) *dbOpts {
dbo := dbOpts{
Format: opts.Format,
Jobs: opts.DirJobs,
CompressLevel: opts.CompressLevel,
SumAlgo: opts.SumAlgo,
PurgeInterval: opts.PurgeInterval,
PurgeKeep: opts.PurgeKeep,
PgDumpOpts: opts.PgDumpOpts,
}
return &dbo
}
func (d *dump) dump(fc chan<- sumFileJob) error {
dbname := d.Database
d.ExitCode = 1
l.Infoln("dumping database", dbname)
// Try to lock a file named after the database we are going to
// dump, to prevent stacking pg_back processes if pg_dump lasts
// longer than the schedule interval of pg_back. If the lock cannot
// be acquired, skip the dump and exit with an error.
lock := formatDumpPath(d.Directory, d.TimeFormat, "lock", dbname, time.Time{}, 0)
flock, locked, err := lockPath(lock)
if err != nil {
return fmt.Errorf("unable to lock %s: %s", lock, err)
}
if !locked {
return fmt.Errorf("could not acquire lock for %s", dbname)
}
d.When = time.Now()
var fileEnd string
switch d.Options.Format {
case 'p':
fileEnd = "sql"
case 'c':
fileEnd = "dump"
case 't':
fileEnd = "tar"
case 'd':
if d.PgDumpVersion < 90100 {
return fmt.Errorf("provided pg_dump version does not support directory format")
}
fileEnd = "d"
}
file := formatDumpPath(d.Directory, d.TimeFormat, fileEnd, dbname, d.When, d.Options.CompressLevel)
formatOpt := fmt.Sprintf("-F%c", d.Options.Format)
command := execPath("pg_dump")
args := []string{formatOpt, "-f", file, "-w"}
if fileEnd == "d" && d.Options.Jobs > 1 {
if d.PgDumpVersion < 90300 {
l.Warnln("provided pg_dump version does not support parallel jobs, ignoring option")
} else {
args = append(args, "-j", fmt.Sprintf("%d", d.Options.Jobs))
}
}
// Included and excluded schemas and tables
for _, obj := range d.Options.Schemas {
args = append(args, "-n", obj)
}
for _, obj := range d.Options.ExcludedSchemas {
args = append(args, "-N", obj)
}
for _, obj := range d.Options.Tables {
args = append(args, "-t", obj)
}
for _, obj := range d.Options.ExcludedTables {
args = append(args, "-T", obj)
}
switch d.Options.WithBlobs {
case 1: // with blobs
args = append(args, "-b")
case 2: // without blobs
if d.PgDumpVersion < 100000 {
l.Warnln("provided pg_dump version does not support excluding blobs, ignoring option")
} else {
args = append(args, "-B")
}
}
// Add compression level option only if not dumping in the tar format
if d.Options.CompressLevel >= 0 {
if d.Options.Format != 't' {
args = append(args, "-Z", fmt.Sprintf("%d", d.Options.CompressLevel))
} else {
l.Warnln("compression level is not supported by the target format")
}
}
if len(d.Options.PgDumpOpts) > 0 {
args = append(args, d.Options.PgDumpOpts...)
}
// Connection options are passed as a connection string even if we add
// options on the command line. For older versions, they are passed
// using the environment
conninfo := d.ConnString.Set("dbname", dbname)
var env []string
if d.PgDumpVersion < 90300 {
args = append(args, dbname)
env = os.Environ()
env = append(env, d.ConnString.MakeEnv()...)
} else {
args = append(args, "-d", conninfo.String())
}
pgDumpCmd := exec.Command(command, args...)
pgDumpCmd.Env = env
l.Verboseln("running:", pgDumpCmd)
stdoutStderr, err := pgDumpCmd.CombinedOutput()
if err != nil {
for _, line := range strings.Split(string(stdoutStderr), "\n") {
if line != "" {
l.Errorf("[%s] %s\n", dbname, line)
}
}
if err := unlockPath(flock); err != nil {
l.Errorf("could not release lock for %s: %s", dbname, err)
flock.Close()
}
return err
}
if len(stdoutStderr) > 0 {
for _, line := range strings.Split(string(stdoutStderr), "\n") {
if line != "" {
l.Infof("[%s] %s\n", dbname, line)
}
}
}
if err := unlockPath(flock); err != nil {
flock.Close()
return fmt.Errorf("could not release lock for %s: %s", dbname, err)
}
// Send the info on the file for post processing
if fc != nil {
fc <- sumFileJob{
Path: file,
SumAlgo: d.Options.SumAlgo,
}
}
d.Path = file
d.ExitCode = 0
var mode os.FileMode = 0600
if d.Options.Format == 'd' {
// The hardening of permissions only applies to the top level
// directory; this won't make the contents executable
mode = 0700
}
if err := os.Chmod(file, mode); err != nil {
return fmt.Errorf("could not chmod to more secure permission for %s: %s", dbname, err)
}
return nil
}
func dumper(id int, jobs <-chan *dump, results chan<- *dump, fc chan<- sumFileJob) {
for j := range jobs {
if err := j.dump(fc); err != nil {
l.Errorln("dump of", j.Database, "failed:", err)
results <- j
} else {
l.Infoln("dump of", j.Database, "to", j.Path, "done")
results <- j
}
}
}
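// Note that run() buffers both the jobs and results channels to the
// number of databases, so the feeding loop can enqueue every dump before
// any result is collected, without blocking the main goroutine.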
func ensureCipherParamsPresent(opts *options) error {
// Nothing needs to be done if we are not encrypting or decrypting
if !opts.Encrypt && !opts.Decrypt {
return nil
}
// If we are encrypting or decrypting, make sure we either have a public/private key or a passphrase
needEncryptParams := opts.Encrypt && len(opts.CipherPublicKey) == 0 && len(opts.CipherPassphrase) == 0
needDecryptParams := opts.Decrypt && len(opts.CipherPrivateKey) == 0 && len(opts.CipherPassphrase) == 0
if needEncryptParams || needDecryptParams { // Fallback on the environment
opts.CipherPassphrase = os.Getenv("PGBK_CIPHER_PASS")
if len(opts.CipherPassphrase) == 0 {
return fmt.Errorf("cannot use an empty passphrase for encryption")
}
}
return nil
}
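// For example, the passphrase can be kept out of the configuration file
// and provided through the environment (hypothetical invocation, assuming
// an --encrypt flag mapped to opts.Encrypt):
//
//	$ PGBK_CIPHER_PASS=secret pg_back --encrypt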
func relPath(basedir, path string) string {
target, err := filepath.Rel(basedir, path)
if err != nil {
l.Warnf("could not get relative path from %s: %s\n", path, err)
target = path
}
prefix := fmt.Sprintf("..%c", os.PathSeparator)
for strings.HasPrefix(target, prefix) {
target = strings.TrimPrefix(target, prefix)
}
return target
}
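// For example, relPath("/var/backups", "/var/backups/db/f.dump") returns
// "db/f.dump", while a path outside of basedir such as "/etc/passwd"
// yields "../../etc/passwd" from filepath.Rel and is then stripped of
// its "../" prefixes to give "etc/passwd".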
func execPath(prog string) string {
binFile := prog
if runtime.GOOS == "windows" {
binFile = fmt.Sprintf("%s.exe", prog)
}
if binDir != "" {
return filepath.Join(binDir, binFile)
}
return binFile
}
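// For example, with a (hypothetical) bin directory of
// "/usr/lib/postgresql/14/bin", execPath("pg_dump") returns
// "/usr/lib/postgresql/14/bin/pg_dump", plus an ".exe" suffix on
// Windows. With an empty binDir the bare program name is returned and
// left to exec.Command() to resolve through the PATH.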
func cleanDBName(dbname string) string {
// We do not want a database name starting with a dot to avoid creating hidden files
if strings.HasPrefix(dbname, ".") {
dbname = "_" + dbname
}
// If there is a path separator in the database name, we do not want to
// create the dump in a subdirectory or in a parent directory
if strings.ContainsRune(dbname, os.PathSeparator) {
dbname = strings.ReplaceAll(dbname, string(os.PathSeparator), "_")
}
// Always remove slashes to avoid issues with filenames on Windows
if strings.ContainsRune(dbname, '/') {
dbname = strings.ReplaceAll(dbname, "/", "_")
}
return dbname
}
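// For example, on Linux cleanDBName(".db/../evil") becomes "_db/../evil"
// because of the leading dot, then "_db_.._evil" because '/' is the path
// separator: the dump file can neither be hidden nor escape its directory.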
func formatDumpPath(dir string, timeFormat string, suffix string, dbname string, when time.Time, compressLevel int) string {
var f, s, d string
// Sanitize the database name to avoid path traversal and hidden files
dbname = cleanDBName(dbname)
d = dir
if dbname != "" {
d = strings.Replace(dir, "{dbname}", dbname, -1)
}
s = suffix
if suffix == "" {
s = "dump"
}
// Output is "dir(formatted)/dbname_date.suffix" when the
// input time is not zero, otherwise do not include the date
// and time. Reference time for time.Format(): "Mon Jan 2
// 15:04:05 MST 2006"
if when.IsZero() {
f = fmt.Sprintf("%s.%s", dbname, s)
} else {
f = fmt.Sprintf("%s_%s.%s", dbname, when.Format(timeFormat), s)
}
if suffix == "sql" && compressLevel > 0 {
f = f + ".gz"
}
return filepath.Join(d, f)
}
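// For example (hypothetical values), with dir "/backups/{dbname}", time
// format "2006-01-02_15-04-05", suffix "sql", dbname "mydb" and compress
// level 6, a dump taken on May 1st 2021 at noon is written to
// "/backups/mydb/mydb_2021-05-01_12-00-00.sql.gz".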
func pgToolVersion(tool string) int {
vs, err := exec.Command(execPath(tool), "--version").Output()
if err != nil {
l.Warnf("failed to retrieve version of %s: %s", tool, err)
return 0
}
var maj, min, rev, numver int
n, _ := fmt.Sscanf(string(vs), tool+" (PostgreSQL) %d.%d.%d", &maj, &min, &rev)
if n == 3 {
// Before PostgreSQL 10, the format is MAJ.MIN.REV
numver = (maj*100+min)*100 + rev
} else if n == 2 {
// From PostgreSQL 10, the format is MAJ.REV, so the rev ends
// up in min with the scan
numver = maj*10000 + min
} else {
// We have the special case of the development version, where the
// format is MAJdevel
fmt.Sscanf(string(vs), tool+" (PostgreSQL) %ddevel", &maj)
numver = maj * 10000
}
l.Verboseln(tool, "version is:", numver)
return numver
}
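// Worked examples of the numbering: "pg_dump (PostgreSQL) 9.6.24" gives
// (9*100+6)*100+24 = 90624, "pg_dump (PostgreSQL) 13.5" gives
// 13*10000+5 = 130005, and "pg_dump (PostgreSQL) 17devel" gives
// 17*10000 = 170000.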
func dumpGlobals(dir string, timeFormat string, withRolePasswords bool, conninfo *ConnInfo, fc chan<- sumFileJob) error {
command := execPath("pg_dumpall")
args := []string{"-g", "-w"}
// pg_dumpall only connects to another database if it is given
// with the -l option
if dbname, ok := conninfo.Infos["dbname"]; ok {
args = append(args, "-l", dbname)
}
// With older versions of PostgreSQL that do not support connection
// strings for their -d option, use the environment to pass the
// connection information
var env []string
pgDumpallVersion := pgToolVersion("pg_dumpall")
if pgDumpallVersion < 90300 {
env = os.Environ()
env = append(env, conninfo.MakeEnv()...)
} else {
args = append(args, "-d", conninfo.String())
}
// The --no-role-passwords option was added to pg_dumpall in PostgreSQL 10
if !withRolePasswords {
if pgDumpallVersion < 100000 {
return fmt.Errorf("pg_dumpall does not support --no-role-passwords, use pg_dumpall >= 10")
}
args = append(args, "--no-role-passwords")
}
file := formatDumpPath(dir, timeFormat, "sql", "pg_globals", time.Now(), 0)
args = append(args, "-f", file)
if err := os.MkdirAll(filepath.Dir(file), 0700); err != nil {
return err
}
pgDumpallCmd := exec.Command(command, args...)
pgDumpallCmd.Env = env
l.Verboseln("running:", pgDumpallCmd)
stdoutStderr, err := pgDumpallCmd.CombinedOutput()
if err != nil {
for _, line := range strings.Split(string(stdoutStderr), "\n") {
if line != "" {
l.Errorln(line)
}
}
return err
}
if len(stdoutStderr) > 0 {
for _, line := range strings.Split(string(stdoutStderr), "\n") {
if line != "" {
l.Infoln(line)
}
}
}
if err := os.Chmod(file, 0600); err != nil {
return fmt.Errorf("could not chmod to more secure permission for pg_globals: %s", err)
}
if fc != nil {
fc <- sumFileJob{
Path: file,
}
}
return nil
}
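// Assuming a connection to the postgres database with pg_dumpall 10 or
// newer, the resulting command resembles (hypothetical paths):
//
//	pg_dumpall -g -w -l postgres -d "dbname=postgres" -f /var/backups/pg_globals_2021-05-01_12-00-00.sql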
func dumpSettings(dir string, timeFormat string, db *pg, fc chan<- sumFileJob) error {
file := formatDumpPath(dir, timeFormat, "out", "pg_settings", time.Now(), 0)
if err := os.MkdirAll(filepath.Dir(file), 0700); err != nil {
return err
}
s, err := showSettings(db)
if err != nil {
return err
}
// Only write the file when there are settings, to avoid creating an empty file
if len(s) > 0 {
l.Verboseln("writing settings to:", file)
if err := os.WriteFile(file, []byte(s), 0600); err != nil {
return err
}
if fc != nil {
fc <- sumFileJob{
Path: file,
}
}
}
return nil
}
func dumpConfigFiles(dir string, timeFormat string, db *pg, fc chan<- sumFileJob) error {
for _, param := range []string{"hba_file", "ident_file"} {
file := formatDumpPath(dir, timeFormat, "out", param, time.Now(), 0)
if err := os.MkdirAll(filepath.Dir(file), 0700); err != nil {