Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,10 @@
## pg_back 2.6.0

* Allow to override the connection user per database
* Add the --delete-uploaded option to delete local files after upload
* Add the --backup-file-mode option to configure the permissions on dumps
* Add the --uniform-timestamp option to create all files of a run with the same
time in the filenames

## pg_back 2.5.0

Expand Down
2 changes: 2 additions & 0 deletions CONTRIBUTORS
Original file line number Diff line number Diff line change
Expand Up @@ -16,3 +16,5 @@ Massimo Lusetti
Kenny Root
Pierrick Chovelon
Dennis Urban
Julian Vanden Broeck
Stéphane Klein
30 changes: 24 additions & 6 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -66,12 +66,6 @@ If default and command line options are not enough, a configuration file
may be provided with `-c <configfilename>` (see [pg_back.conf](pg_back.conf)).
(Note: see below to convert configuration files from version 1.)

If the default output directory `/var/backups/postgresql` does not exist or has
improper ownership for your user, use `-b` to give the path where to store the
files. The path may contain the `{dbname}` keyword, that would be replaced by
the name of the database being dumped, this permits to dump each database in
its own directory.

To connect to PostgreSQL, use the `-h`, `-p`, `-U` and `-d` options. If you
need less known connection options such as `sslcert` and `sslkey`, you can give
a `keyword=value` libpq connection string like `pg_dump` and `pg_dumpall`
Expand All @@ -83,6 +77,24 @@ The other command line options let you tweak what is dumped, purged, and how
it is done. These options can be put in a configuration file. The command line
options override configuration options.

### Output

If the default output directory `/var/backups/postgresql` does not exist or has
improper ownership for your user, use `-b` to give the path where to store the
files. The path may contain the `{dbname}` keyword, which is replaced by the
name of the database being dumped; this permits dumping each database into its
own directory.

By default the files are created with strict file permissions, 0600; this can
be changed with the `--backup-file-mode` option. Permissions on directories are
deduced from the file mode to add the `x` bit if necessary.

A run of pg_back creates multiple files, with the date and time of their
creation in their filename. While this helps to find at what time the data
would be restored, all timestamps in the filenames are potentially different.
The `--uniform-timestamp` option changes this behaviour to use the same date
and time for all files output by a run; it is the time of the first dump of
the run.

### Per-database configuration

Per-database configuration can only be done with a configuration file. The
Expand Down Expand Up @@ -219,6 +231,12 @@ the prefix is separated by a / in the remote location.
The `--purge-remote` option can be set to `yes` to apply the same purge policy
on the remote location as the local directory.

The `--delete-uploaded` option can be set to `yes` (or `no` to override the
configuration file value) to remove a local file as soon as it is uploaded
successfully. (Note: it will leave unencrypted local files when
`--encrypt-keep-src` and `--encrypt` are active because only encrypted files
are uploaded in that case)

When files are encrypted and their unencrypted source is kept, only encrypted
files are uploaded.

Expand Down
30 changes: 16 additions & 14 deletions config.go
Original file line number Diff line number Diff line change
Expand Up @@ -280,11 +280,11 @@ func parseCli(args []string) (options, []string, error) {
pflag.PrintDefaults()
}

pflag.StringVarP(&opts.CfgFile, "config", "c", defaultCfgFile, "alternate config file")
pflag.BoolVar(&opts.NoConfigFile, "no-config-file", false, "skip reading config file\n")
pflag.StringVarP(&opts.BinDirectory, "bin-directory", "B", "", "PostgreSQL binaries directory. Empty to search $PATH")
pflag.StringVarP(&opts.Directory, "backup-directory", "b", "/var/backups/postgresql", "store dump files there")
pflag.StringVarP(&mode, "backup-file-mode", "m", "0600", "mode to apply to dump files")
pflag.StringVarP(&opts.CfgFile, "config", "c", defaultCfgFile, "alternate config file")
pflag.StringSliceVarP(&opts.ExcludeDbs, "exclude-dbs", "D", []string{}, "list of databases to exclude")
pflag.BoolVarP(&opts.WithTemplates, "with-templates", "t", false, "include templates")
WithoutTemplates := pflag.Bool("without-templates", false, "force exclude templates")
Expand All @@ -297,31 +297,31 @@ func parseCli(args []string) (options, []string, error) {
pflag.IntVarP(&opts.DirJobs, "parallel-backup-jobs", "J", 1, "number of parallel jobs to dumps when using directory format")
pflag.IntVarP(&opts.CompressLevel, "compress", "Z", -1, "compression level for compressed formats")
pflag.StringVarP(&opts.SumAlgo, "checksum-algo", "S", "none", "signature algorithm: none sha1 sha224 sha256 sha384 sha512")
pflag.BoolVar(&opts.UniformTimestamp, "uniform-timestamp", false, "Use the same timestamp for all pg_back files instead of individual\ncreation times")
pflag.StringVarP(&purgeInterval, "purge-older-than", "P", "30", "purge backups older than this duration in days\nuse an interval with units \"s\" (seconds), \"m\" (minutes) or \"h\" (hours)\nfor less than a day.")
pflag.StringVarP(&purgeKeep, "purge-min-keep", "K", "0", "minimum number of dumps to keep when purging or 'all' to keep\neverything")
pflag.StringVar(&opts.PreHook, "pre-backup-hook", "", "command to run before taking dumps")
pflag.StringVar(&opts.PostHook, "post-backup-hook", "", "command to run after taking dumps\n")
pflag.BoolVar(&opts.UniformTimestamp, "uniform-timestamp", false, "Use the same timestamp for all pg_back files instead of individual creation times")

pflag.BoolVar(&opts.Encrypt, "encrypt", false, "encrypt the dumps")
NoEncrypt := pflag.Bool("no-encrypt", false, "do not encrypt the dumps")
pflag.BoolVar(&opts.EncryptKeepSrc, "encrypt-keep-src", false, "keep original files when encrypting")
NoEncryptKeepSrc := pflag.Bool("no-encrypt-keep-src", false, "do not keep original files when encrypting")
pflag.BoolVar(&opts.Decrypt, "decrypt", false, "decrypt files in the backup directory instead of dumping. DBNAMEs become\nglobs to select files")
pflag.StringVar(&opts.CipherPassphrase, "cipher-pass", "", "cipher passphrase for encryption and decryption\n")
pflag.StringVar(&opts.CipherPublicKey, "cipher-public-key", "", "AGE public key for encryption; in Bech32 encoding starting with 'age1'\n")
pflag.StringVar(&opts.CipherPrivateKey, "cipher-private-key", "", "AGE private key for decryption; in Bech32 encoding starting with 'AGE-SECRET-KEY-1'\n")
pflag.StringVar(&opts.CipherPassphrase, "cipher-pass", "", "cipher passphrase for encryption and decryption")
pflag.StringVar(&opts.CipherPublicKey, "cipher-public-key", "", "AGE public key for encryption; in Bech32 encoding starting with 'age1'")
pflag.StringVar(&opts.CipherPrivateKey, "cipher-private-key", "", "AGE private key for decryption; in Bech32 encoding starting with\n'AGE-SECRET-KEY-1'\n")

pflag.StringVar(&opts.Upload, "upload", "none", "upload produced files to target (s3, gcs,..) use \"none\" to override\nconfiguration file and disable upload")
pflag.StringVar(&opts.UploadPrefix, "upload-prefix", "", "add this prefix to uploaded files, similar to a target directory")
deleteUploaded := pflag.String("delete-uploaded", "no", "delete local file after upload")
pflag.StringVar(&opts.Download, "download", "none", "download files from target (s3, gcs,..) instead of dumping. DBNAMEs become\nglobs to select files")
pflag.StringVar(&opts.ListRemote, "list-remote", "none", "list the remote files on s3, gcs, sftp, azure instead of dumping. DBNAMEs become\nglobs to select files")
pflag.StringVar(&opts.ListRemote, "list-remote", "none", "list the remote files on s3, gcs, sftp, azure instead of dumping. DBNAMEs\nbecome globs to select files")
purgeRemote := pflag.String("purge-remote", "no", "purge the file on remote location after upload, with the same rules\nas the local directory")

pflag.StringVar(&opts.B2Bucket, "b2-bucket", "", "B2 bucket")
pflag.StringVar(&opts.B2KeyID, "b2-key-id", "", "B2 access key ID")
pflag.StringVar(&opts.B2AppKey, "b2-app-key", "", "B2 app key")
pflag.StringVar(&opts.B2Bucket, "b2-bucket", "", "Backblaze B2 bucket")
pflag.StringVar(&opts.B2KeyID, "b2-key-id", "", "Backblaze B2 access key ID")
pflag.StringVar(&opts.B2AppKey, "b2-app-key", "", "Backblaze B2 app key")
B2ForcePath := pflag.String("b2-force-path", "no", "force path style addressing instead of virtual hosted bucket\naddressing")
B2ConcurrentConnections := pflag.Int("b2-concurrent-connections", 5, "set the amount of concurrent b2 http connections")

Expand Down Expand Up @@ -576,7 +576,7 @@ gkLoop:
}

subs := cfg.Sections()
knonw_perdb := []string{
known_perdb := []string{
"format", "parallel_backup_jobs", "compress_level", "checksum_algorithm",
"purge_older_than", "purge_min_keep", "schemas", "exclude_schemas", "tables",
"exclude_tables", "pg_dump_options", "with_blobs", "user",
Expand All @@ -589,7 +589,7 @@ gkLoop:

dbkLoop:
for _, v := range sub.KeyStrings() {
for _, c := range knonw_perdb {
for _, c := range known_perdb {
if v == c {
continue dbkLoop
}
Expand Down Expand Up @@ -886,6 +886,9 @@ func mergeCliAndConfigOptions(cliOpts options, configOpts options, onCli []strin
for _, dbo := range opts.PerDbOpts {
dbo.SumAlgo = cliOpts.SumAlgo
}
case "uniform-timestamp":
opts.UniformTimestamp = cliOpts.UniformTimestamp

case "purge-older-than":
opts.PurgeInterval = cliOpts.PurgeInterval
for _, dbo := range opts.PerDbOpts {
Expand Down Expand Up @@ -923,6 +926,8 @@ func mergeCliAndConfigOptions(cliOpts options, configOpts options, onCli []strin
opts.ListRemote = cliOpts.ListRemote
case "purge-remote":
opts.PurgeRemote = cliOpts.PurgeRemote
case "delete-uploaded":
opts.DeleteUploaded = cliOpts.DeleteUploaded

case "b2-bucket":
opts.B2Bucket = cliOpts.B2Bucket
Expand Down Expand Up @@ -991,10 +996,7 @@ func mergeCliAndConfigOptions(cliOpts options, configOpts options, onCli []strin
opts.Username = cliOpts.Username
case "dbname":
opts.ConnDb = cliOpts.ConnDb
case "uniform-timestamp":
opts.UniformTimestamp = cliOpts.UniformTimestamp
}
}

return opts
}
38 changes: 22 additions & 16 deletions main.go
Original file line number Diff line number Diff line change
Expand Up @@ -304,9 +304,9 @@ func run() (retVal error) {
defer db.Close()

// Generate a single datetime that will be used in all files generated by pg_back
var uniformTimestamp time.Time
var fileTime time.Time
if opts.UniformTimestamp {
uniformTimestamp = time.Now()
fileTime = time.Now()
}

if !opts.DumpOnly {
Expand All @@ -322,7 +322,7 @@ func run() (retVal error) {
} else {
l.Infoln("dumping globals without role passwords")
}
if err := dumpGlobals(opts.Directory, opts.Mode, opts.TimeFormat, dumpRolePasswords, conninfo, producedFiles, uniformTimestamp); err != nil {
if err := dumpGlobals(opts.Directory, opts.Mode, opts.TimeFormat, dumpRolePasswords, conninfo, producedFiles, fileTime); err != nil {
return fmt.Errorf("pg_dumpall of globals failed: %w", err)
}

Expand All @@ -332,15 +332,15 @@ func run() (retVal error) {
perr *pgPrivError
)

if err := dumpSettings(opts.Directory, opts.Mode, opts.TimeFormat, db, producedFiles, uniformTimestamp); err != nil {
if err := dumpSettings(opts.Directory, opts.Mode, opts.TimeFormat, db, producedFiles, fileTime); err != nil {
if errors.As(err, &verr) || errors.As(err, &perr) {
l.Warnln(err)
} else {
return fmt.Errorf("could not dump configuration parameters: %w", err)
}
}

if err := dumpConfigFiles(opts.Directory, opts.Mode, opts.TimeFormat, db, producedFiles, uniformTimestamp); err != nil {
if err := dumpConfigFiles(opts.Directory, opts.Mode, opts.TimeFormat, db, producedFiles, fileTime); err != nil {
return fmt.Errorf("could not dump configuration files: %w", err)
}
}
Expand Down Expand Up @@ -392,7 +392,7 @@ func run() (retVal error) {
CipherPassphrase: passphrase,
CipherPublicKey: publicKey,
EncryptKeepSrc: opts.EncryptKeepSrc,
When: uniformTimestamp,
When: fileTime,
ExitCode: -1,
PgDumpVersion: pgDumpVersion,
}
Expand Down Expand Up @@ -944,7 +944,7 @@ func pgToolVersion(tool string) int {
return numver
}

func dumpGlobals(dir string, mode int, timeFormat string, withRolePasswords bool, conninfo *ConnInfo, fc chan<- sumFileJob, uniformTimestamp time.Time) error {
func dumpGlobals(dir string, mode int, timeFormat string, withRolePasswords bool, conninfo *ConnInfo, fc chan<- sumFileJob, when time.Time) error {
command := execPath("pg_dumpall")
args := []string{"-g", "-w"}

Expand Down Expand Up @@ -976,11 +976,11 @@ func dumpGlobals(dir string, mode int, timeFormat string, withRolePasswords bool
args = append(args, "--no-role-passwords")
}

if uniformTimestamp.IsZero() {
uniformTimestamp = time.Now()
if when.IsZero() {
when = time.Now()
}

file := formatDumpPath(dir, timeFormat, "sql", "pg_globals", uniformTimestamp, 0)
file := formatDumpPath(dir, timeFormat, "sql", "pg_globals", when, 0)
args = append(args, "-f", file)

if err := os.MkdirAll(filepath.Dir(file), 0700); err != nil {
Expand Down Expand Up @@ -1021,8 +1021,12 @@ func dumpGlobals(dir string, mode int, timeFormat string, withRolePasswords bool
return nil
}

func dumpSettings(dir string, mode int, timeFormat string, db *pg, fc chan<- sumFileJob, uniformTimestamp time.Time) error {
file := formatDumpPath(dir, timeFormat, "out", "pg_settings", uniformTimestamp, 0)
func dumpSettings(dir string, mode int, timeFormat string, db *pg, fc chan<- sumFileJob, when time.Time) error {
if when.IsZero() {
when = time.Now()
}

file := formatDumpPath(dir, timeFormat, "out", "pg_settings", when, 0)

if err := os.MkdirAll(filepath.Dir(file), 0o700); err != nil {
return err
Expand Down Expand Up @@ -1056,12 +1060,12 @@ func dumpSettings(dir string, mode int, timeFormat string, db *pg, fc chan<- sum
return nil
}

func dumpConfigFiles(dir string, mode int, timeFormat string, db *pg, fc chan<- sumFileJob, uniformTimestamp time.Time) error {
func dumpConfigFiles(dir string, mode int, timeFormat string, db *pg, fc chan<- sumFileJob, when time.Time) error {
for _, param := range []string{"hba_file", "ident_file"} {
if uniformTimestamp.IsZero() {
uniformTimestamp = time.Now()
if when.IsZero() {
when = time.Now()
}
file := formatDumpPath(dir, timeFormat, "out", param, uniformTimestamp, 0)
file := formatDumpPath(dir, timeFormat, "out", param, when, 0)

if err := os.MkdirAll(filepath.Dir(file), 0700); err != nil {
return err
Expand Down Expand Up @@ -1611,6 +1615,8 @@ func postProcessFiles(inFiles chan sumFileJob, wg *sync.WaitGroup, opts options)
}
continue
}

l.Infoln("uploaded", j.Path)
if opts.DeleteUploaded {
l.Infoln("removing", j.Path)
os.Remove(j.Path)
Expand Down
2 changes: 1 addition & 1 deletion pg_back.conf
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ with_templates = false
# Dump only databases, excluding configuration and globals
dump_only = false

# Apply the same consistent timestamp to all filenames generated
# Apply the same timestamp to all filenames generated
# by pg_back instead of using individual file creation times
uniform_timestamp = false

Expand Down
17 changes: 9 additions & 8 deletions testdata/fixture.sql
Original file line number Diff line number Diff line change
Expand Up @@ -13,15 +13,16 @@ GRANT CONNECT ON DATABASE b2 TO u2;
\c b1

SET ROLE u1;
CREATE TABLE t1 AS SELECT generate_series(0, 9) i;
CREATE TABLE t2 AS SELECT generate_series(10, 19) j;
CREATE TABLE t3 AS SELECT generate_series(0, 9) i;
CREATE TABLE t4 AS SELECT generate_series(10, 19) j;
CREATE TABLE IF NOT EXISTS t1 AS SELECT generate_series(0, 9) i;
CREATE TABLE IF NOT EXISTS t2 AS SELECT generate_series(10, 19) j;
CREATE TABLE IF NOT EXISTS t3 AS SELECT generate_series(0, 9) i;
CREATE TABLE IF NOT EXISTS t4 AS SELECT generate_series(10, 19) j;

\c b2

GRANT CREATE,USAGE ON SCHEMA public TO u2;
SET ROLE u2;
CREATE TABLE t1 AS SELECT generate_series(0, 9) i;
CREATE TABLE t2 AS SELECT generate_series(10, 19) j;
CREATE TABLE t3 AS SELECT generate_series(0, 9) i;
CREATE TABLE t4 AS SELECT generate_series(10, 19) j;
CREATE TABLE IF NOT EXISTS t1 AS SELECT generate_series(0, 9) i;
CREATE TABLE IF NOT EXISTS t2 AS SELECT generate_series(10, 19) j;
CREATE TABLE IF NOT EXISTS t3 AS SELECT generate_series(0, 9) i;
CREATE TABLE IF NOT EXISTS t4 AS SELECT generate_series(10, 19) j;