From 822111e278afb1318a9ae98cab954f971129606c Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Tue, 19 Jul 2022 14:58:51 +0300 Subject: [PATCH 1/6] [PBCKP-232] remove 9.5-9.6 support, part 1 --- .travis.yml | 1 + README.md | 72 +++++++-------- src/backup.c | 10 --- src/catchup.c | 8 -- src/parsexlog.c | 5 -- src/pg_probackup.c | 7 +- src/pg_probackup.h | 8 -- src/stream.c | 42 ++------- src/util.c | 42 +-------- src/utils/pgut.c | 18 +--- tests/backup.py | 149 ++++++++------------------------ tests/checkdb.py | 60 +------------ tests/helpers/ptrack_helpers.py | 61 ++++--------- 13 files changed, 100 insertions(+), 383 deletions(-) diff --git a/.travis.yml b/.travis.yml index 26b2bc4e2..ed932b68e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -52,6 +52,7 @@ env: jobs: allow_failures: - if: env(PG_BRANCH) = master + - if: env(PG_BRANCH) = REL9_6_STABLE - if: env(PG_BRANCH) = REL9_5_STABLE # - if: env(MODE) IN (archive, backup, delta, locking, merge, replica, retention, restore) diff --git a/README.md b/README.md index 5da8d199e..281116cce 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ `pg_probackup` is a utility to manage backup and recovery of PostgreSQL database clusters. It is designed to perform periodic backups of the PostgreSQL instance that enable you to restore the server in case of a failure. The utility is compatible with: -* PostgreSQL 9.6, 10, 11, 12, 13, 14; +* PostgreSQL 10, 11, 12, 13, 14; As compared to other backup solutions, `pg_probackup` offers the following benefits that can help you implement different backup strategies and deal with large amounts of data: * Incremental backup: page-level incremental backup allows you to save disk space, speed up backup and restore. With three different incremental modes, you can plan the backup strategy in accordance with your data flow. @@ -74,62 +74,62 @@ Installers are available in release **assets**. 
[Latests](https://github.com/pos #DEB Ubuntu|Debian Packages sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg-probackup-{14,13,12,11,10,9.6}-dbg +sudo apt-get install pg-probackup-{14,13,12,11,10} +sudo apt-get install pg-probackup-{14,13,12,11,10}-dbg #DEB-SRC Packages sudo sh -c 'echo "deb-src [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" >>\ /etc/apt/sources.list.d/pg_probackup.list' && sudo apt-get update -sudo apt-get source pg-probackup-{14,13,12,11,10,9.6} +sudo apt-get source pg-probackup-{14,13,12,11,10} #DEB Astra Linix Orel sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup/deb/ stretch main-stretch" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{14,13,12,11,10,9.6}{-dbg,} +sudo apt-get install pg-probackup-{14,13,12,11,10}{-dbg,} #RPM Centos Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-centos.noarch.rpm -yum install pg_probackup-{14,13,12,11,10,9.6} -yum install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{14,13,12,11,10} +yum install pg_probackup-{14,13,12,11,10}-debuginfo #RPM RHEL Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-rhel.noarch.rpm -yum install pg_probackup-{14,13,12,11,10,9.6} -yum install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{14,13,12,11,10} +yum install pg_probackup-{14,13,12,11,10}-debuginfo #RPM Oracle Linux Packages rpm -ivh 
https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-oraclelinux.noarch.rpm -yum install pg_probackup-{14,13,12,11,10,9.6} -yum install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{14,13,12,11,10} +yum install pg_probackup-{14,13,12,11,10}-debuginfo #SRPM Centos|RHEL|OracleLinux Packages -yumdownloader --source pg_probackup-{14,13,12,11,10,9.6} +yumdownloader --source pg_probackup-{14,13,12,11,10} #RPM SUSE|SLES Packages zypper install --allow-unsigned-rpm -y https://repo.postgrespro.ru/pg_probackup/keys/pg_probackup-repo-suse.noarch.rpm -zypper --gpg-auto-import-keys install -y pg_probackup-{14,13,12,11,10,9.6} -zypper install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +zypper --gpg-auto-import-keys install -y pg_probackup-{14,13,12,11,10} +zypper install pg_probackup-{14,13,12,11,10}-debuginfo #SRPM SUSE|SLES Packages -zypper si pg_probackup-{14,13,12,11,10,9.6} +zypper si pg_probackup-{14,13,12,11,10} #RPM ALT Linux 7 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p7 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{14,13,12,11,10} +sudo apt-get install pg_probackup-{14,13,12,11,10}-debuginfo #RPM ALT Linux 8 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p8 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{14,13,12,11,10} +sudo apt-get install pg_probackup-{14,13,12,11,10}-debuginfo #RPM ALT Linux 9 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p9 x86_64 vanilla" > /etc/apt/sources.list.d/pg_probackup.list' sudo apt-get update -sudo apt-get 
install pg_probackup-{14,13,12,11,10,9.6} -sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{14,13,12,11,10} +sudo apt-get install pg_probackup-{14,13,12,11,10}-debuginfo ``` #### pg_probackup for PostgresPro Standard and Enterprise @@ -137,46 +137,46 @@ sudo apt-get install pg_probackup-{14,13,12,11,10,9.6}-debuginfo #DEB Ubuntu|Debian Packages sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup-forks/deb/ $(lsb_release -cs) main-$(lsb_release -cs)" > /etc/apt/sources.list.d/pg_probackup-forks.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup-forks/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{std,ent}-{13,12,11,10,9.6} -sudo apt-get install pg-probackup-{std,ent}-{13,12,11,10,9.6}-dbg +sudo apt-get install pg-probackup-{std,ent}-{13,12,11,10} +sudo apt-get install pg-probackup-{std,ent}-{13,12,11,10}-dbg #DEB Astra Linix Orel sudo sh -c 'echo "deb [arch=amd64] https://repo.postgrespro.ru/pg_probackup-forks/deb/ stretch main-stretch" > /etc/apt/sources.list.d/pg_probackup.list' sudo wget -O - https://repo.postgrespro.ru/pg_probackup-forks/keys/GPG-KEY-PG_PROBACKUP | sudo apt-key add - && sudo apt-get update -sudo apt-get install pg-probackup-{std,ent}-{12,11,10,9.6}{-dbg,} +sudo apt-get install pg-probackup-{std,ent}-{12,11,10}{-dbg,} #RPM Centos Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-centos.noarch.rpm -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{std,ent}-{13,12,11,10} +yum install pg_probackup-{std,ent}-{13,12,11,10}-debuginfo #RPM RHEL Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-rhel.noarch.rpm -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +yum 
install pg_probackup-{std,ent}-{13,12,11,10} +yum install pg_probackup-{std,ent}-{13,12,11,10}-debuginfo #RPM Oracle Linux Packages rpm -ivh https://repo.postgrespro.ru/pg_probackup-forks/keys/pg_probackup-repo-forks-oraclelinux.noarch.rpm -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6} -yum install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +yum install pg_probackup-{std,ent}-{13,12,11,10} +yum install pg_probackup-{std,ent}-{13,12,11,10}-debuginfo #RPM ALT Linux 7 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p7 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10} +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10}-debuginfo #RPM ALT Linux 8 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p8 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10} +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10}-debuginfo #RPM ALT Linux 9 sudo sh -c 'echo "rpm https://repo.postgrespro.ru/pg_probackup-forks/rpm/latest/altlinux-p9 x86_64 forks" > /etc/apt/sources.list.d/pg_probackup_forks.list' && sudo apt-get update -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6} -sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10,9.6}-debuginfo +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10} +sudo apt-get install pg_probackup-{std,ent}-{13,12,11,10}-debuginfo ``` Once you have `pg_probackup` installed, complete [the setup](https://postgrespro.github.io/pg_probackup/#pbk-install-and-setup). 
diff --git a/src/backup.c b/src/backup.c index 84b503245..0edb57710 100644 --- a/src/backup.c +++ b/src/backup.c @@ -133,12 +133,7 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, pg_start_backup(label, smooth_checkpoint, ¤t, nodeInfo, backup_conn); /* Obtain current timeline */ -#if PG_VERSION_NUM >= 90600 current.tli = get_current_timeline(backup_conn); -#else - /* PG-9.5 */ - current.tli = get_current_timeline_from_control(FIO_DB_HOST, instance_config.pgdata, false); -#endif /* * In incremental backup mode ensure that already-validated @@ -1053,7 +1048,6 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup, /* * Switch to a new WAL segment. It should be called only for master. - * For PG 9.5 it should be called only if pguser is superuser. */ void pg_switch_wal(PGconn *conn) @@ -1062,11 +1056,7 @@ pg_switch_wal(PGconn *conn) pg_silent_client_messages(conn); -#if PG_VERSION_NUM >= 100000 res = pgut_execute(conn, "SELECT pg_catalog.pg_switch_wal()", 0, NULL); -#else - res = pgut_execute(conn, "SELECT pg_catalog.pg_switch_xlog()", 0, NULL); -#endif PQclear(res); } diff --git a/src/catchup.c b/src/catchup.c index 385d8e9df..522279ac9 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -66,13 +66,7 @@ catchup_init_state(PGNodeInfo *source_node_info, const char *source_pgdata, cons source_node_info->is_ptrack_enabled = pg_is_ptrack_enabled(source_conn, source_node_info->ptrack_version_num); /* Obtain current timeline */ -#if PG_VERSION_NUM >= 90600 current.tli = get_current_timeline(source_conn); -#else - /* PG-9.5 */ - instance_config.pgdata = source_pgdata; - current.tli = get_current_timeline_from_control(FIO_DB_HOST, source_pgdata, false); -#endif elog(INFO, "Catchup start, pg_probackup version: %s, " "PostgreSQL version: %s, " @@ -1033,7 +1027,6 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, if (!dry_run) wait_wal_and_calculate_stop_lsn(dest_xlog_path, stop_backup_result.lsn, ¤t); -#if 
PG_VERSION_NUM >= 90600 /* Write backup_label */ Assert(stop_backup_result.backup_label_content != NULL); if (!dry_run) @@ -1061,7 +1054,6 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, stop_backup_result.tablespace_map_content = NULL; stop_backup_result.tablespace_map_content_len = 0; } -#endif /* wait for end of wal streaming and calculate wal size transfered */ if (!dry_run) diff --git a/src/parsexlog.c b/src/parsexlog.c index df9b96fb3..39fb64f0a 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -29,13 +29,8 @@ * RmgrNames is an array of resource manager names, to make error messages * a bit nicer. */ -#if PG_VERSION_NUM >= 100000 #define PG_RMGR(symname,name,redo,desc,identify,startup,cleanup,mask) \ name, -#else -#define PG_RMGR(symname,name,redo,desc,identify,startup,cleanup) \ - name, -#endif static const char *RmgrNames[RM_MAX_ID + 1] = { #include "access/rmgrlist.h" diff --git a/src/pg_probackup.c b/src/pg_probackup.c index ff5ab85d3..b0245f864 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -78,10 +78,8 @@ pid_t my_pid = 0; __thread int my_thread_num = 1; bool progress = false; bool no_sync = false; -#if PG_VERSION_NUM >= 100000 char *replication_slot = NULL; bool temp_slot = false; -#endif bool perm_slot = false; /* backup options */ @@ -205,9 +203,7 @@ static ConfigOption cmd_options[] = { 'f', 'b', "backup-mode", opt_backup_mode, SOURCE_CMD_STRICT }, { 'b', 'C', "smooth-checkpoint", &smooth_checkpoint, SOURCE_CMD_STRICT }, { 's', 'S', "slot", &replication_slot, SOURCE_CMD_STRICT }, -#if PG_VERSION_NUM >= 100000 { 'b', 181, "temp-slot", &temp_slot, SOURCE_CMD_STRICT }, -#endif { 'b', 'P', "perm-slot", &perm_slot, SOURCE_CMD_STRICT }, { 'b', 182, "delete-wal", &delete_wal, SOURCE_CMD_STRICT }, { 'b', 183, "delete-expired", &delete_expired, SOURCE_CMD_STRICT }, @@ -905,14 +901,13 @@ main(int argc, char *argv[]) wal_file_name, instanceState->instance_name, instance_config.system_identifier, system_id); } 
-#if PG_VERSION_NUM >= 100000 if (temp_slot && perm_slot) elog(ERROR, "You cannot specify \"--perm-slot\" option with the \"--temp-slot\" option"); /* if slot name was not provided for temp slot, use default slot name */ if (!replication_slot && temp_slot) replication_slot = DEFAULT_TEMP_SLOT_NAME; -#endif + if (!replication_slot && perm_slot) replication_slot = DEFAULT_PERMANENT_SLOT_NAME; diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 6f6dcdff6..2439fc23b 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -65,13 +65,8 @@ extern const char *PROGRAM_EMAIL; #define DATABASE_DIR "database" #define BACKUPS_DIR "backups" #define WAL_SUBDIR "wal" -#if PG_VERSION_NUM >= 100000 #define PG_XLOG_DIR "pg_wal" #define PG_LOG_DIR "log" -#else -#define PG_XLOG_DIR "pg_xlog" -#define PG_LOG_DIR "pg_log" -#endif #define PG_TBLSPC_DIR "pg_tblspc" #define PG_GLOBAL_DIR "global" #define BACKUP_CONTROL_FILE "backup.control" @@ -777,11 +772,8 @@ extern bool stream_wal; extern bool show_color; extern bool progress; extern bool is_archive_cmd; /* true for archive-{get,push} */ -/* In pre-10 'replication_slot' is defined in receivelog.h */ extern char *replication_slot; -#if PG_VERSION_NUM >= 100000 extern bool temp_slot; -#endif extern bool perm_slot; /* backup options */ diff --git a/src/stream.c b/src/stream.c index 1ee8dee37..b10eb7308 100644 --- a/src/stream.c +++ b/src/stream.c @@ -2,7 +2,7 @@ * * stream.c: pg_probackup specific code for WAL streaming * - * Portions Copyright (c) 2015-2020, Postgres Professional + * Portions Copyright (c) 2015-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -174,10 +174,10 @@ checkpoint_timeout(PGconn *backup_conn) * CreateReplicationSlot(PGconn *conn, const char *slot_name, const char *plugin, * bool is_temporary, bool is_physical, bool reserve_wal, * bool slot_exists_ok) - * PG 9.5-10 + * PG 10 * CreateReplicationSlot(PGconn *conn, const char *slot_name, const 
char *plugin, * bool is_physical, bool slot_exists_ok) - * NOTE: PG 9.6 and 10 support reserve_wal in + * NOTE: PG 10 support reserve_wal in * pg_catalog.pg_create_physical_replication_slot(slot_name name [, immediately_reserve boolean]) * and * CREATE_REPLICATION_SLOT slot_name { PHYSICAL [ RESERVE_WAL ] | LOGICAL output_plugin } @@ -194,7 +194,7 @@ CreateReplicationSlot_compat(PGconn *conn, const char *slot_name, const char *pl #elif PG_VERSION_NUM >= 110000 return CreateReplicationSlot(conn, slot_name, plugin, is_temporary, is_physical, /* reserve_wal = */ true, slot_exists_ok); -#elif PG_VERSION_NUM >= 100000 +#else /* * PG-10 doesn't support creating temp_slot by calling CreateReplicationSlot(), but * it will be created by setting StreamCtl.temp_slot later in StreamLog() @@ -203,10 +203,6 @@ CreateReplicationSlot_compat(PGconn *conn, const char *slot_name, const char *pl return CreateReplicationSlot(conn, slot_name, plugin, /*is_temporary,*/ is_physical, /*reserve_wal,*/ slot_exists_ok); else return true; -#else - /* these parameters not supported in PG < 10 */ - Assert(!is_temporary); - return CreateReplicationSlot(conn, slot_name, plugin, /*is_temporary,*/ is_physical, /*reserve_wal,*/ slot_exists_ok); #endif } @@ -229,13 +225,8 @@ StreamLog(void *arg) stream_stop_begin = 0; /* Create repslot */ -#if PG_VERSION_NUM >= 100000 if (temp_slot || perm_slot) if (!CreateReplicationSlot_compat(stream_arg->conn, replication_slot, NULL, temp_slot, true, false)) -#else - if (perm_slot) - if (!CreateReplicationSlot_compat(stream_arg->conn, replication_slot, NULL, false, true, false)) -#endif { interrupted = true; elog(ERROR, "Couldn't create physical replication slot %s", replication_slot); @@ -248,18 +239,13 @@ StreamLog(void *arg) elog(LOG, "started streaming WAL at %X/%X (timeline %u) using%s slot %s", (uint32) (stream_arg->startpos >> 32), (uint32) stream_arg->startpos, stream_arg->starttli, -#if PG_VERSION_NUM >= 100000 temp_slot ? 
" temporary" : "", -#else - "", -#endif replication_slot); else elog(LOG, "started streaming WAL at %X/%X (timeline %u)", (uint32) (stream_arg->startpos >> 32), (uint32) stream_arg->startpos, stream_arg->starttli); -#if PG_VERSION_NUM >= 90600 { StreamCtl ctl; @@ -274,7 +260,6 @@ StreamLog(void *arg) ctl.synchronous = false; ctl.mark_done = false; -#if PG_VERSION_NUM >= 100000 ctl.walmethod = CreateWalDirectoryMethod( stream_arg->basedir, // (instance_config.compress_alg == NONE_COMPRESS) ? 0 : instance_config.compress_level, @@ -284,13 +269,10 @@ StreamLog(void *arg) ctl.stop_socket = PGINVALID_SOCKET; ctl.do_sync = false; /* We sync all files at the end of backup */ // ctl.mark_done /* for future use in s3 */ -#if PG_VERSION_NUM >= 100000 && PG_VERSION_NUM < 110000 +#if PG_VERSION_NUM < 110000 /* StreamCtl.temp_slot used only for PG-10, in PG>10, temp_slots are created by calling CreateReplicationSlot() */ ctl.temp_slot = temp_slot; -#endif /* PG_VERSION_NUM >= 100000 && PG_VERSION_NUM < 110000 */ -#else /* PG_VERSION_NUM < 100000 */ - ctl.basedir = (char *) stream_arg->basedir; -#endif /* PG_VERSION_NUM >= 100000 */ +#endif /* PG_VERSION_NUM < 110000 */ if (ReceiveXlogStream(stream_arg->conn, &ctl) == false) { @@ -298,25 +280,13 @@ StreamLog(void *arg) elog(ERROR, "Problem in receivexlog"); } -#if PG_VERSION_NUM >= 100000 if (!ctl.walmethod->finish()) { interrupted = true; elog(ERROR, "Could not finish writing WAL files: %s", strerror(errno)); } -#endif /* PG_VERSION_NUM >= 100000 */ - } -#else /* PG_VERSION_NUM < 90600 */ - /* PG-9.5 */ - if (ReceiveXlogStream(stream_arg->conn, stream_arg->startpos, stream_arg->starttli, - NULL, (char *) stream_arg->basedir, stop_streaming, - standby_message_timeout, NULL, false, false) == false) - { - interrupted = true; - elog(ERROR, "Problem in receivexlog"); } -#endif /* PG_VERSION_NUM >= 90600 */ /* be paranoid and sort xlog_files_list, * so if stop_lsn segno is already in the list, diff --git a/src/util.c b/src/util.c 
index e89f5776b..28bdf283e 100644 --- a/src/util.c +++ b/src/util.c @@ -102,11 +102,7 @@ checkControlFile(ControlFileData *ControlFile) static void digestControlFile(ControlFileData *ControlFile, char *src, size_t size) { -#if PG_VERSION_NUM >= 100000 int ControlFileSize = PG_CONTROL_FILE_SIZE; -#else - int ControlFileSize = PG_CONTROL_SIZE; -#endif if (size != ControlFileSize) elog(ERROR, "unexpected control file size %d, expected %d", @@ -127,11 +123,7 @@ writeControlFile(fio_location location, const char *path, ControlFileData *Contr int fd; char *buffer = NULL; -#if PG_VERSION_NUM >= 100000 int ControlFileSize = PG_CONTROL_FILE_SIZE; -#else - int ControlFileSize = PG_CONTROL_SIZE; -#endif /* copy controlFileSize */ buffer = pg_malloc0(ControlFileSize); @@ -207,44 +199,25 @@ get_current_timeline_from_control(fio_location location, const char *pgdata_path } /* - * Get last check point record ptr from pg_tonrol. + * Get last check point record ptr from pg_control. */ XLogRecPtr get_checkpoint_location(PGconn *conn) { -#if PG_VERSION_NUM >= 90600 PGresult *res; uint32 lsn_hi; uint32 lsn_lo; XLogRecPtr lsn; -#if PG_VERSION_NUM >= 100000 res = pgut_execute(conn, "SELECT checkpoint_lsn FROM pg_catalog.pg_control_checkpoint()", 0, NULL); -#else - res = pgut_execute(conn, - "SELECT checkpoint_location FROM pg_catalog.pg_control_checkpoint()", - 0, NULL); -#endif XLogDataFromLSN(PQgetvalue(res, 0, 0), &lsn_hi, &lsn_lo); PQclear(res); /* Calculate LSN */ lsn = ((uint64) lsn_hi) << 32 | lsn_lo; return lsn; -#else - /* PG-9.5 */ - char *buffer; - size_t size; - ControlFileData ControlFile; - - buffer = slurpFile(FIO_DB_HOST, instance_config.pgdata, XLOG_CONTROL_FILE, &size, false); - digestControlFile(&ControlFile, buffer, size); - pg_free(buffer); - - return ControlFile.checkPoint; -#endif } uint64 @@ -267,7 +240,6 @@ get_system_identifier(fio_location location, const char *pgdata_path, bool safe) uint64 get_remote_system_identifier(PGconn *conn) { -#if PG_VERSION_NUM >= 
90600 PGresult *res; uint64 system_id_conn; char *val; @@ -284,18 +256,6 @@ get_remote_system_identifier(PGconn *conn) PQclear(res); return system_id_conn; -#else - /* PG-9.5 */ - char *buffer; - size_t size; - ControlFileData ControlFile; - - buffer = slurpFile(FIO_DB_HOST, instance_config.pgdata, XLOG_CONTROL_FILE, &size, false); - digestControlFile(&ControlFile, buffer, size); - pg_free(buffer); - - return ControlFile.system_identifier; -#endif } uint32 diff --git a/src/utils/pgut.c b/src/utils/pgut.c index c220b807d..f1b8da0b2 100644 --- a/src/utils/pgut.c +++ b/src/utils/pgut.c @@ -3,7 +3,7 @@ * pgut.c * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2017-2021, Postgres Professional + * Portions Copyright (c) 2017-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -20,11 +20,7 @@ #include "common/string.h" #endif -#if PG_VERSION_NUM >= 100000 #include "common/connect.h" -#else -#include "fe_utils/connect.h" -#endif #include @@ -94,7 +90,7 @@ prompt_for_password(const char *username) snprintf(message, lengthof(message), "Password for user %s: ", username); password = simple_prompt(message , false); } -#elif PG_VERSION_NUM >= 100000 +#else password = (char *) pgut_malloc(sizeof(char) * 100 + 1); if (username == NULL) simple_prompt("Password: ", password, 100, false); @@ -104,17 +100,7 @@ prompt_for_password(const char *username) snprintf(message, lengthof(message), "Password for user %s: ", username); simple_prompt(message, password, 100, false); } -#else - if (username == NULL) - password = simple_prompt("Password: ", 100, false); - else - { - char message[256]; - snprintf(message, lengthof(message), "Password for user %s: ", username); - password = simple_prompt(message, 100, false); - } #endif - in_password = false; } diff --git a/tests/backup.py b/tests/backup.py index 20ac480e0..23836cdbe 100644 --- a/tests/backup.py +++ b/tests/backup.py 
@@ -1856,118 +1856,43 @@ def test_backup_with_least_privileges_role(self): "CREATE SCHEMA ptrack; " "CREATE EXTENSION ptrack WITH SCHEMA ptrack") - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION 
pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON 
FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # >= 10 - else: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE 
pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) + # PG >= 10 + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN 
REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) if self.ptrack: node.safe_psql( diff --git a/tests/checkdb.py b/tests/checkdb.py index 5b6dda250..71f81fd6c 100644 --- a/tests/checkdb.py +++ b/tests/checkdb.py @@ -640,66 +640,8 @@ def test_checkdb_with_least_privileges(self): "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC;") - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - 'CREATE ROLE backup WITH LOGIN; ' - 'GRANT CONNECT ON DATABASE backupdb to backup; ' - 'GRANT USAGE ON SCHEMA pg_catalog 
TO backup; ' - 'GRANT USAGE ON SCHEMA public TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_am TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_class TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' # amcheck-next function - ) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - 'CREATE ROLE backup WITH LOGIN; ' - 'GRANT CONNECT ON DATABASE backupdb to backup; ' - 'GRANT USAGE ON SCHEMA pg_catalog TO backup; ' - 'GRANT USAGE ON SCHEMA public TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_am TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_class TO backup; ' - 
'GRANT SELECT ON TABLE pg_catalog.pg_index TO backup; ' - 'GRANT SELECT ON TABLE pg_catalog.pg_namespace TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.texteq(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.namene(name, name) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.int8(integer) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.charne("char", "char") TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.string_to_array(text, text) TO backup; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.array_position(anyarray, anyelement) TO backup; ' -# 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass) TO backup; ' - 'GRANT EXECUTE ON FUNCTION bt_index_check(regclass, bool) TO backup;' - ) # PG 10 - elif self.get_version(node) > 100000 and self.get_version(node) < 110000: + if self.get_version(node) < 110000: node.safe_psql( 'backupdb', 'CREATE ROLE backup WITH LOGIN; ' diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index e3036d9c4..0fa252739 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -417,52 +417,21 @@ def simple_bootstrap(self, node, role) -> None: 'postgres', 'CREATE ROLE {0} WITH LOGIN REPLICATION'.format(role)) - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'postgres', - 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION 
pg_catalog.pg_start_backup(text, boolean) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0};'.format(role)) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'postgres', - 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO {0};'.format(role)) - # >= 10 - else: - node.safe_psql( - 'postgres', - 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; 
' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0}; ' - 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO {0};'.format(role)) + # PG >= 10 + node.safe_psql( + 'postgres', + 'GRANT USAGE ON SCHEMA pg_catalog TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO {0}; ' + 'GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_checkpoint() TO {0};'.format(role)) def create_tblspace_in_node(self, node, tblspc_name, tblspc_path=None, cfs=False): res = node.execute( From ae275dccd35e2e865bce92f51e554331947cd030 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Wed, 20 Jul 2022 02:15:07 +0300 Subject: [PATCH 2/6] [PBCKP-232] remove 9.5-9.6 support, part 2 --- .travis.yml | 4 - src/backup.c | 172 ++++++-------------- src/catchup.c | 13 +- src/dir.c | 16 -- src/pg_probackup.h | 6 +- src/utils/file.c | 5 +- tests/archive.py | 169 ++++---------------- tests/auth_test.py | 30 +--- tests/backup.py | 272 ++++++++------------------------ tests/catchup.py | 41 +++-- tests/false_positive.py | 3 - tests/helpers/ptrack_helpers.py | 32 +--- tests/incr_restore.py | 10 -- tests/pgpro2068.py | 24 +-- tests/pgpro560.py | 40 ++--- tests/ptrack.py | 143 ++++------------- tests/replica.py | 85 +--------- tests/restore.py | 201 +++++++---------------- tests/retention.py | 13 -- tests/validate.py | 19 +-- 20 files changed, 295 insertions(+), 1003 deletions(-) diff --git a/.travis.yml b/.travis.yml index ed932b68e..9e48c9cab 100644 --- a/.travis.yml +++ b/.travis.yml @@ -32,8 +32,6 @@ env: - PG_VERSION=12 PG_BRANCH=REL_12_STABLE PTRACK_PATCH_PG_BRANCH=REL_12_STABLE - PG_VERSION=11 PG_BRANCH=REL_11_STABLE PTRACK_PATCH_PG_BRANCH=REL_11_STABLE - PG_VERSION=10 PG_BRANCH=REL_10_STABLE - - PG_VERSION=9.6 PG_BRANCH=REL9_6_STABLE - - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=archive # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=backup # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup @@ -52,8 +50,6 @@ env: jobs: allow_failures: - if: env(PG_BRANCH) = master - - if: env(PG_BRANCH) = REL9_6_STABLE - - if: env(PG_BRANCH) = REL9_5_STABLE # - if: env(MODE) IN (archive, backup, delta, locking, merge, replica, retention, restore) # Only run CI for master branch commits to limit our travis usage diff --git a/src/backup.c b/src/backup.c index 0edb57710..449d8d09c 100644 --- a/src/backup.c +++ b/src/backup.c @@ -32,9 +32,6 @@ parray *backup_files_list = NULL; /* We need critical section for 
datapagemap_add() in case of using threads */ static pthread_mutex_t backup_pagemap_mutex = PTHREAD_MUTEX_INITIALIZER; -// TODO: move to PGnodeInfo -bool exclusive_backup = false; - /* Is pg_start_backup() was executed */ bool backup_in_progress = false; @@ -80,7 +77,7 @@ backup_stopbackup_callback(bool fatal, void *userdata) { elog(WARNING, "backup in progress, stop backup"); /* don't care about stop_lsn in case of error */ - pg_stop_backup_send(st->conn, st->server_version, current.from_replica, exclusive_backup, NULL); + pg_stop_backup_send(st->conn, st->server_version, current.from_replica, NULL); } } @@ -493,10 +490,10 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, /* Notify end of backup */ pg_stop_backup(instanceState, ¤t, backup_conn, nodeInfo); - /* In case of backup from replica >= 9.6 we must fix minRecPoint, + /* In case of backup from replica we must fix minRecPoint, * First we must find pg_control in backup_files_list. */ - if (current.from_replica && !exclusive_backup) + if (current.from_replica) { pgFile *pg_control = NULL; @@ -781,11 +778,6 @@ do_backup(InstanceState *instanceState, pgSetBackupParams *set_backup_params, } } - if (current.from_replica && exclusive_backup) - /* Check master connection options */ - if (instance_config.master_conn_opt.pghost == NULL) - elog(ERROR, "Options for connection to master must be provided to perform backup from replica"); - /* add note to backup if requested */ if (set_backup_params && set_backup_params->note) add_note(¤t, set_backup_params->note); @@ -866,22 +858,12 @@ check_server_version(PGconn *conn, PGNodeInfo *nodeInfo) elog(ERROR, "Unknown server version %d", nodeInfo->server_version); if (nodeInfo->server_version < 100000) - sprintf(nodeInfo->server_version_str, "%d.%d", - nodeInfo->server_version / 10000, - (nodeInfo->server_version / 100) % 100); - else - sprintf(nodeInfo->server_version_str, "%d", - nodeInfo->server_version / 10000); - - if (nodeInfo->server_version < 90500) 
elog(ERROR, "server version is %s, must be %s or higher", - nodeInfo->server_version_str, "9.5"); + nodeInfo->server_version_str, "10"); - if (current.from_replica && nodeInfo->server_version < 90600) - elog(ERROR, - "server version is %s, must be %s or higher for backup from replica", - nodeInfo->server_version_str, "9.6"); + sprintf(nodeInfo->server_version_str, "%d", + nodeInfo->server_version / 10000); if (nodeInfo->pgpro_support) res = pgut_execute(conn, "SELECT pg_catalog.pgpro_edition()", 0, NULL); @@ -922,9 +904,6 @@ check_server_version(PGconn *conn, PGNodeInfo *nodeInfo) if (res) PQclear(res); - - /* Do exclusive backup only for PostgreSQL 9.5 */ - exclusive_backup = nodeInfo->server_version < 90600; } /* @@ -1006,16 +985,10 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup, /* 2nd argument is 'fast'*/ params[1] = smooth ? "false" : "true"; - if (!exclusive_backup) - res = pgut_execute(conn, - "SELECT pg_catalog.pg_start_backup($1, $2, false)", - 2, - params); - else - res = pgut_execute(conn, - "SELECT pg_catalog.pg_start_backup($1, $2)", - 2, - params); + res = pgut_execute(conn, + "SELECT pg_catalog.pg_start_backup($1, $2, false)", + 2, + params); /* * Set flag that pg_start_backup() was called. If an error will happen it @@ -1034,14 +1007,10 @@ pg_start_backup(const char *label, bool smooth, pgBackup *backup, PQclear(res); if ((!backup->stream || backup->backup_mode == BACKUP_MODE_DIFF_PAGE) && - !backup->from_replica && - !(nodeInfo->server_version < 90600 && - !nodeInfo->is_superuser)) + !backup->from_replica) /* * Switch to a new WAL segment. It is necessary to get archived WAL * segment, which includes start LSN of current backup. 
- * Don`t do this for replica backups and for PG 9.5 if pguser is not superuser - * (because in 9.5 only superuser can switch WAL) */ pg_switch_wal(conn); } @@ -1546,20 +1515,9 @@ pg_create_restore_point(PGconn *conn, time_t backup_start_time) } void -pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica, bool is_exclusive, char **query_text) +pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica, char **query_text) { static const char - stop_exlusive_backup_query[] = - /* - * Stop the non-exclusive backup. Besides stop_lsn it returns from - * pg_stop_backup(false) copy of the backup label and tablespace map - * so they can be written to disk by the caller. - * TODO, question: add NULLs as backup_label and tablespace_map? - */ - "SELECT" - " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," - " current_timestamp(0)::timestamptz," - " pg_catalog.pg_stop_backup() as lsn", stop_backup_on_master_query[] = "SELECT" " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," @@ -1568,16 +1526,8 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica " labelfile," " spcmapfile" " FROM pg_catalog.pg_stop_backup(false, false)", - stop_backup_on_master_before10_query[] = - "SELECT" - " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," - " current_timestamp(0)::timestamptz," - " lsn," - " labelfile," - " spcmapfile" - " FROM pg_catalog.pg_stop_backup(false)", /* - * In case of backup from replica >= 9.6 we do not trust minRecPoint + * In case of backup from replica we do not trust minRecPoint * and stop_backup LSN, so we use latest replayed LSN as STOP LSN. 
*/ stop_backup_on_replica_query[] = @@ -1587,28 +1537,12 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica " pg_catalog.pg_last_wal_replay_lsn()," " labelfile," " spcmapfile" - " FROM pg_catalog.pg_stop_backup(false, false)", - stop_backup_on_replica_before10_query[] = - "SELECT" - " pg_catalog.txid_snapshot_xmax(pg_catalog.txid_current_snapshot())," - " current_timestamp(0)::timestamptz," - " pg_catalog.pg_last_xlog_replay_location()," - " labelfile," - " spcmapfile" - " FROM pg_catalog.pg_stop_backup(false)"; + " FROM pg_catalog.pg_stop_backup(false, false)"; const char * const stop_backup_query = - is_exclusive ? - stop_exlusive_backup_query : - server_version >= 100000 ? - (is_started_on_replica ? + is_started_on_replica ? stop_backup_on_replica_query : - stop_backup_on_master_query - ) : - (is_started_on_replica ? - stop_backup_on_replica_before10_query : - stop_backup_on_master_before10_query - ); + stop_backup_on_master_query; bool sent = false; /* Make proper timestamp format for parse_time(recovery_time) */ @@ -1641,7 +1575,7 @@ pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica */ void pg_stop_backup_consume(PGconn *conn, int server_version, - bool is_exclusive, uint32 timeout, const char *query_text, + uint32 timeout, const char *query_text, PGStopBackupResult *result) { PGresult *query_result; @@ -1743,28 +1677,18 @@ pg_stop_backup_consume(PGconn *conn, int server_version, /* get backup_label_content */ result->backup_label_content = NULL; // if (!PQgetisnull(query_result, 0, backup_label_colno)) - if (!is_exclusive) - { - result->backup_label_content_len = PQgetlength(query_result, 0, backup_label_colno); - if (result->backup_label_content_len > 0) - result->backup_label_content = pgut_strndup(PQgetvalue(query_result, 0, backup_label_colno), - result->backup_label_content_len); - } else { - result->backup_label_content_len = 0; - } + result->backup_label_content_len = 
PQgetlength(query_result, 0, backup_label_colno); + if (result->backup_label_content_len > 0) + result->backup_label_content = pgut_strndup(PQgetvalue(query_result, 0, backup_label_colno), + result->backup_label_content_len); /* get tablespace_map_content */ result->tablespace_map_content = NULL; // if (!PQgetisnull(query_result, 0, tablespace_map_colno)) - if (!is_exclusive) - { - result->tablespace_map_content_len = PQgetlength(query_result, 0, tablespace_map_colno); - if (result->tablespace_map_content_len > 0) - result->tablespace_map_content = pgut_strndup(PQgetvalue(query_result, 0, tablespace_map_colno), - result->tablespace_map_content_len); - } else { - result->tablespace_map_content_len = 0; - } + result->tablespace_map_content_len = PQgetlength(query_result, 0, tablespace_map_colno); + if (result->tablespace_map_content_len > 0) + result->tablespace_map_content = pgut_strndup(PQgetvalue(query_result, 0, tablespace_map_colno), + result->tablespace_map_content_len); } /* @@ -1832,21 +1756,18 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb /* Create restore point * Only if backup is from master. - * For PG 9.5 create restore point only if pguser is superuser. 
*/ - if (!backup->from_replica && - !(nodeInfo->server_version < 90600 && - !nodeInfo->is_superuser)) //TODO: check correctness + if (!backup->from_replica) pg_create_restore_point(pg_startbackup_conn, backup->start_time); /* Execute pg_stop_backup using PostgreSQL connection */ - pg_stop_backup_send(pg_startbackup_conn, nodeInfo->server_version, backup->from_replica, exclusive_backup, &query_text); + pg_stop_backup_send(pg_startbackup_conn, nodeInfo->server_version, backup->from_replica, &query_text); /* * Wait for the result of pg_stop_backup(), but no longer than * archive_timeout seconds */ - pg_stop_backup_consume(pg_startbackup_conn, nodeInfo->server_version, exclusive_backup, timeout, query_text, &stop_backup_result); + pg_stop_backup_consume(pg_startbackup_conn, nodeInfo->server_version, timeout, query_text, &stop_backup_result); if (backup->stream) { @@ -1859,28 +1780,25 @@ pg_stop_backup(InstanceState *instanceState, pgBackup *backup, PGconn *pg_startb wait_wal_and_calculate_stop_lsn(xlog_path, stop_backup_result.lsn, backup); /* Write backup_label and tablespace_map */ - if (!exclusive_backup) + Assert(stop_backup_result.backup_label_content != NULL); + + /* Write backup_label */ + pg_stop_backup_write_file_helper(backup->database_dir, PG_BACKUP_LABEL_FILE, "backup label", + stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len, + backup_files_list); + free(stop_backup_result.backup_label_content); + stop_backup_result.backup_label_content = NULL; + stop_backup_result.backup_label_content_len = 0; + + /* Write tablespace_map */ + if (stop_backup_result.tablespace_map_content != NULL) { - Assert(stop_backup_result.backup_label_content != NULL); - - /* Write backup_label */ - pg_stop_backup_write_file_helper(backup->database_dir, PG_BACKUP_LABEL_FILE, "backup label", - stop_backup_result.backup_label_content, stop_backup_result.backup_label_content_len, + pg_stop_backup_write_file_helper(backup->database_dir, 
PG_TABLESPACE_MAP_FILE, "tablespace map", + stop_backup_result.tablespace_map_content, stop_backup_result.tablespace_map_content_len, backup_files_list); - free(stop_backup_result.backup_label_content); - stop_backup_result.backup_label_content = NULL; - stop_backup_result.backup_label_content_len = 0; - - /* Write tablespace_map */ - if (stop_backup_result.tablespace_map_content != NULL) - { - pg_stop_backup_write_file_helper(backup->database_dir, PG_TABLESPACE_MAP_FILE, "tablespace map", - stop_backup_result.tablespace_map_content, stop_backup_result.tablespace_map_content_len, - backup_files_list); - free(stop_backup_result.tablespace_map_content); - stop_backup_result.tablespace_map_content = NULL; - stop_backup_result.tablespace_map_content_len = 0; - } + free(stop_backup_result.tablespace_map_content); + stop_backup_result.tablespace_map_content = NULL; + stop_backup_result.tablespace_map_content_len = 0; } if (backup->stream) diff --git a/src/catchup.c b/src/catchup.c index 522279ac9..1195f7a7f 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -185,9 +185,6 @@ catchup_preflight_checks(PGNodeInfo *source_node_info, PGconn *source_conn, elog(ERROR, "Ptrack is disabled"); } - if (current.from_replica && exclusive_backup) - elog(ERROR, "Catchup from standby is only available for PostgreSQL >= 9.6"); - /* check that we don't overwrite tablespace in source pgdata */ catchup_check_tablespaces_existance_in_tbsmapping(source_conn); @@ -1012,13 +1009,13 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, pg_silent_client_messages(source_conn); /* Execute pg_stop_backup using PostgreSQL connection */ - pg_stop_backup_send(source_conn, source_node_info.server_version, current.from_replica, exclusive_backup, &stop_backup_query_text); + pg_stop_backup_send(source_conn, source_node_info.server_version, current.from_replica, &stop_backup_query_text); /* * Wait for the result of pg_stop_backup(), but no longer than * archive_timeout seconds */ 
- pg_stop_backup_consume(source_conn, source_node_info.server_version, exclusive_backup, timeout, stop_backup_query_text, &stop_backup_result); + pg_stop_backup_consume(source_conn, source_node_info.server_version, timeout, stop_backup_query_text, &stop_backup_result); /* Cleanup */ pg_free(stop_backup_query_text); @@ -1076,12 +1073,10 @@ do_catchup(const char *source_pgdata, const char *dest_pgdata, int num_threads, } /* - * In case of backup from replica >= 9.6 we must fix minRecPoint + * In case of backup from replica we must fix minRecPoint */ - if (current.from_replica && !exclusive_backup) - { + if (current.from_replica) set_min_recovery_point(source_pg_control_file, dest_pgdata, current.stop_lsn); - } /* close ssh session in main thread */ fio_disconnect(); diff --git a/src/dir.c b/src/dir.c index 3e5e28cef..5f25f2ee4 100644 --- a/src/dir.c +++ b/src/dir.c @@ -83,11 +83,7 @@ static char *pgdata_exclude_files[] = "probackup_recovery.conf", "recovery.signal", "standby.signal", - NULL -}; -static char *pgdata_exclude_files_non_exclusive[] = -{ /*skip in non-exclusive backup */ "backup_label", "tablespace_map", @@ -571,18 +567,6 @@ dir_check_file(pgFile *file, bool backup_logs) /* Check if we need to exclude file by name */ if (S_ISREG(file->mode)) { - if (!exclusive_backup) - { - for (i = 0; pgdata_exclude_files_non_exclusive[i]; i++) - if (strcmp(file->rel_path, - pgdata_exclude_files_non_exclusive[i]) == 0) - { - /* Skip */ - elog(VERBOSE, "Excluding file: %s", file->name); - return CHECK_FALSE; - } - } - for (i = 0; pgdata_exclude_files[i]; i++) if (strcmp(file->rel_path, pgdata_exclude_files[i]) == 0) { diff --git a/src/pg_probackup.h b/src/pg_probackup.h index 2439fc23b..eb051065b 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -782,8 +782,6 @@ extern bool smooth_checkpoint; /* remote probackup options */ extern char* remote_agent; -extern bool exclusive_backup; - /* delete options */ extern bool delete_wal; extern bool delete_expired; @@ 
-1273,9 +1271,9 @@ extern void pg_start_backup(const char *label, bool smooth, pgBackup *backup, PGNodeInfo *nodeInfo, PGconn *conn); extern void pg_silent_client_messages(PGconn *conn); extern void pg_create_restore_point(PGconn *conn, time_t backup_start_time); -extern void pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica, bool is_exclusive, char **query_text); +extern void pg_stop_backup_send(PGconn *conn, int server_version, bool is_started_on_replica, char **query_text); extern void pg_stop_backup_consume(PGconn *conn, int server_version, - bool is_exclusive, uint32 timeout, const char *query_text, + uint32 timeout, const char *query_text, PGStopBackupResult *result); extern void pg_stop_backup_write_file_helper(const char *path, const char *filename, const char *error_msg_filename, const void *data, size_t len, parray *file_list); diff --git a/src/utils/file.c b/src/utils/file.c index 53ab451f8..92bebc7c8 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -38,7 +38,6 @@ typedef struct bool follow_symlink; bool add_root; bool backup_logs; - bool exclusive_backup; bool skip_hidden; int external_dir_num; } fio_list_dir_request; @@ -2798,7 +2797,6 @@ fio_list_dir_internal(parray *files, const char *root, bool exclude, req.follow_symlink = follow_symlink; req.add_root = add_root; req.backup_logs = backup_logs; - req.exclusive_backup = exclusive_backup; req.skip_hidden = skip_hidden; req.external_dir_num = external_dir_num; @@ -2891,7 +2889,6 @@ fio_list_dir_impl(int out, char* buf) * TODO: correctly send elog messages from agent to main process. 
*/ instance_config.logger.log_level_console = ERROR; - exclusive_backup = req->exclusive_backup; dir_list_file(file_files, req->path, req->exclude, req->follow_symlink, req->add_root, req->backup_logs, req->skip_hidden, @@ -4863,4 +4860,4 @@ init_pio_objects(void) localDrive = bindref_pioDrive($alloc(pioLocalDrive)); remoteDrive = bindref_pioDrive($alloc(pioRemoteDrive)); -} \ No newline at end of file +} diff --git a/tests/archive.py b/tests/archive.py index 52fb225e8..fe3d89b17 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -84,11 +84,6 @@ def test_pgpro434_2(self): 'checkpoint_timeout': '30s'} ) - if self.get_version(node) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because pg_control_checkpoint() is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -264,15 +259,9 @@ def test_pgpro434_3(self): with open(log_file, 'r') as f: log_content = f.read() - # in PG =< 9.6 pg_stop_backup always wait - if self.get_version(node) < 100000: - self.assertIn( - "ERROR: pg_stop_backup doesn't answer in 60 seconds, cancel it", - log_content) - else: - self.assertIn( - "ERROR: WAL segment 000000010000000000000003 could not be archived in 60 seconds", - log_content) + self.assertIn( + "ERROR: WAL segment 000000010000000000000003 could not be archived in 60 seconds", + log_content) log_file = os.path.join(node.logs_dir, 'postgresql.log') with open(log_file, 'r') as f: @@ -418,12 +407,8 @@ def test_archive_push_file_exists(self): self.assertNotIn( 'pg_probackup archive-push completed successfully', log_content) - if self.get_version(node) < 100000: - wal_src = os.path.join( - node.data_dir, 'pg_xlog', '000000010000000000000001') - else: - wal_src = os.path.join( - node.data_dir, 'pg_wal', '000000010000000000000001') + wal_src = os.path.join( + node.data_dir, 'pg_wal', '000000010000000000000001') if 
self.archive_compress: with open(wal_src, 'rb') as f_in, gzip.open( @@ -555,16 +540,10 @@ def test_archive_push_partial_file_exists(self): "postgres", "INSERT INTO t1 VALUES (1) RETURNING (xmin)").decode('utf-8').rstrip() - if self.get_version(node) < 100000: - filename_orig = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location());").rstrip() - else: - filename_orig = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip() + filename_orig = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip() filename_orig = filename_orig.decode('utf-8') @@ -634,16 +613,10 @@ def test_archive_push_part_file_exists_not_stale(self): "postgres", "create table t2()") - if self.get_version(node) < 100000: - filename_orig = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location());").rstrip() - else: - filename_orig = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip() + filename_orig = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn());").rstrip() filename_orig = filename_orig.decode('utf-8') @@ -708,11 +681,6 @@ def test_replica_archive(self): 'checkpoint_timeout': '30s', 'max_wal_size': '32MB'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) # ADD INSTANCE 'MASTER' self.add_instance(backup_dir, 'master', master) @@ -839,11 +807,6 @@ def test_master_and_replica_parallel_archiving(self): 'archive_timeout': '10s'} ) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 
'Skipped because backup from replica is not supported in PG 9.5') - replica = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'replica')) replica.cleanup() @@ -921,9 +884,6 @@ def test_basic_master_and_replica_concurrent_archiving(self): set replica with archiving, make sure that archiving on both node is working. """ - if self.pg_config_version < self.version_to_num('9.6.0'): - return unittest.skip('You need PostgreSQL >= 9.6 for this test') - fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') master = self.make_simple_node( @@ -934,11 +894,6 @@ def test_basic_master_and_replica_concurrent_archiving(self): 'checkpoint_timeout': '30s', 'archive_timeout': '10s'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - replica = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'replica')) replica.cleanup() @@ -1115,10 +1070,7 @@ def test_archive_pg_receivexlog(self): self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() - if self.get_version(node) < 100000: - pg_receivexlog_path = self.get_bin_path('pg_receivexlog') - else: - pg_receivexlog_path = self.get_bin_path('pg_receivewal') + pg_receivexlog_path = self.get_bin_path('pg_receivewal') pg_receivexlog = self.run_binary( [ @@ -1188,11 +1140,8 @@ def test_archive_pg_receivexlog_compression_pg10(self): self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() - if self.get_version(node) < self.version_to_num('10.0'): - return unittest.skip('You need PostgreSQL >= 10 for this test') - else: - pg_receivexlog_path = self.get_bin_path('pg_receivewal') + pg_receivexlog_path = self.get_bin_path('pg_receivewal') pg_receivexlog = self.run_binary( [ pg_receivexlog_path, '-p', str(node.port), '--synchronous', @@ -1269,11 +1218,6 @@ def 
test_archive_catalog(self): 'archive_timeout': '30s', 'checkpoint_timeout': '30s'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) self.set_archiving(backup_dir, 'master', master) @@ -1930,10 +1874,6 @@ def test_waldir_outside_pgdata_archiving(self): """ check that archive-push works correct with symlinked waldir """ - if self.pg_config_version < self.version_to_num('10.0'): - return unittest.skip( - 'Skipped because waldir outside pgdata is supported since PG 10') - fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') external_wal_dir = os.path.join(self.tmp_path, module_name, fname, 'ext_wal_dir') @@ -2041,10 +1981,7 @@ def test_archiving_and_slots(self): self.set_archiving(backup_dir, 'node', node, log_level='verbose') node.slow_start() - if self.get_version(node) < 100000: - pg_receivexlog_path = self.get_bin_path('pg_receivexlog') - else: - pg_receivexlog_path = self.get_bin_path('pg_receivewal') + pg_receivexlog_path = self.get_bin_path('pg_receivewal') # "pg_receivewal --create-slot --slot archive_slot --if-not-exists " # "&& pg_receivewal --synchronous -Z 1 /tmp/wal --slot archive_slot --no-loop" @@ -2167,22 +2104,13 @@ def test_archive_pg_receivexlog_partial_handling(self): set_replication=True, initdb_params=['--data-checksums']) - if self.get_version(node) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) node.slow_start() - if self.get_version(node) < 100000: - app_name = 'pg_receivexlog' - pg_receivexlog_path = self.get_bin_path('pg_receivexlog') - else: - app_name = 'pg_receivewal' - pg_receivexlog_path 
= self.get_bin_path('pg_receivewal') + app_name = 'pg_receivewal' + pg_receivexlog_path = self.get_bin_path('pg_receivewal') cmdline = [ pg_receivexlog_path, '-p', str(node.port), '--synchronous', @@ -2376,11 +2304,6 @@ def test_archive_get_batching_sanity(self): set_replication=True, initdb_params=['--data-checksums']) - if self.get_version(node) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2600,16 +2523,10 @@ def test_archive_show_partial_files_handling(self): "postgres", "create table t1()") - if self.get_version(node) < 100000: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip() - else: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() + filename = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() filename = filename.decode('utf-8') @@ -2624,16 +2541,10 @@ def test_archive_show_partial_files_handling(self): "postgres", "create table t2()") - if self.get_version(node) < 100000: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip() - else: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() + filename = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() filename = filename.decode('utf-8') @@ -2648,16 +2559,10 @@ def test_archive_show_partial_files_handling(self): "postgres", "create table t3()") - if self.get_version(node) < 100000: - filename = node.safe_psql( 
- "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip() - else: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() + filename = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() filename = filename.decode('utf-8') @@ -2672,16 +2577,10 @@ def test_archive_show_partial_files_handling(self): "postgres", "create table t4()") - if self.get_version(node) < 100000: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_xlogfile_name_offset(pg_current_xlog_location())").rstrip() - else: - filename = node.safe_psql( - "postgres", - "SELECT file_name " - "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() + filename = node.safe_psql( + "postgres", + "SELECT file_name " + "FROM pg_walfile_name_offset(pg_current_wal_flush_lsn())").rstrip() filename = filename.decode('utf-8') diff --git a/tests/auth_test.py b/tests/auth_test.py index 78af21be9..16c73308f 100644 --- a/tests/auth_test.py +++ b/tests/auth_test.py @@ -62,14 +62,9 @@ def test_backup_via_unprivileged_user(self): "GRANT EXECUTE ON FUNCTION" " pg_start_backup(text, boolean, boolean) TO backup;") - if self.get_version(node) < 100000: - node.safe_psql( - 'postgres', - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup") - else: - node.safe_psql( - 'postgres', - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup") + node.safe_psql( + 'postgres', + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup") try: self.backup_node( @@ -103,19 +98,10 @@ def test_backup_via_unprivileged_user(self): '\n Unexpected Error Message: {0}\n CMD: {1}'.format( repr(e.message), self.cmd)) - if self.get_version(node) < self.version_to_num('10.0'): - node.safe_psql( - "postgres", - "GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean) TO backup") - else: - 
node.safe_psql( - "postgres", - "GRANT EXECUTE ON FUNCTION " - "pg_stop_backup(boolean, boolean) TO backup") - # Do this for ptrack backups - node.safe_psql( - "postgres", - "GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup") + node.safe_psql( + "postgres", + "GRANT EXECUTE ON FUNCTION " + "pg_stop_backup(boolean, boolean) TO backup") self.backup_node( backup_dir, 'node', node, options=['-U', 'backup']) @@ -184,8 +170,6 @@ def setUpClass(cls): "GRANT EXECUTE ON FUNCTION current_setting(text) TO backup; " "GRANT EXECUTE ON FUNCTION pg_is_in_recovery() TO backup; " "GRANT EXECUTE ON FUNCTION pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean) TO backup; " "GRANT EXECUTE ON FUNCTION pg_create_restore_point(text) TO backup; " "GRANT EXECUTE ON FUNCTION pg_switch_xlog() TO backup; " "GRANT EXECUTE ON FUNCTION txid_current() TO backup; " diff --git a/tests/backup.py b/tests/backup.py index 23836cdbe..685436291 100644 --- a/tests/backup.py +++ b/tests/backup.py @@ -1427,9 +1427,6 @@ def test_basic_temp_slot_for_stream_backup(self): initdb_params=['--data-checksums'], pg_options={'max_wal_size': '40MB'}) - if self.get_version(node) < self.version_to_num('10.0'): - return unittest.skip('You need PostgreSQL >= 10 for this test') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) self.set_archiving(backup_dir, 'node', node) @@ -2167,62 +2164,24 @@ def test_backup_with_less_privileges_role(self): 'backupdb', 'CREATE EXTENSION ptrack') - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "BEGIN; " - "CREATE ROLE backup WITH LOGIN; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - 
"GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " - "COMMIT;" - ) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "BEGIN; " - "CREATE ROLE backup WITH LOGIN; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " - "COMMIT;" - ) - # >= 10 - else: - node.safe_psql( - 'backupdb', - "BEGIN; " - "CREATE ROLE backup WITH LOGIN; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION 
pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " - "COMMIT;" - ) + # PG >= 10 + node.safe_psql( + 'backupdb', + "BEGIN; " + "CREATE ROLE backup WITH LOGIN; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup; " + "COMMIT;" + ) # enable STREAM backup node.safe_psql( @@ -2262,10 +2221,6 @@ def test_backup_with_less_privileges_role(self): backup_dir, 'node', node, backup_type='ptrack', datname='backupdb', options=['--stream', '-U', 'backup']) - if self.get_version(node) < 90600: - self.del_test_dir(module_name, fname) - return - # Restore as replica replica = self.make_simple_node( base_dir=os.path.join(module_name, fname, 'replica')) @@ -2952,71 +2907,28 @@ def test_missing_replication_permission(self): 'postgres', 'CREATE DATABASE backupdb') - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', 
- "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " 
- "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;") - # >= 10 - else: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) + # PG >= 10 + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT 
CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) if ProbackupTest.enterprise: node.safe_psql( @@ -3083,73 +2995,28 @@ def test_missing_replication_permission_1(self): 'postgres', 'CREATE DATABASE backupdb') - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON 
FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION 
pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # >= 10 - else: - node.safe_psql( - 'backupdb', - "CREATE ROLE backup WITH LOGIN; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) + # PG >= 10 + node.safe_psql( + 'backupdb', + "CREATE ROLE backup WITH LOGIN; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) 
TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) if ProbackupTest.enterprise: node.safe_psql( @@ -3305,18 +3172,9 @@ def test_pg_stop_backup_missing_permissions(self): self.simple_bootstrap(node, 'backup') - if self.get_version(node) < 90600: - node.safe_psql( - 'postgres', - 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() FROM backup') - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'postgres', - 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) FROM backup') - else: - node.safe_psql( - 'postgres', - 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) FROM backup') + node.safe_psql( + 'postgres', + 'REVOKE EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) FROM backup') # Full backup in streaming mode try: diff --git a/tests/catchup.py b/tests/catchup.py index a83755c54..ac243da72 100644 --- a/tests/catchup.py +++ b/tests/catchup.py @@ -1231,27 +1231,26 @@ def test_catchup_with_replication_slot(self): ).decode('utf-8').rstrip() self.assertEqual(slot_name, 'pg_probackup_perm_slot', 'Slot name mismatch') - # 5. 
--perm-slot --temp-slot (PG>=10) - if self.get_version(src_pg) >= self.version_to_num('10.0'): - dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_5')) - try: - self.catchup_node( - backup_mode = 'FULL', - source_pgdata = src_pg.data_dir, - destination_node = dst_pg, - options = [ - '-d', 'postgres', '-p', str(src_pg.port), '--stream', - '--perm-slot', - '--temp-slot' - ] - ) - self.assertEqual(1, 0, "Expecting Error because conflicting options --perm-slot and --temp-slot used together\n Output: {0} \n CMD: {1}".format( - repr(self.output), self.cmd)) - except ProbackupException as e: - self.assertIn( - 'ERROR: You cannot specify "--perm-slot" option with the "--temp-slot" option', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) + # 5. --perm-slot --temp-slot + dst_pg = self.make_empty_node(os.path.join(module_name, self.fname, 'dst_5')) + try: + self.catchup_node( + backup_mode = 'FULL', + source_pgdata = src_pg.data_dir, + destination_node = dst_pg, + options = [ + '-d', 'postgres', '-p', str(src_pg.port), '--stream', + '--perm-slot', + '--temp-slot' + ] + ) + self.assertEqual(1, 0, "Expecting Error because conflicting options --perm-slot and --temp-slot used together\n Output: {0} \n CMD: {1}".format( + repr(self.output), self.cmd)) + except ProbackupException as e: + self.assertIn( + 'ERROR: You cannot specify "--perm-slot" option with the "--temp-slot" option', + e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format(repr(e.message), self.cmd)) #self.assertEqual(1, 0, 'Stop test') self.del_test_dir(module_name, self.fname) diff --git a/tests/false_positive.py b/tests/false_positive.py index a101f8107..9cff54185 100644 --- a/tests/false_positive.py +++ b/tests/false_positive.py @@ -113,9 +113,6 @@ def test_pg_10_waldir(self): """ test group access for PG >= 11 """ - if self.pg_config_version < self.version_to_num('10.0'): - return unittest.skip('You need PostgreSQL >= 10 for this 
test') - fname = self.id().split('.')[3] wal_dir = os.path.join( os.path.join(self.tmp_path, module_name, fname), 'wal_dir') diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py index 0fa252739..8e24dd279 100644 --- a/tests/helpers/ptrack_helpers.py +++ b/tests/helpers/ptrack_helpers.py @@ -547,13 +547,7 @@ def get_md5_per_page_for_fork(self, file, size_in_pages): def get_ptrack_bits_per_page_for_fork(self, node, file, size=[]): - if self.get_pgpro_edition(node) == 'enterprise': - if self.get_version(node) < self.version_to_num('10.0'): - header_size = 48 - else: - header_size = 24 - else: - header_size = 24 + header_size = 24 ptrack_bits_for_fork = [] # TODO: use macro instead of hard coded 8KB @@ -1517,25 +1511,15 @@ def version_to_num(self, version): def switch_wal_segment(self, node): """ - Execute pg_switch_wal/xlog() in given node + Execute pg_switch_wal() in given node Args: node: an instance of PostgresNode or NodeConnection class """ if isinstance(node, testgres.PostgresNode): - if self.version_to_num( - node.safe_psql('postgres', 'show server_version').decode('utf-8') - ) >= self.version_to_num('10.0'): - node.safe_psql('postgres', 'select pg_switch_wal()') - else: - node.safe_psql('postgres', 'select pg_switch_xlog()') + node.safe_psql('postgres', 'select pg_switch_wal()') else: - if self.version_to_num( - node.execute('show server_version')[0][0] - ) >= self.version_to_num('10.0'): - node.execute('select pg_switch_wal()') - else: - node.execute('select pg_switch_xlog()') + node.execute('select pg_switch_wal()') sleep(1) @@ -1545,12 +1529,8 @@ def wait_until_replica_catch_with_master(self, master, replica): 'postgres', 'show server_version').decode('utf-8').rstrip() - if self.version_to_num(version) >= self.version_to_num('10.0'): - master_function = 'pg_catalog.pg_current_wal_lsn()' - replica_function = 'pg_catalog.pg_last_wal_replay_lsn()' - else: - master_function = 'pg_catalog.pg_current_xlog_location()' - replica_function = 
'pg_catalog.pg_last_xlog_replay_location()' + master_function = 'pg_catalog.pg_current_wal_lsn()' + replica_function = 'pg_catalog.pg_last_wal_replay_lsn()' lsn = master.safe_psql( 'postgres', diff --git a/tests/incr_restore.py b/tests/incr_restore.py index cb684a23a..b3a2ce4a6 100644 --- a/tests/incr_restore.py +++ b/tests/incr_restore.py @@ -1492,11 +1492,6 @@ def test_make_replica_via_incr_checksum_restore(self): set_replication=True, initdb_params=['--data-checksums']) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', master) self.set_archiving(backup_dir, 'node', master, replica=True) @@ -1565,11 +1560,6 @@ def test_make_replica_via_incr_lsn_restore(self): set_replication=True, initdb_params=['--data-checksums']) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', master) self.set_archiving(backup_dir, 'node', master, replica=True) diff --git a/tests/pgpro2068.py b/tests/pgpro2068.py index 3baa0ba0b..454cac532 100644 --- a/tests/pgpro2068.py +++ b/tests/pgpro2068.py @@ -136,29 +136,7 @@ def test_minrecpoint_on_replica(self): recovery_config, "recovery_target_action = 'pause'") replica.slow_start(replica=True) - if self.get_version(node) < 100000: - script = ''' -DO -$$ -relations = plpy.execute("select class.oid from pg_class class WHERE class.relkind IN ('r', 'i', 't', 'm') and class.relpersistence = 'p'") -current_xlog_lsn = plpy.execute("SELECT min_recovery_end_location as lsn FROM pg_control_recovery()")[0]['lsn'] -plpy.notice('CURRENT LSN: {0}'.format(current_xlog_lsn)) -found_corruption = False -for relation in relations: - pages_from_future = 
plpy.execute("with number_of_blocks as (select blknum from generate_series(0, pg_relation_size({0}) / 8192 -1) as blknum) select blknum, lsn, checksum, flags, lower, upper, special, pagesize, version, prune_xid from number_of_blocks, page_header(get_raw_page('{0}'::oid::regclass::text, number_of_blocks.blknum::int)) where lsn > '{1}'::pg_lsn".format(relation['oid'], current_xlog_lsn)) - - if pages_from_future.nrows() == 0: - continue - - for page in pages_from_future: - plpy.notice('Found page from future. OID: {0}, BLKNUM: {1}, LSN: {2}'.format(relation['oid'], page['blknum'], page['lsn'])) - found_corruption = True -if found_corruption: - plpy.error('Found Corruption') -$$ LANGUAGE plpython3u; -''' - else: - script = ''' + script = ''' DO $$ relations = plpy.execute("select class.oid from pg_class class WHERE class.relkind IN ('r', 'i', 't', 'm') and class.relpersistence = 'p'") diff --git a/tests/pgpro560.py b/tests/pgpro560.py index 53c7914a2..ffda7b5ee 100644 --- a/tests/pgpro560.py +++ b/tests/pgpro560.py @@ -84,20 +84,12 @@ def test_pgpro560_systemid_mismatch(self): "Expecting Error because of SYSTEM ID mismatch.\n " "Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd)) except ProbackupException as e: - if self.get_version(node1) > 90600: - self.assertTrue( - 'ERROR: Backup data directory was ' - 'initialized for system id' in e.message and - 'but connected instance system id is' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - else: - self.assertIn( - 'ERROR: System identifier mismatch. 
' - 'Connected PostgreSQL instance has system id', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.assertTrue( + 'ERROR: Backup data directory was ' + 'initialized for system id' in e.message and + 'but connected instance system id is' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) sleep(1) @@ -111,20 +103,12 @@ def test_pgpro560_systemid_mismatch(self): "Expecting Error because of of SYSTEM ID mismatch.\n " "Output: {0} \n CMD: {1}".format(repr(self.output), self.cmd)) except ProbackupException as e: - if self.get_version(node1) > 90600: - self.assertTrue( - 'ERROR: Backup data directory was initialized ' - 'for system id' in e.message and - 'but connected instance system id is' in e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) - else: - self.assertIn( - 'ERROR: System identifier mismatch. ' - 'Connected PostgreSQL instance has system id', - e.message, - '\n Unexpected Error Message: {0}\n CMD: {1}'.format( - repr(e.message), self.cmd)) + self.assertTrue( + 'ERROR: Backup data directory was initialized ' + 'for system id' in e.message and + 'but connected instance system id is' in e.message, + '\n Unexpected Error Message: {0}\n CMD: {1}'.format( + repr(e.message), self.cmd)) # Clean after yourself self.del_test_dir(module_name, fname) diff --git a/tests/ptrack.py b/tests/ptrack.py index 08ea90f8d..19df9ff16 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -511,114 +511,41 @@ def test_ptrack_unprivileged(self): "postgres", "CREATE DATABASE backupdb") - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM 
PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON 
ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION 
pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # >= 10 - else: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION 
pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) + # PG >= 10 + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON 
FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) node.safe_psql( "backupdb", diff --git a/tests/replica.py b/tests/replica.py index acf655aac..4bcfa6083 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -28,11 +28,6 @@ def test_replica_switchover(self): set_replication=True, initdb_params=['--data-checksums']) - if self.get_version(node1) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node1', node1) @@ -105,10 +100,6 @@ def test_replica_stream_ptrack_backup(self): if not self.ptrack: return unittest.skip('Skipped because ptrack support is disabled') - if self.pg_config_version > self.version_to_num('9.6.0'): - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - fname = self.id().split('.')[3] backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') master = self.make_simple_node( @@ -239,11 +230,6 @@ def test_replica_archive_page_backup(self): 'checkpoint_timeout': '30s', 'max_wal_size': '32MB'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) self.set_archiving(backup_dir, 'master', master) @@ -381,11 +367,6 @@ def 
test_basic_make_replica_via_restore(self): pg_options={ 'archive_timeout': '10s'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) self.set_archiving(backup_dir, 'master', master) @@ -439,11 +420,6 @@ def test_take_backup_from_delayed_replica(self): initdb_params=['--data-checksums'], pg_options={'archive_timeout': '10s'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) self.set_archiving(backup_dir, 'master', master) @@ -552,11 +528,6 @@ def test_replica_promote(self): 'checkpoint_timeout': '30s', 'max_wal_size': '32MB'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) self.set_archiving(backup_dir, 'master', master) @@ -643,11 +614,6 @@ def test_replica_stop_lsn_null_offset(self): 'checkpoint_timeout': '1h', 'wal_level': 'replica'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', master) self.set_archiving(backup_dir, 'node', master) @@ -728,11 +694,6 @@ def test_replica_stop_lsn_null_offset_next_record(self): 'checkpoint_timeout': '1h', 'wal_level': 'replica'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because 
backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) self.set_archiving(backup_dir, 'master', master) @@ -830,11 +791,6 @@ def test_archive_replica_null_offset(self): 'checkpoint_timeout': '1h', 'wal_level': 'replica'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', master) self.set_archiving(backup_dir, 'node', master) @@ -914,11 +870,6 @@ def test_archive_replica_not_null_offset(self): 'checkpoint_timeout': '1h', 'wal_level': 'replica'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', master) self.set_archiving(backup_dir, 'node', master) @@ -1003,11 +954,6 @@ def test_replica_toast(self): 'wal_level': 'replica', 'shared_buffers': '128MB'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) self.set_archiving(backup_dir, 'master', master) @@ -1105,11 +1051,6 @@ def test_start_stop_lsn_in_the_same_segno(self): 'wal_level': 'replica', 'shared_buffers': '128MB'}) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) master.slow_start() @@ -1183,11 +1124,6 @@ def test_replica_promote_1(self): 'checkpoint_timeout': '1h', 'wal_level': 'replica'}) - if 
self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) # set replica True, so archive_mode 'always' is used. @@ -1310,11 +1246,6 @@ def test_replica_promote_archive_delta(self): 'checkpoint_timeout': '30s', 'archive_timeout': '30s'}) - if self.get_version(node1) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node1) self.set_config( @@ -1435,11 +1366,6 @@ def test_replica_promote_archive_page(self): 'checkpoint_timeout': '30s', 'archive_timeout': '30s'}) - if self.get_version(node1) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node1) self.set_archiving(backup_dir, 'node', node1) @@ -1557,11 +1483,6 @@ def test_parent_choosing(self): set_replication=True, initdb_params=['--data-checksums']) - if self.get_version(master) < self.version_to_num('9.6.0'): - self.del_test_dir(module_name, fname) - return unittest.skip( - 'Skipped because backup from replica is not supported in PG 9.5') - self.init_pb(backup_dir) self.add_instance(backup_dir, 'master', master) @@ -1708,11 +1629,7 @@ def test_replica_via_basebackup(self): # restore stream backup self.restore_node(backup_dir, 'node', node) - xlog_dir = 'pg_wal' - if self.get_version(node) < 100000: - xlog_dir = 'pg_xlog' - - filepath = os.path.join(node.data_dir, xlog_dir, "00000002.history") + filepath = os.path.join(node.data_dir, 'pg_wal', "00000002.history") self.assertTrue( os.path.exists(filepath), "History file do not exists: {0}".format(filepath)) diff 
--git a/tests/restore.py b/tests/restore.py index 5a00bc23b..9c300d232 100644 --- a/tests/restore.py +++ b/tests/restore.py @@ -361,10 +361,6 @@ def test_restore_to_lsn_inclusive(self): base_dir=os.path.join(module_name, fname, 'node'), initdb_params=['--data-checksums']) - if self.get_version(node) < self.version_to_num('10.0'): - self.del_test_dir(module_name, fname) - return - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -432,10 +428,6 @@ def test_restore_to_lsn_not_inclusive(self): base_dir=os.path.join(module_name, fname, 'node'), initdb_params=['--data-checksums']) - if self.get_version(node) < self.version_to_num('10.0'): - self.del_test_dir(module_name, fname) - return - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -2146,10 +2138,7 @@ def test_restore_target_new_options(self): with node.connect("postgres") as con: con.execute("INSERT INTO tbl0005 VALUES (1)") con.commit() - if self.get_version(node) > self.version_to_num('10.0'): - res = con.execute("SELECT pg_current_wal_lsn()") - else: - res = con.execute("SELECT pg_current_xlog_location()") + res = con.execute("SELECT pg_current_wal_lsn()") con.commit() con.execute("INSERT INTO tbl0005 VALUES (2)") @@ -2240,33 +2229,32 @@ def test_restore_target_new_options(self): node.slow_start() # Restore with recovery target lsn - if self.get_version(node) >= 100000: - node.cleanup() - self.restore_node( - backup_dir, 'node', node, - options=[ - '--recovery-target-lsn={0}'.format(target_lsn), - "--recovery-target-action=promote", - '--recovery-target-timeline=1', - ]) + node.cleanup() + self.restore_node( + backup_dir, 'node', node, + options=[ + '--recovery-target-lsn={0}'.format(target_lsn), + "--recovery-target-action=promote", + '--recovery-target-timeline=1', + ]) - with open(recovery_conf, 'r') as f: - 
recovery_conf_content = f.read() + with open(recovery_conf, 'r') as f: + recovery_conf_content = f.read() - self.assertIn( - "recovery_target_lsn = '{0}'".format(target_lsn), - recovery_conf_content) + self.assertIn( + "recovery_target_lsn = '{0}'".format(target_lsn), + recovery_conf_content) - self.assertIn( - "recovery_target_action = 'promote'", - recovery_conf_content) + self.assertIn( + "recovery_target_action = 'promote'", + recovery_conf_content) - self.assertIn( - "recovery_target_timeline = '1'", - recovery_conf_content) + self.assertIn( + "recovery_target_timeline = '1'", + recovery_conf_content) - node.slow_start() + node.slow_start() # Clean after yourself self.del_test_dir(module_name, fname) @@ -3197,117 +3185,42 @@ def test_missing_database_map(self): "postgres", "CREATE DATABASE backupdb") - # PG 9.5 - if self.get_version(node) < 90600: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE 
pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # PG 9.6 - elif self.get_version(node) > 90600 and self.get_version(node) < 100000: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA 
pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.textout(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.timestamptz(timestamp with time zone, integer) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_xlog_replay_location() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) - # >= 10 - else: - node.safe_psql( - 'backupdb', - "REVOKE ALL ON DATABASE backupdb from PUBLIC; " - "REVOKE ALL ON SCHEMA public from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public FROM PUBLIC; " - "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog 
FROM PUBLIC; " - "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " - "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " - "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " - "CREATE ROLE backup WITH LOGIN REPLICATION; " - "GRANT CONNECT ON DATABASE backupdb to backup; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " - "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack - "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" - ) + # PG >= 10 + node.safe_psql( + 'backupdb', + "REVOKE ALL ON DATABASE backupdb from PUBLIC; " + "REVOKE ALL ON SCHEMA public from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA public FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA public 
FROM PUBLIC; " + "REVOKE ALL ON SCHEMA pg_catalog from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA pg_catalog FROM PUBLIC; " + "REVOKE ALL ON SCHEMA information_schema from PUBLIC; " + "REVOKE ALL ON ALL TABLES IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL FUNCTIONS IN SCHEMA information_schema FROM PUBLIC; " + "REVOKE ALL ON ALL SEQUENCES IN SCHEMA information_schema FROM PUBLIC; " + "CREATE ROLE backup WITH LOGIN REPLICATION; " + "GRANT CONNECT ON DATABASE backupdb to backup; " + "GRANT USAGE ON SCHEMA pg_catalog TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_proc TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_extension TO backup; " + "GRANT SELECT ON TABLE pg_catalog.pg_database TO backup; " # for partial restore, checkdb and ptrack + "GRANT EXECUTE ON FUNCTION pg_catalog.oideq(oid, oid) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.nameeq(name, name) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.current_setting(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.set_config(text, text, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_is_in_recovery() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_control_system() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_start_backup(text, boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_stop_backup(boolean, boolean) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_create_restore_point(text) TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_switch_wal() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.pg_last_wal_replay_lsn() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_current_snapshot() TO backup; " + "GRANT EXECUTE ON FUNCTION pg_catalog.txid_snapshot_xmax(txid_snapshot) TO backup;" + ) if self.ptrack: # TODO why backup works without these grants ? 
diff --git a/tests/retention.py b/tests/retention.py index b0399a239..7bfff6b28 100644 --- a/tests/retention.py +++ b/tests/retention.py @@ -1575,11 +1575,6 @@ def test_window_error_backups_2(self): self.show_pb(backup_dir, 'node')[1]['id'] - if self.get_version(node) < 90600: - node.safe_psql( - 'postgres', - 'SELECT pg_catalog.pg_stop_backup()') - # Take DELTA backup self.backup_node( backup_dir, 'node', node, backup_type='delta', @@ -1599,10 +1594,6 @@ def test_retention_redundancy_overlapping_chains(self): base_dir=os.path.join(module_name, fname, 'node'), initdb_params=['--data-checksums']) - if self.get_version(node) < 90600: - self.del_test_dir(module_name, fname) - return unittest.skip('Skipped because ptrack support is disabled') - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) @@ -1649,10 +1640,6 @@ def test_retention_redundancy_overlapping_chains_1(self): base_dir=os.path.join(module_name, fname, 'node'), initdb_params=['--data-checksums']) - if self.get_version(node) < 90600: - self.del_test_dir(module_name, fname) - return unittest.skip('Skipped because ptrack support is disabled') - backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') self.init_pb(backup_dir) self.add_instance(backup_dir, 'node', node) diff --git a/tests/validate.py b/tests/validate.py index 22a03c3be..7cdc0e92e 100644 --- a/tests/validate.py +++ b/tests/validate.py @@ -1757,14 +1757,9 @@ def test_validate_corrupt_wal_between_backups(self): con.commit() target_xid = res[0][0] - if self.get_version(node) < self.version_to_num('10.0'): - walfile = node.safe_psql( - 'postgres', - 'select pg_xlogfile_name(pg_current_xlog_location())').decode('utf-8').rstrip() - else: - walfile = node.safe_psql( - 'postgres', - 'select pg_walfile_name(pg_current_wal_lsn())').decode('utf-8').rstrip() + walfile = node.safe_psql( + 'postgres', + 'select 
pg_walfile_name(pg_current_wal_lsn())').decode('utf-8').rstrip() if self.archive_compress: walfile = walfile + '.gz' @@ -3506,12 +3501,8 @@ def test_corrupt_pg_control_via_resetxlog(self): backup_id = self.backup_node(backup_dir, 'node', node) - if self.get_version(node) < 100000: - pg_resetxlog_path = self.get_bin_path('pg_resetxlog') - wal_dir = 'pg_xlog' - else: - pg_resetxlog_path = self.get_bin_path('pg_resetwal') - wal_dir = 'pg_wal' + pg_resetxlog_path = self.get_bin_path('pg_resetwal') + wal_dir = 'pg_wal' os.mkdir( os.path.join( From 5ed469d500969be554bdc906ecfd3cb368d8372d Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Wed, 20 Jul 2022 03:08:01 +0300 Subject: [PATCH 3/6] [PBCKP-232] remove deprecated options (master-db, master-host, master-port, master-user, replica-timeout) part 1 --- src/configure.c | 54 ------------------------ src/help.c | 16 +------ src/pg_probackup.h | 3 -- tests/archive.py | 9 ---- tests/ptrack.py | 102 +++++++-------------------------------- tests/replica.py | 24 ++--------- 6 files changed, 21 insertions(+), 187 deletions(-) diff --git a/src/configure.c b/src/configure.c index 3871aa8b9..47433346f 100644 --- a/src/configure.c +++ b/src/configure.c @@ -90,32 +90,6 @@ ConfigOption instance_options[] = &instance_config.conn_opt.pguser, SOURCE_CMD, 0, OPTION_CONN_GROUP, 0, option_get_value }, - /* Replica options */ - { - 's', 202, "master-db", - &instance_config.master_conn_opt.pgdatabase, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 's', 203, "master-host", - &instance_config.master_conn_opt.pghost, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 's', 204, "master-port", - &instance_config.master_conn_opt.pgport, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 's', 205, "master-user", - &instance_config.master_conn_opt.pguser, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 'u', 206, "replica-timeout", - 
&instance_config.replica_timeout, SOURCE_CMD, SOURCE_DEFAULT, - OPTION_REPLICA_GROUP, OPTION_UNIT_S, option_get_value - }, /* Archive options */ { 'u', 207, "archive-timeout", @@ -362,8 +336,6 @@ init_config(InstanceConfig *config, const char *instance_name) config->xlog_seg_size = XLOG_SEG_SIZE; #endif - config->replica_timeout = REPLICA_TIMEOUT_DEFAULT; - config->archive_timeout = ARCHIVE_TIMEOUT_DEFAULT; /* Copy logger defaults */ @@ -437,32 +409,6 @@ readInstanceConfigFile(InstanceState *instanceState) &instance->conn_opt.pguser, SOURCE_CMD, 0, OPTION_CONN_GROUP, 0, option_get_value }, - /* Replica options */ - { - 's', 202, "master-db", - &instance->master_conn_opt.pgdatabase, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 's', 203, "master-host", - &instance->master_conn_opt.pghost, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 's', 204, "master-port", - &instance->master_conn_opt.pgport, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 's', 205, "master-user", - &instance->master_conn_opt.pguser, SOURCE_CMD, 0, - OPTION_REPLICA_GROUP, 0, option_get_value - }, - { - 'u', 206, "replica-timeout", - &instance->replica_timeout, SOURCE_CMD, SOURCE_DEFAULT, - OPTION_REPLICA_GROUP, OPTION_UNIT_S, option_get_value - }, /* Archive options */ { 'u', 207, "archive-timeout", diff --git a/src/help.c b/src/help.c index b22fa912e..14ed38bc8 100644 --- a/src/help.c +++ b/src/help.c @@ -2,7 +2,7 @@ * * help.c * - * Copyright (c) 2017-2021, Postgres Professional + * Copyright (c) 2017-2022, Postgres Professional * *------------------------------------------------------------------------- */ @@ -416,13 +416,6 @@ help_backup(void) printf(_(" --remote-user=username user name for ssh connection (default: current user)\n")); printf(_(" --ssh-options=ssh_options additional ssh options (default: none)\n")); printf(_(" (example: --ssh-options='-c cipher_spec -F configfile')\n")); - - printf(_("\n Replica 
options:\n")); - printf(_(" --master-user=user_name user name to connect to master (deprecated)\n")); - printf(_(" --master-db=db_name database to connect to master (deprecated)\n")); - printf(_(" --master-host=host_name database server host of master (deprecated)\n")); - printf(_(" --master-port=port database server port of master (deprecated)\n")); - printf(_(" --replica-timeout=timeout wait timeout for WAL segment streaming through replication (deprecated)\n\n")); } static void @@ -878,13 +871,6 @@ help_set_config(void) printf(_(" --archive-host=destination address or hostname for ssh connection to archive host\n")); printf(_(" --archive-port=port port for ssh connection to archive host (default: 22)\n")); printf(_(" --archive-user=username user name for ssh connection to archive host (default: PostgreSQL user)\n")); - - printf(_("\n Replica options:\n")); - printf(_(" --master-user=user_name user name to connect to master (deprecated)\n")); - printf(_(" --master-db=db_name database to connect to master (deprecated)\n")); - printf(_(" --master-host=host_name database server host of master (deprecated)\n")); - printf(_(" --master-port=port database server port of master (deprecated)\n")); - printf(_(" --replica-timeout=timeout wait timeout for WAL segment streaming through replication (deprecated)\n\n")); } static void diff --git a/src/pg_probackup.h b/src/pg_probackup.h index eb051065b..8e9d1568f 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -378,9 +378,6 @@ typedef struct InstanceConfig char *external_dir_str; ConnectionOptions conn_opt; - ConnectionOptions master_conn_opt; - - uint32 replica_timeout; //Deprecated. 
Not used anywhere /* Wait timeout for WAL segment archiving */ uint32 archive_timeout; diff --git a/tests/archive.py b/tests/archive.py index fe3d89b17..be5e33fbc 100644 --- a/tests/archive.py +++ b/tests/archive.py @@ -725,9 +725,6 @@ def test_replica_archive(self): backup_dir, 'replica', replica, options=[ '--archive-timeout=30', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), '--stream']) self.validate_pb(backup_dir, 'replica') @@ -764,9 +761,6 @@ def test_replica_archive(self): replica, backup_type='page', options=[ '--archive-timeout=60', - '--master-db=postgres', - '--master-host=localhost', - '--master-port={0}'.format(master.port), '--stream']) self.validate_pb(backup_dir, 'replica') @@ -857,9 +851,6 @@ def test_master_and_replica_parallel_archiving(self): backup_dir, 'replica', replica, options=[ '--archive-timeout=30', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), '--stream']) self.validate_pb(backup_dir, 'replica') diff --git a/tests/ptrack.py b/tests/ptrack.py index 19df9ff16..7dec55cc7 100644 --- a/tests/ptrack.py +++ b/tests/ptrack.py @@ -1560,13 +1560,7 @@ def test_create_db_on_replica(self): self.backup_node( backup_dir, 'replica', replica, - options=[ - '-j10', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(node.port), - '--stream' - ] + options=['-j10', '--stream'] ) # CREATE DATABASE DB1 @@ -1584,13 +1578,7 @@ def test_create_db_on_replica(self): backup_id = self.backup_node( backup_dir, 'replica', replica, backup_type='ptrack', - options=[ - '-j10', - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(node.port) - ] + options=['-j10', '--stream'] ) if self.paranoia: @@ -2304,11 +2292,7 @@ def test_ptrack_clean_replica(self): backup_dir, 'replica', replica, - options=[ - '-j10', '--stream', - '--master-host=localhost', - '--master-db=postgres', - 
'--master-port={0}'.format(master.port)]) + options=['-j10', '--stream']) master.safe_psql('postgres', 'checkpoint') for i in idx_ptrack: @@ -2335,11 +2319,7 @@ def test_ptrack_clean_replica(self): 'replica', replica, backup_type='ptrack', - options=[ - '-j10', '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['-j10', '--stream']) master.safe_psql('postgres', 'checkpoint') for i in idx_ptrack: @@ -2367,11 +2347,7 @@ def test_ptrack_clean_replica(self): 'replica', replica, backup_type='page', - options=[ - '-j10', '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), - '--stream']) + options=['-j10', '--stream']) master.safe_psql('postgres', 'checkpoint') for i in idx_ptrack: @@ -2437,8 +2413,7 @@ def test_ptrack_cluster_on_btree(self): idx_ptrack[i]['old_pages'] = self.get_md5_per_page_for_fork( idx_ptrack[i]['path'], idx_ptrack[i]['old_size']) - self.backup_node( - backup_dir, 'node', node, options=['-j10', '--stream']) + self.backup_node(backup_dir, 'node', node, options=['-j10', '--stream']) node.safe_psql('postgres', 'delete from t_heap where id%2 = 1') node.safe_psql('postgres', 'cluster t_heap using t_btree') @@ -2573,11 +2548,7 @@ def test_ptrack_cluster_on_btree_replica(self): master.safe_psql('postgres', 'vacuum t_heap') master.safe_psql('postgres', 'checkpoint') - self.backup_node( - backup_dir, 'replica', replica, options=[ - '-j10', '--stream', '--master-host=localhost', - '--master-db=postgres', '--master-port={0}'.format( - master.port)]) + self.backup_node(backup_dir, 'replica', replica, options=['-j10', '--stream']) for i in idx_ptrack: # get size of heap and indexes. 
size calculated in pages @@ -2674,9 +2645,7 @@ def test_ptrack_cluster_on_gist_replica(self): self.backup_node( backup_dir, 'replica', replica, options=[ - '-j10', '--stream', '--master-host=localhost', - '--master-db=postgres', '--master-port={0}'.format( - master.port)]) + '-j10', '--stream']) for i in idx_ptrack: # get size of heap and indexes. size calculated in pages @@ -2844,11 +2813,7 @@ def test_ptrack_empty_replica(self): backup_dir, 'replica', replica, - options=[ - '-j10', '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['-j10', '--stream']) # Create indexes for i in idx_ptrack: @@ -2868,11 +2833,7 @@ def test_ptrack_empty_replica(self): 'replica', replica, backup_type='ptrack', - options=[ - '-j1', '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['-j1', '--stream']) if self.paranoia: pgdata = self.pgdata_content(replica.data_dir) @@ -3041,12 +3002,7 @@ def test_basic_ptrack_truncate_replica(self): # Make backup to clean every ptrack self.backup_node( backup_dir, 'replica', replica, - options=[ - '-j10', - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['-j10', '--stream']) if replica.major_version < 11: for i in idx_ptrack: @@ -3070,12 +3026,7 @@ def test_basic_ptrack_truncate_replica(self): self.backup_node( backup_dir, 'replica', replica, backup_type='ptrack', - options=[ - '-j10', - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['-j10', '--stream']) pgdata = self.pgdata_content(replica.data_dir) @@ -3245,12 +3196,7 @@ def test_ptrack_vacuum_replica(self): replica.safe_psql('postgres', 'checkpoint') # Make FULL backup to clean every ptrack - self.backup_node( - backup_dir, 'replica', replica, options=[ - '-j10', '--master-host=localhost', - 
'--master-db=postgres', - '--master-port={0}'.format(master.port), - '--stream']) + self.backup_node(backup_dir, 'replica', replica, options=['-j10', '--stream']) if replica.major_version < 11: for i in idx_ptrack: @@ -3430,12 +3376,7 @@ def test_ptrack_vacuum_bits_frozen_replica(self): # Take backup to clean every ptrack self.backup_node( backup_dir, 'replica', replica, - options=[ - '-j10', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), - '--stream']) + options=['-j10', '--stream']) if replica.major_version < 11: for i in idx_ptrack: @@ -3688,12 +3629,7 @@ def test_ptrack_vacuum_full_replica(self): # Take FULL backup to clean every ptrack self.backup_node( backup_dir, 'replica', replica, - options=[ - '-j10', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port), - '--stream']) + options=['-j10', '--stream']) if replica.major_version < 11: for i in idx_ptrack: @@ -3860,13 +3796,7 @@ def test_ptrack_vacuum_truncate_replica(self): # Take FULL backup to clean every ptrack self.backup_node( backup_dir, 'replica', replica, - options=[ - '-j10', - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port) - ] + options=['-j10', '--stream'] ) if master.major_version < 11: diff --git a/tests/replica.py b/tests/replica.py index 4bcfa6083..4fe009062 100644 --- a/tests/replica.py +++ b/tests/replica.py @@ -152,11 +152,7 @@ def test_replica_stream_ptrack_backup(self): backup_id = self.backup_node( backup_dir, 'replica', replica, - options=[ - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['--stream']) self.validate_pb(backup_dir, 'replica') self.assertEqual( 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) @@ -188,11 +184,7 @@ def test_replica_stream_ptrack_backup(self): backup_id = self.backup_node( backup_dir, 'replica', replica, 
backup_type='ptrack', - options=[ - '--stream', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['--stream']) self.validate_pb(backup_dir, 'replica') self.assertEqual( 'OK', self.show_pb(backup_dir, 'replica', backup_id)['status']) @@ -279,11 +271,7 @@ def test_replica_archive_page_backup(self): backup_id = self.backup_node( backup_dir, 'replica', replica, - options=[ - '--archive-timeout=60', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['--archive-timeout=60']) self.validate_pb(backup_dir, 'replica') self.assertEqual( @@ -315,11 +303,7 @@ def test_replica_archive_page_backup(self): backup_id = self.backup_node( backup_dir, 'replica', replica, backup_type='page', - options=[ - '--archive-timeout=60', - '--master-host=localhost', - '--master-db=postgres', - '--master-port={0}'.format(master.port)]) + options=['--archive-timeout=60']) pgbench.wait() From a880b9165b4e0f89b0ae798f2b15aafadcb02a0b Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Wed, 20 Jul 2022 03:13:35 +0300 Subject: [PATCH 4/6] [PBCKP-232] makefile simplification --- Makefile | 26 ++++++-------------------- get_pg_version.mk | 36 ------------------------------------ 2 files changed, 6 insertions(+), 56 deletions(-) delete mode 100644 get_pg_version.mk diff --git a/Makefile b/Makefile index 3753d9cb7..a1b1ebed3 100644 --- a/Makefile +++ b/Makefile @@ -17,21 +17,17 @@ # git clone https://github.com/postgrespro/pg_probackup postgresql/contrib/pg_probackup # cd postgresql # ./configure ... && make -# make --no-print-directory -C contrib/pg_probackup +# make -C contrib/pg_probackup # # 4. out of PG source and without PGXS # git clone https://git.postgresql.org/git/postgresql.git postgresql-src # git clone https://github.com/postgrespro/pg_probackup postgresql-src/contrib/pg_probackup # mkdir postgresql-build && cd postgresql-build # ../postgresql-src/configure ... 
&& make -# make --no-print-directory -C contrib/pg_probackup +# make -C contrib/pg_probackup # top_pbk_srcdir := $(dir $(realpath $(firstword $(MAKEFILE_LIST)))) -# get postgres version -PG_MAJORVER != $(MAKE) USE_PGXS=$(USE_PGXS) PG_CONFIG=$(PG_CONFIG) --silent --makefile=$(top_pbk_srcdir)get_pg_version.mk -#$(info Making with PG_MAJORVER=$(PG_MAJORVER)) - PROGRAM := pg_probackup # pg_probackup sources @@ -47,18 +43,14 @@ OBJS += src/archive.o src/backup.o src/catalog.o src/checkdb.o src/configure.o s BORROWED_H_SRC := \ src/include/portability/instr_time.h \ src/bin/pg_basebackup/receivelog.h \ - src/bin/pg_basebackup/streamutil.h + src/bin/pg_basebackup/streamutil.h \ + src/bin/pg_basebackup/walmethods.h BORROWED_C_SRC := \ src/backend/access/transam/xlogreader.c \ src/backend/utils/hash/pg_crc.c \ src/bin/pg_basebackup/receivelog.c \ - src/bin/pg_basebackup/streamutil.c -ifneq ($(PG_MAJORVER), $(findstring $(PG_MAJORVER), 9.5 9.6)) -BORROWED_H_SRC += \ - src/bin/pg_basebackup/walmethods.h -BORROWED_C_SRC += \ + src/bin/pg_basebackup/streamutil.c \ src/bin/pg_basebackup/walmethods.c -endif BORROW_DIR := src/borrowed BORROWED_H := $(addprefix $(BORROW_DIR)/, $(notdir $(BORROWED_H_SRC))) @@ -84,9 +76,6 @@ include $(top_builddir)/src/Makefile.global include $(top_srcdir)/contrib/contrib-global.mk endif -# now we can use standard MAJORVERSION variable instead of calculated PG_MAJORVER -undefine PG_MAJORVER - # PG_CPPFLAGS = -I$(libpq_srcdir) ${PTHREAD_CFLAGS} -I$(top_pbk_srcdir)src -I$(BORROW_DIR) PG_CPPFLAGS += -I$(top_pbk_srcdir)src/fu_util -Wno-declaration-after-statement @@ -99,11 +88,8 @@ PG_LIBS_INTERNAL = $(libpq_pgport) ${PTHREAD_CFLAGS} # additional dependencies on borrowed files src/archive.o: $(BORROW_DIR)/instr_time.h src/backup.o src/catchup.o src/pg_probackup.o: $(BORROW_DIR)/streamutil.h -src/stream.o $(BORROW_DIR)/receivelog.o $(BORROW_DIR)/streamutil.o: $(BORROW_DIR)/receivelog.h -ifneq ($(MAJORVERSION), $(findstring $(MAJORVERSION), 9.5 9.6)) 
+src/stream.o $(BORROW_DIR)/receivelog.o $(BORROW_DIR)/streamutil.o $(BORROW_DIR)/walmethods.o: $(BORROW_DIR)/receivelog.h $(BORROW_DIR)/receivelog.h: $(BORROW_DIR)/walmethods.h -$(BORROW_DIR)/walmethods.o: $(BORROW_DIR)/receivelog.h -endif # generate separate makefile to handle borrowed files borrowed.mk: $(firstword $(MAKEFILE_LIST)) diff --git a/get_pg_version.mk b/get_pg_version.mk deleted file mode 100644 index d5468c5bb..000000000 --- a/get_pg_version.mk +++ /dev/null @@ -1,36 +0,0 @@ -# pg_probackup build system -# -# When building pg_probackup, there is a chicken and egg problem: -# 1. We have to define the OBJS list before including the PG makefiles. -# 2. To define this list, we need to know the PG major version. -# 3. But we can find out the postgres version only after including makefiles. -# -# This minimal makefile solves this problem, its only purpose is to -# calculate the version number from which the main build will occur next. -# -# Usage: -# include this line into main makefile -# PG_MAJORVER != $(MAKE) USE_PGXS=$(USE_PGXS) PG_CONFIG=$(PG_CONFIG) --silent --makefile=get_pg_version.mk -# -# Known issues: -# When parent make called with -C and without --no-print-directory, then -# 'make: Leaving directory ...' string will be added (by caller make process) to PG_MAJORVER -# (at least with GNU Make 4.2.1) -# -.PHONY: get_pg_version -get_pg_version: - -ifdef USE_PGXS -PG_CONFIG = pg_config -PGXS := $(shell $(PG_CONFIG) --pgxs) -include $(PGXS) -else -subdir = contrib/pg_probackup -top_builddir = ../.. -include $(top_builddir)/src/Makefile.global -include $(top_srcdir)/contrib/contrib-global.mk -endif - -get_pg_version: - $(info $(MAJORVERSION)) - From 249876ad2b6a1b74e175ae4585e1c99fc29f3378 Mon Sep 17 00:00:00 2001 From: "Mikhail A. 
Kulagin" Date: Fri, 26 Aug 2022 17:16:38 +0300 Subject: [PATCH 5/6] [PBCKP-232] remove deprecated options (master-db, master-host, master-port, master-user, replica-timeout) part 2 --- src/configure.c | 25 +++++++++++++++++++++- tests/compatibility.py | 48 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 72 insertions(+), 1 deletion(-) diff --git a/src/configure.c b/src/configure.c index 47433346f..b828a30e5 100644 --- a/src/configure.c +++ b/src/configure.c @@ -34,13 +34,15 @@ static void show_configure_json(ConfigOption *opt); #define OPTION_INSTANCE_GROUP "Backup instance information" #define OPTION_CONN_GROUP "Connection parameters" -#define OPTION_REPLICA_GROUP "Replica parameters" #define OPTION_ARCHIVE_GROUP "Archive parameters" #define OPTION_LOG_GROUP "Logging parameters" #define OPTION_RETENTION_GROUP "Retention parameters" #define OPTION_COMPRESS_GROUP "Compression parameters" #define OPTION_REMOTE_GROUP "Remote access parameters" +/* dummy placeholder for obsolete options to store in following instance_options[] */ +static char *obsolete_option_placeholder = NULL; + /* * Short name should be non-printable ASCII character. 
*/ @@ -90,6 +92,27 @@ ConfigOption instance_options[] = &instance_config.conn_opt.pguser, SOURCE_CMD, 0, OPTION_CONN_GROUP, 0, option_get_value }, + /* Obsolete options */ + { + 's', 202, "master-db", + &obsolete_option_placeholder, SOURCE_FILE_STRICT, SOURCE_CONST, "", 0, option_get_value + }, + { + 's', 203, "master-host", + &obsolete_option_placeholder, SOURCE_FILE_STRICT, SOURCE_CONST, "", 0, option_get_value + }, + { + 's', 204, "master-port", + &obsolete_option_placeholder, SOURCE_FILE_STRICT, SOURCE_CONST, "", 0, option_get_value + }, + { + 's', 205, "master-user", + &obsolete_option_placeholder, SOURCE_FILE_STRICT, SOURCE_CONST, "", 0, option_get_value + }, + { + 's', 206, "replica-timeout", + &obsolete_option_placeholder, SOURCE_FILE_STRICT, SOURCE_CONST, "", 0, option_get_value + }, /* Archive options */ { 'u', 207, "archive-timeout", diff --git a/tests/compatibility.py b/tests/compatibility.py index e274c22be..e3aab15e0 100644 --- a/tests/compatibility.py +++ b/tests/compatibility.py @@ -1482,3 +1482,51 @@ def test_compatibility_tablespace(self): # Clean after yourself self.del_test_dir(module_name, fname) + + # @unittest.skip("skip") + def test_compatibility_master_options(self): + """ + Test correctness of handling of removed master-db, master-host, master-port, + master-user and replica-timeout options + """ + self.assertTrue( + self.version_to_num(self.old_probackup_version) <= self.version_to_num('2.6.0'), + 'You need pg_probackup old_binary =< 2.6.0 for this test') + + fname = self.id().split('.')[3] + node = self.make_simple_node(base_dir=os.path.join(module_name, fname, 'node')) + backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup') + + self.init_pb(backup_dir, old_binary=True) + self.add_instance(backup_dir, 'node', node, old_binary=True) + + # add deprecated options (using probackup< 2.6) into pg_probackup.conf + # don't care about option values, we can use random values here + self.set_config( + backup_dir, 'node', + 
options=[ + '--master-db=postgres', + '--master-host=localhost', + '--master-port=5432', + '--master-user={0}'.format(self.user), + '--replica-timeout=100500'], + old_binary=True) + + # and try to show config with new binary (those options must be silently skipped) + self.show_config(backup_dir, 'node', old_binary=False) + + # store config with new version (those options must disappear from config) + self.set_config( + backup_dir, 'node', + options=[], + old_binary=False) + + # and check absence + config_options = self.show_config(backup_dir, 'node', old_binary=False) + self.assertFalse( + ['master-db', 'master-host', 'master-port', 'master-user', 'replica-timeout'] & config_options.keys(), + 'Obsolete options found in new config') + + # Clean after yourself + self.del_test_dir(module_name, fname) + From ee2b3303e54b53b9e93408940655efae613d3726 Mon Sep 17 00:00:00 2001 From: "Mikhail A. Kulagin" Date: Wed, 31 Aug 2022 18:17:15 +0300 Subject: [PATCH 6/6] [PBCKP-232] remove REPLICA_TIMEOUT_DEFAULT macro --- src/pg_probackup.h | 1 - 1 file changed, 1 deletion(-) diff --git a/src/pg_probackup.h b/src/pg_probackup.h index aeb55f83e..e6a3a48f8 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -88,7 +88,6 @@ extern const char *PROGRAM_EMAIL; /* Timeout defaults */ #define ARCHIVE_TIMEOUT_DEFAULT 300 -#define REPLICA_TIMEOUT_DEFAULT 300 #define LOCK_TIMEOUT 60 #define LOCK_STALE_TIMEOUT 30 #define LOG_FREQ 10