diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml deleted file mode 100644 index 6f99d0f27..000000000 --- a/.github/workflows/build.yml +++ /dev/null @@ -1,94 +0,0 @@ -name: Build Probackup - -on: - push: - branches: - - "**" - # Runs triggered by pull requests are disabled to prevent executing potentially unsafe code from public pull requests - # pull_request: - # branches: - # - main - - # Allows you to run this workflow manually from the Actions tab - workflow_dispatch: - -jobs: - - build-win2019: - - runs-on: - - windows-2019 - - env: - zlib_dir: C:\dep\zlib - - steps: - - - uses: actions/checkout@v2 - - - name: Install pacman packages - run: | - $env:PATH += ";C:\msys64\usr\bin" - pacman -S --noconfirm --needed bison flex - - - name: Make zlib - run: | - git clone -b v1.2.11 --depth 1 https://github.com/madler/zlib.git - cd zlib - cmake -DCMAKE_INSTALL_PREFIX:PATH=C:\dep\zlib -G "Visual Studio 16 2019" . - cmake --build . --config Release --target ALL_BUILD - cmake --build . --config Release --target INSTALL - copy C:\dep\zlib\lib\zlibstatic.lib C:\dep\zlib\lib\zdll.lib - copy C:\dep\zlib\bin\zlib.dll C:\dep\zlib\lib - - - name: Get Postgres sources - run: git clone -b REL_14_STABLE https://github.com/postgres/postgres.git - - # Copy ptrack to contrib to build the ptrack extension - # Convert line breaks in the patch file to LF otherwise the patch doesn't apply - - name: Get Ptrack sources - run: | - git clone -b master --depth 1 https://github.com/postgrespro/ptrack.git - Copy-Item -Path ptrack -Destination postgres\contrib -Recurse - (Get-Content ptrack\patches\REL_14_STABLE-ptrack-core.diff -Raw).Replace("`r`n","`n") | Set-Content ptrack\patches\REL_14_STABLE-ptrack-core.diff -Force -NoNewline - cd postgres - git apply -3 ../ptrack/patches/REL_14_STABLE-ptrack-core.diff - - - name: Build Postgres - run: | - $env:PATH += ";C:\msys64\usr\bin" - cd postgres\src\tools\msvc - (Get-Content config_default.pl) -Replace "zlib *=>(.*?)(?=,? 
*#)", "zlib => '${{ env.zlib_dir }}'" | Set-Content config.pl - cmd.exe /s /c "`"C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat`" amd64 && .\build.bat" - - - name: Build Probackup - run: cmd.exe /s /c "`"C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvarsall.bat`" amd64 && perl .\gen_probackup_project.pl `"${{ github.workspace }}`"\postgres" - - - name: Install Postgres - run: | - cd postgres - src\tools\msvc\install.bat postgres_install - - - name: Install Testgres - run: | - git clone -b no-port-for --single-branch --depth 1 https://github.com/postgrespro/testgres.git - cd testgres - python setup.py install - - # Grant the Github runner user full control of the workspace for initdb to successfully process the data folder - - name: Test Probackup - run: | - icacls.exe "${{ github.workspace }}" /grant "${env:USERNAME}:(OI)(CI)F" - $env:PATH += ";${{ github.workspace }}\postgres\postgres_install\lib;${{ env.zlib_dir }}\lib" - $Env:LC_MESSAGES = "English" - $Env:PG_CONFIG = "${{ github.workspace }}\postgres\postgres_install\bin\pg_config.exe" - $Env:PGPROBACKUPBIN = "${{ github.workspace }}\postgres\Release\pg_probackup\pg_probackup.exe" - $Env:PG_PROBACKUP_PTRACK = "ON" - If (!$Env:MODE -Or $Env:MODE -Eq "basic") { - $Env:PG_PROBACKUP_TEST_BASIC = "ON" - python -m unittest -v tests - python -m unittest -v tests.init_test - } else { - python -m unittest -v tests.$Env:MODE - } - diff --git a/.travis.yml b/.travis.yml index 074ae3d02..d774e48e4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,6 +1,6 @@ os: linux -dist: jammy +dist: noble language: c @@ -63,17 +63,21 @@ notifications: # Default MODE is basic, i.e. all tests with PG_PROBACKUP_TEST_BASIC=ON env: - - PG_VERSION=16 PG_BRANCH=master PTRACK_PATCH_PG_BRANCH=master + - PG_VERSION=19 PG_BRANCH=master PTRACK_PATCH_PG_BRANCH=master + - PG_VERSION=18 PG_BRANCH=REL_18_STABLE PTRACK_PATCH_PG_BRANCH=REL_18_STABLE + - PG_VERSION=18 PG_BRANCH=REL_18_STABLE PTRACK_PATCH_PG_BRANCH=REL_18_STABLE MODE=full + - PG_VERSION=17 PG_BRANCH=REL_17_STABLE PTRACK_PATCH_PG_BRANCH=REL_17_STABLE + - PG_VERSION=16 PG_BRANCH=REL_16_STABLE PTRACK_PATCH_PG_BRANCH=REL_16_STABLE - PG_VERSION=15 PG_BRANCH=REL_15_STABLE PTRACK_PATCH_PG_BRANCH=REL_15_STABLE - PG_VERSION=14 PG_BRANCH=REL_14_STABLE PTRACK_PATCH_PG_BRANCH=REL_14_STABLE - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE - - PG_VERSION=12 PG_BRANCH=REL_12_STABLE PTRACK_PATCH_PG_BRANCH=REL_12_STABLE - - PG_VERSION=11 PG_BRANCH=REL_11_STABLE PTRACK_PATCH_PG_BRANCH=REL_11_STABLE - - PG_VERSION=10 PG_BRANCH=REL_10_STABLE - - PG_VERSION=9.6 PG_BRANCH=REL9_6_STABLE - - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE - - PG_VERSION=15 PG_BRANCH=REL_15_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=backup_test.BackupTest.test_full_backup - - PG_VERSION=15 PG_BRANCH=REL_15_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=backup_test.BackupTest.test_full_backup_stream +# - PG_VERSION=12 PG_BRANCH=REL_12_STABLE PTRACK_PATCH_PG_BRANCH=REL_12_STABLE +# - PG_VERSION=11 PG_BRANCH=REL_11_STABLE PTRACK_PATCH_PG_BRANCH=REL_11_STABLE +# - PG_VERSION=10 PG_BRANCH=REL_10_STABLE +# - PG_VERSION=9.6 PG_BRANCH=REL9_6_STABLE +# - PG_VERSION=9.5 PG_BRANCH=REL9_5_STABLE +# - PG_VERSION=18 PG_BRANCH=REL_18_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=backup_test.BackupTest.test_full_backup +# - PG_VERSION=18 PG_BRANCH=REL_18_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=backup_test.BackupTest.test_full_backup_stream # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE 
PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=backup # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=catchup # - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=checkdb @@ -92,7 +96,8 @@ env: jobs: allow_failures: - if: env(PG_BRANCH) = master - - if: env(PG_BRANCH) = REL9_5_STABLE + - if: env(MODE) = full + # - if: env(PG_BRANCH) = REL9_5_STABLE # - if: env(MODE) IN (archive, backup, delta, locking, merge, replica, retention, restore) # Only run CI for master branch commits to limit our travis usage diff --git a/LICENSE b/LICENSE index 66476e8a9..4fed760f8 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2015-2023, Postgres Professional +Copyright (c) 2015-2025, Postgres Professional Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group diff --git a/Makefile b/Makefile index f93cc37a4..e449c9e52 100644 --- a/Makefile +++ b/Makefile @@ -87,3 +87,4 @@ endif include packaging/Makefile.pkg include packaging/Makefile.repo include packaging/Makefile.test + diff --git a/README.md b/README.md index 2279b97a4..da62364f3 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ `pg_probackup` is a utility to manage backup and recovery of PostgreSQL database clusters. It is designed to perform periodic backups of the PostgreSQL instance that enable you to restore the server in case of a failure. The utility is compatible with: -* PostgreSQL 11, 12, 13, 14, 15, 16 +* PostgreSQL 13, 14, 15, 16, 17, 18 As compared to other backup solutions, `pg_probackup` offers the following benefits that can help you implement different backup strategies and deal with large amounts of data: * Incremental backup: page-level incremental backup allows you to save disk space, speed up backup and restore. With three different incremental modes, you can plan the backup strategy in accordance with your data flow. @@ -41,9 +41,9 @@ Regardless of the chosen backup type, all backups taken with `pg_probackup` supp ## ptrack support `PTRACK` backup support is provided via the following options: -* vanilla PostgreSQL 11, 12, 13, 14, 15, 16 with [ptrack extension](https://github.com/postgrespro/ptrack) -* Postgres Pro Standard 11, 12, 13, 14, 15, 16 -* Postgres Pro Enterprise 11, 12, 13, 14, 15, 16 +* vanilla PostgreSQL 13, 14, 15, 16, 17 with [ptrack extension](https://github.com/postgrespro/ptrack) +* Postgres Pro Standard 13, 14, 15, 16, 17 +* Postgres Pro Enterprise 13, 14, 15, 16, 17 ## Limitations @@ -79,7 +79,7 @@ For users of Postgres Pro products, commercial editions of pg_probackup are avai ## Building from source ### Linux -To compile `pg_probackup`, you must have a PostgreSQL installation and raw source tree. Execute this in the module's directory: +To compile `pg_probackup`, you must have a PostgreSQL installation and raw source tree. For versions below 18, execute this in the module's directory: ```shell make USE_PGXS=1 PG_CONFIG= top_srcdir= @@ -91,6 +91,8 @@ The alternative way, without using the PGXS infrastructure, is to place `pg_prob cd && git clone https://github.com/postgrespro/pg_probackup contrib/pg_probackup && cd contrib/pg_probackup && make ``` +For version 18, you first have to apply the PostgreSQL core patch (patches/REL_18_STABLE_pg_probackup.patch), then recompile and reinstall PostgreSQL. + ### Windows Currently pg_probackup can be built using only MSVC 2013.
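For illustration, the PostgreSQL 18 build flow that the README change above describes might look like the following sketch. The checkout locations, the /usr/local/pgsql-18 install prefix, and the relative patch path are assumptions made for this example, not values taken from the diff:

```shell
# Hypothetical end-to-end build of pg_probackup against a patched PostgreSQL 18.
# 1. Fetch the PostgreSQL 18 sources and apply the core patch bundled with pg_probackup.
git clone -b REL_18_STABLE https://github.com/postgres/postgres.git postgres
cd postgres
git apply ../pg_probackup/patches/REL_18_STABLE_pg_probackup.patch

# 2. Recompile and reinstall the patched server.
./configure --prefix=/usr/local/pgsql-18
make -j"$(nproc)" && make install

# 3. Build pg_probackup via PGXS against the patched installation.
cd ../pg_probackup
make USE_PGXS=1 PG_CONFIG=/usr/local/pgsql-18/bin/pg_config top_srcdir="$(pwd)/../postgres"
```

The same ordering applies to the in-tree (contrib) build: apply the patch before compiling the server, then place pg_probackup under contrib and run make there.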
diff --git a/doc/pgprobackup.xml b/doc/pgprobackup.xml index 1491059c5..3497dab30 100644 --- a/doc/pgprobackup.xml +++ b/doc/pgprobackup.xml @@ -529,14 +529,14 @@ doc/src/sgml/pgprobackup.sgml Initialize the backup catalog: -backup_user@backup_host:~$ pg_probackup init -B /mnt/backups +backup_user@backup_host:~$ pg_probackup-16 init -B /mnt/backups INFO: Backup catalog '/mnt/backups' successfully initialized Add a backup instance called mydb to the backup catalog: -backup_user@backup_host:~$ pg_probackup add-instance \ +backup_user@backup_host:~$ pg_probackup-16 add-instance \ -B /mnt/backups \ -D /var/lib/pgpro/std-16/data \ --instance=node \ @@ -548,7 +548,7 @@ INFO: Instance 'node' successfully initialized Make a FULL backup: -backup_user@backup_host:~$ pg_probackup backup \ +backup_user@backup_host:~$ pg_probackup-16 backup \ -B /mnt/backups \ -b FULL \ --instance=node \ @@ -558,7 +558,7 @@ backup_user@backup_host:~$ pg_probackup backup \ --remote-user=postgres \ -U backup \ -d backupdb -INFO: Backup start, pg_probackup version: 2.5.15, instance: node, backup ID: SCUN1Q, backup mode: FULL, wal mode: STREAM, remote: true, compress-algorithm: zlib, compress-level: 1 +INFO: Backup start, pg_probackup version: 2.5.16, instance: node, backup ID: SCUN1Q, backup mode: FULL, wal mode: STREAM, remote: true, compress-algorithm: zlib, compress-level: 1 INFO: This PostgreSQL instance was initialized with data block checksums. Data block corruption will be detected INFO: Database backup start INFO: wait for pg_backup_start() @@ -582,7 +582,7 @@ INFO: Backup SCUN1Q completed List the backups of the instance: -backup_user@backup_host:~$ pg_probackup show \ +backup_user@backup_host:~$ pg_probackup-16 show \ -B /mnt/backups \ --instance=node ================================================================================================================================ @@ -594,7 +594,7 @@ backup_user@backup_host:~$ pg_probackup show \ Make an incremental backup in the DELTA mode: -backup_user@backup_host:~$ pg_probackup backup \ +backup_user@backup_host:~$ pg_probackup-16 backup \ -B /mnt/backups \ -b DELTA \ --instance=node \ @@ -604,7 +604,7 @@ backup_user@backup_host:~$ pg_probackup backup \ --remote-user=postgres \ -U backup \ -d backupdb -INFO: Backup start, pg_probackup version: 2.5.15, instance: node, backup ID: SCUN22, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: zlib, compress-level: 1 +INFO: Backup start, pg_probackup version: 2.5.16, instance: node, backup ID: SCUN22, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: zlib, compress-level: 1 INFO: This PostgreSQL instance was initialized with data block checksums. 
Data block corruption will be detected INFO: Database backup start INFO: wait for pg_backup_start() @@ -631,7 +631,7 @@ INFO: Backup SCUN22 completed Add or modify some parameters in the pg_probackup configuration file, so that you do not have to specify them each time on the command line: -backup_user@backup_host:~$ pg_probackup set-config \ +backup_user@backup_host:~$ pg_probackup-16 set-config \ -B /mnt/backups \ --instance=node \ --remote-host=postgres_host \ @@ -643,7 +643,7 @@ backup_user@backup_host:~$ pg_probackup set-config \ Check the configuration of the instance: -backup_user@backup_host:~$ pg_probackup show-config \ +backup_user@backup_host:~$ pg_probackup-16 show-config \ -B /mnt/backups \ --instance=node # Backup instance information @@ -686,13 +686,13 @@ remote-user = postgres Make another incremental backup in the DELTA mode, omitting the parameters stored in the configuration file earlier: -backup_user@backup_host:~$ pg_probackup backup \ +backup_user@backup_host:~$ pg_probackup-16 backup \ -B /mnt/backups \ -b DELTA \ --instance=node \ --stream \ --compress-algorithm=zlib -INFO: Backup start, pg_probackup version: 2.5.15, instance: node, backup ID: SCUN2C, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: zlib, compress-level: 1 +INFO: Backup start, pg_probackup version: 2.5.16, instance: node, backup ID: SCUN2C, backup mode: DELTA, wal mode: STREAM, remote: true, compress-algorithm: zlib, compress-level: 1 INFO: This PostgreSQL instance was initialized with data block checksums. Data block corruption will be detected INFO: Database backup start INFO: wait for pg_backup_start() @@ -718,7 +718,7 @@ INFO: Backup SCUN2C completed List the backups of the instance again: -backup_user@backup_host:~$ pg_probackup show \ +backup_user@backup_host:~$ pg_probackup-16 show \ -B /mnt/backups \ --instance=node =================================================================================================================================== @@ -732,7 +732,7 @@ backup_user@backup_host:~$ pg_probackup show \ Restore the data from the latest available backup to an arbitrary location: -backup_user@backup_host:~$ pg_probackup restore \ +backup_user@backup_host:~$ pg_probackup-16 restore \ -B /mnt/backups \ -D /var/lib/pgpro/std-16/staging-data \ --instance=node @@ -924,7 +924,7 @@ yumdownloader --source pg_probackup-16 . /etc/os-release -echo "rpm http://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p$VERSION_ID x86_64 vanilla" | \ +echo "rpm http://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-p${VERSION_ID%%.*} x86_64 vanilla" | \ sudo tee /etc/apt/sources.list.d/pg_probackup.list @@ -934,7 +934,7 @@ sudo tee /etc/apt/sources.list.d/pg_probackup.list . 
/etc/os-release -echo "rpm http://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-$VERSION_ID x86_64 vanilla" | \ +echo "rpm http://repo.postgrespro.ru/pg_probackup/rpm/latest/altlinux-${VERSION_ID%%.*} x86_64 vanilla" | \ sudo tee /etc/apt/sources.list.d/pg_probackup.list @@ -2332,7 +2332,7 @@ pg_probackup restore -B backup_dir --instance= diff --git a/patches/REL_18_STABLE_pg_probackup.patch b/patches/REL_18_STABLE_pg_probackup.patch new file mode 100644 --- /dev/null +++ b/patches/REL_18_STABLE_pg_probackup.patch +Date: Mon, 17 Nov 2025 02:57:38 +0100 +Subject: [PATCH] REL_18_STABLE_pg_probackup + +--- + src/backend/utils/hash/pg_crc.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/src/backend/utils/hash/pg_crc.c b/src/backend/utils/hash/pg_crc.c +index e67a74ef852..6a474e804b5 100644 +--- a/src/backend/utils/hash/pg_crc.c ++++ b/src/backend/utils/hash/pg_crc.c +@@ -99,6 +99,7 @@ const uint32 pg_crc32_table[256] = { + 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D + }; + ++#ifndef FRONTEND + /* + * SQL-callable functions + */ +@@ -128,3 +129,4 @@ crc32c_bytea(PG_FUNCTION_ARGS) + + PG_RETURN_INT64(crc); + } ++#endif +-- +2.39.5 (Apple Git-154) + diff --git a/src/archive.c b/src/archive.c index 7d753c8b3..b1c4ea347 100644 --- a/src/archive.c +++ b/src/archive.c @@ -3,7 +3,7 @@ * archive.c: - pg_probackup specific archive commands for archive backups. * * - * Portions Copyright (c) 2018-2022, Postgres Professional + * Portions Copyright (c) 2018-2025, Postgres Professional * *------------------------------------------------------------------------- */ diff --git a/src/backup.c b/src/backup.c index 78c3512e9..3cbd4fbf0 100644 --- a/src/backup.c +++ b/src/backup.c @@ -3,7 +3,7 @@ * backup.c: backup DB cluster, archived WAL * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2022, Postgres Professional + * Portions Copyright (c) 2015-2025, Postgres Professional * *------------------------------------------------------------------------- */ @@ -506,12 +506,12 @@ do_backup_pg(InstanceState *instanceState, PGconn *backup_conn, /* copy pg_control at very end */ if (backup_isok) { + char from_fullpath[MAXPGPATH]; + char to_fullpath[MAXPGPATH]; elog(progress ?
INFO : LOG, "Progress: Backup file \"%s\"", src_pg_control_file->rel_path); - char from_fullpath[MAXPGPATH]; - char to_fullpath[MAXPGPATH]; join_path_components(from_fullpath, instance_config.pgdata, src_pg_control_file->rel_path); join_path_components(to_fullpath, current.database_dir, src_pg_control_file->rel_path); @@ -2518,11 +2518,16 @@ process_block_change(ForkNumber forknum, RelFileNode rnode, BlockNumber blkno) int segno; pgFile **file_item; pgFile f; +#if PG_VERSION_NUM >= 180000 + RelPathStr rel_path_str = relpathperm(rnode, forknum); + rel_path = rel_path_str.str; +#else + rel_path = relpathperm(rnode, forknum); +#endif segno = blkno / RELSEG_SIZE; blkno_inseg = blkno % RELSEG_SIZE; - rel_path = relpathperm(rnode, forknum); if (segno > 0) f.rel_path = psprintf("%s.%u", rel_path, segno); else @@ -2554,7 +2559,9 @@ process_block_change(ForkNumber forknum, RelFileNode rnode, BlockNumber blkno) if (segno > 0) pg_free(f.rel_path); +#if PG_VERSION_NUM < 180000 pg_free(rel_path); +#endif } diff --git a/src/catalog.c b/src/catalog.c index b29090789..409d9141f 100644 --- a/src/catalog.c +++ b/src/catalog.c @@ -3,7 +3,7 @@ * catalog.c: backup catalog operation * * Portions Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2025, Postgres Professional * *------------------------------------------------------------------------- */ @@ -1755,16 +1755,16 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) for (i = 0; i < parray_num(timelineinfos); i++) { - timelineInfo *tlinfo = parray_get(timelineinfos, i); + timelineInfo *tlInfo = parray_get(timelineinfos, i); for (j = 0; j < parray_num(backups); j++) { pgBackup *backup = parray_get(backups, j); - if (tlinfo->tli == backup->tli) + if (tlInfo->tli == backup->tli) { - if (tlinfo->backups == NULL) - tlinfo->backups = parray_new(); + if (tlInfo->backups == NULL) + tlInfo->backups = parray_new(); - parray_append(tlinfo->backups, backup); + parray_append(tlInfo->backups, backup); } } } @@ -1772,10 +1772,10 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) /* determine oldest backup and closest backup for every timeline */ for (i = 0; i < parray_num(timelineinfos); i++) { - timelineInfo *tlinfo = parray_get(timelineinfos, i); + timelineInfo *tlInfo = parray_get(timelineinfos, i); - tlinfo->oldest_backup = get_oldest_backup(tlinfo); - tlinfo->closest_backup = get_closest_backup(tlinfo); + tlInfo->oldest_backup = get_oldest_backup(tlInfo); + tlInfo->closest_backup = get_closest_backup(tlInfo); } /* determine which WAL segments must be kept because of wal retention */ @@ -1845,18 +1845,18 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) for (i = 0; i < parray_num(timelineinfos); i++) { int count = 0; - timelineInfo *tlinfo = parray_get(timelineinfos, i); + timelineInfo *tlInfo = parray_get(timelineinfos, i); /* * Iterate backward on backups belonging to this timeline to find * anchor_backup. NOTE Here we rely on the fact that backups list * is ordered by start_lsn DESC. 
*/ - if (tlinfo->backups) + if (tlInfo->backups) { - for (j = 0; j < parray_num(tlinfo->backups); j++) + for (j = 0; j < parray_num(tlInfo->backups); j++) { - pgBackup *backup = parray_get(tlinfo->backups, j); + pgBackup *backup = parray_get(tlInfo->backups, j); /* sanity */ if (XLogRecPtrIsInvalid(backup->start_lsn) || @@ -1886,12 +1886,12 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) if (count == instance->wal_depth) { elog(LOG, "On timeline %i WAL is protected from purge at %X/%X", - tlinfo->tli, + tlInfo->tli, (uint32) (backup->start_lsn >> 32), (uint32) (backup->start_lsn)); - tlinfo->anchor_lsn = backup->start_lsn; - tlinfo->anchor_tli = backup->tli; + tlInfo->anchor_lsn = backup->start_lsn; + tlInfo->anchor_tli = backup->tli; break; } } @@ -1916,7 +1916,7 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) * If closest_backup is not available, then general WAL purge rules * are applied. */ - if (XLogRecPtrIsInvalid(tlinfo->anchor_lsn)) + if (XLogRecPtrIsInvalid(tlInfo->anchor_lsn)) { /* * Failed to find anchor_lsn in our own timeline. @@ -1942,7 +1942,7 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) xlogInterval *interval = NULL; TimeLineID tli = 0; /* check if tli has closest_backup */ - if (!tlinfo->closest_backup) + if (!tlInfo->closest_backup) /* timeline has no closest_backup, wal retention cannot be * applied to this timeline. * Timeline will be purged up to oldest_backup if any or @@ -1952,47 +1952,47 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) continue; /* sanity for closest_backup */ - if (XLogRecPtrIsInvalid(tlinfo->closest_backup->start_lsn) || - tlinfo->closest_backup->tli <= 0) + if (XLogRecPtrIsInvalid(tlInfo->closest_backup->start_lsn) || + tlInfo->closest_backup->tli <= 0) continue; /* * Set anchor_lsn and anchor_tli to protect whole timeline from purge * In the example above: tli3. */ - tlinfo->anchor_lsn = tlinfo->closest_backup->start_lsn; - tlinfo->anchor_tli = tlinfo->closest_backup->tli; + tlInfo->anchor_lsn = tlInfo->closest_backup->start_lsn; + tlInfo->anchor_tli = tlInfo->closest_backup->tli; /* closest backup may be located not in parent timeline */ - closest_backup = tlinfo->closest_backup; + closest_backup = tlInfo->closest_backup; - tli = tlinfo->tli; + tli = tlInfo->tli; /* * Iterate over parent timeline chain and * look for timeline where closest_backup belong */ - while (tlinfo->parent_link) + while (tlInfo->parent_link) { /* In case of intermediate timeline save to keep_segments * begin_segno and switchpoint segment. * In case of final timelines save to keep_segments * closest_backup start_lsn segment and switchpoint segment. 
*/ - XLogRecPtr switchpoint = tlinfo->switchpoint; + XLogRecPtr switchpoint = tlInfo->switchpoint; - tlinfo = tlinfo->parent_link; + tlInfo = tlInfo->parent_link; - if (tlinfo->keep_segments == NULL) - tlinfo->keep_segments = parray_new(); + if (tlInfo->keep_segments == NULL) + tlInfo->keep_segments = parray_new(); /* in any case, switchpoint segment must be added to interval */ interval = palloc(sizeof(xlogInterval)); GetXLogSegNo(switchpoint, interval->end_segno, instance->xlog_seg_size); /* Save [S1`, S2] to keep_segments */ - if (tlinfo->tli != closest_backup->tli) - interval->begin_segno = tlinfo->begin_segno; + if (tlInfo->tli != closest_backup->tli) + interval->begin_segno = tlInfo->begin_segno; /* Save [B1, S1] to keep_segments */ else GetXLogSegNo(closest_backup->start_lsn, interval->begin_segno, instance->xlog_seg_size); @@ -2002,27 +2002,27 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) * covered by other larger interval. */ - GetXLogFileName(begin_segno_str, tlinfo->tli, interval->begin_segno, instance->xlog_seg_size); - GetXLogFileName(end_segno_str, tlinfo->tli, interval->end_segno, instance->xlog_seg_size); + GetXLogFileName(begin_segno_str, tlInfo->tli, interval->begin_segno, instance->xlog_seg_size); + GetXLogFileName(end_segno_str, tlInfo->tli, interval->end_segno, instance->xlog_seg_size); elog(LOG, "Timeline %i to stay reachable from timeline %i " "protect from purge WAL interval between " "%s and %s on timeline %i", tli, closest_backup->tli, begin_segno_str, - end_segno_str, tlinfo->tli); + end_segno_str, tlInfo->tli); - parray_append(tlinfo->keep_segments, interval); + parray_append(tlInfo->keep_segments, interval); continue; } continue; } /* Iterate over backups left */ - for (j = count; j < parray_num(tlinfo->backups); j++) + for (j = count; j < parray_num(tlInfo->backups); j++) { XLogSegNo segno = 0; xlogInterval *interval = NULL; - pgBackup *backup = parray_get(tlinfo->backups, j); + pgBackup *backup = parray_get(tlInfo->backups, j); /* * We must calculate keep_segments intervals for ARCHIVE backups @@ -2039,7 +2039,7 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) continue; /* no point in clogging keep_segments by backups protected by anchor_lsn */ - if (backup->start_lsn >= tlinfo->anchor_lsn) + if (backup->start_lsn >= tlInfo->anchor_lsn) continue; /* append interval to keep_segments */ @@ -2057,8 +2057,8 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) else interval->end_segno = segno; - GetXLogFileName(begin_segno_str, tlinfo->tli, interval->begin_segno, instance->xlog_seg_size); - GetXLogFileName(end_segno_str, tlinfo->tli, interval->end_segno, instance->xlog_seg_size); + GetXLogFileName(begin_segno_str, tlInfo->tli, interval->begin_segno, instance->xlog_seg_size); + GetXLogFileName(end_segno_str, tlInfo->tli, interval->end_segno, instance->xlog_seg_size); elog(LOG, "Archive backup %s to stay consistent " "protect from purge WAL interval " @@ -2066,10 +2066,10 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) backup_id_of(backup), begin_segno_str, end_segno_str, backup->tli); - if (tlinfo->keep_segments == NULL) - tlinfo->keep_segments = parray_new(); + if (tlInfo->keep_segments == NULL) + tlInfo->keep_segments = parray_new(); - parray_append(tlinfo->keep_segments, interval); + parray_append(tlInfo->keep_segments, interval); } } @@ -2081,27 +2081,27 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) 
for (i = 0; i < parray_num(timelineinfos); i++) { XLogSegNo anchor_segno = 0; - timelineInfo *tlinfo = parray_get(timelineinfos, i); + timelineInfo *tlInfo = parray_get(timelineinfos, i); /* * At this point invalid anchor_lsn can be only in one case: * timeline is going to be purged by regular WAL purge rules. */ - if (XLogRecPtrIsInvalid(tlinfo->anchor_lsn)) + if (XLogRecPtrIsInvalid(tlInfo->anchor_lsn)) continue; /* * anchor_lsn is located in another timeline, it means that the timeline * will be protected from purge entirely. */ - if (tlinfo->anchor_tli > 0 && tlinfo->anchor_tli != tlinfo->tli) + if (tlInfo->anchor_tli > 0 && tlInfo->anchor_tli != tlInfo->tli) continue; - GetXLogSegNo(tlinfo->anchor_lsn, anchor_segno, instance->xlog_seg_size); + GetXLogSegNo(tlInfo->anchor_lsn, anchor_segno, instance->xlog_seg_size); - for (j = 0; j < parray_num(tlinfo->xlog_filelist); j++) + for (j = 0; j < parray_num(tlInfo->xlog_filelist); j++) { - xlogFile *wal_file = (xlogFile *) parray_get(tlinfo->xlog_filelist, j); + xlogFile *wal_file = (xlogFile *) parray_get(tlInfo->xlog_filelist, j); if (wal_file->segno >= anchor_segno) { @@ -2110,13 +2110,13 @@ catalog_get_timelines(InstanceState *instanceState, InstanceConfig *instance) } /* no keep segments */ - if (!tlinfo->keep_segments) + if (!tlInfo->keep_segments) continue; /* Protect segments belonging to one of the keep intervals */ - for (k = 0; k < parray_num(tlinfo->keep_segments); k++) + for (k = 0; k < parray_num(tlInfo->keep_segments); k++) { - xlogInterval *keep_segments = (xlogInterval *) parray_get(tlinfo->keep_segments, k); + xlogInterval *keep_segments = (xlogInterval *) parray_get(tlInfo->keep_segments, k); if ((wal_file->segno >= keep_segments->begin_segno) && wal_file->segno <= keep_segments->end_segno) diff --git a/src/catchup.c b/src/catchup.c index 00752b194..39fd37d26 100644 --- a/src/catchup.c +++ b/src/catchup.c @@ -2,7 +2,7 @@ * * catchup.c: sync DB cluster * - * Copyright (c) 2021-2022, Postgres Professional + * Copyright (c) 2021-2025, Postgres Professional * *------------------------------------------------------------------------- */ diff --git a/src/checkdb.c b/src/checkdb.c index 2a7d4e9eb..24c80657e 100644 --- a/src/checkdb.c +++ b/src/checkdb.c @@ -9,7 +9,7 @@ * instance can be logically verified using extensions * amcheck or amcheck_next. * - * Portions Copyright (c) 2019-2019, Postgres Professional + * Portions Copyright (c) 2019-2025, Postgres Professional * *------------------------------------------------------------------------- */ diff --git a/src/configure.c b/src/configure.c index 964548343..59f164fa9 100644 --- a/src/configure.c +++ b/src/configure.c @@ -2,7 +2,7 @@ * * configure.c: - manage backup catalog.
* - * Copyright (c) 2017-2019, Postgres Professional + * Copyright (c) 2017-2025, Postgres Professional * *------------------------------------------------------------------------- */ diff --git a/src/data.c b/src/data.c index 1a9616bae..544adf182 100644 --- a/src/data.c +++ b/src/data.c @@ -3,7 +3,7 @@ * data.c: utils to parse and backup data pages * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2022, Postgres Professional + * Portions Copyright (c) 2015-2025, Postgres Professional * *------------------------------------------------------------------------- */ @@ -1531,7 +1531,7 @@ validate_one_page(Page page, BlockNumber absolute_blkno, /* Verify checksum */ page_st->checksum = pg_checksum_page(page, absolute_blkno); - if (checksum_version) + if (checksum_version && !skip_block_validation) { /* Checksums are enabled, so check them. */ if (page_st->checksum != ((PageHeader) page)->pd_checksum) diff --git a/src/datapagemap.c b/src/datapagemap.c index 7e4202a72..49ce91334 100644 --- a/src/datapagemap.c +++ b/src/datapagemap.c @@ -5,7 +5,7 @@ * * This is a fairly simple bitmap. * - * Copyright (c) 2013-2019, PostgreSQL Global Development Group + * Copyright (c) 2013-2025, PostgreSQL Global Development Group * *------------------------------------------------------------------------- */ diff --git a/src/delete.c b/src/delete.c index f48ecc95f..1c628e04f 100644 --- a/src/delete.c +++ b/src/delete.c @@ -3,7 +3,7 @@ * delete.c: delete backup files. * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2025, Postgres Professional * *------------------------------------------------------------------------- */ diff --git a/src/dir.c b/src/dir.c index 4b1bc2816..a5bde57f3 100644 --- a/src/dir.c +++ b/src/dir.c @@ -3,7 +3,7 @@ * dir.c: directory operation utility. * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2022, Postgres Professional + * Portions Copyright (c) 2015-2025, Postgres Professional * *------------------------------------------------------------------------- */ diff --git a/src/help.c b/src/help.c index e18706a13..eacef9a48 100644 --- a/src/help.c +++ b/src/help.c @@ -2,7 +2,7 @@ * * help.c * - * Copyright (c) 2017-2021, Postgres Professional + * Copyright (c) 2017-2025, Postgres Professional * *------------------------------------------------------------------------- */ diff --git a/src/init.c b/src/init.c index 837e2bad0..6afb5706c 100644 --- a/src/init.c +++ b/src/init.c @@ -3,7 +3,7 @@ * init.c: - initialize backup catalog. 
* * Portions Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2025, Postgres Professional * *------------------------------------------------------------------------- */ diff --git a/src/merge.c b/src/merge.c index e8f926795..3692fee8a 100644 --- a/src/merge.c +++ b/src/merge.c @@ -2,7 +2,7 @@ * * merge.c: merge FULL and incremental backups * - * Copyright (c) 2018-2022, Postgres Professional + * Copyright (c) 2018-2025, Postgres Professional * *------------------------------------------------------------------------- */ diff --git a/src/parsexlog.c b/src/parsexlog.c index 7df169fbf..3dd591e52 100644 --- a/src/parsexlog.c +++ b/src/parsexlog.c @@ -5,7 +5,7 @@ * * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2025, Postgres Professional * *------------------------------------------------------------------------- */ diff --git a/src/pg_probackup.c b/src/pg_probackup.c index fa67ddff5..030d64b00 100644 --- a/src/pg_probackup.c +++ b/src/pg_probackup.c @@ -35,7 +35,7 @@ * which includes info about pgdata directory and connection. * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2021, Postgres Professional + * Portions Copyright (c) 2015-2025, Postgres Professional * *------------------------------------------------------------------------- */ @@ -687,7 +687,7 @@ main(int argc, char *argv[]) if (instance_config.pgdata != NULL && (backup_subcmd != ARCHIVE_GET_CMD && backup_subcmd != CATCHUP_CMD) && !is_absolute_path(instance_config.pgdata)) - elog(ERROR, "-D, --pgdata must be an absolute path"); + elog(ERROR, "-D, --pgdata must be an absolute path: %s", instance_config.pgdata); #if PG_VERSION_NUM >= 110000 /* Check xlog-seg-size option */ diff --git a/src/pg_probackup.h b/src/pg_probackup.h index ae99e0605..e5d034953 100644 --- a/src/pg_probackup.h +++ b/src/pg_probackup.h @@ -3,7 +3,7 @@ * pg_probackup.h: Backup/Recovery manager for PostgreSQL. 
* * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2022, Postgres Professional + * Portions Copyright (c) 2015-2025, Postgres Professional * *------------------------------------------------------------------------- */ @@ -356,7 +356,7 @@ typedef enum ShowFormat #define BYTES_INVALID (-1) /* file didn't change since previous backup; DELTA backups do not rely on it */ #define FILE_NOT_FOUND (-2) /* file disappeared during backup */ #define BLOCKNUM_INVALID (-1) -#define PROGRAM_VERSION "2.5.15" +#define PROGRAM_VERSION "2.5.16" /* update when remote agent API or behaviour changes */ #define AGENT_PROTOCOL_VERSION 20509 @@ -431,6 +431,8 @@ typedef struct InstanceConfig extern ConfigOption instance_options[]; extern InstanceConfig instance_config; extern time_t current_time; +extern bool no_validate; +extern IncrRestoreMode incremental_mode; typedef struct PGNodeInfo { @@ -805,9 +807,12 @@ extern pid_t my_pid; extern __thread int my_thread_num; extern int num_threads; extern bool stream_wal; +extern bool no_color; extern bool show_color; extern bool progress; +extern bool no_sync; extern bool is_archive_cmd; /* true for archive-{get,push} */ +extern time_t start_time; /* In pre-10 'replication_slot' is defined in receivelog.h */ extern char *replication_slot; #if PG_VERSION_NUM >= 100000 @@ -816,6 +821,7 @@ extern bool temp_slot; extern bool perm_slot; /* backup options */ +extern bool backup_logs; extern bool smooth_checkpoint; /* remote probackup options */ @@ -827,8 +833,15 @@ extern bool exclusive_backup; extern bool delete_wal; extern bool delete_expired; extern bool merge_expired; +extern bool force; extern bool dry_run; +/* archive push options */ +extern int batch_size; + +/* archive get options */ +extern bool no_validate_wal; + /* ===== instanceState ===== */ typedef struct InstanceState @@ -858,11 +871,18 @@ typedef struct InstanceState /* show options */ extern ShowFormat show_format; +extern bool show_archive; + +/* set backup options */ +extern int64 ttl; /* checkdb options */ +extern bool need_amcheck; extern bool heapallindexed; extern bool checkunique; +extern bool amcheck_parent; extern bool skip_block_validation; +extern bool skip_external_dirs; /* current settings */ extern pgBackup current; diff --git a/src/pg_probackup_state.h b/src/pg_probackup_state.h index 56d852537..a1b221f46 100644 --- a/src/pg_probackup_state.h +++ b/src/pg_probackup_state.h @@ -2,7 +2,7 @@ * * pg_probackup_state.h: Definitions of internal pg_probackup states * - * Portions Copyright (c) 2021, Postgres Professional + * Portions Copyright (c) 2021-2025, Postgres Professional * *------------------------------------------------------------------------- */ diff --git a/src/ptrack.c b/src/ptrack.c index d27629e45..ba97088c1 100644 --- a/src/ptrack.c +++ b/src/ptrack.c @@ -2,7 +2,7 @@ * * ptrack.c: support functions for ptrack backups * - * Copyright (c) 2021 Postgres Professional + * Copyright (c) 2021-2025 Postgres Professional * *------------------------------------------------------------------------- */ diff --git a/src/restore.c b/src/restore.c index f9310dcee..0be151a99 100644 --- a/src/restore.c +++ b/src/restore.c @@ -3,7 +3,7 @@ * restore.c: restore DB cluster and archived WAL.
* * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2022, Postgres Professional + * Portions Copyright (c) 2015-2025, Postgres Professional * *------------------------------------------------------------------------- */ diff --git a/src/show.c b/src/show.c index 810262df6..0732c6a7a 100644 --- a/src/show.c +++ b/src/show.c @@ -3,7 +3,7 @@ * show.c: show backup information. * * Portions Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2022, Postgres Professional + * Portions Copyright (c) 2015-2025, Postgres Professional * *------------------------------------------------------------------------- */ diff --git a/src/stream.c b/src/stream.c index 77453e997..f136ed151 100644 --- a/src/stream.c +++ b/src/stream.c @@ -2,7 +2,7 @@ * * stream.c: pg_probackup specific code for WAL streaming * - * Portions Copyright (c) 2015-2020, Postgres Professional + * Portions Copyright (c) 2015-2025, Postgres Professional * *------------------------------------------------------------------------- */ @@ -188,7 +188,11 @@ CreateReplicationSlot_compat(PGconn *conn, const char *slot_name, const char *pl bool is_temporary, bool is_physical, bool slot_exists_ok) { -#if PG_VERSION_NUM >= 150000 +#if PG_VERSION_NUM >= 180000 + return CreateReplicationSlot(conn, slot_name, plugin, is_temporary, is_physical, + /* reserve_wal = */ true, slot_exists_ok, /* two_phase = */ false, /* failover = */ false); +#elif PG_VERSION_NUM >= 150000 + return CreateReplicationSlot(conn, slot_name, plugin, is_temporary, is_physical, /* reserve_wal = */ true, slot_exists_ok, /* two_phase = */ false); #elif PG_VERSION_NUM >= 110000 @@ -592,7 +596,7 @@ parse_tli_history_buffer(char *history, TimeLineID tli) if (curLineLen > 0) { char *ptr; - TimeLineID tli; + TimeLineID currTLI; uint32 switchpoint_hi; uint32 switchpoint_lo; int nfields; @@ -605,7 +609,7 @@ parse_tli_history_buffer(char *history, TimeLineID tli) if (*ptr == '\0' || *ptr == '#') continue; - nfields = sscanf(tempStr, "%u\t%X/%X", &tli, &switchpoint_hi, &switchpoint_lo); + nfields = sscanf(tempStr, "%u\t%X/%X", &currTLI, &switchpoint_hi, &switchpoint_lo); if (nfields < 1) { @@ -615,11 +619,11 @@ parse_tli_history_buffer(char *history, TimeLineID tli) if (nfields != 3) elog(ERROR, "Syntax error in timeline history: \"%s\". Expected a transaction log switchpoint location.", tempStr); - if (last_timeline && tli <= last_timeline->tli) + if (last_timeline && currTLI <= last_timeline->tli) elog(ERROR, "Timeline IDs must be in increasing sequence: \"%s\"", tempStr); entry = pgut_new(TimeLineHistoryEntry); - entry->tli = tli; + entry->tli = currTLI; entry->end = ((uint64) switchpoint_hi << 32) | switchpoint_lo; last_timeline = entry; @@ -628,7 +632,7 @@ parse_tli_history_buffer(char *history, TimeLineID tli) result = parray_new(); parray_append(result, entry); elog(VERBOSE, "parse_tli_history_buffer() found entry: tli = %X, end = %X/%X", - tli, switchpoint_hi, switchpoint_lo); + currTLI, switchpoint_hi, switchpoint_lo); /* we ignore the remainder of each line */ } diff --git a/src/util.c b/src/util.c index 3c0a33453..5189ba3b0 100644 --- a/src/util.c +++ b/src/util.c @@ -3,7 +3,7 @@ * util.c: log messages to log file or stderr, and misc code. 
* * Portions Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2021, Postgres Professional + * Portions Copyright (c) 2015-2025, Postgres Professional * *------------------------------------------------------------------------- */ @@ -93,8 +93,13 @@ digestControlFile(ControlFileData *ControlFile, char *src, size_t size) #endif if (size != ControlFileSize) + { + if (size == 16384) + elog(ERROR, "Unexpected control file size %d, expected %d. You are probably trying to connect to a Postgres Pro instance with %s built against vanilla PostgreSQL.", + (int) size, ControlFileSize, PROGRAM_NAME); elog(ERROR, "Unexpected control file size %d, expected %d", (int) size, ControlFileSize); + } memcpy(ControlFile, src, sizeof(ControlFileData)); diff --git a/src/utils/configuration.c b/src/utils/configuration.c index f049aa1be..47497850f 100644 --- a/src/utils/configuration.c +++ b/src/utils/configuration.c @@ -3,7 +3,7 @@ * configuration.c: - function implementations to work with pg_probackup * configurations. * - * Copyright (c) 2017-2019, Postgres Professional + * Copyright (c) 2017-2025, Postgres Professional * *------------------------------------------------------------------------- */ diff --git a/src/utils/configuration.h b/src/utils/configuration.h index 59da29bd5..da86b9db0 100644 --- a/src/utils/configuration.h +++ b/src/utils/configuration.h @@ -3,7 +3,7 @@ * configuration.h: - prototypes of functions and structures for * configuration. * - * Copyright (c) 2018-2019, Postgres Professional + * Copyright (c) 2018-2025, Postgres Professional * *------------------------------------------------------------------------- */ diff --git a/src/utils/file.c b/src/utils/file.c index fa08939f5..b49c97d0c 100644 --- a/src/utils/file.c +++ b/src/utils/file.c @@ -2499,7 +2499,7 @@ fio_send_pages_impl(int out, char* buf) int fio_send_file_gz(const char *from_fullpath, FILE* out, char **errormsg) { - fio_header hdr; + fio_header header; int exit_code = SEND_OK; char *in_buf = pgut_malloc(CHUNK_SIZE); /* buffer for compressed data */ char *out_buf = pgut_malloc(OUT_BUF_SIZE); /* 1MB buffer for decompressed data */ @@ -2507,13 +2507,13 @@ fio_send_file_gz(const char *from_fullpath, FILE* out, char **errormsg) /* decompressor */ z_stream *strm = NULL; - hdr.cop = FIO_SEND_FILE; - hdr.size = path_len; + header.cop = FIO_SEND_FILE; + header.size = path_len; // elog(VERBOSE, "Thread [%d]: Attempting to open remote compressed WAL file '%s'", // thread_num, from_fullpath); - IO_CHECK(fio_write_all(fio_stdout, &hdr, sizeof(hdr)), sizeof(hdr)); + IO_CHECK(fio_write_all(fio_stdout, &header, sizeof(header)), sizeof(header)); IO_CHECK(fio_write_all(fio_stdout, from_fullpath, path_len), path_len); for (;;) diff --git a/src/utils/json.c b/src/utils/json.c index 2c8e0fe9b..1bcb4e644 100644 --- a/src/utils/json.c +++ b/src/utils/json.c @@ -2,7 +2,7 @@ * * json.c: - make json document. * - * Copyright (c) 2018-2019, Postgres Professional + * Copyright (c) 2018-2025, Postgres Professional * *------------------------------------------------------------------------- */ diff --git a/src/utils/json.h b/src/utils/json.h index f80832e69..f635e1f3c 100644 --- a/src/utils/json.h +++ b/src/utils/json.h @@ -2,7 +2,7 @@ * * json.h: - prototypes of json output functions.
* - * Copyright (c) 2018-2019, Postgres Professional + * Copyright (c) 2018-2025, Postgres Professional * *------------------------------------------------------------------------- */ diff --git a/src/utils/logger.c b/src/utils/logger.c index 7ea41f74e..ade57e529 100644 --- a/src/utils/logger.c +++ b/src/utils/logger.c @@ -2,7 +2,7 @@ * * logger.c: - log events into log file or stderr. * - * Copyright (c) 2017-2019, Postgres Professional + * Copyright (c) 2017-2025, Postgres Professional * *------------------------------------------------------------------------- */ diff --git a/src/utils/logger.h b/src/utils/logger.h index adc5061e0..9e2cb958f 100644 --- a/src/utils/logger.h +++ b/src/utils/logger.h @@ -2,7 +2,7 @@ * * logger.h: - prototypes of logger functions. * - * Copyright (c) 2017-2019, Postgres Professional + * Copyright (c) 2017-2025, Postgres Professional * *------------------------------------------------------------------------- */ diff --git a/src/utils/pgut.c b/src/utils/pgut.c index 9559fa644..003f0a559 100644 --- a/src/utils/pgut.c +++ b/src/utils/pgut.c @@ -3,7 +3,7 @@ * pgut.c * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2017-2021, Postgres Professional + * Portions Copyright (c) 2017-2025, Postgres Professional * *------------------------------------------------------------------------- */ @@ -1062,7 +1062,23 @@ handle_interrupt(SIGNAL_ARGS) static void init_cancel_handler(void) { +#if PG_VERSION_NUM < 180000 oldhandler = pqsignal(SIGINT, handle_interrupt); +#else + { + struct sigaction act, oldact; + + act.sa_handler = handle_interrupt; + sigemptyset(&act.sa_mask); + act.sa_flags = SA_RESTART; + + /* Get the previous handler and set the new one */ + if (sigaction(SIGINT, &act, &oldact) < 0) + elog(ERROR, "sigaction(SIGINT) failed: %m"); + + oldhandler = oldact.sa_handler; + } +#endif pqsignal(SIGQUIT, handle_interrupt); pqsignal(SIGTERM, handle_interrupt); pqsignal(SIGPIPE, handle_interrupt); diff --git a/src/utils/pgut.h b/src/utils/pgut.h index 1b7b7864c..6fea4d022 100644 --- a/src/utils/pgut.h +++ b/src/utils/pgut.h @@ -3,7 +3,7 @@ * pgut.h * * Portions Copyright (c) 2009-2013, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2017-2021, Postgres Professional + * Portions Copyright (c) 2017-2025, Postgres Professional * *------------------------------------------------------------------------- */ diff --git a/src/utils/remote.h b/src/utils/remote.h index dc98644ab..582210142 100644 --- a/src/utils/remote.h +++ b/src/utils/remote.h @@ -2,7 +2,7 @@ * * remote.h: - prototypes of remote functions. * - * Copyright (c) 2017-2019, Postgres Professional + * Copyright (c) 2017-2025, Postgres Professional * *------------------------------------------------------------------------- */ diff --git a/src/utils/thread.c b/src/utils/thread.c index 1c469bd29..4127701f0 100644 --- a/src/utils/thread.c +++ b/src/utils/thread.c @@ -2,7 +2,7 @@ * * thread.c: - multi-platform pthread implementations. * - * Copyright (c) 2018-2019, Postgres Professional + * Copyright (c) 2018-2025, Postgres Professional * *------------------------------------------------------------------------- */ diff --git a/src/utils/thread.h b/src/utils/thread.h index 2eaa5fb45..d79e2d8d0 100644 --- a/src/utils/thread.h +++ b/src/utils/thread.h @@ -2,7 +2,7 @@ * * thread.h: - multi-platform pthread implementations. 
* - * Copyright (c) 2018-2019, Postgres Professional + * Copyright (c) 2018-2025, Postgres Professional * *------------------------------------------------------------------------- */ diff --git a/src/validate.c b/src/validate.c index 0887b2e7a..3bff3f756 100644 --- a/src/validate.c +++ b/src/validate.c @@ -3,7 +3,7 @@ * validate.c: validate backup files. * * Portions Copyright (c) 2009-2011, NIPPON TELEGRAPH AND TELEPHONE CORPORATION - * Portions Copyright (c) 2015-2019, Postgres Professional + * Portions Copyright (c) 2015-2025, Postgres Professional * *------------------------------------------------------------------------- */ diff --git a/tests/archive_test.py b/tests/archive_test.py index 00fd1f592..d2a141cb7 100644 --- a/tests/archive_test.py +++ b/tests/archive_test.py @@ -8,7 +8,6 @@ import subprocess from sys import exit from time import sleep -from distutils.dir_util import copy_tree class ArchiveTest(ProbackupTest, unittest.TestCase): @@ -1243,10 +1242,6 @@ def test_archive_catalog(self): self.add_instance(backup_dir, 'replica', replica) self.set_archiving(backup_dir, 'replica', replica, replica=True) - copy_tree( - os.path.join(backup_dir, 'wal', 'master'), - os.path.join(backup_dir, 'wal', 'replica')) - replica.slow_start(replica=True) # FULL backup replica @@ -2442,6 +2437,9 @@ def test_archive_get_prefetch_corruption(self): prefetch_line = 'Prefetched WAL segment {0} is invalid, cannot use it'.format(filename) restored_line = 'LOG: restored log file "{0}" from archive'.format(filename) + + self.wait_server_wal_exists(replica.data_dir, wal_dir, filename) + tailer = tail_file(os.path.join(replica.logs_dir, 'postgresql.log')) tailer.wait(contains=prefetch_line) tailer.wait(contains=restored_line) @@ -2488,6 +2486,9 @@ def test_archive_show_partial_files_handling(self): self.switch_wal_segment(node) + self.wait_instance_wal_exists(backup_dir, 'node', + f"{filename}.gz") + os.rename( os.path.join(wals_dir, filename), os.path.join(wals_dir, '{0}.part'.format(filename))) diff --git a/tests/auth_test.py b/tests/auth_test.py index 32cabc4a1..eaef4e582 100644 --- a/tests/auth_test.py +++ b/tests/auth_test.py @@ -174,51 +174,36 @@ def test_backup_via_unprivileged_user(self): class AuthTest(unittest.TestCase): pb = None node = None + test_path = None - # TODO move to object scope, replace module_name - @classmethod - def setUpClass(cls): + @unittest.skipIf(skip_test, "Module pexpect isn't installed. 
You need to install it.") + def setUp(self): - super(AuthTest, cls).setUpClass() + super().setUp() - cls.pb = ProbackupTest() - cls.backup_dir = os.path.join(cls.pb.tmp_path, module_name, 'backup') + self.pb = ProbackupTest() + self.test_path = os.path.join(self.pb.tmp_path, module_name, self._testMethodName) + self.backup_dir = os.path.join(self.test_path, 'backup') - cls.node = cls.pb.make_simple_node( - base_dir="{}/node".format(module_name), + self.node = self.pb.make_simple_node( + base_dir=os.path.join(self.test_path, 'node'), set_replication=True, initdb_params=['--data-checksums', '--auth-host=md5'] ) - cls.username = cls.pb.get_username() + self.modify_pg_hba(self.node, self.pb.get_username()) - cls.modify_pg_hba(cls.node) - - cls.pb.init_pb(cls.backup_dir) - cls.pb.add_instance(cls.backup_dir, cls.node.name, cls.node) - cls.pb.set_archiving(cls.backup_dir, cls.node.name, cls.node) + self.pb.init_pb(self.backup_dir) + self.pb.add_instance(self.backup_dir, self.node.name, self.node) + self.pb.set_archiving(self.backup_dir, self.node.name, self.node) try: - cls.node.slow_start() + self.node.slow_start() except StartNodeException: raise unittest.skip("Node hasn't started") - if cls.pb.get_version(cls.node) < 100000: - cls.node.safe_psql( - "postgres", - "CREATE ROLE backup WITH LOGIN PASSWORD 'password'; " - "GRANT USAGE ON SCHEMA pg_catalog TO backup; " - "GRANT EXECUTE ON FUNCTION current_setting(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_is_in_recovery() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_start_backup(text, boolean, boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_stop_backup() TO backup; " - "GRANT EXECUTE ON FUNCTION pg_stop_backup(boolean) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_create_restore_point(text) TO backup; " - "GRANT EXECUTE ON FUNCTION pg_switch_xlog() TO backup; " - "GRANT EXECUTE ON FUNCTION txid_current() TO backup; " - "GRANT EXECUTE ON FUNCTION txid_current_snapshot() TO backup; " - "GRANT EXECUTE ON FUNCTION txid_snapshot_xmax(txid_snapshot) TO backup;") - elif cls.pb.get_version(cls.node) < 150000: - cls.node.safe_psql( + version = self.pb.get_version(self.node) + if version < 150000: + self.node.safe_psql( "postgres", "CREATE ROLE backup WITH LOGIN PASSWORD 'password'; " "GRANT USAGE ON SCHEMA pg_catalog TO backup; " @@ -233,7 +218,7 @@ def setUpClass(cls): "GRANT EXECUTE ON FUNCTION txid_current_snapshot() TO backup; " "GRANT EXECUTE ON FUNCTION txid_snapshot_xmax(txid_snapshot) TO backup;") else: - cls.node.safe_psql( + self.node.safe_psql( "postgres", "CREATE ROLE backup WITH LOGIN PASSWORD 'password'; " "GRANT USAGE ON SCHEMA pg_catalog TO backup; " @@ -247,16 +232,29 @@ def setUpClass(cls): "GRANT EXECUTE ON FUNCTION txid_current_snapshot() TO backup; " "GRANT EXECUTE ON FUNCTION txid_snapshot_xmax(txid_snapshot) TO backup;") - cls.pgpass_file = os.path.join(os.path.expanduser('~'), '.pgpass') - - # TODO move to object scope, replace module_name - @classmethod - def tearDownClass(cls): - cls.node.cleanup() - cls.pb.del_test_dir(module_name, '') + if version >= 150000: + home_dir = os.path.join(self.test_path, "home") + os.makedirs(home_dir, exist_ok=True) + self.pb.test_env['HOME'] = home_dir + self.pgpass_file = os.path.join(home_dir, '.pgpass') + self.pgpass_file_lock = None + else: + # before PGv15 only true home dir were inspected. + # Since we can't have separate file per test, we have to serialize + # tests. 
+ self.pgpass_file = os.path.join(os.path.expanduser('~'), '.pgpass') + self.pgpass_file_lock = self.pgpass_file + '~probackup_test_lock' + # have to lock pgpass by creating the lock file in exclusive mode + for i in range(120): + try: + open(self.pgpass_file_lock, "x").close() + except FileExistsError: + time.sleep(1) + else: + break + else: + raise TimeoutError("can't create ~/.pgpass~probackup_test_lock for 120 seconds") - @unittest.skipIf(skip_test, "Module pexpect isn't installed. You need to install it.") - def setUp(self): self.pb_cmd = ['backup', '-B', self.backup_dir, '--instance', self.node.name, @@ -268,6 +266,19 @@ def setUp(self): ] def tearDown(self): + if (self.pgpass_file_lock + and hasattr(self, "pgpass_line") + and os.path.exists(self.pgpass_file)): + with open(self.pgpass_file, 'r', encoding="utf-8") as fl: + lines = fl.readlines() + if self.pgpass_line in lines: + lines.remove(self.pgpass_line) + if len(lines) == 0: + os.remove(self.pgpass_file) + else: + with open(self.pgpass_file, 'w', encoding="utf-8") as fl: + fl.writelines(lines) + if "PGPASSWORD" in self.pb.test_env.keys(): del self.pb.test_env["PGPASSWORD"] @@ -279,6 +290,10 @@ def tearDown(self): except OSError: pass + test_path = os.path.join(self.pb.tmp_path, module_name) + self.node.cleanup() + self.pb.del_test_dir(test_path, self._testMethodName) + def test_empty_password(self): """ Test case: PGPB_AUTH03 - zero password length """ try: @@ -313,7 +328,7 @@ def test_ctrl_c_event(self): def test_pgpassfile_env(self): """ Test case: PGPB_AUTH06 - set environment var PGPASSFILE """ - path = os.path.join(self.pb.tmp_path, module_name, 'pgpass.conf') + path = os.path.join(self.test_path, 'pgpass.conf') line = ":".join(['127.0.0.1', str(self.node.port), 'postgres', 'backup', 'password']) self.create_pgpass(path, line) self.pb.test_env["PGPASSFILE"] = path @@ -367,7 +382,7 @@ def run_pb_with_auth(self, password=None, add_args = [], kill=False): @classmethod - def modify_pg_hba(cls, node): + def modify_pg_hba(self, node, username): """ Description: Add trust authentication for user postgres. Needed to add a new role and set grants.
@@ -378,11 +393,12 @@ def modify_pg_hba(cls, node): with open(hba_conf, 'r+') as fio: data = fio.read() fio.seek(0) - fio.write('host\tall\t%s\t127.0.0.1/0\ttrust\n%s' % (cls.username, data)) + fio.write('host\tall\t%s\t127.0.0.1/0\ttrust\n%s' % (username, data)) def create_pgpass(self, path, line): + self.pgpass_line = line+"\n" with open(path, 'w') as passfile: # host:port:db:username:password - passfile.write(line) + passfile.write(self.pgpass_line) os.chmod(path, 0o600) diff --git a/tests/backup_test.py b/tests/backup_test.py index dc60228b5..5e4e4fc32 100644 --- a/tests/backup_test.py +++ b/tests/backup_test.py @@ -4,7 +4,6 @@ from time import sleep, time from .helpers.ptrack_helpers import base36enc, ProbackupTest, ProbackupException import shutil -from distutils.dir_util import copy_tree from testgres import ProcessType, QueryException import subprocess @@ -2330,14 +2329,13 @@ def test_backup_with_less_privileges_role(self): # bgwriter_pid = node.auxiliary_pids[ProcessType.BackgroundWriter][0] # gdb_checkpointer = self.gdb_attach(bgwriter_pid) - copy_tree( - os.path.join(backup_dir, 'wal', 'node'), - os.path.join(backup_dir, 'wal', 'replica')) - replica.slow_start(replica=True) - # self.switch_wal_segment(node) - # self.switch_wal_segment(node) + # make sure replica will archive wal segment with backup start point + lsn = self.switch_wal_segment(node, and_tx=True) + replica.poll_query_until(f"select pg_last_wal_replay_lsn() >= '{lsn}'") + replica.execute('CHECKPOINT') + replica.poll_query_until(f"select redo_lsn >= '{lsn}' from pg_control_checkpoint()") self.backup_node( backup_dir, 'replica', replica, @@ -3220,6 +3218,10 @@ def test_missing_replication_permission_1(self): replica.promote() + # Wait for replica to catch up with master before promoting + # to ensure 'backup' role is replicated + self.wait_until_replica_catch_with_master(node, replica) + # PAGE output = self.backup_node( backup_dir, 'node', replica, backup_type='page', diff --git a/tests/catchup_test.py b/tests/catchup_test.py index cf8388dd2..2997a9c4d 100644 --- a/tests/catchup_test.py +++ b/tests/catchup_test.py @@ -1586,18 +1586,22 @@ def test_dry_run_catchup_delta(self): # Cleanup src_pg.stop() + # Skip test, because it's PGDATA is global variable and has impact for other tests + # e.g. test_checkdb_amcheck_only_sanity + @unittest.skip("skip") def test_pgdata_is_ignored(self): """ In catchup we still allow PGDATA to be set either from command line or from the env var. 
        This tests that PGDATA is actually ignored and --source-pgdata is used instead
        """
-        node = self.make_simple_node('node',
+        node = self.make_simple_node(
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
             set_replication = True
             )
         node.slow_start()

         # do full catchup
-        dest = self.make_empty_node('dst')
+        dest = self.make_empty_node(base_dir=os.path.join(self.module_name, self.fname, 'dst'))
         self.catchup_node(
             backup_mode = 'FULL',
             source_pgdata = node.data_dir,
@@ -1612,7 +1616,7 @@ def test_pgdata_is_ignored(self):

         os.environ['PGDATA']='xxx'

-        dest2 = self.make_empty_node('dst')
+        dest2 = self.make_empty_node(base_dir=os.path.join(self.module_name, self.fname, 'dst'))
         self.catchup_node(
             backup_mode = 'FULL',
             source_pgdata = node.data_dir,
diff --git a/tests/cfs_backup_test.py b/tests/cfs_backup_test.py
index fb4a6c6b8..fed60643e 100644
--- a/tests/cfs_backup_test.py
+++ b/tests/cfs_backup_test.py
@@ -1104,7 +1104,7 @@ def test_delete_random_data_file_from_tablespace_dir(self):
         list_data_files = find_by_pattern(
             [self.get_tblspace_path(self.node, tblspace_name)],
-            '^.*/\d+$')
+            r'^.*/\d+$')
         self.assertTrue(
             list_data_files,
             "ERROR: Data files not found in the tablespace dir"
@@ -1166,7 +1166,7 @@ def test_broken_random_data_file_into_tablespace_dir(self):
         list_data_files = find_by_pattern(
             [self.get_tblspace_path(self.node, tblspace_name)],
-            '^.*/\d+$')
+            r'^.*/\d+$')
         self.assertTrue(
             list_data_files,
             "ERROR: Data files not found in the tablespace dir"
diff --git a/tests/checkdb_test.py b/tests/checkdb_test.py
index eb46aea19..f5c0f6895 100644
--- a/tests/checkdb_test.py
+++ b/tests/checkdb_test.py
@@ -18,7 +18,7 @@ def test_checkdb_amcheck_only_sanity(self):
         backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
         node = self.make_simple_node(
-            base_dir="{0}/{1}/node".format(self.module_name, self.fname),
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
             set_replication=True,
             initdb_params=['--data-checksums'])
@@ -227,7 +227,7 @@ def test_basic_checkdb_amcheck_only_sanity(self):
         """"""
         backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
         node = self.make_simple_node(
-            base_dir="{0}/{1}/node".format(self.module_name, self.fname),
+            base_dir=os.path.join(self.module_name, self.fname, 'node'),
             set_replication=True,
             initdb_params=['--data-checksums'])
diff --git a/tests/delete_test.py b/tests/delete_test.py
index 10100887d..40ab74c87 100644
--- a/tests/delete_test.py
+++ b/tests/delete_test.py
@@ -486,7 +486,7 @@ def test_delete_interleaved_incremental_chains(self):

     # @unittest.skip("skip")
     def test_delete_multiple_descendants(self):
-        """
+        r"""
         PAGEb3
           |       PAGEa3
         PAGEb2   /
@@ -654,7 +654,7 @@ def test_delete_multiple_descendants(self):

     # @unittest.skip("skip")
     def test_delete_multiple_descendants_dry_run(self):
-        """
+        r"""
         PAGEa3
         PAGEa2    /
            \     /
diff --git a/tests/false_positive_test.py b/tests/false_positive_test.py
index ea82cb18f..77f2226c2 100644
--- a/tests/false_positive_test.py
+++ b/tests/false_positive_test.py
@@ -214,6 +214,7 @@ def test_recovery_target_lsn_backup_victim(self):
         @y.sokolov: looks like this test should pass.
        So I commented 'expectedFailure'
        """
+        self._check_gdb_flag_or_skip_test()
         backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
         node = self.make_simple_node(
             base_dir=os.path.join(self.module_name, self.fname, 'node'),
diff --git a/tests/helpers/ptrack_helpers.py b/tests/helpers/ptrack_helpers.py
index 27d982856..76f9ea3ab 100644
--- a/tests/helpers/ptrack_helpers.py
+++ b/tests/helpers/ptrack_helpers.py
@@ -6,13 +6,14 @@
 import signal
 import subprocess
 import shutil
+from time import sleep
 import six
 import testgres
 import hashlib
 import re
 import getpass
 import select
-from time import sleep
+import time
 import re
 import json
 import random
@@ -150,8 +151,9 @@ def __str__(self):

 class PostgresNodeExtended(testgres.PostgresNode):

-    def __init__(self, base_dir=None, *args, **kwargs):
-        super(PostgresNodeExtended, self).__init__(name='test', base_dir=base_dir, *args, **kwargs)
+    def __init__(self, base_dir=None, port=None, bin_dir=None, *args, **kwargs):
+        assert port is None or type(port) == int
+        super(PostgresNodeExtended, self).__init__(name='test', base_dir=base_dir, port=port, bin_dir=bin_dir, *args, **kwargs)
         self.is_started = False

     def slow_start(self, replica=False):
@@ -414,25 +416,28 @@ def is_test_result_ok(test_case):
     #
     # 2. python versions 3.11+ mixin, verified on 3.11, taken from: https://stackoverflow.com/a/39606065

-    if not isinstance(test_case, unittest.TestCase):
-        raise AssertionError("test_case is not instance of unittest.TestCase")
-
-    if hasattr(test_case, '_outcome'):  # Python 3.4+
-        if hasattr(test_case._outcome, 'errors'):
-            # Python 3.4 - 3.10 (These two methods have no side effects)
-            result = test_case.defaultTestResult()
-            test_case._feedErrorsToResult(result, test_case._outcome.errors)
-        else:
-            # Python 3.11+ and pytest 5.3.5+
-            result = test_case._outcome.result
-            if not hasattr(result, 'errors'):
-                result.errors = []
-            if not hasattr(result, 'failures'):
-                result.failures = []
-    else:  # Python 2.7, 3.0-3.3
-        result = getattr(test_case, '_outcomeForDoCleanups', test_case._resultForDoCleanups)
+    if hasattr(test_case._outcome, 'errors'):
+        # Python 3.4 - 3.10 (these two methods have no side effects)
+        result = test_case.defaultTestResult()
+        test_case._feedErrorsToResult(result, test_case._outcome.errors)
+    else:
+        # Python 3.11+ and pytest 5.3.5+
+        result = test_case._outcome.result
+        if not hasattr(result, 'errors'):
+            result.errors = []
+        if not hasattr(result, 'failures'):
+            result.failures = []

     ok = all(test != test_case for test, text in result.errors + result.failures)
+    # check subtests as well
+    ok = ok and all(getattr(test, 'test_case', None) != test_case
+                    for test, text in result.errors + result.failures)
+
+    # for pytest 8+: if the test was successful, _excinfo is None,
+    # otherwise it is a non-empty list
+    if hasattr(result, '_excinfo'):
+        if result._excinfo is not None and len(result._excinfo) > 0:
+            ok = False

     return ok
@@ -475,12 +480,14 @@ def pg_config_version(self):

     def make_empty_node(
             self,
-            base_dir=None):
+            base_dir=None,
+            port=None,
+            bin_dir=None):
         real_base_dir = os.path.join(self.tmp_path, base_dir)
         shutil.rmtree(real_base_dir, ignore_errors=True)
         os.makedirs(real_base_dir)

-        node = PostgresNodeExtended(base_dir=real_base_dir)
+        node = PostgresNodeExtended(base_dir=real_base_dir, port=port, bin_dir=bin_dir)
         node.should_rm_dirs = True
         self.nodes_to_cleanup.append(node)
@@ -489,12 +496,14 @@ def make_simple_node(
            self,
            base_dir=None,
+            port=None,
+            bin_dir=None,
            set_replication=False,
            ptrack_enable=False,
            initdb_params=[],
            pg_options={}):

-        node = self.make_empty_node(base_dir)
+        node = self.make_empty_node(base_dir, port=port, bin_dir=bin_dir)
         node.init(
             initdb_params=initdb_params, allow_streaming=set_replication)
@@ -910,6 +919,24 @@ def get_backup_filelist_diff(self, filelist_A, filelist_B):

         return filelist_diff

+    def wait_instance_wal_exists(self, backup_dir, instance, file, timeout=300):
+        """Wait until the given WAL segment appears in the instance WAL archive"""
+        start = time.time()
+        fl = f'{backup_dir}/wal/{instance}/{file}'
+        while time.time() - start < timeout:
+            if os.path.exists(fl):
+                break
+            time.sleep(0.25)
+
+    def wait_server_wal_exists(self, data_dir, wal_dir, file, timeout=300):
+        """Wait until the given WAL segment appears in the server WAL directory"""
+        start = time.time()
+        fl = f'{data_dir}/{wal_dir}/{file}'
+        while time.time() - start < timeout:
+            if os.path.exists(fl):
+                return
+            time.sleep(0.25)
+
     # used for partial restore
     def truncate_every_file_in_dir(self, path):
         for file in os.listdir(path):
@@ -1701,29 +1728,26 @@ def version_to_num(self, version):
             num = num * 100 + int(re.sub(r"[^\d]", "", part))
         return num

-    def switch_wal_segment(self, node):
+    def switch_wal_segment(self, node, sleep_seconds=1, and_tx=False):
         """
-        Execute pg_switch_wal/xlog() in given node
+        Execute pg_switch_wal() in the given node and return the resulting LSN

         Args:
             node: an instance of PostgresNode or NodeConnection class
         """
         if isinstance(node, testgres.PostgresNode):
-            if self.version_to_num(
-                    node.safe_psql('postgres', 'show server_version').decode('utf-8')
-                    ) >= self.version_to_num('10.0'):
-                node.safe_psql('postgres', 'select pg_switch_wal()')
-            else:
-                node.safe_psql('postgres', 'select pg_switch_xlog()')
+            with node.connect('postgres') as con:
+                if and_tx:
+                    con.execute('select txid_current()')
+                    con.execute('select pg_switch_wal()')
+                lsn = con.execute('select pg_switch_wal()')[0][0]
         else:
-            if self.version_to_num(
-                    node.execute('show server_version')[0][0]
-                    ) >= self.version_to_num('10.0'):
-                node.execute('select pg_switch_wal()')
-            else:
-                node.execute('select pg_switch_xlog()')
+            node.execute('select pg_switch_wal()')
+            lsn = node.execute('select pg_switch_wal()')[0][0]

-        sleep(1)
+        if sleep_seconds > 0:
+            sleep(sleep_seconds)
+        return lsn

     def wait_until_replica_catch_with_master(self, master, replica):
diff --git a/tests/incr_restore_test.py b/tests/incr_restore_test.py
index 6a2164098..6fb9cd0ec 100644
--- a/tests/incr_restore_test.py
+++ b/tests/incr_restore_test.py
@@ -1592,17 +1592,12 @@ def test_incr_checksum_long_xact(self):
             'select count(*) from t1').decode('utf-8').rstrip(),
             '1')

-    # @unittest.skip("skip")
-    # @unittest.expectedFailure
-    # This test will pass with Enterprise
-    # because it has checksums enabled by default
-    @unittest.skipIf(ProbackupTest.enterprise, 'skip')
     def test_incr_lsn_long_xact_1(self):
         """
         """
         node = self.make_simple_node(
             base_dir=os.path.join(self.module_name, self.fname, 'node'),
-            set_replication=True)
+            set_replication=True, initdb_params=['--no-data-checksums'])

         backup_dir = os.path.join(self.tmp_path, self.module_name, self.fname, 'backup')
         self.init_pb(backup_dir)
@@ -2046,8 +2041,9 @@ def test_incremental_partial_restore_exclude_lsn(self):
             base_dir=os.path.join(self.module_name, self.fname, 'node1'))
         node1.cleanup()

-        node2 = self.make_simple_node(
-            base_dir=os.path.join(self.module_name, self.fname, 'node2'))
+        node2 = self.make_empty_node(
+            base_dir=os.path.join(self.module_name, self.fname,
+                'node2'), port=node.port)
+        assert node2.port == node.port
         node2.cleanup()

         # restore some data into node2
@@ -2063,7 +2059,7 @@ def test_incremental_partial_restore_exclude_lsn(self):
         pgdata1 = self.pgdata_content(node1.data_dir)

         # partial incremental restore backup into node2
-        node2.port = node.port
+        # node2.port = node.port
         node2.slow_start()
         node2.stop()
         self.restore_node(
@@ -2433,7 +2429,7 @@ def test_incr_restore_issue_313(self):
         """
         Check that failed incremental restore can be restarted
         """
-        self._check_gdb_flag_or_skip_test
+        self._check_gdb_flag_or_skip_test()
         node = self.make_simple_node('node',
             set_replication=True,
             initdb_params=['--data-checksums'])
diff --git a/tests/merge_test.py b/tests/merge_test.py
index 1d40af7f7..82af36272 100644
--- a/tests/merge_test.py
+++ b/tests/merge_test.py
@@ -1950,7 +1950,7 @@ def test_merge_backup_from_future(self):

     # @unittest.skip("skip")
     def test_merge_multiple_descendants(self):
-        """
+        r"""
         PAGEb3
           |       PAGEa3
         PAGEb2   /
diff --git a/tests/ptrack_test.py b/tests/ptrack_test.py
index 7b5bc416b..38317ea2c 100644
--- a/tests/ptrack_test.py
+++ b/tests/ptrack_test.py
@@ -1781,7 +1781,9 @@ def test_alter_database_set_tablespace_ptrack(self):

         # RESTORE
         node_restored = self.make_simple_node(
-            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'))
+            base_dir=os.path.join(self.module_name, self.fname, 'node_restored'),
+            port=node.port)
+        assert node_restored.port == node.port
         node_restored.cleanup()
         self.restore_node(
             backup_dir, 'node',
@@ -1799,7 +1801,6 @@ def test_alter_database_set_tablespace_ptrack(self):
         self.compare_pgdata(pgdata, pgdata_restored)

         # START RESTORED NODE
-        node_restored.port = node.port
         node_restored.slow_start()

     # @unittest.skip("skip")
diff --git a/tests/replica_test.py b/tests/replica_test.py
index 17fc5a823..e96e0dfa7 100644
--- a/tests/replica_test.py
+++ b/tests/replica_test.py
@@ -4,7 +4,6 @@
 from datetime import datetime, timedelta
 import subprocess
 import time
-from distutils.dir_util import copy_tree
 from testgres import ProcessType
 from time import sleep
@@ -718,10 +717,6 @@ def test_replica_stop_lsn_null_offset_next_record(self):
         self.set_replica(master, replica, synchronous=True)
         self.set_archiving(backup_dir, 'replica', replica, replica=True)

-        copy_tree(
-            os.path.join(backup_dir, 'wal', 'master'),
-            os.path.join(backup_dir, 'wal', 'replica'))
-
         replica.slow_start(replica=True)

         self.switch_wal_segment(master)
@@ -980,10 +975,6 @@ def test_replica_toast(self):
         self.set_replica(master, replica, synchronous=True)
         self.set_archiving(backup_dir, 'replica', replica, replica=True)

-        copy_tree(
-            os.path.join(backup_dir, 'wal', 'master'),
-            os.path.join(backup_dir, 'wal', 'replica'))
-
         replica.slow_start(replica=True)

         self.switch_wal_segment(master)
diff --git a/tests/requirements.txt b/tests/requirements.txt
index e2ac18bea..0a0331b6b 100644
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -5,7 +5,7 @@
 # git+https://github.com/postgrespro/testgres.git@
 # 3. From a local directory
 # /path/to/local/directory/testgres
-git+https://github.com/postgrespro/testgres.git@archive-command-exec#egg=testgres-pg_probackup2&subdirectory=testgres/plugins/pg_probackup2
+testgres==1.12.0
 allure-pytest
 deprecation
 pexpect
diff --git a/tests/restore_test.py b/tests/restore_test.py
index b6664252e..7b3b67555 100644
--- a/tests/restore_test.py
+++ b/tests/restore_test.py
@@ -3866,7 +3866,7 @@ def test_restore_issue_313(self):
         """
         Check that a partially restored PostgreSQL instance cannot be started
         """
-        self._check_gdb_flag_or_skip_test
+        self._check_gdb_flag_or_skip_test()
         node = self.make_simple_node('node',
             set_replication=True,
             initdb_params=['--data-checksums'])
diff --git a/tests/retention_test.py b/tests/retention_test.py
index 88432a00f..0c426416e 100644
--- a/tests/retention_test.py
+++ b/tests/retention_test.py
@@ -3,7 +3,6 @@
 from datetime import datetime, timedelta
 from .helpers.ptrack_helpers import ProbackupTest, ProbackupException
 from time import sleep
-from distutils.dir_util import copy_tree

 class RetentionTest(ProbackupTest, unittest.TestCase):
@@ -715,7 +714,7 @@ def test_window_merge_interleaved_incremental_chains_1(self):

     # @unittest.skip("skip")
     def test_basic_window_merge_multiple_descendants(self):
-        """
+        r"""
         PAGEb3
           |       PAGEa3
 -----------------------------retention window
@@ -971,7 +970,7 @@ def test_basic_window_merge_multiple_descendants(self):

     # @unittest.skip("skip")
     def test_basic_window_merge_multiple_descendants_1(self):
-        """
+        r"""
         PAGEb3
           |       PAGEa3
 -----------------------------retention window
diff --git a/tests/set_backup_test.py b/tests/set_backup_test.py
index 31334cfba..cda29b7a7 100644
--- a/tests/set_backup_test.py
+++ b/tests/set_backup_test.py
@@ -339,6 +339,9 @@ def test_wal_retention_and_pinning_1(self):

         node.pgbench_init(scale=2)

+        self.wait_instance_wal_exists(backup_dir, 'node',
+                                      "000000010000000000000004")
+
         # Purge backups
         out = self.delete_expired(
             backup_dir, 'node',
diff --git a/tests/validate_test.py b/tests/validate_test.py
index 4ff44941f..e8c6587f6 100644
--- a/tests/validate_test.py
+++ b/tests/validate_test.py
@@ -1711,7 +1711,7 @@ def test_validate_corrupt_wal_between_backups(self):
         wals_dir = os.path.join(backup_dir, 'wal', 'node')
         with open(os.path.join(wals_dir, walfile), "rb+", 0) as f:
             f.seek(9000)
-            f.write(b"b")
+            f.write(b"Because the answer is 42")
             f.flush()
             f.close()
@@ -1722,20 +1722,17 @@
                 'node',
                 backup_id,
                 options=[
-                    "--xid={0}".format(target_xid), "-j", "4"])
+                    f"--xid={target_xid}", "-j", "4"])
             self.assertEqual(
                 1, 0,
                 "Expecting Error because of wal segments corruption.\n"
-                " Output: {0} \n CMD: {1}".format(
-                    repr(self.output), self.cmd))
+                f" Output: {repr(self.output)} \n CMD: {self.cmd}")
         except ProbackupException as e:
             self.assertTrue(
                 'ERROR: Not enough WAL records to xid' in e.message and
                 'WARNING: Recovery can be done up to time' in e.message and
-                "ERROR: Not enough WAL records to xid {0}\n".format(
-                    target_xid),
-                '\n Unexpected Error Message: {0}\n CMD: {1}'.format(
-                    repr(e.message), self.cmd))
+                f"ERROR: Not enough WAL records to xid {target_xid}\n" in e.message,
+                f'\n Unexpected Error Message: {repr(e.message)}\n CMD: {self.cmd}')

             self.assertEqual(
                 'OK',
@@ -3397,6 +3394,10 @@ def test_corrupt_pg_control_via_resetxlog(self):
             os.path.join(
                 backup_dir, 'backups', 'node',
                 backup_id, 'database', wal_dir, 'archive_status'))
+        os.mkdir(
+            os.path.join(
+                backup_dir, 'backups', 'node',
+                backup_id, 'database', wal_dir, 'summaries'))
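+        # The extra 'summaries' directory mirrors the 'archive_status' handling
+        # above: pg_resetwal presumably expects the standard pg_wal layout, and
+        # pg_wal/summaries was introduced with WAL summarization in PostgreSQL 17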
+
         pg_control_path = os.path.join(
             backup_dir, 'backups', 'node',
             backup_id, 'database', 'global', 'pg_control')
diff --git a/travis/install.sh b/travis/install.sh
index 43ada47b7..5f85b0cb3 100755
--- a/travis/install.sh
+++ b/travis/install.sh
@@ -39,6 +39,11 @@ cd postgres # Go to postgres dir
 if [ "$PG_PROBACKUP_PTRACK" = "ON" ]; then
     git apply -3 contrib/ptrack/patches/${PTRACK_PATCH_PG_BRANCH}-ptrack-core.diff
 fi
+
+if [ "$PG_BRANCH" = "REL_18_STABLE" ]; then
+    git apply -3 ../patches/${PG_BRANCH}_pg_probackup.patch
+fi
+
 CC='ccache gcc' CFLAGS="-Og" ./configure --prefix=$PGHOME \
     --cache-file=~/.ccache/configure-cache \
     --enable-debug --enable-cassert --enable-depend \
@@ -63,4 +68,4 @@ if [ ! -d "contrib/amcheck" ]; then
     make -C contrib/amcheck install
 fi

-pip3 install testgres
\ No newline at end of file
+pip3 install -r ../tests/requirements.txt
\ No newline at end of file
diff --git a/travis/script.sh b/travis/script.sh
index 31ef09726..daf4a36e6 100755
--- a/travis/script.sh
+++ b/travis/script.sh
@@ -21,6 +21,10 @@ if [ -z ${PGPROBACKUP_GDB+x} ]; then
     PGPROBACKUP_GDB=ON
 fi

+if [ -z ${PG_PROBACKUP_PTRACK+x} ]; then
+    PG_PROBACKUP_PTRACK=ON
+fi
+
 echo "############### Testing:"
 echo PG_PROBACKUP_PARANOIA=${PG_PROBACKUP_PARANOIA}
 echo ARCHIVE_COMPRESSION=${ARCHIVE_COMPRESSION}
@@ -30,12 +34,20 @@ echo PGPROBACKUP_SSH_REMOTE=${PGPROBACKUP_SSH_REMOTE}
 echo PGPROBACKUP_GDB=${PGPROBACKUP_GDB}
 echo PG_PROBACKUP_PTRACK=${PG_PROBACKUP_PTRACK}

+# Run the full test suite only if FULL_TESTS is set, e.g. for the master branch
+if [ "$MODE" = "full" ] && [ -z "${FULL_TESTS}" ]; then
+    exit
+fi
+
 if [ "$MODE" = "basic" ]; then
     export PG_PROBACKUP_TEST_BASIC=ON
     echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC}
-    python3 -m unittest -v tests
-    python3 -m unittest -v tests.init_test
+    python3 -m pytest -v -n4 -k "test_basic"
+    python3 -m pytest -v -n4 -k "init_test.py"
+elif [ "$MODE" = "full" ]; then
+    echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC}
+    python3 -m pytest -v -n4
 else
     echo PG_PROBACKUP_TEST_BASIC=${PG_PROBACKUP_TEST_BASIC}
-    python3 -m unittest -v tests.$MODE
+    python3 -m pytest -v -n4 -k "$MODE"
 fi
diff --git a/version.sh b/version.sh
new file mode 100755
index 000000000..0e271bcd4
--- /dev/null
+++ b/version.sh
@@ -0,0 +1,67 @@
+#!/usr/bin/env bash
+
+# Output the program version based on the state of the repository (source code
+# and tags).
+# Tags must be in the form of
+#     x.y.z, e.g. 3.1.0
+# or
+#     x.y.z-<prerelease>, e.g. 3.1.0-beta2
+# If the tag consists only of the version number (i.e. it doesn't contain a
+# prerelease part) and this number is equal to the version in the header file,
+# then the version is considered a release and no additional version data is
+# appended to it by default (but can be forced by the "-p" and "-r" command
+# line arguments). Otherwise, provided Git is available, the prerelease part
+# and the Git revision are automatically added to the version.
+
+cd `dirname "$0"`
+
+while getopts p:r:sv: opt; do
+    case $opt in
+        p) prerelease=$OPTARG;;
+        r) revision=$OPTARG;;
+        s) ID=semver;;
+        v) version=$OPTARG;;
+    esac
+done
+
+if [ -z "$ID" ]; then
+    . /etc/os-release
+fi
+case $ID in
+    altlinux | astra | debian | ubuntu)
+        # The only scheme that properly sorts both the metadata and prerelease
+        # fields is the one where both are specified after a '~'
+        presep='~'; metasep='~';;
+    centos | opensuse-leap | redos)
+        presep='~'; metasep=^;;
+    *) # semver
+        presep=-; metasep=+
+esac
+
+if [ -z "$version" ]; then
+    version=`grep '#define PROGRAM_VERSION' src/pg_probackup.h | cut -f 2 | tr -d '"'`
+fi
+
+if which git >/dev/null 2>&1; then
+    tag=`git describe --tags 2> /dev/null`
+    # A shallow-cloned repository may not have tags
+    if [ -z "$prerelease" -a "$tag" ]; then
+        f1=`cut -d - -f 1 <<< $tag`
+        f2=`cut -d - -f 2 <<< $tag`
+        # Append the prerelease part only if the tag refers to the current version.
+        # Assume that the prerelease part contains letters.
+        if [ "$f1" = "$version" ] && expr "$f2" : "[1-9]*[a-zA-Z]" 1>/dev/null; then
+            prerelease="$f2"
+        fi
+    fi
+    if [ -z "$revision" ]; then
+        revision=g`git rev-parse --short HEAD`
+    fi
+fi
+
+out=$version${prerelease:+$presep$prerelease}
+if [ "$tag" != "$version" -a "$revision" ]; then
+    out=$out$metasep`date +%Y%m%d`$revision
+fi
+
+echo "$out"
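
Worked example (hypothetical values, following the logic of the script above): with PROGRAM_VERSION set to 3.1.0 in src/pg_probackup.h and HEAD tagged exactly 3.1.0, the script prints just 3.1.0, a release. With the tag 3.1.0-beta2, a short revision 1a2b3c4 and a build date of 2024-01-01, it prints 3.1.0~beta2~20240101g1a2b3c4 on Debian/ALT flavours, 3.1.0~beta2^20240101g1a2b3c4 on CentOS/RedOS, and 3.1.0-beta2+20240101g1a2b3c4 under semver (-s); the '~' separators make prerelease builds sort before the corresponding release in package version comparisons.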