diff --git a/.github/actions/save_logs_and_results/action.yml b/.github/actions/save_logs_and_results/action.yml
index 0f238835d19..b344c68f2ef 100644
--- a/.github/actions/save_logs_and_results/action.yml
+++ b/.github/actions/save_logs_and_results/action.yml
@@ -6,7 +6,7 @@ inputs:
 runs:
   using: composite
   steps:
-  - uses: actions/upload-artifact@v3.1.1
+  - uses: actions/upload-artifact@v4.6.0
     name: Upload logs
     with:
       name: ${{ inputs.folder }}
diff --git a/.github/actions/setup_extension/action.yml b/.github/actions/setup_extension/action.yml
index 96b408e7e43..33129f17de6 100644
--- a/.github/actions/setup_extension/action.yml
+++ b/.github/actions/setup_extension/action.yml
@@ -17,7 +17,7 @@ runs:
         echo "PG_MAJOR=${{ inputs.pg_major }}" >> $GITHUB_ENV
       fi
     shell: bash
-  - uses: actions/download-artifact@v3.0.1
+  - uses: actions/download-artifact@v4.1.8
     with:
       name: build-${{ env.PG_MAJOR }}
   - name: Install Extension
diff --git a/.github/actions/upload_coverage/action.yml b/.github/actions/upload_coverage/action.yml
index 0b5f581a6a4..ba80ba63afa 100644
--- a/.github/actions/upload_coverage/action.yml
+++ b/.github/actions/upload_coverage/action.yml
@@ -21,7 +21,7 @@ runs:
       mkdir -p /tmp/codeclimate
       cc-test-reporter format-coverage -t lcov -o /tmp/codeclimate/${{ inputs.flags }}.json lcov.info
     shell: bash
-  - uses: actions/upload-artifact@v3.1.1
+  - uses: actions/upload-artifact@v4.6.0
     with:
       path: "/tmp/codeclimate/*.json"
-      name: codeclimate
+      name: codeclimate-${{ inputs.flags }}
diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml
index d149ff650a8..32c761766d5 100644
--- a/.github/workflows/build_and_test.yml
+++ b/.github/workflows/build_and_test.yml
@@ -48,7 +48,7 @@ jobs:
       image: ${{ needs.params.outputs.build_image_name }}:${{ needs.params.outputs.sql_snapshot_pg_version }}${{ needs.params.outputs.image_suffix }}
       options: --user root
     steps:
-    - uses: actions/checkout@v3.5.0
+    - uses: actions/checkout@v4
     - name: Check Snapshots
      run: |
        git config --global --add safe.directory ${GITHUB_WORKSPACE}
@@ -125,7 +125,7 @@ jobs:
     - name: Build
       run: "./ci/build-citus.sh"
       shell: bash
-    - uses: actions/upload-artifact@v3.1.1
+    - uses: actions/upload-artifact@v4.6.0
       with:
         name: build-${{ env.PG_MAJOR }}
         path: |-
@@ -284,10 +284,12 @@ jobs:
           check-arbitrary-configs parallel=4 CONFIGS=$TESTS
     - uses: "./.github/actions/save_logs_and_results"
       if: always()
+      with:
+        folder: ${{ env.PG_MAJOR }}_arbitrary_configs_${{ matrix.parallel }}
     - uses: "./.github/actions/upload_coverage"
       if: always()
       with:
-        flags: ${{ env.pg_major }}_upgrade
+        flags: ${{ env.PG_MAJOR }}_arbitrary_configs_${{ matrix.parallel }}
         codecov_token: ${{ secrets.CODECOV_TOKEN }}
   test-pg-upgrade:
     name: PG${{ matrix.old_pg_major }}-PG${{ matrix.new_pg_major }} - check-pg-upgrade
@@ -335,6 +337,8 @@ jobs:
         if: failure()
     - uses: "./.github/actions/save_logs_and_results"
       if: always()
+      with:
+        folder: ${{ env.old_pg_major }}_${{ env.new_pg_major }}_upgrade
     - uses: "./.github/actions/upload_coverage"
       if: always()
       with:
@@ -380,10 +384,12 @@ jobs:
         done;
     - uses: "./.github/actions/save_logs_and_results"
       if: always()
+      with:
+        folder: ${{ env.PG_MAJOR }}_citus_upgrade
     - uses: "./.github/actions/upload_coverage"
       if: always()
       with:
-        flags: ${{ env.pg_major }}_upgrade
+        flags: ${{ env.PG_MAJOR }}_citus_upgrade
         codecov_token: ${{ secrets.CODECOV_TOKEN }}
   upload-coverage:
     if: always()
@@ -399,10 +405,11 @@ jobs:
     - test-citus-upgrade
     - test-pg-upgrade
     steps:
-    - uses: actions/download-artifact@v3.0.1
+    - uses: actions/download-artifact@v4.1.8
       with:
-        name: "codeclimate"
-        path: "codeclimate"
+        pattern: codeclimate*
+        path: codeclimate
+        merge-multiple: true
     - name: Upload coverage results to Code Climate
       run: |-
         cc-test-reporter sum-coverage codeclimate/*.json -o total.json
@@ -516,6 +523,7 @@ jobs:
       matrix: ${{ fromJson(needs.prepare_parallelization_matrix_32.outputs.json) }}
     steps:
     - uses: actions/checkout@v4
+    - uses: actions/download-artifact@v4.1.8
     - uses: "./.github/actions/setup_extension"
     - name: Run minimal tests
       run: |-
diff --git a/.github/workflows/flaky_test_debugging.yml b/.github/workflows/flaky_test_debugging.yml
index 7135f99fa02..812fbe2418a 100644
--- a/.github/workflows/flaky_test_debugging.yml
+++ b/.github/workflows/flaky_test_debugging.yml
@@ -34,7 +34,7 @@ jobs:
           echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
           ./ci/build-citus.sh
         shell: bash
-    - uses: actions/upload-artifact@v3.1.1
+    - uses: actions/upload-artifact@v4.6.0
       with:
         name: build-${{ env.PG_MAJOR }}
         path: |-
diff --git a/.github/workflows/packaging-test-pipelines.yml b/.github/workflows/packaging-test-pipelines.yml
index 7f89b9f834b..db0fd08efd0 100644
--- a/.github/workflows/packaging-test-pipelines.yml
+++ b/.github/workflows/packaging-test-pipelines.yml
@@ -129,7 +129,7 @@ jobs:
 
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       - name: Set pg_config path and python parameters for deb based distros
         run: |
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0ebb6bec835..ee3f2d0a203 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,7 @@
+### citus v13.0.1 (February 4th, 2025) ###
+
+* Drops support for PostgreSQL 14 (#7753)
+
 ### citus v13.0.0 (January 17, 2025) ###
 
 * Adds support for PostgreSQL 17 (#7699, #7661)
diff --git a/src/test/regress/citus_tests/upgrade/citus_upgrade_test.py b/src/test/regress/citus_tests/upgrade/citus_upgrade_test.py
index 1ab44803117..c25a3448238 100755
--- a/src/test/regress/citus_tests/upgrade/citus_upgrade_test.py
+++ b/src/test/regress/citus_tests/upgrade/citus_upgrade_test.py
@@ -62,10 +62,16 @@ def run_citus_upgrade_tests(config, before_upgrade_schedule, after_upgrade_sched
 
     install_citus(config.post_tar_path)
 
+    # disable 2pc recovery for all nodes to work around https://github.com/citusdata/citus/issues/7875
+    disable_2pc_recovery_for_all_nodes(config.bindir, config)
+
     restart_databases(config.bindir, config.datadir, config.mixed_mode, config)
     run_alter_citus(config.bindir, config.mixed_mode, config)
     verify_upgrade(config, config.mixed_mode, config.node_name_to_ports.values())
 
+    # re-enable 2pc recovery for all nodes
+    enable_2pc_recovery_for_all_nodes(config.bindir, config)
+
     run_test_on_coordinator(config, after_upgrade_schedule)
     remove_citus(config.post_tar_path)
 
@@ -146,6 +152,18 @@ def restart_database(pg_path, abs_data_path, node_name, node_ports, logfile_pref
     subprocess.run(command, check=True)
 
 
+def disable_2pc_recovery_for_all_nodes(pg_path, config):
+    for port in config.node_name_to_ports.values():
+        utils.psql(pg_path, port, "ALTER SYSTEM SET citus.recover_2pc_interval TO -1;")
+        utils.psql(pg_path, port, "SELECT pg_reload_conf();")
+
+
+def enable_2pc_recovery_for_all_nodes(pg_path, config):
+    for port in config.node_name_to_ports.values():
+        utils.psql(pg_path, port, "ALTER SYSTEM RESET citus.recover_2pc_interval;")
+        utils.psql(pg_path, port, "SELECT pg_reload_conf();")
+
+
 def run_alter_citus(pg_path, mixed_mode, config):
     for port in config.node_name_to_ports.values():
         if mixed_mode and port in (
diff --git a/src/test/regress/expected/upgrade_pg_dist_cleanup_after_0.out b/src/test/regress/expected/upgrade_pg_dist_cleanup_after_0.out
index d71fad887c9..168c64ccaa3 100644
--- a/src/test/regress/expected/upgrade_pg_dist_cleanup_after_0.out
+++ b/src/test/regress/expected/upgrade_pg_dist_cleanup_after_0.out
@@ -28,3 +28,12 @@ SELECT * FROM pg_dist_cleanup;
 CALL citus_cleanup_orphaned_resources();
 NOTICE:  cleaned up 1 orphaned resources
 DROP TABLE table_with_orphaned_shards;
+-- Re-enable automatic shard cleanup by maintenance daemon as
+-- we have disabled it in upgrade_pg_dist_cleanup_before.sql
+ALTER SYSTEM RESET citus.defer_shard_delete_interval;
+SELECT pg_reload_conf();
+ pg_reload_conf
+---------------------------------------------------------------------
+ t
+(1 row)
+
diff --git a/src/test/regress/expected/upgrade_pg_dist_cleanup_before_0.out b/src/test/regress/expected/upgrade_pg_dist_cleanup_before_0.out
index a0cf9ceb1ea..dd6c8868e32 100644
--- a/src/test/regress/expected/upgrade_pg_dist_cleanup_before_0.out
+++ b/src/test/regress/expected/upgrade_pg_dist_cleanup_before_0.out
@@ -30,6 +30,23 @@ SELECT COUNT(*) FROM pg_dist_placement WHERE shardstate = 1 AND shardid IN (SELE
 (1 row)
 
 -- create an orphaned placement based on an existing one
+--
+-- But before doing that, first disable automatic shard cleanup
+-- by maintenance daemon so that we can reliably test the cleanup
+-- in upgrade_pg_dist_cleanup_after.sql.
+ALTER SYSTEM SET citus.defer_shard_delete_interval TO -1;
+SELECT pg_reload_conf();
+ pg_reload_conf
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT pg_sleep(0.1);
+ pg_sleep
+---------------------------------------------------------------------
+
+(1 row)
+
 INSERT INTO pg_dist_placement(placementid, shardid, shardstate, shardlength, groupid)
   SELECT nextval('pg_dist_placement_placementid_seq'::regclass), shardid, 4, shardlength, 3-groupid
   FROM pg_dist_placement
diff --git a/src/test/regress/sql/upgrade_pg_dist_cleanup_after.sql b/src/test/regress/sql/upgrade_pg_dist_cleanup_after.sql
index e84c35b608c..333ac60ca93 100644
--- a/src/test/regress/sql/upgrade_pg_dist_cleanup_after.sql
+++ b/src/test/regress/sql/upgrade_pg_dist_cleanup_after.sql
@@ -13,3 +13,8 @@ SELECT COUNT(*) FROM pg_dist_placement WHERE shardid IN (SELECT shardid FROM pg_
 SELECT * FROM pg_dist_cleanup;
 CALL citus_cleanup_orphaned_resources();
 DROP TABLE table_with_orphaned_shards;
+
+-- Re-enable automatic shard cleanup by maintenance daemon as
+-- we have disabled it in upgrade_pg_dist_cleanup_before.sql
+ALTER SYSTEM RESET citus.defer_shard_delete_interval;
+SELECT pg_reload_conf();
diff --git a/src/test/regress/sql/upgrade_pg_dist_cleanup_before.sql b/src/test/regress/sql/upgrade_pg_dist_cleanup_before.sql
index 62ec8a1fb46..ec0eef353dc 100644
--- a/src/test/regress/sql/upgrade_pg_dist_cleanup_before.sql
+++ b/src/test/regress/sql/upgrade_pg_dist_cleanup_before.sql
@@ -16,6 +16,16 @@ SELECT create_distributed_table('table_with_orphaned_shards', 'a');
 -- show all 32 placements are active
 SELECT COUNT(*) FROM pg_dist_placement WHERE shardstate = 1 AND shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid='table_with_orphaned_shards'::regclass);
 -- create an orphaned placement based on an existing one
+--
+-- But before doing that, first disable automatic shard cleanup
+-- by maintenance daemon so that we can reliably test the cleanup
+-- in upgrade_pg_dist_cleanup_after.sql.
+
+ALTER SYSTEM SET citus.defer_shard_delete_interval TO -1;
+SELECT pg_reload_conf();
+
+SELECT pg_sleep(0.1);
+
 INSERT INTO pg_dist_placement(placementid, shardid, shardstate, shardlength, groupid)
   SELECT nextval('pg_dist_placement_placementid_seq'::regclass), shardid, 4, shardlength, 3-groupid
   FROM pg_dist_placement
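Note on the artifact version bumps: actions/upload-artifact@v4 makes artifacts immutable, so multiple jobs can no longer write into one shared artifact name the way v3 allowed. That is why upload_coverage now uploads codeclimate-${{ inputs.flags }} instead of a single "codeclimate" artifact, why save_logs_and_results callers must pass distinct folder names, and why the upload-coverage job downloads with pattern: codeclimate* plus merge-multiple: true, which flattens every matching artifact into one codeclimate/ directory so the existing cc-test-reporter sum-coverage codeclimate/*.json glob keeps working. A small sketch of that layout expectation, with a hypothetical function name:

    # Sketch only: after a merge-multiple download, each per-flag JSON
    # should sit directly under the download directory.
    from pathlib import Path

    def merged_coverage_files(download_dir="codeclimate"):
        files = sorted(Path(download_dir).glob("*.json"))
        if not files:
            raise FileNotFoundError(f"no coverage reports under {download_dir}/")
        return files

The bare download-artifact@v4.1.8 step added to the test-flakyness job names no artifact, which in v4 downloads all artifacts of the run into per-artifact subdirectories.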
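Note on the 2pc-recovery toggle added to citus_upgrade_test.py: setting citus.recover_2pc_interval to -1 stops the maintenance daemon from running 2PC recovery while the mixed-version cluster is up (the workaround for the issue linked in the comment), and ALTER SYSTEM RESET restores the default afterwards. As a minimal sketch, assuming only the two helpers this patch introduces, the same pair could be wrapped in a context manager so recovery is re-enabled even when the after-upgrade schedule fails midway:

    # Sketch only, not part of the patch; reuses the helpers added above.
    from contextlib import contextmanager

    @contextmanager
    def two_pc_recovery_disabled(pg_path, config):
        disable_2pc_recovery_for_all_nodes(pg_path, config)
        try:
            yield
        finally:
            # the RESET + reload pair runs even if the body raises
            enable_2pc_recovery_for_all_nodes(pg_path, config)

The patch keeps the explicit call pairs instead, which matches the flat style of the surrounding driver code.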
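Note on the pg_sleep(0.1) in upgrade_pg_dist_cleanup_before.sql: SELECT pg_reload_conf() only signals the postmaster and returns immediately, so the short sleep presumably gives the reloaded citus.defer_shard_delete_interval time to propagate before the test creates the orphaned placement that must not be cleaned up early. A polling sketch, with a hypothetical helper name and assuming the harness's local psql binary and trust auth, would make that wait explicit:

    # Sketch only: poll until a fresh backend reports the reloaded value,
    # e.g. wait_for_guc(bindir, port, "citus.defer_shard_delete_interval", "-1").
    # Even then the maintenance daemon picks the value up on its next loop
    # iteration, so the fixed sleep in the patch is a defensible shortcut.
    import subprocess
    import time

    def wait_for_guc(pg_path, port, guc, expected, timeout=5.0):
        deadline = time.time() + timeout
        while time.time() < deadline:
            value = subprocess.run(
                [f"{pg_path}/psql", "-p", str(port), "-d", "postgres",
                 "-tA", "-c", f"SHOW {guc};"],
                capture_output=True, text=True, check=True,
            ).stdout.strip()
            if value == expected:
                return
            time.sleep(0.05)
        raise TimeoutError(f"{guc} did not become {expected!r} within {timeout}s")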