Commit

Merge remote-tracking branch 'origin/release-13.0' into naisila/drop_pg14
onurctirtir committed Jan 31, 2025
2 parents 1cca860 + 26f16a7 commit b690d1c
Showing 12 changed files with 84 additions and 18 deletions.
2 changes: 1 addition & 1 deletion .github/actions/save_logs_and_results/action.yml
@@ -6,7 +6,7 @@ inputs:
runs:
using: composite
steps:
- uses: actions/upload-artifact@v3.1.1
- uses: actions/upload-artifact@v4.6.0
name: Upload logs
with:
name: ${{ inputs.folder }}
2 changes: 1 addition & 1 deletion .github/actions/setup_extension/action.yml
@@ -17,7 +17,7 @@ runs:
echo "PG_MAJOR=${{ inputs.pg_major }}" >> $GITHUB_ENV
fi
shell: bash
- uses: actions/download-artifact@v3.0.1
- uses: actions/download-artifact@v4.1.8
with:
name: build-${{ env.PG_MAJOR }}
- name: Install Extension
4 changes: 2 additions & 2 deletions .github/actions/upload_coverage/action.yml
@@ -21,7 +21,7 @@ runs:
mkdir -p /tmp/codeclimate
cc-test-reporter format-coverage -t lcov -o /tmp/codeclimate/${{ inputs.flags }}.json lcov.info
shell: bash
- uses: actions/upload-artifact@v3.1.1
- uses: actions/upload-artifact@v4.6.0
with:
path: "/tmp/codeclimate/*.json"
name: codeclimate
name: codeclimate-${{ inputs.flags }}
21 changes: 14 additions & 7 deletions .github/workflows/build_and_test.yml
@@ -121,7 +121,7 @@ jobs:
- name: Build
run: "./ci/build-citus.sh"
shell: bash
- uses: actions/upload-artifact@v3.1.1
- uses: actions/upload-artifact@v4.6.0
with:
name: build-${{ env.PG_MAJOR }}
path: |-
@@ -284,10 +284,12 @@ jobs:
check-arbitrary-configs parallel=4 CONFIGS=$TESTS
- uses: "./.github/actions/save_logs_and_results"
if: always()
with:
folder: ${{ env.PG_MAJOR }}_arbitrary_configs_${{ matrix.parallel }}
- uses: "./.github/actions/upload_coverage"
if: always()
with:
flags: ${{ env.pg_major }}_upgrade
flags: ${{ env.PG_MAJOR }}_arbitrary_configs_${{ matrix.parallel }}
codecov_token: ${{ secrets.CODECOV_TOKEN }}
test-pg-upgrade:
name: PG${{ matrix.old_pg_major }}-PG${{ matrix.new_pg_major }} - check-pg-upgrade
@@ -335,6 +337,8 @@ jobs:
if: failure()
- uses: "./.github/actions/save_logs_and_results"
if: always()
with:
folder: ${{ env.old_pg_major }}_${{ env.new_pg_major }}_upgrade
- uses: "./.github/actions/upload_coverage"
if: always()
with:
@@ -380,10 +384,12 @@ jobs:
done;
- uses: "./.github/actions/save_logs_and_results"
if: always()
with:
folder: ${{ env.PG_MAJOR }}_citus_upgrade
- uses: "./.github/actions/upload_coverage"
if: always()
with:
flags: ${{ env.pg_major }}_upgrade
flags: ${{ env.PG_MAJOR }}_citus_upgrade
codecov_token: ${{ secrets.CODECOV_TOKEN }}
upload-coverage:
if: always()
@@ -399,10 +405,11 @@
- test-citus-upgrade
- test-pg-upgrade
steps:
- uses: actions/download-artifact@v3.0.1
- uses: actions/download-artifact@v4.1.8
with:
name: "codeclimate"
path: "codeclimate"
pattern: codeclimate*
path: codeclimate
merge-multiple: true
- name: Upload coverage results to Code Climate
run: |-
cc-test-reporter sum-coverage codeclimate/*.json -o total.json
@@ -500,7 +507,7 @@ jobs:
matrix: ${{ fromJson(needs.prepare_parallelization_matrix_32.outputs.json) }}
steps:
- uses: actions/[email protected]
- uses: actions/download-artifact@v3.0.1
- uses: actions/download-artifact@v4.1.8
- uses: "./.github/actions/setup_extension"
- name: Run minimal tests
run: |-
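A note on the artifact bumps in this workflow: actions/upload-artifact@v4 no longer lets two jobs write to the same artifact name, which is why each coverage upload is now suffixed with its flags and why the fan-in job switches to a glob. A minimal sketch of the v4 pattern, with step shapes taken from the diff above and surrounding job context omitted:

# Each test job uploads a uniquely named artifact; v4 rejects duplicate names.
- uses: actions/upload-artifact@v4.6.0
  with:
    name: codeclimate-${{ inputs.flags }}
    path: "/tmp/codeclimate/*.json"

# The aggregating job downloads every matching artifact into one directory.
- uses: actions/download-artifact@v4.1.8
  with:
    pattern: codeclimate*
    path: codeclimate
    merge-multiple: true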
2 changes: 1 addition & 1 deletion .github/workflows/flaky_test_debugging.yml
@@ -34,7 +34,7 @@ jobs:
echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV
./ci/build-citus.sh
shell: bash
- uses: actions/upload-artifact@v3.1.1
- uses: actions/upload-artifact@v4.6.0
with:
name: build-${{ env.PG_MAJOR }}
path: |-
18 changes: 17 additions & 1 deletion src/backend/distributed/transaction/transaction_recovery.c
@@ -347,7 +347,23 @@ RecoverWorkerTransactions(WorkerNode *workerNode)
}

systable_endscan(scanDescriptor);
table_close(pgDistTransaction, NoLock);

/*
* Here we release the lock on pg_dist_transaction while closing it to avoid
* deadlocks that might occur because of trying to acquire a lock on
* pg_dist_authinfo while holding a lock on pg_dist_transaction. Such a scenario
* can only cause a deadlock if another transaction is trying to acquire a strong
* lock on pg_dist_transaction while holding a lock on pg_dist_authinfo. As of
* today, we (implicitly) acquire a strong lock on pg_dist_transaction only when
* upgrading Citus to 11.3-1 and this happens when creating a REPLICA IDENTITY on
* pg_dist_transaction.
*
* And regardless of the code-path we are in, it should be okay to release the
* lock now because all we do after this point is abort the prepared
* transactions that are not part of an in-progress distributed transaction,
* and releasing the lock before doing so is just fine.
*/
table_close(pgDistTransaction, RowExclusiveLock);

if (!recoveryFailed)
{
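The new comment hinges on one PostgreSQL locking rule: the lock mode passed to table_close() decides whether the lock taken at table_open() is released immediately or, with NoLock, retained until transaction end. A minimal illustrative sketch of the pattern — not the actual Citus function, and the names here are hypothetical:

#include "postgres.h"
#include "access/table.h"

/*
 * Illustrative only: open a catalog with RowExclusiveLock, do the work,
 * then pass the same lock mode to table_close() so the lock is dropped
 * right away. Passing NoLock instead would keep the lock until commit,
 * which is the window that allowed the pg_dist_transaction /
 * pg_dist_authinfo deadlock described in the comment above.
 */
static void
ScanAndReleaseEarly(Oid catalogRelationId)
{
	Relation rel = table_open(catalogRelationId, RowExclusiveLock);

	/* ... catalog scan / modifications elided ... */

	table_close(rel, RowExclusiveLock);	/* lock released here, not at commit */
}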
7 changes: 3 additions & 4 deletions src/test/regress/expected/isolation_create_restore_point.out
@@ -147,16 +147,15 @@ recover_prepared_transactions

step s2-create-restore:
SELECT 1 FROM citus_create_restore_point('citus-test');
<waiting ...>
step s1-commit:
COMMIT;

step s2-create-restore: <... completed>
?column?
---------------------------------------------------------------------
1
(1 row)

step s1-commit:
COMMIT;


starting permutation: s1-begin s1-drop s2-create-restore s1-commit
create_reference_table
9 changes: 9 additions & 0 deletions src/test/regress/expected/upgrade_pg_dist_cleanup_after_0.out
@@ -28,3 +28,12 @@ SELECT * FROM pg_dist_cleanup;
CALL citus_cleanup_orphaned_resources();
NOTICE: cleaned up 1 orphaned resources
DROP TABLE table_with_orphaned_shards;
-- Re-enable automatic shard cleanup by the maintenance daemon, as
-- we disabled it in upgrade_pg_dist_cleanup_before.sql
ALTER SYSTEM RESET citus.defer_shard_delete_interval;
SELECT pg_reload_conf();
pg_reload_conf
---------------------------------------------------------------------
t
(1 row)

17 changes: 17 additions & 0 deletions src/test/regress/expected/upgrade_pg_dist_cleanup_before_0.out
@@ -30,6 +30,23 @@ SELECT COUNT(*) FROM pg_dist_placement WHERE shardstate = 1 AND shardid IN (SELE
(1 row)

-- create an orphaned placement based on an existing one
--
-- But before doing that, first disable automatic shard cleanup
-- by the maintenance daemon so that we can reliably test the cleanup
-- in upgrade_pg_dist_cleanup_after.sql.
ALTER SYSTEM SET citus.defer_shard_delete_interval TO -1;
SELECT pg_reload_conf();
pg_reload_conf
---------------------------------------------------------------------
t
(1 row)

SELECT pg_sleep(0.1);
pg_sleep
---------------------------------------------------------------------

(1 row)

INSERT INTO pg_dist_placement(placementid, shardid, shardstate, shardlength, groupid)
SELECT nextval('pg_dist_placement_placementid_seq'::regclass), shardid, 4, shardlength, 3-groupid
FROM pg_dist_placement
5 changes: 4 additions & 1 deletion src/test/regress/spec/isolation_create_restore_point.spec
@@ -154,7 +154,7 @@ permutation "s1-begin" "s1-ddl" "s2-create-restore" "s1-commit"
// verify that citus_create_restore_point is not blocked by concurrent COPY (only commit)
permutation "s1-begin" "s1-copy" "s2-create-restore" "s1-commit"

// verify that citus_create_restore_point is blocked by concurrent recover_prepared_transactions
// verify that citus_create_restore_point is partially blocked by concurrent recover_prepared_transactions.
// The test output cannot show this explicitly, because recover_prepared_transactions
// unblocks citus_create_restore_point once the in-progress prepared transactions
// have been recovered.
permutation "s1-begin" "s1-recover" "s2-create-restore" "s1-commit"

// verify that citus_create_restore_point is blocked by concurrent DROP TABLE
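For readers unfamiliar with the isolation tester, the permutation above boils down to the following two-session interaction. This is a plain-SQL sketch: the body of step "s1-recover" is inferred from the expected-output file and is an assumption.

-- session 1
BEGIN;
SELECT recover_prepared_transactions();  -- assumed body of step "s1-recover"

-- session 2: blocks here (the "<waiting ...>" marker in the expected output)
SELECT 1 FROM citus_create_restore_point('citus-test');

-- session 1: committing releases session 2
COMMIT;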
5 changes: 5 additions & 0 deletions src/test/regress/sql/upgrade_pg_dist_cleanup_after.sql
@@ -13,3 +13,8 @@ SELECT COUNT(*) FROM pg_dist_placement WHERE shardid IN (SELECT shardid FROM pg_
SELECT * FROM pg_dist_cleanup;
CALL citus_cleanup_orphaned_resources();
DROP TABLE table_with_orphaned_shards;

-- Re-enable automatic shard cleanup by the maintenance daemon, as
-- we disabled it in upgrade_pg_dist_cleanup_before.sql
ALTER SYSTEM RESET citus.defer_shard_delete_interval;
SELECT pg_reload_conf();
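ALTER SYSTEM RESET only rewrites postgresql.auto.conf; it is pg_reload_conf() that makes running backends, including the maintenance daemon, pick the default back up. A hypothetical follow-up check, not part of the test:

-- After the reload, the GUC should report its default value again.
SHOW citus.defer_shard_delete_interval;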
10 changes: 10 additions & 0 deletions src/test/regress/sql/upgrade_pg_dist_cleanup_before.sql
@@ -16,6 +16,16 @@ SELECT create_distributed_table('table_with_orphaned_shards', 'a');
-- show all 32 placements are active
SELECT COUNT(*) FROM pg_dist_placement WHERE shardstate = 1 AND shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid='table_with_orphaned_shards'::regclass);
-- create an orphaned placement based on an existing one
--
-- But before doing that, first disable automatic shard cleanup
-- by the maintenance daemon so that we can reliably test the cleanup
-- in upgrade_pg_dist_cleanup_after.sql.

ALTER SYSTEM SET citus.defer_shard_delete_interval TO -1;
SELECT pg_reload_conf();

SELECT pg_sleep(0.1);

INSERT INTO pg_dist_placement(placementid, shardid, shardstate, shardlength, groupid)
SELECT nextval('pg_dist_placement_placementid_seq'::regclass), shardid, 4, shardlength, 3-groupid
FROM pg_dist_placement
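The ordering in this hunk matters: the GUC change must reach the maintenance daemon before the orphaned placement is inserted, or the daemon could clean it up before upgrade_pg_dist_cleanup_after.sql gets to verify the cleanup itself. The same three steps, restated with comments (values as in the diff; the 0.1 s settle time is the test's own heuristic):

-- -1 disables deferred shard deletion by the maintenance daemon.
ALTER SYSTEM SET citus.defer_shard_delete_interval TO -1;

-- Signal all backends, including the maintenance daemon, to re-read the config.
SELECT pg_reload_conf();

-- Give the daemon a moment to absorb the new value before the
-- orphaned placement is created below.
SELECT pg_sleep(0.1);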
