Fix flaky citus upgrade test
onurctirtir committed Jan 31, 2025
1 parent 50f9bf5 commit 4cad81d
Showing 4 changed files with 41 additions and 0 deletions.
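
As the new comments in the diff explain, the test could behave unreliably because the maintenance daemon's automatic shard cleanup might remove the orphaned placement on its own before upgrade_pg_dist_cleanup_after.sql got to verify and clean it up explicitly. The fix disables that background cleanup in the "before" script and restores the default in the "after" script. A condensed sketch of the pattern, assembled from the diff below (the 0.1-second sleep is the test's chosen grace period for the reloaded setting, not a required value):

-- upgrade_pg_dist_cleanup_before.sql: keep the maintenance daemon from
-- deleting orphaned shards on its own (-1 disables deferred shard deletion)
ALTER SYSTEM SET citus.defer_shard_delete_interval TO -1;
SELECT pg_reload_conf();
SELECT pg_sleep(0.1); -- brief pause so the reloaded setting takes effect

-- upgrade_pg_dist_cleanup_after.sql: restore the default once cleanup has
-- been exercised explicitly via citus_cleanup_orphaned_resources()
ALTER SYSTEM RESET citus.defer_shard_delete_interval;
SELECT pg_reload_conf();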
9 changes: 9 additions & 0 deletions src/test/regress/expected/upgrade_pg_dist_cleanup_after_0.out
@@ -28,3 +28,12 @@ SELECT * FROM pg_dist_cleanup;
 CALL citus_cleanup_orphaned_resources();
 NOTICE: cleaned up 1 orphaned resources
 DROP TABLE table_with_orphaned_shards;
+
+-- Re-enable automatic shard cleanup by maintenance daemon as
+-- we have disabled it in upgrade_pg_dist_cleanup_before.sql
+ALTER SYSTEM RESET citus.defer_shard_delete_interval;
+SELECT pg_reload_conf();
+ pg_reload_conf
+---------------------------------------------------------------------
+ t
+(1 row)

17 changes: 17 additions & 0 deletions src/test/regress/expected/upgrade_pg_dist_cleanup_before_0.out
@@ -30,6 +30,23 @@ SELECT COUNT(*) FROM pg_dist_placement WHERE shardstate = 1 AND shardid IN (SELE
 (1 row)
 
 -- create an orphaned placement based on an existing one
+--
+-- But before doing that, first disable automatic shard cleanup
+-- by maintenance daemon so that we can reliably test the cleanup
+-- in upgrade_pg_dist_cleanup_after.sql.
+ALTER SYSTEM SET citus.defer_shard_delete_interval TO -1;
+SELECT pg_reload_conf();
+ pg_reload_conf
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT pg_sleep(0.1);
+ pg_sleep
+---------------------------------------------------------------------
+ 
+(1 row)
+
 INSERT INTO pg_dist_placement(placementid, shardid, shardstate, shardlength, groupid)
 SELECT nextval('pg_dist_placement_placementid_seq'::regclass), shardid, 4, shardlength, 3-groupid
 FROM pg_dist_placement
5 changes: 5 additions & 0 deletions src/test/regress/sql/upgrade_pg_dist_cleanup_after.sql
@@ -13,3 +13,8 @@ SELECT COUNT(*) FROM pg_dist_placement WHERE shardid IN (SELECT shardid FROM pg_
 SELECT * FROM pg_dist_cleanup;
 CALL citus_cleanup_orphaned_resources();
 DROP TABLE table_with_orphaned_shards;
+
+-- Re-enable automatic shard cleanup by maintenance daemon as
+-- we have disabled it in upgrade_pg_dist_cleanup_before.sql
+ALTER SYSTEM RESET citus.defer_shard_delete_interval;
+SELECT pg_reload_conf();
10 changes: 10 additions & 0 deletions src/test/regress/sql/upgrade_pg_dist_cleanup_before.sql
@@ -16,6 +16,16 @@ SELECT create_distributed_table('table_with_orphaned_shards', 'a');
 -- show all 32 placements are active
 SELECT COUNT(*) FROM pg_dist_placement WHERE shardstate = 1 AND shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid='table_with_orphaned_shards'::regclass);
 -- create an orphaned placement based on an existing one
+--
+-- But before doing that, first disable automatic shard cleanup
+-- by maintenance daemon so that we can reliably test the cleanup
+-- in upgrade_pg_dist_cleanup_after.sql.
+
+ALTER SYSTEM SET citus.defer_shard_delete_interval TO -1;
+SELECT pg_reload_conf();
+
+SELECT pg_sleep(0.1);
+
 INSERT INTO pg_dist_placement(placementid, shardid, shardstate, shardlength, groupid)
 SELECT nextval('pg_dist_placement_placementid_seq'::regclass), shardid, 4, shardlength, 3-groupid
 FROM pg_dist_placement
