Commit 4cad81d

Fix flaky citus upgrade test
1 parent 50f9bf5 commit 4cad81d

4 files changed: +41 -0 lines changed
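
Why the test was flaky (a reading based on the comments in the diff below, not on the commit message itself): upgrade_pg_dist_cleanup_before.sql creates an orphaned shard placement, and upgrade_pg_dist_cleanup_after.sql expects to clean it up explicitly with citus_cleanup_orphaned_resources(), printing "cleaned up 1 orphaned resources". If the Citus maintenance daemon runs its own deferred shard cleanup in between, the orphaned placement is already gone and the expected output no longer matches. The fix disables that background cleanup by setting citus.defer_shard_delete_interval to -1 before the placement is created and resets it once the after-script has run its check. A minimal way to inspect the setting from any session (standard SHOW; only the GUC name is taken from the diff):

-- -1 disables the maintenance daemon's automatic cleanup of deferred shard deletes
SHOW citus.defer_shard_delete_interval;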

src/test/regress/expected/upgrade_pg_dist_cleanup_after_0.out

Lines changed: 9 additions & 0 deletions
@@ -28,3 +28,12 @@ SELECT * FROM pg_dist_cleanup;
 CALL citus_cleanup_orphaned_resources();
 NOTICE: cleaned up 1 orphaned resources
 DROP TABLE table_with_orphaned_shards;
+-- Re-enable automatic shard cleanup by maintenance daemon as
+-- we have disabled it in upgrade_pg_dist_cleanup_before.sql
+ALTER SYSTEM RESET citus.defer_shard_delete_interval;
+SELECT pg_reload_conf();
+ pg_reload_conf
+---------------------------------------------------------------------
+ t
+(1 row)
+

src/test/regress/expected/upgrade_pg_dist_cleanup_before_0.out

Lines changed: 17 additions & 0 deletions
@@ -30,6 +30,23 @@ SELECT COUNT(*) FROM pg_dist_placement WHERE shardstate = 1 AND shardid IN (SELE
 (1 row)

 -- create an orphaned placement based on an existing one
+--
+-- But before doing that, first disable automatic shard cleanup
+-- by maintenance daemon so that we can reliably test the cleanup
+-- in upgrade_pg_dist_cleanup_after.sql.
+ALTER SYSTEM SET citus.defer_shard_delete_interval TO -1;
+SELECT pg_reload_conf();
+ pg_reload_conf
+---------------------------------------------------------------------
+ t
+(1 row)
+
+SELECT pg_sleep(0.1);
+ pg_sleep
+---------------------------------------------------------------------
+
+(1 row)
+
 INSERT INTO pg_dist_placement(placementid, shardid, shardstate, shardlength, groupid)
 SELECT nextval('pg_dist_placement_placementid_seq'::regclass), shardid, 4, shardlength, 3-groupid
 FROM pg_dist_placement
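
A note on the pg_reload_conf() / pg_sleep(0.1) pair above: pg_reload_conf() only signals the server to re-read its configuration files and returns immediately, so the short sleep is presumably there to give the backends, including the maintenance daemon, time to pick up the new defer_shard_delete_interval value before the orphaned placement is created. A hypothetical check (not part of this commit) that the override is in effect in a session:

-- the ALTER SYSTEM value should show up here after the reload
SELECT name, setting, source
FROM pg_settings
WHERE name = 'citus.defer_shard_delete_interval';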

src/test/regress/sql/upgrade_pg_dist_cleanup_after.sql

Lines changed: 5 additions & 0 deletions
@@ -13,3 +13,8 @@ SELECT COUNT(*) FROM pg_dist_placement WHERE shardid IN (SELECT shardid FROM pg_
 SELECT * FROM pg_dist_cleanup;
 CALL citus_cleanup_orphaned_resources();
 DROP TABLE table_with_orphaned_shards;
+
+-- Re-enable automatic shard cleanup by maintenance daemon as
+-- we have disabled it in upgrade_pg_dist_cleanup_before.sql
+ALTER SYSTEM RESET citus.defer_shard_delete_interval;
+SELECT pg_reload_conf();
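
ALTER SYSTEM RESET removes the citus.defer_shard_delete_interval entry from postgresql.auto.conf, and the following pg_reload_conf() lets the default take effect again, so later tests see the maintenance daemon's normal cleanup behavior. A hypothetical follow-up check (not part of this commit) that the override is really gone from the config files:

-- should return no rows once the ALTER SYSTEM entry has been removed
SELECT sourcefile, name, setting
FROM pg_file_settings
WHERE name = 'citus.defer_shard_delete_interval';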

src/test/regress/sql/upgrade_pg_dist_cleanup_before.sql

Lines changed: 10 additions & 0 deletions
@@ -16,6 +16,16 @@ SELECT create_distributed_table('table_with_orphaned_shards', 'a');
 -- show all 32 placements are active
 SELECT COUNT(*) FROM pg_dist_placement WHERE shardstate = 1 AND shardid IN (SELECT shardid FROM pg_dist_shard WHERE logicalrelid='table_with_orphaned_shards'::regclass);
 -- create an orphaned placement based on an existing one
+--
+-- But before doing that, first disable automatic shard cleanup
+-- by maintenance daemon so that we can reliably test the cleanup
+-- in upgrade_pg_dist_cleanup_after.sql.
+
+ALTER SYSTEM SET citus.defer_shard_delete_interval TO -1;
+SELECT pg_reload_conf();
+
+SELECT pg_sleep(0.1);
+
 INSERT INTO pg_dist_placement(placementid, shardid, shardstate, shardlength, groupid)
 SELECT nextval('pg_dist_placement_placementid_seq'::regclass), shardid, 4, shardlength, 3-groupid
 FROM pg_dist_placement
