Skip to content

Commit 68b77a0

Browse files
authored
[PBCKP-150] Reading buffer is flushed each time we verify the checksum. (#487)
The race condition is covered with a unit test; the buffer is now flushed so that each of the 300 reads requests the data from the disk.
1 parent 4b2df86 commit 68b77a0

File tree

5 files changed

+88
-1
lines changed

5 files changed

+88
-1
lines changed

Diff for: .travis.yml

+1
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,7 @@ env:
4747
# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=replica
4848
# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=OFF MODE=retention
4949
# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=restore
50+
# - PG_VERSION=13 PG_BRANCH=REL_13_STABLE PTRACK_PATCH_PG_BRANCH=REL_13_STABLE MODE=time_consuming
5051

5152
jobs:
5253
allow_failures:

Diff for: src/data.c

+2
Original file line numberDiff line numberDiff line change
@@ -349,6 +349,8 @@ prepare_page(pgFile *file, XLogRecPtr prev_backup_start_lsn,
349349
Assert(false);
350350
}
351351
}
352+
/* flush the stdio buffer so that each further read attempt fetches fresh data from disk instead of re-reading once-buffered data, see PBCKP-150 */
353+
fflush(in);
352354
}
353355

354356
/*

Diff for: tests/Readme.md

+2
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,8 @@ Run suit of basic simple tests:
4141
Run ptrack tests:
4242
export PG_PROBACKUP_PTRACK=ON
4343
44+
Run long (time consuming) tests:
45+
export PG_PROBACKUP_LONG=ON
4446
4547
Usage:
4648
sudo echo 0 > /proc/sys/kernel/yama/ptrace_scope

Diff for: tests/__init__.py

+7-1
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
compression, page, ptrack, archive, exclude, cfs_backup, cfs_restore, \
88
cfs_validate_backup, auth_test, time_stamp, logging, \
99
locking, remote, external, config, checkdb, set_backup, incr_restore, \
10-
catchup, CVE_2018_1058
10+
catchup, CVE_2018_1058, time_consuming
1111

1212

1313
def load_tests(loader, tests, pattern):
@@ -21,6 +21,12 @@ def load_tests(loader, tests, pattern):
2121
if os.environ['PG_PROBACKUP_PTRACK'] == 'ON':
2222
suite.addTests(loader.loadTestsFromModule(ptrack))
2323

24+
# PG_PROBACKUP_LONG section for tests that are long
25+
# by design e.g. they contain loops, sleeps and so on
26+
if 'PG_PROBACKUP_LONG' in os.environ:
27+
if os.environ['PG_PROBACKUP_LONG'] == 'ON':
28+
suite.addTests(loader.loadTestsFromModule(time_consuming))
29+
2430
# suite.addTests(loader.loadTestsFromModule(auth_test))
2531
suite.addTests(loader.loadTestsFromModule(archive))
2632
suite.addTests(loader.loadTestsFromModule(backup))

Diff for: tests/time_consuming.py

+76
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,76 @@
import os
import subprocess
import tempfile
import unittest
from time import sleep

from .helpers.ptrack_helpers import ProbackupTest
7+
module_name = 'time_consuming'
8+
9+
class TimeConsumingTests(ProbackupTest, unittest.TestCase):
10+
def test_pbckp150(self):
11+
"""
12+
https://jira.postgrespro.ru/browse/PBCKP-150
13+
create a node filled with pgbench
14+
create FULL backup followed by PTRACK backup
15+
run pgbench, vacuum VERBOSE FULL and ptrack backups in parallel
16+
"""
17+
# init node
18+
fname = self.id().split('.')[3]
19+
node = self.make_simple_node(
20+
base_dir=os.path.join(module_name, fname, 'node'),
21+
set_replication=True,
22+
initdb_params=['--data-checksums'])
23+
node.append_conf('postgresql.conf',
24+
"""
25+
max_connections = 100
26+
wal_keep_size = 16000
27+
ptrack.map_size = 1
28+
shared_preload_libraries='ptrack'
29+
log_statement = 'none'
30+
fsync = off
31+
log_checkpoints = on
32+
autovacuum = off
33+
""")
34+
35+
# init probackup and add an instance
36+
backup_dir = os.path.join(self.tmp_path, module_name, fname, 'backup')
37+
self.init_pb(backup_dir)
38+
self.add_instance(backup_dir, 'node', node)
39+
40+
# run the node and init ptrack
41+
node.slow_start()
42+
node.safe_psql("postgres", "CREATE EXTENSION ptrack")
43+
# populate it with pgbench
44+
node.pgbench_init(scale=5)
45+
46+
# FULL backup followed by PTRACK backup
47+
self.backup_node(backup_dir, 'node', node, options=['--stream'])
48+
self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream'])
49+
50+
# run ordinary pgbench scenario to imitate some activity and another pgbench for vacuuming in parallel
51+
nBenchDuration = 30
52+
pgbench = node.pgbench(options=['-c', '20', '-j', '8', '-T', str(nBenchDuration)])
53+
with open('/tmp/pbckp150vacuum.sql', 'w') as f:
54+
f.write('VACUUM (FULL) pgbench_accounts, pgbench_tellers, pgbench_history; SELECT pg_sleep(1);\n')
55+
pgbenchval = node.pgbench(options=['-c', '1', '-f', '/tmp/pbckp150vacuum.sql', '-T', str(nBenchDuration)])
56+
57+
# several PTRACK backups
58+
for i in range(nBenchDuration):
59+
print("[{}] backing up PTRACK diff...".format(i+1))
60+
self.backup_node(backup_dir, 'node', node, backup_type='ptrack', options=['--stream', '--log-level-console', 'VERBOSE'])
61+
sleep(0.1)
62+
# if the activity pgbench has finished, stop backing up
63+
if pgbench.poll() is not None:
64+
break
65+
66+
pgbench.kill()
67+
pgbenchval.kill()
68+
pgbench.wait()
69+
pgbenchval.wait()
70+
71+
backups = self.show_pb(backup_dir, 'node')
72+
for b in backups:
73+
self.assertEqual("OK", b['status'])
74+
75+
# Clean after yourself
76+
self.del_test_dir(module_name, fname)

0 commit comments

Comments
 (0)