diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index 8addafcccaa..00000000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,1131 +0,0 @@ -version: 2.1 -orbs: - codecov: codecov/codecov@1.1.1 - azure-cli: circleci/azure-cli@1.0.0 - -parameters: - image_suffix: - type: string - default: '-v87fd773' - pg14_version: - type: string - default: '14.9' - pg15_version: - type: string - default: '15.4' - pg16_version: - type: string - default: '16.0' - upgrade_pg_versions: - type: string - default: '14.9-15.4-16.0' - style_checker_tools_version: - type: string - default: '0.8.18' - flaky_test: - type: string - default: '' - flaky_test_runs_per_job: - type: integer - default: 50 - skip_flaky_tests: - type: boolean - default: false - -commands: - install_extension: - parameters: - pg_major: - description: 'postgres major version to use' - type: integer - steps: - - run: - name: 'Install Extension' - command: | - tar xfv "${CIRCLE_WORKING_DIRECTORY}/install-<< parameters.pg_major >>.tar" --directory / - - configure: - steps: - - run: - name: 'Configure' - command: | - chown -R circleci . - gosu circleci ./configure --without-pg-version-check - - enable_core: - steps: - - run: - name: 'Enable core dumps' - command: | - ulimit -c unlimited - - save_regressions: - steps: - - run: - name: 'Regressions' - command: | - if [ -f "src/test/regress/regression.diffs" ]; then - cat src/test/regress/regression.diffs - exit 1 - fi - when: on_fail - - store_artifacts: - name: 'Save regressions' - path: src/test/regress/regression.diffs - - save_logs_and_results: - steps: - - store_artifacts: - name: 'Save mitmproxy output (failure test specific)' - path: src/test/regress/proxy.output - - store_artifacts: - name: 'Save results' - path: src/test/regress/results/ - - store_artifacts: - name: 'Save coordinator log' - path: src/test/regress/tmp_check/master/log - - store_artifacts: - name: 'Save worker1 log' - path: src/test/regress/tmp_check/worker.57637/log - - store_artifacts: - name: 'Save worker2 log' - path: src/test/regress/tmp_check/worker.57638/log - - stack_trace: - steps: - - run: - name: 'Print stack traces' - command: | - ./ci/print_stack_trace.sh - when: on_fail - - coverage: - parameters: - flags: - description: 'codecov flags' - type: string - steps: - - codecov/upload: - flags: '<< parameters.flags >>' - - run: - name: 'Create codeclimate coverage' - command: | - lcov --directory . --capture --output-file lcov.info - lcov --remove lcov.info -o lcov.info '/usr/*' - sed "s=^SF:$PWD/=SF:=g" -i lcov.info # relative pats are required by codeclimate - mkdir -p /tmp/codeclimate - # We started getting permissions error. This fixes them and since - # weqre not on a multi-user system so this is safe to do. 
- git config --global --add safe.directory /home/circleci/project - cc-test-reporter format-coverage -t lcov -o /tmp/codeclimate/$CIRCLE_JOB.json lcov.info - - persist_to_workspace: - root: /tmp - paths: - codeclimate/*.json - -jobs: - build: - description: Build the citus extension - parameters: - pg_major: - description: postgres major version building citus for - type: integer - image: - description: docker image to use for the build - type: string - default: citus/extbuilder - image_tag: - description: tag to use for the docker image - type: string - docker: - - image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>' - steps: - - checkout - - run: - name: 'Configure, Build, and Install' - command: | - ./ci/build-citus.sh - - persist_to_workspace: - root: . - paths: - build-<< parameters.pg_major >>/* - install-<< parameters.pg_major >>.tar - - check-style: - docker: - - image: 'citus/stylechecker:<< pipeline.parameters.style_checker_tools_version >><< pipeline.parameters.image_suffix >>' - steps: - - checkout - - run: - name: 'Check C Style' - command: citus_indent --check - - run: - name: 'Check Python style' - command: black --check . - - run: - name: 'Check Python import order' - command: isort --check . - - run: - name: 'Check Python lints' - command: flake8 . - - run: - name: 'Fix whitespace' - command: ci/editorconfig.sh && git diff --exit-code - - run: - name: 'Remove useless declarations' - command: ci/remove_useless_declarations.sh && git diff --cached --exit-code - - run: - name: 'Normalize test output' - command: ci/normalize_expected.sh && git diff --exit-code - - run: - name: 'Check for C-style comments in migration files' - command: ci/disallow_c_comments_in_migrations.sh && git diff --exit-code - - run: - name: 'Check for comments that start with # character in spec files' - command: ci/disallow_hash_comments_in_spec_files.sh && git diff --exit-code - - run: - name: 'Check for gitignore entries for source files' - command: ci/fix_gitignore.sh && git diff --exit-code - - run: - name: 'Check for lengths of changelog entries' - command: ci/disallow_long_changelog_entries.sh - - run: - name: 'Check for banned C API usage' - command: ci/banned.h.sh - - run: - name: 'Check for tests missing in schedules' - command: ci/check_all_tests_are_run.sh - - run: - name: 'Check if all CI scripts are actually run' - command: ci/check_all_ci_scripts_are_run.sh - - run: - name: 'Check if all GUCs are sorted alphabetically' - command: ci/check_gucs_are_alphabetically_sorted.sh - - run: - name: 'Check for missing downgrade scripts' - command: ci/check_migration_files.sh - - check-sql-snapshots: - docker: - - image: 'citus/extbuilder:latest' - steps: - - checkout - - run: - name: 'Check Snapshots' - command: ci/check_sql_snapshots.sh - - test-pg-upgrade: - description: Runs postgres upgrade tests - parameters: - old_pg_major: - description: 'postgres major version to use before the upgrade' - type: integer - new_pg_major: - description: 'postgres major version to upgrade to' - type: integer - image: - description: 'docker image to use as for the tests' - type: string - default: citus/pgupgradetester - image_tag: - description: 'docker image tag to use' - type: string - docker: - - image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>' - working_directory: /home/circleci/project - steps: - - checkout - - attach_workspace: - at: .
- - install_extension: - pg_major: << parameters.old_pg_major >> - - install_extension: - pg_major: << parameters.new_pg_major >> - - configure - - enable_core - - run: - name: 'Install and test postgres upgrade' - command: | - gosu circleci \ - make -C src/test/regress \ - check-pg-upgrade \ - old-bindir=/usr/lib/postgresql/<< parameters.old_pg_major >>/bin \ - new-bindir=/usr/lib/postgresql/<< parameters.new_pg_major >>/bin - no_output_timeout: 2m - - run: - name: 'Copy pg_upgrade logs for newData dir' - command: | - mkdir -p /tmp/pg_upgrade_newData_logs - if ls src/test/regress/tmp_upgrade/newData/*.log 1> /dev/null 2>&1; then - cp src/test/regress/tmp_upgrade/newData/*.log /tmp/pg_upgrade_newData_logs - fi - when: on_fail - - store_artifacts: - name: 'Save pg_upgrade logs for newData dir' - path: /tmp/pg_upgrade_newData_logs - - save_logs_and_results - - save_regressions - - stack_trace - - coverage: - flags: 'test_<< parameters.old_pg_major >>_<< parameters.new_pg_major >>,upgrade' - - test-pytest: - description: Runs pytest based tests - parameters: - pg_major: - description: 'postgres major version' - type: integer - image: - description: 'docker image to use as for the tests' - type: string - default: citus/failtester - image_tag: - description: 'docker image tag to use' - type: string - docker: - - image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>' - working_directory: /home/circleci/project - steps: - - checkout - - attach_workspace: - at: . - - install_extension: - pg_major: << parameters.pg_major >> - - configure - - enable_core - - run: - name: 'Run pytest' - command: | - gosu circleci \ - make -C src/test/regress check-pytest - no_output_timeout: 2m - - stack_trace - - coverage: - flags: 'test_<< parameters.pg_major >>,pytest' - - - test-arbitrary-configs: - description: Runs tests on arbitrary configs - parallelism: 6 - parameters: - pg_major: - description: 'postgres major version to use' - type: integer - image: - description: 'docker image to use as for the tests' - type: string - default: citus/failtester - image_tag: - description: 'docker image tag to use' - type: string - docker: - - image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>' - resource_class: xlarge - working_directory: /home/circleci/project - steps: - - checkout - - attach_workspace: - at: . 
- - install_extension: - pg_major: << parameters.pg_major >> - - configure - - enable_core - - run: - name: 'Test arbitrary configs' - command: | - TESTS=$(src/test/regress/citus_tests/print_test_names.py | circleci tests split) - # Our test suite expects comma separated values - TESTS=$(echo $TESTS | tr ' ' ',') - # TESTS will contain subset of configs that will be run on a container and we use multiple containers - # to run the test suite - gosu circleci \ - make -C src/test/regress \ - check-arbitrary-configs parallel=4 CONFIGS=$TESTS - no_output_timeout: 2m - - run: - name: 'Show regressions' - command: | - find src/test/regress/tmp_citus_test/ -name "regression*.diffs" -exec cat {} + - lines=$(find src/test/regress/tmp_citus_test/ -name "regression*.diffs" | wc -l) - if [ $lines -ne 0 ]; then - exit 1 - fi - - when: on_fail - - run: - name: 'Copy logfiles' - command: | - mkdir src/test/regress/tmp_citus_test/logfiles - find src/test/regress/tmp_citus_test/ -name "logfile_*" -exec cp -t src/test/regress/tmp_citus_test/logfiles/ {} + - when: on_fail - - store_artifacts: - name: 'Save logfiles' - path: src/test/regress/tmp_citus_test/logfiles - - save_logs_and_results - - stack_trace - - coverage: - flags: 'test_<< parameters.pg_major >>,upgrade' - - test-citus-upgrade: - description: Runs citus upgrade tests - parameters: - pg_major: - description: 'postgres major version' - type: integer - image: - description: 'docker image to use as for the tests' - type: string - default: citus/citusupgradetester - image_tag: - description: 'docker image tag to use' - type: string - docker: - - image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>' - working_directory: /home/circleci/project - steps: - - checkout - - attach_workspace: - at: . 
- - configure - - enable_core - - run: - name: 'Install and test citus upgrade' - command: | - # run make check-citus-upgrade for all citus versions - # the image has ${CITUS_VERSIONS} set with all versions it contains the binaries of - for citus_version in ${CITUS_VERSIONS}; do \ - gosu circleci \ - make -C src/test/regress \ - check-citus-upgrade \ - bindir=/usr/lib/postgresql/${PG_MAJOR}/bin \ - citus-old-version=${citus_version} \ - citus-pre-tar=/install-pg${PG_MAJOR}-citus${citus_version}.tar \ - citus-post-tar=/home/circleci/project/install-$PG_MAJOR.tar; \ - done; - - # run make check-citus-upgrade-mixed for all citus versions - # the image has ${CITUS_VERSIONS} set with all versions it contains the binaries of - for citus_version in ${CITUS_VERSIONS}; do \ - gosu circleci \ - make -C src/test/regress \ - check-citus-upgrade-mixed \ - citus-old-version=${citus_version} \ - bindir=/usr/lib/postgresql/${PG_MAJOR}/bin \ - citus-pre-tar=/install-pg${PG_MAJOR}-citus${citus_version}.tar \ - citus-post-tar=/home/circleci/project/install-$PG_MAJOR.tar; \ - done; - no_output_timeout: 2m - - save_logs_and_results - - save_regressions - - stack_trace - - coverage: - flags: 'test_<< parameters.pg_major >>,upgrade' - - test-query-generator: - description: Expects that the generated queries that are run on distributed and local tables would have the same results - parameters: - pg_major: - description: 'postgres major version' - type: integer - image: - description: 'docker image to use as for the tests' - type: string - default: citus/failtester - image_tag: - description: 'docker image tag to use' - type: string - docker: - - image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>' - working_directory: /home/circleci/project - steps: - - checkout - - attach_workspace: - at: .
- - install_extension: - pg_major: << parameters.pg_major >> - - configure - - enable_core - - run: - name: 'Run Test' - command: | - gosu circleci make -C src/test/regress check-query-generator - no_output_timeout: 5m - - run: - name: 'Show regressions' - command: | - find src/test/regress/citus_tests/query_generator/out/ -name "local_dist.diffs" -exec cat {} + - lines=$(find src/test/regress/citus_tests/query_generator/out/ -name "local_dist.diffs" | wc -l) - if [ $lines -ne 0 ]; then - exit 1 - fi - when: on_fail - - run: - name: 'Copy logfiles' - command: | - mkdir src/test/regress/tmp_citus_test/logfiles - find src/test/regress/tmp_citus_test/ -name "logfile_*" -exec cp -t src/test/regress/tmp_citus_test/logfiles/ {} + - when: on_fail - - store_artifacts: - name: 'Save logfiles' - path: src/test/regress/tmp_citus_test/logfiles - - store_artifacts: - name: 'Save ddls' - path: src/test/regress/citus_tests/query_generator/out/ddls.sql - - store_artifacts: - name: 'Save dmls' - path: src/test/regress/citus_tests/query_generator/out/queries.sql - - store_artifacts: - name: 'Save diffs' - path: src/test/regress/citus_tests/query_generator/out/local_dist.diffs - - stack_trace - - coverage: - flags: 'test_<< parameters.pg_major >>,querygen' - - test-citus: - description: Runs the common tests of citus - parameters: - pg_major: - description: 'postgres major version' - type: integer - image: - description: 'docker image to use as for the tests' - type: string - default: citus/exttester - image_tag: - description: 'docker image tag to use' - type: string - make: - description: 'make target' - type: string - docker: - - image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>' - working_directory: /home/circleci/project - steps: - - checkout - - attach_workspace: - at: . - - install_extension: - pg_major: << parameters.pg_major >> - - configure - - enable_core - - run: - name: 'Run Test' - command: | - gosu circleci make -C src/test/regress << parameters.make >> - no_output_timeout: 2m - - save_logs_and_results - - save_regressions - - stack_trace - - coverage: - flags: 'test_<< parameters.pg_major >>,<< parameters.make >>' - - tap-test-citus: - description: Runs tap tests for citus - parameters: - pg_major: - description: 'postgres major version' - type: integer - image: - description: 'docker image to use as for the tests' - type: string - default: citus/exttester - image_tag: - description: 'docker image tag to use' - type: string - suite: - description: 'name of the tap test suite to run' - type: string - make: - description: 'make target' - type: string - default: installcheck - docker: - - image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>' - working_directory: /home/circleci/project - steps: - - checkout - - attach_workspace: - at: . 
- - install_extension: - pg_major: << parameters.pg_major >> - - configure - - enable_core - - run: - name: 'Run Test' - command: | - gosu circleci make -C src/test/<< parameters.suite >> << parameters.make >> - no_output_timeout: 2m - - store_artifacts: - name: 'Save tap logs' - path: /home/circleci/project/src/test/<< parameters.suite >>/tmp_check/log - - save_logs_and_results - - stack_trace - - coverage: - flags: 'test_<< parameters.pg_major >>,tap_<< parameters.suite >>_<< parameters.make >>' - - check-merge-to-enterprise: - docker: - - image: citus/extbuilder:<< pipeline.parameters.pg14_version >> - working_directory: /home/circleci/project - steps: - - checkout - - run: - command: | - ci/check_enterprise_merge.sh - - ch_benchmark: - docker: - - image: buildpack-deps:stretch - working_directory: /home/circleci/project - steps: - - checkout - - azure-cli/install - - azure-cli/login-with-service-principal - - run: - command: | - cd ./src/test/hammerdb - sh run_hammerdb.sh citusbot_ch_benchmark_rg - name: install dependencies and run ch_benchmark tests - no_output_timeout: 20m - - tpcc_benchmark: - docker: - - image: buildpack-deps:stretch - working_directory: /home/circleci/project - steps: - - checkout - - azure-cli/install - - azure-cli/login-with-service-principal - - run: - command: | - cd ./src/test/hammerdb - sh run_hammerdb.sh citusbot_tpcc_benchmark_rg - name: install dependencies and run ch_benchmark tests - no_output_timeout: 20m - - test-flakyness: - description: Runs a test multiple times to see if it's flaky - parallelism: 32 - parameters: - pg_major: - description: 'postgres major version' - type: integer - image: - description: 'docker image to use as for the tests' - type: string - default: citus/failtester - image_tag: - description: 'docker image tag to use' - type: string - test: - description: 'the test file path that should be run multiple times' - type: string - default: '' - runs: - description: 'number of times that the test should be run in total' - type: integer - default: 8 - skip: - description: 'A flag to bypass flaky test detection.' - type: boolean - default: false - docker: - - image: '<< parameters.image >>:<< parameters.image_tag >><< pipeline.parameters.image_suffix >>' - working_directory: /home/circleci/project - resource_class: small - steps: - - checkout - - attach_workspace: - at: . - - run: - name: 'Detect regression tests need to be ran' - command: | - skip=<< parameters.skip >> - if [ "$skip" = true ]; then - echo "Skipping flaky test detection." - circleci-agent step halt - fi - - testForDebugging="<< parameters.test >>" - - if [ -z "$testForDebugging" ]; then - detected_changes=$(git diff origin/main... --name-only --diff-filter=AM | (grep 'src/test/regress/sql/.*\.sql\|src/test/regress/spec/.*\.spec\|src/test/regress/citus_tests/test/test_.*\.py' || true)) - tests=${detected_changes} - else - tests=$testForDebugging; - fi - - if [ -z "$tests" ]; then - echo "No test found." 
- circleci-agent step halt - else - echo "Detected tests " $tests - fi - - echo export tests=\""$tests"\" >> "$BASH_ENV" - source "$BASH_ENV" - - install_extension: - pg_major: << parameters.pg_major >> - - configure - - enable_core - - run: - name: 'Run minimal tests' - command: | - tests_array=($tests) - for test in "${tests_array[@]}" - do - test_name=$(echo "$test" | sed -r "s/.+\/(.+)\..+/\1/") - gosu circleci src/test/regress/citus_tests/run_test.py $test_name --repeat << parameters.runs >> --use-base-schedule --use-whole-schedule-line - done - no_output_timeout: 2m - - save_logs_and_results - - save_regressions - - stack_trace - - upload-coverage: - docker: - - image: 'citus/exttester:<< pipeline.parameters.pg15_version >><< pipeline.parameters.image_suffix >>' - working_directory: /home/circleci/project - steps: - - attach_workspace: - at: . - - run: - name: Upload coverage results to Code Climate - command: | - cc-test-reporter sum-coverage codeclimate/*.json -o total.json - cc-test-reporter upload-coverage -i total.json - -workflows: - version: 2 - flaky_test_debugging: - when: << pipeline.parameters.flaky_test >> - jobs: - - build: - name: build-flaky-15 - pg_major: 15 - image_tag: '<< pipeline.parameters.pg15_version >>' - - - test-flakyness: - name: 'test-15_flaky' - pg_major: 15 - image_tag: '<< pipeline.parameters.pg15_version >>' - requires: [build-flaky-15] - test: '<< pipeline.parameters.flaky_test >>' - runs: << pipeline.parameters.flaky_test_runs_per_job >> - - build_and_test: - when: - not: << pipeline.parameters.flaky_test >> - jobs: - - build: - name: build-14 - pg_major: 14 - image_tag: '<< pipeline.parameters.pg14_version >>' - - build: - name: build-15 - pg_major: 15 - image_tag: '<< pipeline.parameters.pg15_version >>' - - build: - name: build-16 - pg_major: 16 - image_tag: '<< pipeline.parameters.pg16_version >>' - - - check-style - - check-sql-snapshots - - - test-citus: &test-citus-14 - name: 'test-14_check-split' - make: check-split - pg_major: 14 - image_tag: '<< pipeline.parameters.pg14_version >>' - requires: [build-14] - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-enterprise' - make: check-enterprise - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-enterprise-isolation' - make: check-enterprise-isolation - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-enterprise-isolation-logicalrep-1' - make: check-enterprise-isolation-logicalrep-1 - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-enterprise-isolation-logicalrep-2' - make: check-enterprise-isolation-logicalrep-2 - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-enterprise-isolation-logicalrep-3' - make: check-enterprise-isolation-logicalrep-3 - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-enterprise-failure' - image: citus/failtester - make: check-enterprise-failure - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-multi' - make: check-multi - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-multi-1' - make: check-multi-1 - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-mx' - make: check-multi-mx - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-vanilla' - make: check-vanilla - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-isolation' - make: check-isolation - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-operations' - make: check-operations - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-follower-cluster' - make: check-follower-cluster - - test-citus: - <<: 
*test-citus-14 - name: 'test-14_check-columnar' - make: check-columnar - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-columnar-isolation' - make: check-columnar-isolation - - test-citus: - <<: *test-citus-14 - name: 'test-14_check-failure' - image: citus/failtester - make: check-failure - - - test-citus: &test-citus-15 - name: 'test-15_check-split' - make: check-split - pg_major: 15 - image_tag: '<< pipeline.parameters.pg15_version >>' - requires: [build-15] - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-enterprise' - make: check-enterprise - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-enterprise-isolation' - make: check-enterprise-isolation - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-enterprise-isolation-logicalrep-1' - make: check-enterprise-isolation-logicalrep-1 - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-enterprise-isolation-logicalrep-2' - make: check-enterprise-isolation-logicalrep-2 - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-enterprise-isolation-logicalrep-3' - make: check-enterprise-isolation-logicalrep-3 - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-enterprise-failure' - image: citus/failtester - make: check-enterprise-failure - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-multi' - make: check-multi - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-multi-1' - make: check-multi-1 - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-mx' - make: check-multi-mx - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-vanilla' - make: check-vanilla - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-isolation' - make: check-isolation - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-operations' - make: check-operations - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-follower-cluster' - make: check-follower-cluster - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-columnar' - make: check-columnar - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-columnar-isolation' - make: check-columnar-isolation - - test-citus: - <<: *test-citus-15 - name: 'test-15_check-failure' - image: citus/failtester - make: check-failure - - - test-citus: &test-citus-16 - name: 'test-16_check-split' - make: check-split - pg_major: 16 - image_tag: '<< pipeline.parameters.pg16_version >>' - requires: [build-16] - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-enterprise' - make: check-enterprise - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-enterprise-isolation' - make: check-enterprise-isolation - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-enterprise-isolation-logicalrep-1' - make: check-enterprise-isolation-logicalrep-1 - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-enterprise-isolation-logicalrep-2' - make: check-enterprise-isolation-logicalrep-2 - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-enterprise-isolation-logicalrep-3' - make: check-enterprise-isolation-logicalrep-3 - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-enterprise-failure' - image: citus/failtester - make: check-enterprise-failure - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-multi' - make: check-multi - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-multi-1' - make: check-multi-1 - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-mx' - make: check-multi-mx - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-vanilla' - make: check-vanilla - - 
test-citus: - <<: *test-citus-16 - name: 'test-16_check-isolation' - make: check-isolation - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-operations' - make: check-operations - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-follower-cluster' - make: check-follower-cluster - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-columnar' - make: check-columnar - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-columnar-isolation' - make: check-columnar-isolation - - test-citus: - <<: *test-citus-16 - name: 'test-16_check-failure' - image: citus/failtester - make: check-failure - - - test-pytest: - name: 'test-14_pytest' - pg_major: 14 - image_tag: '<< pipeline.parameters.pg14_version >>' - requires: [build-14] - - - test-pytest: - name: 'test-15_pytest' - pg_major: 15 - image_tag: '<< pipeline.parameters.pg15_version >>' - requires: [build-15] - - - test-pytest: - name: 'test-16_pytest' - pg_major: 16 - image_tag: '<< pipeline.parameters.pg16_version >>' - requires: [build-16] - - - tap-test-citus: - name: 'test-15_tap-cdc' - suite: cdc - pg_major: 15 - image_tag: '<< pipeline.parameters.pg15_version >>' - requires: [build-15] - - - tap-test-citus: - name: 'test-16_tap-cdc' - suite: cdc - pg_major: 16 - image_tag: '<< pipeline.parameters.pg16_version >>' - requires: [build-16] - - - test-arbitrary-configs: - name: 'test-14_check-arbitrary-configs' - pg_major: 14 - image_tag: '<< pipeline.parameters.pg14_version >>' - requires: [build-14] - - - test-arbitrary-configs: - name: 'test-15_check-arbitrary-configs' - pg_major: 15 - image_tag: '<< pipeline.parameters.pg15_version >>' - requires: [build-15] - - - test-arbitrary-configs: - name: 'test-16_check-arbitrary-configs' - pg_major: 16 - image_tag: '<< pipeline.parameters.pg16_version >>' - requires: [build-16] - - - test-query-generator: - name: 'test-14_check-query-generator' - pg_major: 14 - image_tag: '<< pipeline.parameters.pg14_version >>' - requires: [build-14] - - - test-query-generator: - name: 'test-15_check-query-generator' - pg_major: 15 - image_tag: '<< pipeline.parameters.pg15_version >>' - requires: [build-15] - - - test-query-generator: - name: 'test-16_check-query-generator' - pg_major: 16 - image_tag: '<< pipeline.parameters.pg16_version >>' - requires: [build-16] - - - test-pg-upgrade: - name: 'test-14-15_check-pg-upgrade' - old_pg_major: 14 - new_pg_major: 15 - image_tag: '<< pipeline.parameters.upgrade_pg_versions >>' - requires: [build-14, build-15] - - - test-pg-upgrade: - name: 'test-15-16_check-pg-upgrade' - old_pg_major: 15 - new_pg_major: 16 - image_tag: '<< pipeline.parameters.upgrade_pg_versions >>' - requires: [build-15, build-16] - - - test-pg-upgrade: - name: 'test-14-16_check-pg-upgrade' - old_pg_major: 14 - new_pg_major: 16 - image_tag: '<< pipeline.parameters.upgrade_pg_versions >>' - requires: [build-14, build-16] - - - test-citus-upgrade: - name: test-14_check-citus-upgrade - pg_major: 14 - image_tag: '<< pipeline.parameters.pg14_version >>' - requires: [build-14] - - - upload-coverage: - requires: - - test-14_check-multi - - test-14_check-multi-1 - - test-14_check-mx - - test-14_check-vanilla - - test-14_check-isolation - - test-14_check-operations - - test-14_check-follower-cluster - - test-14_check-columnar - - test-14_check-columnar-isolation - - test-14_check-failure - - test-14_check-enterprise - - test-14_check-enterprise-isolation - - test-14_check-enterprise-isolation-logicalrep-1 - - test-14_check-enterprise-isolation-logicalrep-2 - - 
test-14_check-enterprise-isolation-logicalrep-3 - - test-14_check-enterprise-failure - - test-14_check-split - - test-14_check-arbitrary-configs - - test-14_check-query-generator - - test-15_check-multi - - test-15_check-multi-1 - - test-15_check-mx - - test-15_check-vanilla - - test-15_check-isolation - - test-15_check-operations - - test-15_check-follower-cluster - - test-15_check-columnar - - test-15_check-columnar-isolation - - test-15_check-failure - - test-15_check-enterprise - - test-15_check-enterprise-isolation - - test-15_check-enterprise-isolation-logicalrep-1 - - test-15_check-enterprise-isolation-logicalrep-2 - - test-15_check-enterprise-isolation-logicalrep-3 - - test-15_check-enterprise-failure - - test-15_check-split - - test-15_check-arbitrary-configs - - test-15_check-query-generator - - test-16_check-multi - - test-16_check-multi-1 - - test-16_check-mx - - test-16_check-vanilla - - test-16_check-isolation - - test-16_check-operations - - test-16_check-follower-cluster - - test-16_check-columnar - - test-16_check-columnar-isolation - - test-16_check-failure - - test-16_check-enterprise - - test-16_check-enterprise-isolation - - test-16_check-enterprise-isolation-logicalrep-1 - - test-16_check-enterprise-isolation-logicalrep-2 - - test-16_check-enterprise-isolation-logicalrep-3 - - test-16_check-enterprise-failure - - test-16_check-split - - test-16_check-arbitrary-configs - - test-16_check-query-generator - - test-14-15_check-pg-upgrade - - test-15-16_check-pg-upgrade - - test-14-16_check-pg-upgrade - - test-14_check-citus-upgrade - - - ch_benchmark: - requires: [build-14] - filters: - branches: - only: - - /ch_benchmark\/.*/ # match with ch_benchmark/ prefix - - tpcc_benchmark: - requires: [build-14] - filters: - branches: - only: - - /tpcc_benchmark\/.*/ # match with tpcc_benchmark/ prefix - - test-flakyness: - name: 'test-15_flaky' - pg_major: 15 - image_tag: '<< pipeline.parameters.pg15_version >>' - requires: [build-15] - skip: << pipeline.parameters.skip_flaky_tests >> diff --git a/.devcontainer/.gdbinit b/.devcontainer/.gdbinit new file mode 100644 index 00000000000..9d544512b8f --- /dev/null +++ b/.devcontainer/.gdbinit @@ -0,0 +1,33 @@ +# gdbpg.py contains scripts to nicely print the postgres datastructures +# while in a gdb session. Since the vscode debugger is based on gdb this +# actually also works when debugging with vscode. Providing nice tools +# to understand the internal datastructures we are working with. +source /root/gdbpg.py + +# when debugging postgres it is convenient to _always_ have a breakpoint +# trigger when an error is logged. Because .gdbinit is sourced before gdb +# is fully attached and has the sources loaded. To make sure the breakpoint +# is added when the library is loaded we temporary set the breakpoint pending +# to on. After we have added out breakpoint we revert back to the default +# configuration for breakpoint pending. +# The breakpoint is hard to read, but at entry of the function we don't have +# the level loaded in elevel. Instead we hardcode the location where the +# level of the current error is stored. Also gdb doesn't understand the +# ERROR symbol so we hardcode this to the value of ERROR. It is very unlikely +# this value will ever change in postgres, but if it does we might need to +# find a way to conditionally load the correct breakpoint. 
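+# for reference, and as an assumption based on src/include/utils/elog.h in PostgreSQL 14 and later (the majors this container builds): WARNING is 19, WARNING_CLIENT_ONLY is 20 and ERROR is 21, so the hardcoded 21 below corresponds to ERROR; on PostgreSQL 13 and older ERROR was 20, so the condition would need adjusting there.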
+set breakpoint pending on +break elog.c:errfinish if errordata[errordata_stack_depth].elevel == 21 +set breakpoint pending auto + +echo \n +echo ----------------------------------------------------------------------------------\n +echo when attaching to a postgres backend a breakpoint will be set on elog.c:errfinish \n +echo it will only break on errors being raised in postgres \n +echo \n +echo to disable this breakpoint from vscode run `-exec disable 1` in the debug console \n +echo this assumes it's the first breakpoint loaded as it is loaded from .gdbinit \n +echo this can be verified with `-exec info break`, enabling can be done with \n +echo `-exec enable 1` \n +echo ----------------------------------------------------------------------------------\n +echo \n diff --git a/.devcontainer/.gitignore b/.devcontainer/.gitignore new file mode 100644 index 00000000000..3a7f553fc49 --- /dev/null +++ b/.devcontainer/.gitignore @@ -0,0 +1 @@ +postgresql-*.tar.bz2 diff --git a/.devcontainer/.psqlrc b/.devcontainer/.psqlrc new file mode 100644 index 00000000000..7642a97149d --- /dev/null +++ b/.devcontainer/.psqlrc @@ -0,0 +1,7 @@ +\timing on +\pset linestyle unicode +\pset border 2 +\setenv PAGER 'pspg --no-mouse -bX --no-commandbar --no-topbar' +\set HISTSIZE 100000 +\set PROMPT1 '\n%[%033[1m%]%M %n@%/:%>-%p%R%[%033[0m%]%# ' +\set PROMPT2 ' ' diff --git a/.devcontainer/.vscode/Pipfile b/.devcontainer/.vscode/Pipfile new file mode 100644 index 00000000000..57909c897df --- /dev/null +++ b/.devcontainer/.vscode/Pipfile @@ -0,0 +1,12 @@ +[[source]] +url = "https://pypi.org/simple" +verify_ssl = true +name = "pypi" + +[packages] +docopt = "*" + +[dev-packages] + +[requires] +python_version = "3.9" diff --git a/.devcontainer/.vscode/Pipfile.lock b/.devcontainer/.vscode/Pipfile.lock new file mode 100644 index 00000000000..52ee8663cb8 --- /dev/null +++ b/.devcontainer/.vscode/Pipfile.lock @@ -0,0 +1,28 @@ +{ + "_meta": { + "hash": { + "sha256": "6956a6700ead5804aa56bd597c93bb4a13f208d2d49d3b5399365fd240ca0797" + }, + "pipfile-spec": 6, + "requires": { + "python_version": "3.9" + }, + "sources": [ + { + "name": "pypi", + "url": "https://pypi.org/simple", + "verify_ssl": true + } + ] + }, + "default": { + "docopt": { + "hashes": [ + "sha256:49b3a825280bd66b3aa83585ef59c4a8c82f2c8a522dbe754a8bc8d08c85c491" + ], + "index": "pypi", + "version": "==0.6.2" + } + }, + "develop": {} +} diff --git a/.devcontainer/.vscode/generate_c_cpp_properties-json.py b/.devcontainer/.vscode/generate_c_cpp_properties-json.py new file mode 100755 index 00000000000..6f49a1818e1 --- /dev/null +++ b/.devcontainer/.vscode/generate_c_cpp_properties-json.py @@ -0,0 +1,84 @@ +#! /usr/bin/env pipenv-shebang +"""Generate C/C++ properties file for VSCode. + +Uses pgenv to iterate postgres versions and generate +a C/C++ properties file for VSCode containing the +include paths for the postgres headers. + +Usage: + generate_c_cpp_properties-json.py <target_path> + generate_c_cpp_properties-json.py (-h | --help) + generate_c_cpp_properties-json.py --version + +Options: + -h --help Show this screen. + --version Show version.
+ +""" +import json +import subprocess + +from docopt import docopt + + +def main(args): + target_path = args['<target_path>'] + + output = subprocess.check_output(['pgenv', 'versions']) + # typical output is: + # 14.8 pgsql-14.8 + # * 15.3 pgsql-15.3 + # 16beta2 pgsql-16beta2 + # where the line marked with a * is the currently active version + # + # we are only interested in the first word of each line, which is the version number + # thus we strip the whitespace and the * from the line and split it into words + # and take the first word + versions = [line.strip('* ').split()[0] for line in output.decode('utf-8').splitlines()] + + # create the list of configurations per version + configurations = [] + for version in versions: + configurations.append(generate_configuration(version)) + + # create the json file + c_cpp_properties = { + "configurations": configurations, + "version": 4 + } + + # write the c_cpp_properties.json file + with open(target_path, 'w') as f: + json.dump(c_cpp_properties, f, indent=4) + + +def generate_configuration(version): + """Returns a configuration for the given postgres version. + + >>> generate_configuration('14.8') + { + "name": "Citus Development Configuration - Postgres 14.8", + "includePath": [ + "/usr/local/include", + "/home/citus/.pgenv/src/postgresql-14.8/src/**", + "${workspaceFolder}/**", + "${workspaceFolder}/src/include/", + ], + "configurationProvider": "ms-vscode.makefile-tools" + } + """ + return { + "name": f"Citus Development Configuration - Postgres {version}", + "includePath": [ + "/usr/local/include", + f"/home/citus/.pgenv/src/postgresql-{version}/src/**", + "${workspaceFolder}/**", + "${workspaceFolder}/src/include/", + ], + "configurationProvider": "ms-vscode.makefile-tools" + } + + +if __name__ == '__main__': + arguments = docopt(__doc__, version='0.1.0') + main(arguments) diff --git a/.devcontainer/.vscode/launch.json b/.devcontainer/.vscode/launch.json new file mode 100644 index 00000000000..6de90ce09d5 --- /dev/null +++ b/.devcontainer/.vscode/launch.json @@ -0,0 +1,40 @@ +{ + "version": "0.2.0", + "configurations": [ + { + "name": "Attach Citus (devcontainer)", + "type": "cppdbg", + "request": "attach", + "processId": "${command:pickProcess}", + "program": "/home/citus/.pgenv/pgsql/bin/postgres", + "additionalSOLibSearchPath": "/home/citus/.pgenv/pgsql/lib", + "setupCommands": [ + { + "text": "handle SIGUSR1 noprint nostop pass", + "description": "let gdb not stop when SIGUSR1 is sent to process", + "ignoreFailures": true + } + ], + }, + { + "name": "Open core file", + "type": "cppdbg", + "request": "launch", + "program": "/home/citus/.pgenv/pgsql/bin/postgres", + "coreDumpPath": "${input:corefile}", + "cwd": "${workspaceFolder}", + "MIMode": "gdb", + } + ], + "inputs": [ + { + "id": "corefile", + "type": "command", + "command": "extension.commandvariable.file.pickFile", + "args": { + "dialogTitle": "Select core file", + "include": "**/core*", + }, + }, + ], +} diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 00000000000..13762e1e550 --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,217 @@ +FROM ubuntu:22.04 AS base + +# environment is to make python pass an interactive shell, probably not the best timezone given a wide variety of colleagues +ENV TZ=UTC +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + +# install build tools +RUN apt update && apt install -y \ + bzip2 \ + cpanminus \ + curl \ + flex \ + gcc \ + git \ + libcurl4-gnutls-dev \ + libicu-dev \ +
libkrb5-dev \ + liblz4-dev \ + libpam0g-dev \ + libreadline-dev \ + libselinux1-dev \ + libssl-dev \ + libxslt-dev \ + libzstd-dev \ + locales \ + make \ + perl \ + pkg-config \ + python3 \ + python3-pip \ + software-properties-common \ + sudo \ + uuid-dev \ + valgrind \ + zlib1g-dev \ + && add-apt-repository ppa:deadsnakes/ppa -y \ + && apt install -y \ + python3.9-full \ + # software properties pulls in pkexec, which makes the debugger unusable in vscode + && apt purge -y \ + software-properties-common \ + && apt autoremove -y \ + && apt clean + +RUN sudo pip3 install pipenv pipenv-shebang + +RUN cpanm install IPC::Run + +RUN locale-gen en_US.UTF-8 + +# add the citus user to sudoers and allow all sudoers to login without a password prompt +RUN useradd -ms /bin/bash citus \ + && usermod -aG sudo citus \ + && echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers + +WORKDIR /home/citus +USER citus + +# run all make commands with the number of cores available +RUN echo "export MAKEFLAGS=\"-j \$(nproc)\"" >> "/home/citus/.bashrc" + +RUN git clone --branch v1.3.2 --depth 1 https://github.com/theory/pgenv.git .pgenv +COPY --chown=citus:citus pgenv/config/ .pgenv/config/ +ENV PATH="/home/citus/.pgenv/bin:${PATH}" +ENV PATH="/home/citus/.pgenv/pgsql/bin:${PATH}" + +USER citus + +# build postgres versions separately for effective parrallelism and caching of already built versions when changing only certain versions +FROM base AS pg14 +RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.11 +RUN rm .pgenv/src/*.tar* +RUN make -C .pgenv/src/postgresql-*/ clean +RUN make -C .pgenv/src/postgresql-*/src/include install + +# create a staging directory with all files we want to copy from our pgenv build +# we will copy the contents of the staged folder into the final image at once +RUN mkdir .pgenv-staging/ +RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/ +RUN rm .pgenv-staging/config/default.conf + +FROM base AS pg15 +RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.6 +RUN rm .pgenv/src/*.tar* +RUN make -C .pgenv/src/postgresql-*/ clean +RUN make -C .pgenv/src/postgresql-*/src/include install + +# create a staging directory with all files we want to copy from our pgenv build +# we will copy the contents of the staged folder into the final image at once +RUN mkdir .pgenv-staging/ +RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/ +RUN rm .pgenv-staging/config/default.conf + +FROM base AS pg16 +RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.2 +RUN rm .pgenv/src/*.tar* +RUN make -C .pgenv/src/postgresql-*/ clean +RUN make -C .pgenv/src/postgresql-*/src/include install + +# create a staging directory with all files we want to copy from our pgenv build +# we will copy the contents of the staged folder into the final image at once +RUN mkdir .pgenv-staging/ +RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/ +RUN rm .pgenv-staging/config/default.conf + +FROM base AS uncrustify-builder + +RUN sudo apt update && sudo apt install -y cmake tree + +WORKDIR /uncrustify +RUN curl -L https://github.com/uncrustify/uncrustify/archive/uncrustify-0.68.1.tar.gz | tar xz +WORKDIR /uncrustify/uncrustify-uncrustify-0.68.1/ +RUN mkdir build +WORKDIR /uncrustify/uncrustify-uncrustify-0.68.1/build/ +RUN cmake .. 
+RUN MAKEFLAGS="-j $(nproc)" make -s + +RUN make install DESTDIR=/uncrustify + +# builder for all pipenv's to get them contained in a single layer +FROM base AS pipenv + +WORKDIR /workspaces/citus/ + +# tools to sync pgenv with vscode +COPY --chown=citus:citus .vscode/Pipfile .vscode/Pipfile.lock .devcontainer/.vscode/ +RUN ( cd .devcontainer/.vscode && pipenv install ) + +# environment to run our failure tests +COPY --chown=citus:citus src/ src/ +RUN ( cd src/test/regress && pipenv install ) + +# assemble the final container by copying over the artifacts from separately build containers +FROM base AS devcontainer + +LABEL org.opencontainers.image.source=https://github.com/citusdata/citus +LABEL org.opencontainers.image.description="Development container for the Citus project" +LABEL org.opencontainers.image.licenses=AGPL-3.0-only + +RUN yes | sudo unminimize + +# install developer productivity tools +RUN sudo apt update \ + && sudo apt install -y \ + autoconf2.69 \ + bash-completion \ + fswatch \ + gdb \ + htop \ + libdbd-pg-perl \ + libdbi-perl \ + lsof \ + man \ + net-tools \ + psmisc \ + pspg \ + tree \ + vim \ + && sudo apt clean + +# Since gdb will run in the context of the root user when debugging citus we will need to both +# download the gdbpg.py script as the root user, into their home directory, as well as add .gdbinit +# as a file owned by root +# This will make that as soon as the debugger attaches to a postgres backend (or frankly any other process) +# the gdbpg.py script will be sourced and the developer can direcly use it. +RUN sudo curl -o /root/gdbpg.py https://raw.githubusercontent.com/tvesely/gdbpg/6065eee7872457785f830925eac665aa535caf62/gdbpg.py +COPY --chown=root:root .gdbinit /root/ + +# install developer dependencies in the global environment +RUN --mount=type=bind,source=requirements.txt,target=requirements.txt pip install -r requirements.txt + +# for persistent bash history across devcontainers we need to have +# a) a directory to store the history in +# b) a prompt command to append the history to the file +# c) specify the history file to store the history in +# b and c are done in the .bashrc to make it persistent across shells only +RUN sudo install -d -o citus -g citus /commandhistory \ + && echo "export PROMPT_COMMAND='history -a' && export HISTFILE=/commandhistory/.bash_history" >> "/home/citus/.bashrc" + +# install citus-dev +RUN git clone --branch develop https://github.com/citusdata/tools.git citus-tools \ + && ( cd citus-tools/citus_dev && pipenv install ) \ + && mkdir -p ~/.local/bin \ + && ln -s /home/citus/citus-tools/citus_dev/citus_dev-pipenv .local/bin/citus_dev \ + && sudo make -C citus-tools/uncrustify install bindir=/usr/local/bin pkgsysconfdir=/usr/local/etc/ \ + && mkdir -p ~/.local/share/bash-completion/completions/ \ + && ln -s ~/citus-tools/citus_dev/bash_completion ~/.local/share/bash-completion/completions/citus_dev + +# TODO some LC_ALL errors, possibly solved by locale-gen +RUN git clone https://github.com/so-fancy/diff-so-fancy.git \ + && mkdir -p ~/.local/bin \ + && ln -s /home/citus/diff-so-fancy/diff-so-fancy .local/bin/ + +COPY --link --from=uncrustify-builder /uncrustify/usr/ /usr/ + +COPY --link --from=pg14 /home/citus/.pgenv-staging/ /home/citus/.pgenv/ +COPY --link --from=pg15 /home/citus/.pgenv-staging/ /home/citus/.pgenv/ +COPY --link --from=pg16 /home/citus/.pgenv-staging/ /home/citus/.pgenv/ + +COPY --link --from=pipenv /home/citus/.local/share/virtualenvs/ /home/citus/.local/share/virtualenvs/ + +# place to run your 
cluster with citus_dev +VOLUME /data +RUN sudo mkdir /data \ + && sudo chown citus:citus /data + +COPY --chown=citus:citus .psqlrc . + +# with the copy linking of layers github actions seem to misbehave with the ownership of the +# directories leading upto the link, hence a small patch layer to have to right ownerships set +RUN sudo chown --from=root:root citus:citus -R ~ + +# sets default pg version +RUN pgenv switch 16.2 + +# make connecting to the coordinator easy +ENV PGPORT=9700 diff --git a/.devcontainer/Makefile b/.devcontainer/Makefile new file mode 100644 index 00000000000..8f417410406 --- /dev/null +++ b/.devcontainer/Makefile @@ -0,0 +1,11 @@ + +init: ../.vscode/c_cpp_properties.json ../.vscode/launch.json + +../.vscode: + mkdir -p ../.vscode + +../.vscode/launch.json: ../.vscode .vscode/launch.json + cp .vscode/launch.json ../.vscode/launch.json + +../.vscode/c_cpp_properties.json: ../.vscode + ./.vscode/generate_c_cpp_properties-json.py ../.vscode/c_cpp_properties.json diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 00000000000..cddfcebf4c5 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,37 @@ +{ + "image": "ghcr.io/citusdata/citus-devcontainer:main", + "runArgs": [ + "--cap-add=SYS_PTRACE", + "--ulimit=core=-1", + ], + "forwardPorts": [ + 9700 + ], + "customizations": { + "vscode": { + "extensions": [ + "eamodio.gitlens", + "GitHub.copilot-chat", + "GitHub.copilot", + "github.vscode-github-actions", + "github.vscode-pull-request-github", + "ms-vscode.cpptools-extension-pack", + "ms-vsliveshare.vsliveshare", + "rioj7.command-variable", + ], + "settings": { + "files.exclude": { + "**/*.o": true, + "**/.deps/": true, + } + }, + } + }, + "mounts": [ + "type=volume,target=/data", + "source=citus-bashhistory,target=/commandhistory,type=volume", + ], + "updateContentCommand": "./configure", + "postCreateCommand": "make -C .devcontainer/", +} + diff --git a/.devcontainer/pgenv/config/default.conf b/.devcontainer/pgenv/config/default.conf new file mode 100644 index 00000000000..ab55493f93e --- /dev/null +++ b/.devcontainer/pgenv/config/default.conf @@ -0,0 +1,15 @@ +PGENV_MAKE_OPTIONS=(-s) + +PGENV_CONFIGURE_OPTIONS=( + --enable-debug + --enable-depend + --enable-cassert + --enable-tap-tests + 'CFLAGS=-ggdb -Og -g3 -fno-omit-frame-pointer -DUSE_VALGRIND' + --with-openssl + --with-libxml + --with-libxslt + --with-uuid=e2fs + --with-icu + --with-lz4 +) diff --git a/.devcontainer/requirements.txt b/.devcontainer/requirements.txt new file mode 100644 index 00000000000..7300b3b89cc --- /dev/null +++ b/.devcontainer/requirements.txt @@ -0,0 +1,9 @@ +black==23.11.0 +click==8.1.7 +isort==5.12.0 +mypy-extensions==1.0.0 +packaging==23.2 +pathspec==0.11.2 +platformdirs==4.0.0 +tomli==2.0.1 +typing_extensions==4.8.0 diff --git a/.devcontainer/src/test/regress/Pipfile b/.devcontainer/src/test/regress/Pipfile new file mode 100644 index 00000000000..d4b2cc39f07 --- /dev/null +++ b/.devcontainer/src/test/regress/Pipfile @@ -0,0 +1,27 @@ +[[source]] +name = "pypi" +url = "https://pypi.python.org/simple" +verify_ssl = true + +[packages] +mitmproxy = {editable = true, ref = "main", git = "https://github.com/citusdata/mitmproxy.git"} +construct = "==2.9.45" +docopt = "==0.6.2" +cryptography = ">=41.0.4" +pytest = "*" +psycopg = "*" +filelock = "*" +pytest-asyncio = "*" +pytest-timeout = "*" +pytest-xdist = "*" +pytest-repeat = "*" +pyyaml = "*" + +[dev-packages] +black = "*" +isort = "*" +flake8 = "*" +flake8-bugbear = "*" + 
+[requires] +python_version = "3.9" diff --git a/.devcontainer/src/test/regress/Pipfile.lock b/.devcontainer/src/test/regress/Pipfile.lock new file mode 100644 index 00000000000..bdb42a1c319 --- /dev/null +++ b/.devcontainer/src/test/regress/Pipfile.lock @@ -0,0 +1,1010 @@ +{ + "_meta": { + "hash": { + "sha256": "b92bf682aeeea1a66a16beaf78584a5318fd0ae908ce85c7e2a4807aa2bee532" + }, + "pipfile-spec": 6, + "requires": { + "python_version": "3.9" + }, + "sources": [ + { + "name": "pypi", + "url": "https://pypi.python.org/simple", + "verify_ssl": true + } + ] + }, + "default": { + "asgiref": { + "hashes": [ + "sha256:4ef1ab46b484e3c706329cedeff284a5d40824200638503f5768edb6de7d58e9", + "sha256:ffc141aa908e6f175673e7b1b3b7af4fdb0ecb738fc5c8b88f69f055c2415214" + ], + "markers": "python_version >= '3.6'", + "version": "==3.4.1" + }, + "blinker": { + "hashes": [ + "sha256:471aee25f3992bd325afa3772f1063dbdbbca947a041b8b89466dc00d606f8b6" + ], + "version": "==1.4" + }, + "brotli": { + "hashes": [ + "sha256:02177603aaca36e1fd21b091cb742bb3b305a569e2402f1ca38af471777fb019", + "sha256:11d3283d89af7033236fa4e73ec2cbe743d4f6a81d41bd234f24bf63dde979df", + "sha256:12effe280b8ebfd389022aa65114e30407540ccb89b177d3fbc9a4f177c4bd5d", + "sha256:160c78292e98d21e73a4cc7f76a234390e516afcd982fa17e1422f7c6a9ce9c8", + "sha256:16d528a45c2e1909c2798f27f7bf0a3feec1dc9e50948e738b961618e38b6a7b", + "sha256:19598ecddd8a212aedb1ffa15763dd52a388518c4550e615aed88dc3753c0f0c", + "sha256:1c48472a6ba3b113452355b9af0a60da5c2ae60477f8feda8346f8fd48e3e87c", + "sha256:268fe94547ba25b58ebc724680609c8ee3e5a843202e9a381f6f9c5e8bdb5c70", + "sha256:269a5743a393c65db46a7bb982644c67ecba4b8d91b392403ad8a861ba6f495f", + "sha256:26d168aac4aaec9a4394221240e8a5436b5634adc3cd1cdf637f6645cecbf181", + "sha256:29d1d350178e5225397e28ea1b7aca3648fcbab546d20e7475805437bfb0a130", + "sha256:2aad0e0baa04517741c9bb5b07586c642302e5fb3e75319cb62087bd0995ab19", + "sha256:3148362937217b7072cf80a2dcc007f09bb5ecb96dae4617316638194113d5be", + "sha256:330e3f10cd01da535c70d09c4283ba2df5fb78e915bea0a28becad6e2ac010be", + "sha256:336b40348269f9b91268378de5ff44dc6fbaa2268194f85177b53463d313842a", + "sha256:3496fc835370da351d37cada4cf744039616a6db7d13c430035e901443a34daa", + "sha256:35a3edbe18e876e596553c4007a087f8bcfd538f19bc116917b3c7522fca0429", + "sha256:3b78a24b5fd13c03ee2b7b86290ed20efdc95da75a3557cc06811764d5ad1126", + "sha256:3b8b09a16a1950b9ef495a0f8b9d0a87599a9d1f179e2d4ac014b2ec831f87e7", + "sha256:3c1306004d49b84bd0c4f90457c6f57ad109f5cc6067a9664e12b7b79a9948ad", + "sha256:3ffaadcaeafe9d30a7e4e1e97ad727e4f5610b9fa2f7551998471e3736738679", + "sha256:40d15c79f42e0a2c72892bf407979febd9cf91f36f495ffb333d1d04cebb34e4", + "sha256:44bb8ff420c1d19d91d79d8c3574b8954288bdff0273bf788954064d260d7ab0", + "sha256:4688c1e42968ba52e57d8670ad2306fe92e0169c6f3af0089be75bbac0c64a3b", + "sha256:495ba7e49c2db22b046a53b469bbecea802efce200dffb69b93dd47397edc9b6", + "sha256:4d1b810aa0ed773f81dceda2cc7b403d01057458730e309856356d4ef4188438", + "sha256:503fa6af7da9f4b5780bb7e4cbe0c639b010f12be85d02c99452825dd0feef3f", + "sha256:56d027eace784738457437df7331965473f2c0da2c70e1a1f6fdbae5402e0389", + "sha256:5913a1177fc36e30fcf6dc868ce23b0453952c78c04c266d3149b3d39e1410d6", + "sha256:5b6ef7d9f9c38292df3690fe3e302b5b530999fa90014853dcd0d6902fb59f26", + "sha256:5bf37a08493232fbb0f8229f1824b366c2fc1d02d64e7e918af40acd15f3e337", + "sha256:5cb1e18167792d7d21e21365d7650b72d5081ed476123ff7b8cac7f45189c0c7", + "sha256:61a7ee1f13ab913897dac7da44a73c6d44d48a4adff42a5701e3239791c96e14", + 
"sha256:622a231b08899c864eb87e85f81c75e7b9ce05b001e59bbfbf43d4a71f5f32b2", + "sha256:68715970f16b6e92c574c30747c95cf8cf62804569647386ff032195dc89a430", + "sha256:6b2ae9f5f67f89aade1fab0f7fd8f2832501311c363a21579d02defa844d9296", + "sha256:6c772d6c0a79ac0f414a9f8947cc407e119b8598de7621f39cacadae3cf57d12", + "sha256:6d847b14f7ea89f6ad3c9e3901d1bc4835f6b390a9c71df999b0162d9bb1e20f", + "sha256:73fd30d4ce0ea48010564ccee1a26bfe39323fde05cb34b5863455629db61dc7", + "sha256:76ffebb907bec09ff511bb3acc077695e2c32bc2142819491579a695f77ffd4d", + "sha256:7bbff90b63328013e1e8cb50650ae0b9bac54ffb4be6104378490193cd60f85a", + "sha256:7cb81373984cc0e4682f31bc3d6be9026006d96eecd07ea49aafb06897746452", + "sha256:7ee83d3e3a024a9618e5be64648d6d11c37047ac48adff25f12fa4226cf23d1c", + "sha256:854c33dad5ba0fbd6ab69185fec8dab89e13cda6b7d191ba111987df74f38761", + "sha256:85f7912459c67eaab2fb854ed2bc1cc25772b300545fe7ed2dc03954da638649", + "sha256:87fdccbb6bb589095f413b1e05734ba492c962b4a45a13ff3408fa44ffe6479b", + "sha256:88c63a1b55f352b02c6ffd24b15ead9fc0e8bf781dbe070213039324922a2eea", + "sha256:8a674ac10e0a87b683f4fa2b6fa41090edfd686a6524bd8dedbd6138b309175c", + "sha256:8ed6a5b3d23ecc00ea02e1ed8e0ff9a08f4fc87a1f58a2530e71c0f48adf882f", + "sha256:93130612b837103e15ac3f9cbacb4613f9e348b58b3aad53721d92e57f96d46a", + "sha256:9744a863b489c79a73aba014df554b0e7a0fc44ef3f8a0ef2a52919c7d155031", + "sha256:9749a124280a0ada4187a6cfd1ffd35c350fb3af79c706589d98e088c5044267", + "sha256:97f715cf371b16ac88b8c19da00029804e20e25f30d80203417255d239f228b5", + "sha256:9bf919756d25e4114ace16a8ce91eb340eb57a08e2c6950c3cebcbe3dff2a5e7", + "sha256:9d12cf2851759b8de8ca5fde36a59c08210a97ffca0eb94c532ce7b17c6a3d1d", + "sha256:9ed4c92a0665002ff8ea852353aeb60d9141eb04109e88928026d3c8a9e5433c", + "sha256:a72661af47119a80d82fa583b554095308d6a4c356b2a554fdc2799bc19f2a43", + "sha256:afde17ae04d90fbe53afb628f7f2d4ca022797aa093e809de5c3cf276f61bbfa", + "sha256:b1375b5d17d6145c798661b67e4ae9d5496920d9265e2f00f1c2c0b5ae91fbde", + "sha256:b336c5e9cf03c7be40c47b5fd694c43c9f1358a80ba384a21969e0b4e66a9b17", + "sha256:b3523f51818e8f16599613edddb1ff924eeb4b53ab7e7197f85cbc321cdca32f", + "sha256:b43775532a5904bc938f9c15b77c613cb6ad6fb30990f3b0afaea82797a402d8", + "sha256:b663f1e02de5d0573610756398e44c130add0eb9a3fc912a09665332942a2efb", + "sha256:b83bb06a0192cccf1eb8d0a28672a1b79c74c3a8a5f2619625aeb6f28b3a82bb", + "sha256:ba72d37e2a924717990f4d7482e8ac88e2ef43fb95491eb6e0d124d77d2a150d", + "sha256:c2415d9d082152460f2bd4e382a1e85aed233abc92db5a3880da2257dc7daf7b", + "sha256:c83aa123d56f2e060644427a882a36b3c12db93727ad7a7b9efd7d7f3e9cc2c4", + "sha256:c8e521a0ce7cf690ca84b8cc2272ddaf9d8a50294fd086da67e517439614c755", + "sha256:cab1b5964b39607a66adbba01f1c12df2e55ac36c81ec6ed44f2fca44178bf1a", + "sha256:cb02ed34557afde2d2da68194d12f5719ee96cfb2eacc886352cb73e3808fc5d", + "sha256:cc0283a406774f465fb45ec7efb66857c09ffefbe49ec20b7882eff6d3c86d3a", + "sha256:cfc391f4429ee0a9370aa93d812a52e1fee0f37a81861f4fdd1f4fb28e8547c3", + "sha256:db844eb158a87ccab83e868a762ea8024ae27337fc7ddcbfcddd157f841fdfe7", + "sha256:defed7ea5f218a9f2336301e6fd379f55c655bea65ba2476346340a0ce6f74a1", + "sha256:e16eb9541f3dd1a3e92b89005e37b1257b157b7256df0e36bd7b33b50be73bcb", + "sha256:e1abbeef02962596548382e393f56e4c94acd286bd0c5afba756cffc33670e8a", + "sha256:e23281b9a08ec338469268f98f194658abfb13658ee98e2b7f85ee9dd06caa91", + "sha256:e2d9e1cbc1b25e22000328702b014227737756f4b5bf5c485ac1d8091ada078b", + "sha256:e48f4234f2469ed012a98f4b7874e7f7e173c167bed4934912a29e03167cf6b1", + 
"sha256:e4c4e92c14a57c9bd4cb4be678c25369bf7a092d55fd0866f759e425b9660806", + "sha256:ec1947eabbaf8e0531e8e899fc1d9876c179fc518989461f5d24e2223395a9e3", + "sha256:f909bbbc433048b499cb9db9e713b5d8d949e8c109a2a548502fb9aa8630f0b1" + ], + "version": "==1.0.9" + }, + "certifi": { + "hashes": [ + "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082", + "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9" + ], + "markers": "python_version >= '3.6'", + "version": "==2023.7.22" + }, + "cffi": { + "hashes": [ + "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc", + "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a", + "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417", + "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab", + "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520", + "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36", + "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743", + "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8", + "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed", + "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684", + "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56", + "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324", + "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d", + "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235", + "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e", + "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088", + "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000", + "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7", + "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e", + "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673", + "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c", + "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe", + "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2", + "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098", + "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8", + "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a", + "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0", + "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b", + "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896", + "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e", + "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9", + "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2", + "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b", + "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6", + "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404", + "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f", + "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0", + "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4", + "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc", + 
"sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936", + "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba", + "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872", + "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb", + "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614", + "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1", + "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d", + "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969", + "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b", + "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4", + "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627", + "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956", + "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357" + ], + "markers": "python_version >= '3.8'", + "version": "==1.16.0" + }, + "click": { + "hashes": [ + "sha256:6a7a62563bbfabfda3a38f3023a1db4a35978c0abd76f6c9605ecd6554d6d9b1", + "sha256:8458d7b1287c5fb128c90e23381cf99dcde74beaf6c7ff6384ce84d6fe090adb" + ], + "markers": "python_version >= '3.6'", + "version": "==8.0.4" + }, + "construct": { + "hashes": [ + "sha256:2271a0efd0798679dea825ff47e22a4c550456a5db0ba8baa82f7eae0af0118c" + ], + "index": "pypi", + "version": "==2.9.45" + }, + "cryptography": { + "hashes": [ + "sha256:004b6ccc95943f6a9ad3142cfabcc769d7ee38a3f60fb0dddbfb431f818c3a67", + "sha256:047c4603aeb4bbd8db2756e38f5b8bd7e94318c047cfe4efeb5d715e08b49311", + "sha256:0d9409894f495d465fe6fda92cb70e8323e9648af912d5b9141d616df40a87b8", + "sha256:23a25c09dfd0d9f28da2352503b23e086f8e78096b9fd585d1d14eca01613e13", + "sha256:2ed09183922d66c4ec5fdaa59b4d14e105c084dd0febd27452de8f6f74704143", + "sha256:35c00f637cd0b9d5b6c6bd11b6c3359194a8eba9c46d4e875a3660e3b400005f", + "sha256:37480760ae08065437e6573d14be973112c9e6dcaf5f11d00147ee74f37a3829", + "sha256:3b224890962a2d7b57cf5eeb16ccaafba6083f7b811829f00476309bce2fe0fd", + "sha256:5a0f09cefded00e648a127048119f77bc2b2ec61e736660b5789e638f43cc397", + "sha256:5b72205a360f3b6176485a333256b9bcd48700fc755fef51c8e7e67c4b63e3ac", + "sha256:7e53db173370dea832190870e975a1e09c86a879b613948f09eb49324218c14d", + "sha256:7febc3094125fc126a7f6fb1f420d0da639f3f32cb15c8ff0dc3997c4549f51a", + "sha256:80907d3faa55dc5434a16579952ac6da800935cd98d14dbd62f6f042c7f5e839", + "sha256:86defa8d248c3fa029da68ce61fe735432b047e32179883bdb1e79ed9bb8195e", + "sha256:8ac4f9ead4bbd0bc8ab2d318f97d85147167a488be0e08814a37eb2f439d5cf6", + "sha256:93530900d14c37a46ce3d6c9e6fd35dbe5f5601bf6b3a5c325c7bffc030344d9", + "sha256:9eeb77214afae972a00dee47382d2591abe77bdae166bda672fb1e24702a3860", + "sha256:b5f4dfe950ff0479f1f00eda09c18798d4f49b98f4e2006d644b3301682ebdca", + "sha256:c3391bd8e6de35f6f1140e50aaeb3e2b3d6a9012536ca23ab0d9c35ec18c8a91", + "sha256:c880eba5175f4307129784eca96f4e70b88e57aa3f680aeba3bab0e980b0f37d", + "sha256:cecfefa17042941f94ab54f769c8ce0fe14beff2694e9ac684176a2535bf9714", + "sha256:e40211b4923ba5a6dc9769eab704bdb3fbb58d56c5b336d30996c24fcf12aadb", + "sha256:efc8ad4e6fc4f1752ebfb58aefece8b4e3c4cae940b0994d43649bdfce8d0d4f" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==41.0.4" + }, + "docopt": { + "hashes": [ + "sha256:49b3a825280bd66b3aa83585ef59c4a8c82f2c8a522dbe754a8bc8d08c85c491" + ], + "index": "pypi", + "version": "==0.6.2" + }, + 
"exceptiongroup": { + "hashes": [ + "sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9", + "sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3" + ], + "markers": "python_version < '3.11'", + "version": "==1.1.3" + }, + "execnet": { + "hashes": [ + "sha256:88256416ae766bc9e8895c76a87928c0012183da3cc4fc18016e6f050e025f41", + "sha256:cc59bc4423742fd71ad227122eb0dd44db51efb3dc4095b45ac9a08c770096af" + ], + "markers": "python_version >= '3.7'", + "version": "==2.0.2" + }, + "filelock": { + "hashes": [ + "sha256:08c21d87ded6e2b9da6728c3dff51baf1dcecf973b768ef35bcbc3447edb9ad4", + "sha256:2e6f249f1f3654291606e046b09f1fd5eac39b360664c27f5aad072012f8bcbd" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==3.12.4" + }, + "flask": { + "hashes": [ + "sha256:59da8a3170004800a2837844bfa84d49b022550616070f7cb1a659682b2e7c9f", + "sha256:e1120c228ca2f553b470df4a5fa927ab66258467526069981b3eb0a91902687d" + ], + "markers": "python_version >= '3.6'", + "version": "==2.0.3" + }, + "h11": { + "hashes": [ + "sha256:36a3cb8c0a032f56e2da7084577878a035d3b61d104230d4bd49c0c6b555a9c6", + "sha256:47222cb6067e4a307d535814917cd98fd0a57b6788ce715755fa2b6c28b56042" + ], + "markers": "python_version >= '3.6'", + "version": "==0.12.0" + }, + "h2": { + "hashes": [ + "sha256:03a46bcf682256c95b5fd9e9a99c1323584c3eec6440d379b9903d709476bc6d", + "sha256:a83aca08fbe7aacb79fec788c9c0bac936343560ed9ec18b82a13a12c28d2abb" + ], + "markers": "python_full_version >= '3.6.1'", + "version": "==4.1.0" + }, + "hpack": { + "hashes": [ + "sha256:84a076fad3dc9a9f8063ccb8041ef100867b1878b25ef0ee63847a5d53818a6c", + "sha256:fc41de0c63e687ebffde81187a948221294896f6bdc0ae2312708df339430095" + ], + "markers": "python_full_version >= '3.6.1'", + "version": "==4.0.0" + }, + "hyperframe": { + "hashes": [ + "sha256:0ec6bafd80d8ad2195c4f03aacba3a8265e57bc4cff261e802bf39970ed02a15", + "sha256:ae510046231dc8e9ecb1a6586f63d2347bf4c8905914aa84ba585ae85f28a914" + ], + "markers": "python_full_version >= '3.6.1'", + "version": "==6.0.1" + }, + "iniconfig": { + "hashes": [ + "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", + "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374" + ], + "markers": "python_version >= '3.7'", + "version": "==2.0.0" + }, + "itsdangerous": { + "hashes": [ + "sha256:2c2349112351b88699d8d4b6b075022c0808887cb7ad10069318a8b0bc88db44", + "sha256:5dbbc68b317e5e42f327f9021763545dc3fc3bfe22e6deb96aaf1fc38874156a" + ], + "markers": "python_version >= '3.7'", + "version": "==2.1.2" + }, + "jinja2": { + "hashes": [ + "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852", + "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61" + ], + "markers": "python_version >= '3.7'", + "version": "==3.1.2" + }, + "kaitaistruct": { + "hashes": [ + "sha256:3d5845817ec8a4d5504379cc11bd570b038850ee49c4580bc0998c8fb1d327ad" + ], + "version": "==0.9" + }, + "ldap3": { + "hashes": [ + "sha256:2bc966556fc4d4fa9f445a1c31dc484ee81d44a51ab0e2d0fd05b62cac75daa6", + "sha256:5630d1383e09ba94839e253e013f1aa1a2cf7a547628ba1265cb7b9a844b5687", + "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70", + "sha256:5ab7febc00689181375de40c396dcad4f2659cd260fc5e94c508b6d77c17e9d5", + "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f" + ], + "version": "==2.9.1" + }, + "markupsafe": { + "hashes": [ + "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e", + 
"sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e", + "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431", + "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686", + "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c", + "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559", + "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc", + "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb", + "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939", + "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c", + "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0", + "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4", + "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9", + "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575", + "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba", + "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d", + "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd", + "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3", + "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00", + "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155", + "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac", + "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52", + "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f", + "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8", + "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b", + "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007", + "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24", + "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea", + "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198", + "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0", + "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee", + "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be", + "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2", + "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1", + "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707", + "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6", + "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c", + "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58", + "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823", + "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779", + "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636", + "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c", + "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad", + "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee", + "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc", + "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2", + "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48", + 
"sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7", + "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e", + "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b", + "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa", + "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5", + "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e", + "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb", + "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9", + "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57", + "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc", + "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc", + "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2", + "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11" + ], + "markers": "python_version >= '3.7'", + "version": "==2.1.3" + }, + "mitmproxy": { + "editable": true, + "git": "https://github.com/citusdata/mitmproxy.git", + "markers": "python_version >= '3.9'", + "ref": "2fd18ef051b987925a36337ab1d61aa674353b44" + }, + "msgpack": { + "hashes": [ + "sha256:04ad6069c86e531682f9e1e71b71c1c3937d6014a7c3e9edd2aa81ad58842862", + "sha256:0bfdd914e55e0d2c9e1526de210f6fe8ffe9705f2b1dfcc4aecc92a4cb4b533d", + "sha256:1dc93e8e4653bdb5910aed79f11e165c85732067614f180f70534f056da97db3", + "sha256:1e2d69948e4132813b8d1131f29f9101bc2c915f26089a6d632001a5c1349672", + "sha256:235a31ec7db685f5c82233bddf9858748b89b8119bf4538d514536c485c15fe0", + "sha256:27dcd6f46a21c18fa5e5deed92a43d4554e3df8d8ca5a47bf0615d6a5f39dbc9", + "sha256:28efb066cde83c479dfe5a48141a53bc7e5f13f785b92ddde336c716663039ee", + "sha256:3476fae43db72bd11f29a5147ae2f3cb22e2f1a91d575ef130d2bf49afd21c46", + "sha256:36e17c4592231a7dbd2ed09027823ab295d2791b3b1efb2aee874b10548b7524", + "sha256:384d779f0d6f1b110eae74cb0659d9aa6ff35aaf547b3955abf2ab4c901c4819", + "sha256:38949d30b11ae5f95c3c91917ee7a6b239f5ec276f271f28638dec9156f82cfc", + "sha256:3967e4ad1aa9da62fd53e346ed17d7b2e922cba5ab93bdd46febcac39be636fc", + "sha256:3e7bf4442b310ff154b7bb9d81eb2c016b7d597e364f97d72b1acc3817a0fdc1", + "sha256:3f0c8c6dfa6605ab8ff0611995ee30d4f9fcff89966cf562733b4008a3d60d82", + "sha256:484ae3240666ad34cfa31eea7b8c6cd2f1fdaae21d73ce2974211df099a95d81", + "sha256:4a7b4f35de6a304b5533c238bee86b670b75b03d31b7797929caa7a624b5dda6", + "sha256:4cb14ce54d9b857be9591ac364cb08dc2d6a5c4318c1182cb1d02274029d590d", + "sha256:4e71bc4416de195d6e9b4ee93ad3f2f6b2ce11d042b4d7a7ee00bbe0358bd0c2", + "sha256:52700dc63a4676669b341ba33520f4d6e43d3ca58d422e22ba66d1736b0a6e4c", + "sha256:572efc93db7a4d27e404501975ca6d2d9775705c2d922390d878fcf768d92c87", + "sha256:576eb384292b139821c41995523654ad82d1916da6a60cff129c715a6223ea84", + "sha256:5b0bf0effb196ed76b7ad883848143427a73c355ae8e569fa538365064188b8e", + "sha256:5b6ccc0c85916998d788b295765ea0e9cb9aac7e4a8ed71d12e7d8ac31c23c95", + "sha256:5ed82f5a7af3697b1c4786053736f24a0efd0a1b8a130d4c7bfee4b9ded0f08f", + "sha256:6d4c80667de2e36970ebf74f42d1088cc9ee7ef5f4e8c35eee1b40eafd33ca5b", + "sha256:730076207cb816138cf1af7f7237b208340a2c5e749707457d70705715c93b93", + "sha256:7687e22a31e976a0e7fc99c2f4d11ca45eff652a81eb8c8085e9609298916dcf", + "sha256:822ea70dc4018c7e6223f13affd1c5c30c0f5c12ac1f96cd8e9949acddb48a61", + "sha256:84b0daf226913133f899ea9b30618722d45feffa67e4fe867b0b5ae83a34060c", + 
"sha256:85765fdf4b27eb5086f05ac0491090fc76f4f2b28e09d9350c31aac25a5aaff8", + "sha256:8dd178c4c80706546702c59529ffc005681bd6dc2ea234c450661b205445a34d", + "sha256:8f5b234f567cf76ee489502ceb7165c2a5cecec081db2b37e35332b537f8157c", + "sha256:98bbd754a422a0b123c66a4c341de0474cad4a5c10c164ceed6ea090f3563db4", + "sha256:993584fc821c58d5993521bfdcd31a4adf025c7d745bbd4d12ccfecf695af5ba", + "sha256:a40821a89dc373d6427e2b44b572efc36a2778d3f543299e2f24eb1a5de65415", + "sha256:b291f0ee7961a597cbbcc77709374087fa2a9afe7bdb6a40dbbd9b127e79afee", + "sha256:b573a43ef7c368ba4ea06050a957c2a7550f729c31f11dd616d2ac4aba99888d", + "sha256:b610ff0f24e9f11c9ae653c67ff8cc03c075131401b3e5ef4b82570d1728f8a9", + "sha256:bdf38ba2d393c7911ae989c3bbba510ebbcdf4ecbdbfec36272abe350c454075", + "sha256:bfef2bb6ef068827bbd021017a107194956918ab43ce4d6dc945ffa13efbc25f", + "sha256:cab3db8bab4b7e635c1c97270d7a4b2a90c070b33cbc00c99ef3f9be03d3e1f7", + "sha256:cb70766519500281815dfd7a87d3a178acf7ce95390544b8c90587d76b227681", + "sha256:cca1b62fe70d761a282496b96a5e51c44c213e410a964bdffe0928e611368329", + "sha256:ccf9a39706b604d884d2cb1e27fe973bc55f2890c52f38df742bc1d79ab9f5e1", + "sha256:dc43f1ec66eb8440567186ae2f8c447d91e0372d793dfe8c222aec857b81a8cf", + "sha256:dd632777ff3beaaf629f1ab4396caf7ba0bdd075d948a69460d13d44357aca4c", + "sha256:e45ae4927759289c30ccba8d9fdce62bb414977ba158286b5ddaf8df2cddb5c5", + "sha256:e50ebce52f41370707f1e21a59514e3375e3edd6e1832f5e5235237db933c98b", + "sha256:ebbbba226f0a108a7366bf4b59bf0f30a12fd5e75100c630267d94d7f0ad20e5", + "sha256:ec79ff6159dffcc30853b2ad612ed572af86c92b5168aa3fc01a67b0fa40665e", + "sha256:f0936e08e0003f66bfd97e74ee530427707297b0d0361247e9b4f59ab78ddc8b", + "sha256:f26a07a6e877c76a88e3cecac8531908d980d3d5067ff69213653649ec0f60ad", + "sha256:f64e376cd20d3f030190e8c32e1c64582eba56ac6dc7d5b0b49a9d44021b52fd", + "sha256:f6ffbc252eb0d229aeb2f9ad051200668fc3a9aaa8994e49f0cb2ffe2b7867e7", + "sha256:f9a7c509542db4eceed3dcf21ee5267ab565a83555c9b88a8109dcecc4709002", + "sha256:ff1d0899f104f3921d94579a5638847f783c9b04f2d5f229392ca77fba5b82fc" + ], + "markers": "python_version >= '3.8'", + "version": "==1.0.7" + }, + "packaging": { + "hashes": [ + "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5", + "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7" + ], + "markers": "python_version >= '3.7'", + "version": "==23.2" + }, + "passlib": { + "hashes": [ + "sha256:aa6bca462b8d8bda89c70b382f0c298a20b5560af6cbfa2dce410c0a2fb669f1", + "sha256:defd50f72b65c5402ab2c573830a6978e5f202ad0d984793c8dde2c4152ebe04" + ], + "version": "==1.7.4" + }, + "pluggy": { + "hashes": [ + "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12", + "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7" + ], + "markers": "python_version >= '3.8'", + "version": "==1.3.0" + }, + "protobuf": { + "hashes": [ + "sha256:0c44e01f74109decea196b5b313b08edb5316df77313995594a6981e95674259", + "sha256:15cdecb0d192ab5f17cdc21a9c0ae7b5c6c4451e42c8a888a4f3344c190e369c", + "sha256:196a153e487c0e20d62259872bbf2e1c4fa18e2ce97e20984fcbf9d8b151058d", + "sha256:3149c373e9b7ce296bb24d42a3eb677d620185b5dff2c390b2cf57baf79afdc1", + "sha256:370a6b885e94adda021d4cbe43accdfbf6a02af651a0be337a28906a3fa77f3d", + "sha256:474247630834f93214fafce49d2ee6ff4c036c8c5382b88432b7eae6f08f131b", + "sha256:6380aae2683d0d1b41199e591c8ba06f867e8a778d44309af87073c1b34a9f3a", + "sha256:6741d7d1cfcbdd6cf610f38b7976cf8c0b41022203555298925e4061b6616608", + 
"sha256:700787cb56b4cb7b8ed5f7d197b9d8f30080f257f3c7431eec1fdd8060660929", + "sha256:8117b52c2531e4033f7d02b9be5a78564da41a8b02c255e1b731ad4bd75e7dc0", + "sha256:850da2072d98c6e576b7eb29734cdde6fd9f5d157e43d7818d79f4b373ef5d51", + "sha256:85d1fb5ff1d638a0045bbe4f01a8f287023aa4f2b29011445b1be0edc74a2103", + "sha256:93bca9aaeee8008e15696c2a6b5e56b992da03f9d237ff54310e397d635f8305", + "sha256:98d414513ec44bb3ba77ebdeffcbbe6ebbf3630c767d37a285890c2414fdd4e2", + "sha256:a7f91a4e5bf3cc58b2830c9cb01b04ac5e211c288048e9296cd407ec0455fb89", + "sha256:abbcb8ecd19cfb729b9b71f9a453e37c0c1c017be4bff47804ff25150685386d", + "sha256:b03966ca4d1aa7850f5bf0d841c22a8eeb6ce091f77e585ffeb8b95a6b0a96c4", + "sha256:cde2a73b03049b904dbc5d0f500b97e11abb4109dbe2940e6a1595e2eef4e8a9", + "sha256:d52a687e2c74c40f45abd6906f833d4e40f0f8cfa4226a80e4695fedafe6c57e", + "sha256:e68ad00695547d9397dd14abd3efba23cb31cef67228f4512d41396971889812", + "sha256:e9bffd52d6ee039a1cafb72475b2900c6fd0f0dca667fb7a09af0a3e119e78cb" + ], + "markers": "python_version >= '3.5'", + "version": "==3.18.3" + }, + "psycopg": { + "hashes": [ + "sha256:7542c45810ea16356e5126c9b4291cbc3802aa326fcbba09ff154fe380de29be", + "sha256:cd711edb64b07d7f8a233c365806caf7e55bbe7cbbd8d5c680f672bb5353c8d5" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==3.1.11" + }, + "publicsuffix2": { + "hashes": [ + "sha256:00f8cc31aa8d0d5592a5ced19cccba7de428ebca985db26ac852d920ddd6fe7b", + "sha256:786b5e36205b88758bd3518725ec8cfe7a8173f5269354641f581c6b80a99893" + ], + "version": "==2.20191221" + }, + "pyasn1": { + "hashes": [ + "sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57", + "sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde" + ], + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'", + "version": "==0.5.0" + }, + "pycparser": { + "hashes": [ + "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9", + "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206" + ], + "version": "==2.21" + }, + "pyopenssl": { + "hashes": [ + "sha256:24f0dc5227396b3e831f4c7f602b950a5e9833d292c8e4a2e06b709292806ae2", + "sha256:276f931f55a452e7dea69c7173e984eb2a4407ce413c918aa34b55f82f9b8bac" + ], + "markers": "python_version >= '3.6'", + "version": "==23.2.0" + }, + "pyparsing": { + "hashes": [ + "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", + "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" + ], + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", + "version": "==2.4.7" + }, + "pyperclip": { + "hashes": [ + "sha256:105254a8b04934f0bc84e9c24eb360a591aaf6535c9def5f29d92af107a9bf57" + ], + "version": "==1.8.2" + }, + "pytest": { + "hashes": [ + "sha256:1d881c6124e08ff0a1bb75ba3ec0bfd8b5354a01c194ddd5a0a870a48d99b002", + "sha256:a766259cfab564a2ad52cb1aae1b881a75c3eb7e34ca3779697c23ed47c47069" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==7.4.2" + }, + "pytest-asyncio": { + "hashes": [ + "sha256:40a7eae6dded22c7b604986855ea48400ab15b069ae38116e8c01238e9eeb64d", + "sha256:8666c1c8ac02631d7c51ba282e0c69a8a452b211ffedf2599099845da5c5c37b" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==0.21.1" + }, + "pytest-repeat": { + "hashes": [ + "sha256:4474a7d9e9137f6d8cc8ae297f8c4168d33c56dd740aa78cfffe562557e6b96e", + "sha256:5cd3289745ab3156d43eb9c8e7f7d00a926f3ae5c9cf425bec649b2fe15bad5b" + ], + 
"index": "pypi", + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", + "version": "==0.9.1" + }, + "pytest-timeout": { + "hashes": [ + "sha256:c07ca07404c612f8abbe22294b23c368e2e5104b521c1790195561f37e1ac3d9", + "sha256:f6f50101443ce70ad325ceb4473c4255e9d74e3c7cd0ef827309dfa4c0d975c6" + ], + "index": "pypi", + "markers": "python_version >= '3.6'", + "version": "==2.1.0" + }, + "pytest-xdist": { + "hashes": [ + "sha256:d5ee0520eb1b7bcca50a60a518ab7a7707992812c578198f8b44fdfac78e8c93", + "sha256:ff9daa7793569e6a68544850fd3927cd257cc03a7ef76c95e86915355e82b5f2" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==3.3.1" + }, + "pyyaml": { + "hashes": [ + "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5", + "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc", + "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df", + "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741", + "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206", + "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27", + "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595", + "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62", + "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98", + "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696", + "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290", + "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9", + "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d", + "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6", + "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867", + "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47", + "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486", + "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6", + "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3", + "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007", + "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938", + "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0", + "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c", + "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735", + "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d", + "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28", + "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4", + "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba", + "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8", + "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5", + "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd", + "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3", + "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0", + "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515", + "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c", + "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c", + 
"sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924", + "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34", + "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43", + "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859", + "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673", + "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54", + "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a", + "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b", + "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab", + "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa", + "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c", + "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585", + "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d", + "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f" + ], + "index": "pypi", + "markers": "python_version >= '3.6'", + "version": "==6.0.1" + }, + "ruamel.yaml": { + "hashes": [ + "sha256:1a771fc92d3823682b7f0893ad56cb5a5c87c48e62b5399d6f42c8759a583b33", + "sha256:ea21da1198c4b41b8e7a259301cc9710d3b972bf8ba52f06218478e6802dd1f1" + ], + "markers": "python_version >= '3'", + "version": "==0.17.16" + }, + "ruamel.yaml.clib": { + "hashes": [ + "sha256:024cfe1fc7c7f4e1aff4a81e718109e13409767e4f871443cbff3dba3578203d", + "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001", + "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462", + "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9", + "sha256:1758ce7d8e1a29d23de54a16ae867abd370f01b5a69e1a3ba75223eaa3ca1a1b", + "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b", + "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615", + "sha256:25ac8c08322002b06fa1d49d1646181f0b2c72f5cbc15a85e80b4c30a544bb15", + "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b", + "sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9", + "sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675", + "sha256:3fcc54cb0c8b811ff66082de1680b4b14cf8a81dce0d4fbf665c2265a81e07a1", + "sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899", + "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7", + "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7", + "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312", + "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa", + "sha256:665f58bfd29b167039f714c6998178d27ccd83984084c286110ef26b230f259f", + "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91", + "sha256:7048c338b6c86627afb27faecf418768acb6331fc24cfa56c93e8c9780f815fa", + "sha256:75e1ed13e1f9de23c5607fe6bd1aeaae21e523b32d83bb33918245361e9cc51b", + "sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3", + "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334", + "sha256:84b554931e932c46f94ab306913ad7e11bba988104c5cff26d90d03f68258cd5", + "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3", + "sha256:955eae71ac26c1ab35924203fda6220f84dce57d6d7884f189743e2abe3a9fbe", + "sha256:9eb5dee2772b0f704ca2e45b1713e4e5198c18f515b52743576d196348f374d3", + 
"sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed", + "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337", + "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880", + "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d", + "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248", + "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d", + "sha256:b5edda50e5e9e15e54a6a8a0070302b00c518a9d32accc2346ad6c984aacd279", + "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf", + "sha256:beb2e0404003de9a4cab9753a8805a8fe9320ee6673136ed7f04255fe60bb512", + "sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069", + "sha256:c2a72e9109ea74e511e29032f3b670835f8a59bbdc9ce692c5b4ed91ccf1eedb", + "sha256:c58ecd827313af6864893e7af0a3bb85fd529f862b6adbefe14643947cfe2942", + "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d", + "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31", + "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92", + "sha256:d92f81886165cb14d7b067ef37e142256f1c6a90a65cd156b063a43da1708cfd", + "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5", + "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28", + "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d", + "sha256:ebc06178e8821efc9692ea7544aa5644217358490145629914d8020042c24aa1", + "sha256:edaef1c1200c4b4cb914583150dcaa3bc30e592e907c01117c08b13a07255ec2", + "sha256:f481f16baec5290e45aebdc2a5168ebc6d35189ae6fea7a58787613a25f6e875", + "sha256:fff3573c2db359f091e1589c3d7c5fc2f86f5bdb6f24252c2d8e539d4e45f412" + ], + "markers": "python_version < '3.10' and platform_python_implementation == 'CPython'", + "version": "==0.2.8" + }, + "sortedcontainers": { + "hashes": [ + "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88", + "sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0" + ], + "version": "==2.4.0" + }, + "tomli": { + "hashes": [ + "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc", + "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f" + ], + "markers": "python_version < '3.11'", + "version": "==2.0.1" + }, + "tornado": { + "hashes": [ + "sha256:1bd19ca6c16882e4d37368e0152f99c099bad93e0950ce55e71daed74045908f", + "sha256:22d3c2fa10b5793da13c807e6fc38ff49a4f6e1e3868b0a6f4164768bb8e20f5", + "sha256:502fba735c84450974fec147340016ad928d29f1e91f49be168c0a4c18181e1d", + "sha256:65ceca9500383fbdf33a98c0087cb975b2ef3bfb874cb35b8de8740cf7f41bd3", + "sha256:71a8db65160a3c55d61839b7302a9a400074c9c753040455494e2af74e2501f2", + "sha256:7ac51f42808cca9b3613f51ffe2a965c8525cb1b00b7b2d56828b8045354f76a", + "sha256:7d01abc57ea0dbb51ddfed477dfe22719d376119844e33c661d873bf9c0e4a16", + "sha256:805d507b1f588320c26f7f097108eb4023bbaa984d63176d1652e184ba24270a", + "sha256:9dc4444c0defcd3929d5c1eb5706cbe1b116e762ff3e0deca8b715d14bf6ec17", + "sha256:ceb917a50cd35882b57600709dd5421a418c29ddc852da8bcdab1f0db33406b0", + "sha256:e7d8db41c0181c80d76c982aacc442c0783a2c54d6400fe028954201a2e032fe" + ], + "markers": "python_version >= '3.8'", + "version": "==6.3.3" + }, + "typing-extensions": { + "hashes": [ + "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0", + "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef" + ], + "markers": "python_version >= '3.8'", + 
"version": "==4.8.0" + }, + "urwid": { + "hashes": [ + "sha256:588bee9c1cb208d0906a9f73c613d2bd32c3ed3702012f51efe318a3f2127eae" + ], + "version": "==2.1.2" + }, + "werkzeug": { + "hashes": [ + "sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc", + "sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==3.0.1" + }, + "wsproto": { + "hashes": [ + "sha256:868776f8456997ad0d9720f7322b746bbe9193751b5b290b7f924659377c8c38", + "sha256:d8345d1808dd599b5ffb352c25a367adb6157e664e140dbecba3f9bc007edb9f" + ], + "markers": "python_full_version >= '3.6.1'", + "version": "==1.0.0" + }, + "zstandard": { + "hashes": [ + "sha256:1c5ef399f81204fbd9f0df3debf80389fd8aa9660fe1746d37c80b0d45f809e9", + "sha256:1faefe33e3d6870a4dce637bcb41f7abb46a1872a595ecc7b034016081c37543", + "sha256:1fb23b1754ce834a3a1a1e148cc2faad76eeadf9d889efe5e8199d3fb839d3c6", + "sha256:22f127ff5da052ffba73af146d7d61db874f5edb468b36c9cb0b857316a21b3d", + "sha256:2353b61f249a5fc243aae3caa1207c80c7e6919a58b1f9992758fa496f61f839", + "sha256:24cdcc6f297f7c978a40fb7706877ad33d8e28acc1786992a52199502d6da2a4", + "sha256:31e35790434da54c106f05fa93ab4d0fab2798a6350e8a73928ec602e8505836", + "sha256:3547ff4eee7175d944a865bbdf5529b0969c253e8a148c287f0668fe4eb9c935", + "sha256:378ac053c0cfc74d115cbb6ee181540f3e793c7cca8ed8cd3893e338af9e942c", + "sha256:3e1cd2db25117c5b7c7e86a17cde6104a93719a9df7cb099d7498e4c1d13ee5c", + "sha256:3fe469a887f6142cc108e44c7f42c036e43620ebaf500747be2317c9f4615d4f", + "sha256:4800ab8ec94cbf1ed09c2b4686288750cab0642cb4d6fba2a56db66b923aeb92", + "sha256:52de08355fd5cfb3ef4533891092bb96229d43c2069703d4aff04fdbedf9c92f", + "sha256:5752f44795b943c99be367fee5edf3122a1690b0d1ecd1bd5ec94c7fd2c39c94", + "sha256:5d53f02aeb8fdd48b88bc80bece82542d084fb1a7ba03bf241fd53b63aee4f22", + "sha256:69b7a5720b8dfab9005a43c7ddb2e3ccacbb9a2442908ae4ed49dd51ab19698a", + "sha256:6cc162b5b6e3c40b223163a9ea86cd332bd352ddadb5fd142fc0706e5e4eaaff", + "sha256:6f5d0330bc992b1e267a1b69fbdbb5ebe8c3a6af107d67e14c7a5b1ede2c5945", + "sha256:6ffadd48e6fe85f27ca3ca10cfd3ef3d0f933bef7316870285ffeb58d791ca9c", + "sha256:72a011678c654df8323aa7b687e3147749034fdbe994d346f139ab9702b59cea", + "sha256:77d26452676f471223571efd73131fd4a626622c7960458aab2763e025836fc5", + "sha256:7a88cc773ffe55992ff7259a8df5fb3570168d7138c69aadba40142d0e5ce39a", + "sha256:7b16bd74ae7bfbaca407a127e11058b287a4267caad13bd41305a5e630472549", + "sha256:855d95ec78b6f0ff66e076d5461bf12d09d8e8f7e2b3fc9de7236d1464fd730e", + "sha256:8baf7991547441458325ca8fafeae79ef1501cb4354022724f3edd62279c5b2b", + "sha256:8fb77dd152054c6685639d855693579a92f276b38b8003be5942de31d241ebfb", + "sha256:92d49cc3b49372cfea2d42f43a2c16a98a32a6bc2f42abcde121132dbfc2f023", + "sha256:94d0de65e37f5677165725f1fc7fb1616b9542d42a9832a9a0bdcba0ed68b63b", + "sha256:9867206093d7283d7de01bd2bf60389eb4d19b67306a0a763d1a8a4dbe2fb7c3", + "sha256:9ee3c992b93e26c2ae827404a626138588e30bdabaaf7aa3aa25082a4e718790", + "sha256:a4f8af277bb527fa3d56b216bda4da931b36b2d3fe416b6fc1744072b2c1dbd9", + "sha256:ab9f19460dfa4c5dd25431b75bee28b5f018bf43476858d64b1aa1046196a2a0", + "sha256:ac43c1821ba81e9344d818c5feed574a17f51fca27976ff7d022645c378fbbf5", + "sha256:af5a011609206e390b44847da32463437505bf55fd8985e7a91c52d9da338d4b", + "sha256:b0975748bb6ec55b6d0f6665313c2cf7af6f536221dccd5879b967d76f6e7899", + "sha256:b4963dad6cf28bfe0b61c3265d1c74a26a7605df3445bfcd3ba25de012330b2d", + 
"sha256:b7d3a484ace91ed827aa2ef3b44895e2ec106031012f14d28bd11a55f24fa734", + "sha256:bd3c478a4a574f412efc58ba7e09ab4cd83484c545746a01601636e87e3dbf23", + "sha256:c9e2dcb7f851f020232b991c226c5678dc07090256e929e45a89538d82f71d2e", + "sha256:d25c8eeb4720da41e7afbc404891e3a945b8bb6d5230e4c53d23ac4f4f9fc52c", + "sha256:dc8c03d0c5c10c200441ffb4cce46d869d9e5c4ef007f55856751dc288a2dffd", + "sha256:ec58e84d625553d191a23d5988a19c3ebfed519fff2a8b844223e3f074152163", + "sha256:eda0719b29792f0fea04a853377cfff934660cb6cd72a0a0eeba7a1f0df4a16e", + "sha256:edde82ce3007a64e8434ccaf1b53271da4f255224d77b880b59e7d6d73df90c8", + "sha256:f36722144bc0a5068934e51dca5a38a5b4daac1be84f4423244277e4baf24e7a", + "sha256:f8bb00ced04a8feff05989996db47906673ed45b11d86ad5ce892b5741e5f9dd", + "sha256:f98fc5750aac2d63d482909184aac72a979bfd123b112ec53fd365104ea15b1c", + "sha256:ff5b75f94101beaa373f1511319580a010f6e03458ee51b1a386d7de5331440a" + ], + "markers": "python_version >= '3.5'", + "version": "==0.15.2" + } + }, + "develop": { + "attrs": { + "hashes": [ + "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04", + "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015" + ], + "markers": "python_version >= '3.7'", + "version": "==23.1.0" + }, + "black": { + "hashes": [ + "sha256:031e8c69f3d3b09e1aa471a926a1eeb0b9071f80b17689a655f7885ac9325a6f", + "sha256:13a2e4a93bb8ca74a749b6974925c27219bb3df4d42fc45e948a5d9feb5122b7", + "sha256:13ef033794029b85dfea8032c9d3b92b42b526f1ff4bf13b2182ce4e917f5100", + "sha256:14f04c990259576acd093871e7e9b14918eb28f1866f91968ff5524293f9c573", + "sha256:24b6b3ff5c6d9ea08a8888f6977eae858e1f340d7260cf56d70a49823236b62d", + "sha256:403397c033adbc45c2bd41747da1f7fc7eaa44efbee256b53842470d4ac5a70f", + "sha256:50254ebfa56aa46a9fdd5d651f9637485068a1adf42270148cd101cdf56e0ad9", + "sha256:538efb451cd50f43aba394e9ec7ad55a37598faae3348d723b59ea8e91616300", + "sha256:638619a559280de0c2aa4d76f504891c9860bb8fa214267358f0a20f27c12948", + "sha256:6a3b50e4b93f43b34a9d3ef00d9b6728b4a722c997c99ab09102fd5efdb88325", + "sha256:6ccd59584cc834b6d127628713e4b6b968e5f79572da66284532525a042549f9", + "sha256:75a2dc41b183d4872d3a500d2b9c9016e67ed95738a3624f4751a0cb4818fe71", + "sha256:7d30ec46de88091e4316b17ae58bbbfc12b2de05e069030f6b747dfc649ad186", + "sha256:8431445bf62d2a914b541da7ab3e2b4f3bc052d2ccbf157ebad18ea126efb91f", + "sha256:8fc1ddcf83f996247505db6b715294eba56ea9372e107fd54963c7553f2b6dfe", + "sha256:a732b82747235e0542c03bf352c126052c0fbc458d8a239a94701175b17d4855", + "sha256:adc3e4442eef57f99b5590b245a328aad19c99552e0bdc7f0b04db6656debd80", + "sha256:c46767e8df1b7beefb0899c4a95fb43058fa8500b6db144f4ff3ca38eb2f6393", + "sha256:c619f063c2d68f19b2d7270f4cf3192cb81c9ec5bc5ba02df91471d0b88c4c5c", + "sha256:cf3a4d00e4cdb6734b64bf23cd4341421e8953615cba6b3670453737a72ec204", + "sha256:cf99f3de8b3273a8317681d8194ea222f10e0133a24a7548c73ce44ea1679377", + "sha256:d6bc09188020c9ac2555a498949401ab35bb6bf76d4e0f8ee251694664df6301" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==23.9.1" + }, + "click": { + "hashes": [ + "sha256:6a7a62563bbfabfda3a38f3023a1db4a35978c0abd76f6c9605ecd6554d6d9b1", + "sha256:8458d7b1287c5fb128c90e23381cf99dcde74beaf6c7ff6384ce84d6fe090adb" + ], + "markers": "python_version >= '3.6'", + "version": "==8.0.4" + }, + "flake8": { + "hashes": [ + "sha256:d5b3857f07c030bdb5bf41c7f53799571d75c4491748a3adcd47de929e34cd23", + "sha256:ffdfce58ea94c6580c77888a86506937f9a1a227dfcd15f245d694ae20a6b6e5" + ], + "index": "pypi", + "markers": 
"python_full_version >= '3.8.1'", + "version": "==6.1.0" + }, + "flake8-bugbear": { + "hashes": [ + "sha256:90cf04b19ca02a682feb5aac67cae8de742af70538590509941ab10ae8351f71", + "sha256:b182cf96ea8f7a8595b2f87321d7d9b28728f4d9c3318012d896543d19742cb5" + ], + "index": "pypi", + "markers": "python_full_version >= '3.8.1'", + "version": "==23.9.16" + }, + "isort": { + "hashes": [ + "sha256:8bef7dde241278824a6d83f44a544709b065191b95b6e50894bdc722fcba0504", + "sha256:f84c2818376e66cf843d497486ea8fed8700b340f308f076c6fb1229dff318b6" + ], + "index": "pypi", + "markers": "python_full_version >= '3.8.0'", + "version": "==5.12.0" + }, + "mccabe": { + "hashes": [ + "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325", + "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e" + ], + "markers": "python_version >= '3.6'", + "version": "==0.7.0" + }, + "mypy-extensions": { + "hashes": [ + "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d", + "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782" + ], + "markers": "python_version >= '3.5'", + "version": "==1.0.0" + }, + "packaging": { + "hashes": [ + "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5", + "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7" + ], + "markers": "python_version >= '3.7'", + "version": "==23.2" + }, + "pathspec": { + "hashes": [ + "sha256:1d6ed233af05e679efb96b1851550ea95bbb64b7c490b0f5aa52996c11e92a20", + "sha256:e0d8d0ac2f12da61956eb2306b69f9469b42f4deb0f3cb6ed47b9cce9996ced3" + ], + "markers": "python_version >= '3.7'", + "version": "==0.11.2" + }, + "platformdirs": { + "hashes": [ + "sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3", + "sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e" + ], + "markers": "python_version >= '3.7'", + "version": "==3.11.0" + }, + "pycodestyle": { + "hashes": [ + "sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f", + "sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67" + ], + "markers": "python_version >= '3.8'", + "version": "==2.11.1" + }, + "pyflakes": { + "hashes": [ + "sha256:4132f6d49cb4dae6819e5379898f2b8cce3c5f23994194c24b77d5da2e36f774", + "sha256:a0aae034c444db0071aa077972ba4768d40c830d9539fd45bf4cd3f8f6992efc" + ], + "markers": "python_version >= '3.8'", + "version": "==3.1.0" + }, + "tomli": { + "hashes": [ + "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc", + "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f" + ], + "markers": "python_version < '3.11'", + "version": "==2.0.1" + }, + "typing-extensions": { + "hashes": [ + "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0", + "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef" + ], + "markers": "python_version >= '3.8'", + "version": "==4.8.0" + } + } +} diff --git a/.github/actions/parallelization/action.yml b/.github/actions/parallelization/action.yml new file mode 100644 index 00000000000..1f7d002022d --- /dev/null +++ b/.github/actions/parallelization/action.yml @@ -0,0 +1,23 @@ +name: 'Parallelization matrix' +inputs: + count: + required: false + default: 32 +outputs: + json: + value: ${{ steps.generate_matrix.outputs.json }} +runs: + using: "composite" + steps: + - name: Generate parallelization matrix + id: generate_matrix + shell: bash + run: |- + json_array="{\"include\": [" + for ((i = 1; i <= ${{ inputs.count }}; i++)); do 
+ json_array+="{\"id\":\"$i\"}," + done + json_array=${json_array%,} + json_array+=" ]}" + echo "json=$json_array" >> "$GITHUB_OUTPUT" + echo "json=$json_array" diff --git a/.github/actions/save_logs_and_results/action.yml b/.github/actions/save_logs_and_results/action.yml new file mode 100644 index 00000000000..0f238835d19 --- /dev/null +++ b/.github/actions/save_logs_and_results/action.yml @@ -0,0 +1,38 @@ +name: save_logs_and_results +inputs: + folder: + required: false + default: "log" +runs: + using: composite + steps: + - uses: actions/upload-artifact@v3.1.1 + name: Upload logs + with: + name: ${{ inputs.folder }} + if-no-files-found: ignore + path: | + src/test/**/proxy.output + src/test/**/results/ + src/test/**/tmp_check/master/log + src/test/**/tmp_check/worker.57638/log + src/test/**/tmp_check/worker.57637/log + src/test/**/*.diffs + src/test/**/out/ddls.sql + src/test/**/out/queries.sql + src/test/**/logfile_* + /tmp/pg_upgrade_newData_logs + - name: Publish regression.diffs + run: |- + diffs="$(find src/test/regress -name "*.diffs" -exec cat {} \;)" + if ! [ -z "$diffs" ]; then + echo '```diff' >> $GITHUB_STEP_SUMMARY + echo -E "$diffs" >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + echo -E $diffs + fi + shell: bash + - name: Print stack traces + run: "./ci/print_stack_trace.sh" + if: failure() + shell: bash diff --git a/.github/actions/setup_extension/action.yml b/.github/actions/setup_extension/action.yml new file mode 100644 index 00000000000..96b408e7e43 --- /dev/null +++ b/.github/actions/setup_extension/action.yml @@ -0,0 +1,35 @@ +name: setup_extension +inputs: + pg_major: + required: false + skip_installation: + required: false + default: false + type: boolean +runs: + using: composite + steps: + - name: Expose $PG_MAJOR to Github Env + run: |- + if [ -z "${{ inputs.pg_major }}" ]; then + echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV + else + echo "PG_MAJOR=${{ inputs.pg_major }}" >> $GITHUB_ENV + fi + shell: bash + - uses: actions/download-artifact@v3.0.1 + with: + name: build-${{ env.PG_MAJOR }} + - name: Install Extension + if: ${{ inputs.skip_installation == 'false' }} + run: tar xfv "install-$PG_MAJOR.tar" --directory / + shell: bash + - name: Configure + run: |- + chown -R circleci . + git config --global --add safe.directory ${GITHUB_WORKSPACE} + gosu circleci ./configure --without-pg-version-check + shell: bash + - name: Enable core dumps + run: ulimit -c unlimited + shell: bash diff --git a/.github/actions/upload_coverage/action.yml b/.github/actions/upload_coverage/action.yml new file mode 100644 index 00000000000..0b5f581a6a4 --- /dev/null +++ b/.github/actions/upload_coverage/action.yml @@ -0,0 +1,27 @@ +name: coverage +inputs: + flags: + required: false + codecov_token: + required: true +runs: + using: composite + steps: + - uses: codecov/codecov-action@v3 + with: + flags: ${{ inputs.flags }} + token: ${{ inputs.codecov_token }} + verbose: true + gcov: true + - name: Create codeclimate coverage + run: |- + lcov --directory . 
--capture --output-file lcov.info + lcov --remove lcov.info -o lcov.info '/usr/*' + sed "s=^SF:$PWD/=SF:=g" -i lcov.info # relative paths are required by codeclimate + mkdir -p /tmp/codeclimate + cc-test-reporter format-coverage -t lcov -o /tmp/codeclimate/${{ inputs.flags }}.json lcov.info + shell: bash + - uses: actions/upload-artifact@v3.1.1 + with: + path: "/tmp/codeclimate/*.json" + name: codeclimate diff --git a/.github/packaging/validate_build_output.sh b/.github/packaging/validate_build_output.sh index 64098811ec4..dab301aa5ee 100755 --- a/.github/packaging/validate_build_output.sh +++ b/.github/packaging/validate_build_output.sh @@ -32,7 +32,10 @@ python3 -m pip install -r tools/packaging_automation/requirements.txt echo "Package type: ${package_type}" echo "OS version: $(get_rpm_os_version)" - # if os version is centos 7 or oracle linux 7, then remove urllib3 with pip uninstall and install urllib3<2.0.0 with pip install + # For RHEL 7, we need to install urllib3<2 due to the execution error below: + # ImportError: urllib3 v2.0 only supports OpenSSL 1.1.1+, currently the 'ssl' + # module is compiled with 'OpenSSL 1.0.2k-fips 26 Jan 2017'. + # See: https://github.com/urllib3/urllib3/issues/2168 if [[ ${package_type} == "rpm" && $(get_rpm_os_version) == 7* ]]; then python3 -m pip uninstall -y urllib3 python3 -m pip install 'urllib3<2' diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml new file mode 100644 index 00000000000..cd4995e20e1 --- /dev/null +++ b/.github/workflows/build_and_test.yml @@ -0,0 +1,530 @@ +name: Build & Test +run-name: Build & Test - ${{ github.event.pull_request.title || github.ref_name }} +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true +on: + workflow_dispatch: + inputs: + skip_test_flakyness: + required: false + default: false + type: boolean + push: + branches: + - "main" + - "release-*" + pull_request: + types: [opened, reopened, synchronize] + merge_group: +jobs: + # Since GHA does not interpolate env variables in matrix context, we need to + # define them in a separate job and use them in other jobs. + params: + runs-on: ubuntu-latest + name: Initialize parameters + outputs: + build_image_name: "citus/extbuilder" + test_image_name: "citus/exttester" + citusupgrade_image_name: "citus/citusupgradetester" + fail_test_image_name: "citus/failtester" + pgupgrade_image_name: "citus/pgupgradetester" + style_checker_image_name: "citus/stylechecker" + style_checker_tools_version: "0.8.18" + image_suffix: "-v390dab3" + pg14_version: '{ "major": "14", "full": "14.11" }' + pg15_version: '{ "major": "15", "full": "15.6" }' + pg16_version: '{ "major": "16", "full": "16.2" }' + upgrade_pg_versions: "14.11-15.6-16.2" + steps: + # Since a GHA job needs at least one step, we use a noop step here. 
+ - name: Set up parameters + run: echo 'noop' + check-sql-snapshots: + needs: params + runs-on: ubuntu-20.04 + container: + image: ${{ needs.params.outputs.build_image_name }}:latest + options: --user root + steps: + - uses: actions/checkout@v3.5.0 + - name: Check Snapshots + run: | + git config --global --add safe.directory ${GITHUB_WORKSPACE} + ci/check_sql_snapshots.sh + check-style: + needs: params + runs-on: ubuntu-20.04 + container: + image: ${{ needs.params.outputs.style_checker_image_name }}:${{ needs.params.outputs.style_checker_tools_version }}${{ needs.params.outputs.image_suffix }} + steps: + - name: Check Snapshots + run: | + git config --global --add safe.directory ${GITHUB_WORKSPACE} + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Check C Style + run: citus_indent --check + - name: Check Python style + run: black --check . + - name: Check Python import order + run: isort --check . + - name: Check Python lints + run: flake8 . + - name: Fix whitespace + run: ci/editorconfig.sh && git diff --exit-code + - name: Remove useless declarations + run: ci/remove_useless_declarations.sh && git diff --cached --exit-code + - name: Sort and group includes + run: ci/sort_and_group_includes.sh && git diff --exit-code + - name: Normalize test output + run: ci/normalize_expected.sh && git diff --exit-code + - name: Check for C-style comments in migration files + run: ci/disallow_c_comments_in_migrations.sh && git diff --exit-code + - name: 'Check for comments that start with # character in spec files' + run: ci/disallow_hash_comments_in_spec_files.sh && git diff --exit-code + - name: Check for gitignore entries for source files + run: ci/fix_gitignore.sh && git diff --exit-code + - name: Check for lengths of changelog entries + run: ci/disallow_long_changelog_entries.sh + - name: Check for banned C API usage + run: ci/banned.h.sh + - name: Check for tests missing in schedules + run: ci/check_all_tests_are_run.sh + - name: Check if all CI scripts are actually run + run: ci/check_all_ci_scripts_are_run.sh + - name: Check if all GUCs are sorted alphabetically + run: ci/check_gucs_are_alphabetically_sorted.sh + - name: Check for missing downgrade scripts + run: ci/check_migration_files.sh + build: + needs: params + name: Build for PG${{ fromJson(matrix.pg_version).major }} + strategy: + fail-fast: false + matrix: + image_name: + - ${{ needs.params.outputs.build_image_name }} + image_suffix: + - ${{ needs.params.outputs.image_suffix}} + pg_version: + - ${{ needs.params.outputs.pg14_version }} + - ${{ needs.params.outputs.pg15_version }} + - ${{ needs.params.outputs.pg16_version }} + runs-on: ubuntu-20.04 + container: + image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ matrix.image_suffix }}" + options: --user root + steps: + - uses: actions/checkout@v4 + - name: Expose $PG_MAJOR to Github Env + run: echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV + shell: bash + - name: Build + run: "./ci/build-citus.sh" + shell: bash + - uses: actions/upload-artifact@v3.1.1 + with: + name: build-${{ env.PG_MAJOR }} + path: |- + ./build-${{ env.PG_MAJOR }}/* + ./install-${{ env.PG_MAJOR }}.tar + test-citus: + name: PG${{ fromJson(matrix.pg_version).major }} - ${{ matrix.make }} + strategy: + fail-fast: false + matrix: + suite: + - regress + image_name: + - ${{ needs.params.outputs.test_image_name }} + pg_version: + - ${{ needs.params.outputs.pg14_version }} + - ${{ needs.params.outputs.pg15_version }} + - ${{ needs.params.outputs.pg16_version }} + make: + - 
check-split + - check-multi + - check-multi-1 + - check-multi-mx + - check-vanilla + - check-isolation + - check-operations + - check-follower-cluster + - check-columnar + - check-columnar-isolation + - check-enterprise + - check-enterprise-isolation + - check-enterprise-isolation-logicalrep-1 + - check-enterprise-isolation-logicalrep-2 + - check-enterprise-isolation-logicalrep-3 + include: + - make: check-failure + pg_version: ${{ needs.params.outputs.pg14_version }} + suite: regress + image_name: ${{ needs.params.outputs.fail_test_image_name }} + - make: check-failure + pg_version: ${{ needs.params.outputs.pg15_version }} + suite: regress + image_name: ${{ needs.params.outputs.fail_test_image_name }} + - make: check-failure + pg_version: ${{ needs.params.outputs.pg16_version }} + suite: regress + image_name: ${{ needs.params.outputs.fail_test_image_name }} + - make: check-enterprise-failure + pg_version: ${{ needs.params.outputs.pg14_version }} + suite: regress + image_name: ${{ needs.params.outputs.fail_test_image_name }} + - make: check-enterprise-failure + pg_version: ${{ needs.params.outputs.pg15_version }} + suite: regress + image_name: ${{ needs.params.outputs.fail_test_image_name }} + - make: check-enterprise-failure + pg_version: ${{ needs.params.outputs.pg16_version }} + suite: regress + image_name: ${{ needs.params.outputs.fail_test_image_name }} + - make: check-pytest + pg_version: ${{ needs.params.outputs.pg14_version }} + suite: regress + image_name: ${{ needs.params.outputs.fail_test_image_name }} + - make: check-pytest + pg_version: ${{ needs.params.outputs.pg15_version }} + suite: regress + image_name: ${{ needs.params.outputs.fail_test_image_name }} + - make: check-pytest + pg_version: ${{ needs.params.outputs.pg16_version }} + suite: regress + image_name: ${{ needs.params.outputs.fail_test_image_name }} + - make: installcheck + suite: cdc + image_name: ${{ needs.params.outputs.test_image_name }} + pg_version: ${{ needs.params.outputs.pg15_version }} + - make: installcheck + suite: cdc + image_name: ${{ needs.params.outputs.test_image_name }} + pg_version: ${{ needs.params.outputs.pg16_version }} + - make: check-query-generator + pg_version: ${{ needs.params.outputs.pg14_version }} + suite: regress + image_name: ${{ needs.params.outputs.fail_test_image_name }} + - make: check-query-generator + pg_version: ${{ needs.params.outputs.pg15_version }} + suite: regress + image_name: ${{ needs.params.outputs.fail_test_image_name }} + - make: check-query-generator + pg_version: ${{ needs.params.outputs.pg16_version }} + suite: regress + image_name: ${{ needs.params.outputs.fail_test_image_name }} + runs-on: ubuntu-20.04 + container: + image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ needs.params.outputs.image_suffix }}" + options: --user root --dns=8.8.8.8 + # Because GitHub creates a default network for each job, we need to use + # --dns= to have DNS settings similar to our other CI systems and local + # machines. Otherwise, we may see different results. 
+ needs: + - params + - build + steps: + - uses: actions/checkout@v4 + - uses: "./.github/actions/setup_extension" + - name: Run Test + run: gosu circleci make -C src/test/${{ matrix.suite }} ${{ matrix.make }} + timeout-minutes: 20 + - uses: "./.github/actions/save_logs_and_results" + if: always() + with: + folder: ${{ fromJson(matrix.pg_version).major }}_${{ matrix.make }} + - uses: "./.github/actions/upload_coverage" + if: always() + with: + flags: ${{ env.PG_MAJOR }}_${{ matrix.suite }}_${{ matrix.make }} + codecov_token: ${{ secrets.CODECOV_TOKEN }} + test-arbitrary-configs: + name: PG${{ fromJson(matrix.pg_version).major }} - check-arbitrary-configs-${{ matrix.parallel }} + runs-on: ["self-hosted", "1ES.Pool=1es-gha-citusdata-pool"] + container: + image: "${{ matrix.image_name }}:${{ fromJson(matrix.pg_version).full }}${{ needs.params.outputs.image_suffix }}" + options: --user root + needs: + - params + - build + strategy: + fail-fast: false + matrix: + image_name: + - ${{ needs.params.outputs.fail_test_image_name }} + pg_version: + - ${{ needs.params.outputs.pg14_version }} + - ${{ needs.params.outputs.pg15_version }} + - ${{ needs.params.outputs.pg16_version }} + parallel: [0,1,2,3,4,5] # workaround for running 6 parallel jobs + steps: + - uses: actions/checkout@v4 + - uses: "./.github/actions/setup_extension" + - name: Test arbitrary configs + run: |- + # we use parallel jobs to split the tests into 6 parts and run them in parallel + # the script below extracts the tests for the current job + N=6 # Total number of jobs (see matrix.parallel) + X=${{ matrix.parallel }} # Current job number + TESTS=$(src/test/regress/citus_tests/print_test_names.py | + tr '\n' ',' | awk -v N="$N" -v X="$X" -F, '{ + split("", parts) + for (i = 1; i <= NF; i++) { + parts[i % N] = parts[i % N] $i "," + } + print substr(parts[X], 1, length(parts[X])-1) + }') + echo $TESTS + gosu circleci \ + make -C src/test/regress \ + check-arbitrary-configs parallel=4 CONFIGS=$TESTS + - uses: "./.github/actions/save_logs_and_results" + if: always() + - uses: "./.github/actions/upload_coverage" + if: always() + with: + flags: ${{ env.pg_major }}_upgrade + codecov_token: ${{ secrets.CODECOV_TOKEN }} + test-pg-upgrade: + name: PG${{ matrix.old_pg_major }}-PG${{ matrix.new_pg_major }} - check-pg-upgrade + runs-on: ubuntu-20.04 + container: + image: "${{ needs.params.outputs.pgupgrade_image_name }}:${{ needs.params.outputs.upgrade_pg_versions }}${{ needs.params.outputs.image_suffix }}" + options: --user root + needs: + - params + - build + strategy: + fail-fast: false + matrix: + include: + - old_pg_major: 14 + new_pg_major: 15 + - old_pg_major: 15 + new_pg_major: 16 + - old_pg_major: 14 + new_pg_major: 16 + env: + old_pg_major: ${{ matrix.old_pg_major }} + new_pg_major: ${{ matrix.new_pg_major }} + steps: + - uses: actions/checkout@v4 + - uses: "./.github/actions/setup_extension" + with: + pg_major: "${{ env.old_pg_major }}" + - uses: "./.github/actions/setup_extension" + with: + pg_major: "${{ env.new_pg_major }}" + - name: Install and test postgres upgrade + run: |- + gosu circleci \ + make -C src/test/regress \ + check-pg-upgrade \ + old-bindir=/usr/lib/postgresql/${{ env.old_pg_major }}/bin \ + new-bindir=/usr/lib/postgresql/${{ env.new_pg_major }}/bin + - name: Copy pg_upgrade logs for newData dir + run: |- + mkdir -p /tmp/pg_upgrade_newData_logs + if ls src/test/regress/tmp_upgrade/newData/*.log 1> /dev/null 2>&1; then + cp src/test/regress/tmp_upgrade/newData/*.log /tmp/pg_upgrade_newData_logs + fi + if: failure() 
+ - uses: "./.github/actions/save_logs_and_results" + if: always() + - uses: "./.github/actions/upload_coverage" + if: always() + with: + flags: ${{ env.old_pg_major }}_${{ env.new_pg_major }}_upgrade + codecov_token: ${{ secrets.CODECOV_TOKEN }} + test-citus-upgrade: + name: PG${{ fromJson(needs.params.outputs.pg14_version).major }} - check-citus-upgrade + runs-on: ubuntu-20.04 + container: + image: "${{ needs.params.outputs.citusupgrade_image_name }}:${{ fromJson(needs.params.outputs.pg14_version).full }}${{ needs.params.outputs.image_suffix }}" + options: --user root + needs: + - params + - build + steps: + - uses: actions/checkout@v4 + - uses: "./.github/actions/setup_extension" + with: + skip_installation: true + - name: Install and test citus upgrade + run: |- + # run make check-citus-upgrade for all citus versions + # the image has ${CITUS_VERSIONS} set with all verions it contains the binaries of + for citus_version in ${CITUS_VERSIONS}; do \ + gosu circleci \ + make -C src/test/regress \ + check-citus-upgrade \ + bindir=/usr/lib/postgresql/${PG_MAJOR}/bin \ + citus-old-version=${citus_version} \ + citus-pre-tar=/install-pg${PG_MAJOR}-citus${citus_version}.tar \ + citus-post-tar=${GITHUB_WORKSPACE}/install-$PG_MAJOR.tar; \ + done; + # run make check-citus-upgrade-mixed for all citus versions + # the image has ${CITUS_VERSIONS} set with all verions it contains the binaries of + for citus_version in ${CITUS_VERSIONS}; do \ + gosu circleci \ + make -C src/test/regress \ + check-citus-upgrade-mixed \ + citus-old-version=${citus_version} \ + bindir=/usr/lib/postgresql/${PG_MAJOR}/bin \ + citus-pre-tar=/install-pg${PG_MAJOR}-citus${citus_version}.tar \ + citus-post-tar=${GITHUB_WORKSPACE}/install-$PG_MAJOR.tar; \ + done; + - uses: "./.github/actions/save_logs_and_results" + if: always() + - uses: "./.github/actions/upload_coverage" + if: always() + with: + flags: ${{ env.pg_major }}_upgrade + codecov_token: ${{ secrets.CODECOV_TOKEN }} + upload-coverage: + if: always() + env: + CC_TEST_REPORTER_ID: ${{ secrets.CC_TEST_REPORTER_ID }} + runs-on: ubuntu-20.04 + container: + image: ${{ needs.params.outputs.test_image_name }}:${{ fromJson(needs.params.outputs.pg16_version).full }}${{ needs.params.outputs.image_suffix }} + needs: + - params + - test-citus + - test-arbitrary-configs + - test-citus-upgrade + - test-pg-upgrade + steps: + - uses: actions/download-artifact@v3.0.1 + with: + name: "codeclimate" + path: "codeclimate" + - name: Upload coverage results to Code Climate + run: |- + cc-test-reporter sum-coverage codeclimate/*.json -o total.json + cc-test-reporter upload-coverage -i total.json + ch_benchmark: + name: CH Benchmark + if: startsWith(github.ref, 'refs/heads/ch_benchmark/') + runs-on: ubuntu-20.04 + needs: + - build + steps: + - uses: actions/checkout@v4 + - uses: azure/login@v1 + with: + creds: ${{ secrets.AZURE_CREDENTIALS }} + - name: install dependencies and run ch_benchmark tests + uses: azure/CLI@v1 + with: + inlineScript: | + cd ./src/test/hammerdb + chmod +x run_hammerdb.sh + run_hammerdb.sh citusbot_ch_benchmark_rg + tpcc_benchmark: + name: TPCC Benchmark + if: startsWith(github.ref, 'refs/heads/tpcc_benchmark/') + runs-on: ubuntu-20.04 + needs: + - build + steps: + - uses: actions/checkout@v4 + - uses: azure/login@v1 + with: + creds: ${{ secrets.AZURE_CREDENTIALS }} + - name: install dependencies and run tpcc_benchmark tests + uses: azure/CLI@v1 + with: + inlineScript: | + cd ./src/test/hammerdb + chmod +x run_hammerdb.sh + run_hammerdb.sh citusbot_tpcc_benchmark_rg + 
prepare_parallelization_matrix_32:
+    name: Parallel 32
+    if: ${{ needs.test-flakyness-pre.outputs.tests != ''}}
+    needs: test-flakyness-pre
+    runs-on: ubuntu-20.04
+    outputs:
+      json: ${{ steps.parallelization.outputs.json }}
+    steps:
+      - uses: actions/checkout@v4
+      - uses: "./.github/actions/parallelization"
+        id: parallelization
+        with:
+          count: 32
+  test-flakyness-pre:
+    name: Detect regression tests that need to be run
+    if: ${{ !inputs.skip_test_flakyness }}
+    runs-on: ubuntu-20.04
+    needs: build
+    outputs:
+      tests: ${{ steps.detect-regression-tests.outputs.tests }}
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+      - name: Detect regression tests that need to be run
+        id: detect-regression-tests
+        run: |-
+          detected_changes=$(git diff origin/main... --name-only --diff-filter=AM | (grep 'src/test/regress/sql/.*\.sql\|src/test/regress/spec/.*\.spec\|src/test/regress/citus_tests/test/test_.*\.py' || true))
+          tests=${detected_changes}
+
+          # split out the tests to be skipped -- today we only skip upgrade tests
+          skipped_tests=""
+          not_skipped_tests=""
+          for test in $tests; do
+            if [[ $test =~ ^src/test/regress/sql/upgrade_ ]]; then
+              skipped_tests="$skipped_tests $test"
+            else
+              not_skipped_tests="$not_skipped_tests $test"
+            fi
+          done
+
+          if [ ! -z "$skipped_tests" ]; then
+            echo "Skipped tests " $skipped_tests
+          fi
+
+          if [ -z "$not_skipped_tests" ]; then
+            echo "No tests detected that flaky test detection should run"
+          else
+            echo "Detected tests " $not_skipped_tests
+          fi
+
+          echo 'tests<<EOF' >> $GITHUB_OUTPUT
+          echo "$not_skipped_tests" >> "$GITHUB_OUTPUT"
+          echo 'EOF' >> $GITHUB_OUTPUT
+  test-flakyness:
+    if: ${{ needs.test-flakyness-pre.outputs.tests != ''}}
+    name: Test flakyness
+    runs-on: ubuntu-20.04
+    container:
+      image: ${{ needs.params.outputs.fail_test_image_name }}:${{ fromJson(needs.params.outputs.pg16_version).full }}${{ needs.params.outputs.image_suffix }}
+      options: --user root
+    env:
+      runs: 8
+    needs:
+      - params
+      - build
+      - test-flakyness-pre
+      - prepare_parallelization_matrix_32
+    strategy:
+      fail-fast: false
+      matrix: ${{ fromJson(needs.prepare_parallelization_matrix_32.outputs.json) }}
+    steps:
+      - uses: actions/checkout@v4
+      - uses: "./.github/actions/setup_extension"
+      - name: Run minimal tests
+        run: |-
+          tests="${{ needs.test-flakyness-pre.outputs.tests }}"
+          tests_array=($tests)
+          for test in "${tests_array[@]}"
+          do
+            test_name=$(echo "$test" | sed -r "s/.+\/(.+)\..+/\1/")
+            gosu circleci src/test/regress/citus_tests/run_test.py $test_name --repeat ${{ env.runs }} --use-whole-schedule-line
+          done
+        shell: bash
+      - uses: "./.github/actions/save_logs_and_results"
+        if: always()
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index 6478abf4b69..027f5a0487d 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -21,7 +21,7 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       - name: Initialize CodeQL
         uses: github/codeql-action/init@v2
diff --git a/.github/workflows/devcontainer.yml b/.github/workflows/devcontainer.yml
new file mode 100644
index 00000000000..dd5d506e4cf
--- /dev/null
+++ b/.github/workflows/devcontainer.yml
@@ -0,0 +1,49 @@
+name: "Build devcontainer"
+
+# Since building of containers can be quite time consuming, and take up some storage,
+# there is no need to finish a build for a tag if new changes are concurrently being made.
+# This cancels any previous builds for the same tag, and only the latest one will be kept.
+concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +on: + push: + paths: + - ".devcontainer/**" + workflow_dispatch: + +jobs: + docker: + runs-on: ubuntu-latest + steps: + - + name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: | + ghcr.io/citusdata/citus-devcontainer + tags: | + type=ref,event=branch + type=sha + - + name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + - + name: 'Login to GitHub Container Registry' + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{github.actor}} + password: ${{secrets.GITHUB_TOKEN}} + - + name: Build and push + uses: docker/build-push-action@v5 + with: + context: "{{defaultContext}}:.devcontainer" + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max diff --git a/.github/workflows/flaky_test_debugging.yml b/.github/workflows/flaky_test_debugging.yml new file mode 100644 index 00000000000..7135f99fa02 --- /dev/null +++ b/.github/workflows/flaky_test_debugging.yml @@ -0,0 +1,79 @@ +name: Flaky test debugging +run-name: Flaky test debugging - ${{ inputs.flaky_test }} (${{ inputs.flaky_test_runs_per_job }}x${{ inputs.flaky_test_parallel_jobs }}) +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true +on: + workflow_dispatch: + inputs: + flaky_test: + required: true + type: string + description: Test to run + flaky_test_runs_per_job: + required: false + default: 8 + type: number + description: Number of times to run the test + flaky_test_parallel_jobs: + required: false + default: 32 + type: number + description: Number of parallel jobs to run +jobs: + build: + name: Build Citus + runs-on: ubuntu-latest + container: + image: ${{ vars.build_image_name }}:${{ vars.pg15_version }}${{ vars.image_suffix }} + options: --user root + steps: + - uses: actions/checkout@v4 + - name: Configure, Build, and Install + run: | + echo "PG_MAJOR=${PG_MAJOR}" >> $GITHUB_ENV + ./ci/build-citus.sh + shell: bash + - uses: actions/upload-artifact@v3.1.1 + with: + name: build-${{ env.PG_MAJOR }} + path: |- + ./build-${{ env.PG_MAJOR }}/* + ./install-${{ env.PG_MAJOR }}.tar + prepare_parallelization_matrix: + name: Prepare parallelization matrix + runs-on: ubuntu-latest + outputs: + json: ${{ steps.parallelization.outputs.json }} + steps: + - uses: actions/checkout@v4 + - uses: "./.github/actions/parallelization" + id: parallelization + with: + count: ${{ inputs.flaky_test_parallel_jobs }} + test_flakyness: + name: Test flakyness + runs-on: ubuntu-latest + container: + image: ${{ vars.fail_test_image_name }}:${{ vars.pg15_version }}${{ vars.image_suffix }} + options: --user root + needs: + [build, prepare_parallelization_matrix] + env: + test: "${{ inputs.flaky_test }}" + runs: "${{ inputs.flaky_test_runs_per_job }}" + skip: false + strategy: + fail-fast: false + matrix: ${{ fromJson(needs.prepare_parallelization_matrix.outputs.json) }} + steps: + - uses: actions/checkout@v4 + - uses: "./.github/actions/setup_extension" + - name: Run minimal tests + run: |- + gosu circleci src/test/regress/citus_tests/run_test.py ${{ env.test }} --repeat ${{ env.runs }} --use-whole-schedule-line + shell: bash + - uses: "./.github/actions/save_logs_and_results" + if: always() + with: + folder: ${{ matrix.id }} diff --git a/.github/workflows/packaging-test-pipelines.yml b/.github/workflows/packaging-test-pipelines.yml index 
9d3fb81be15..4ae741a911a 100644 --- a/.github/workflows/packaging-test-pipelines.yml +++ b/.github/workflows/packaging-test-pipelines.yml @@ -3,6 +3,7 @@ name: Build tests in packaging images on: pull_request: types: [opened, reopened,synchronize] + merge_group: workflow_dispatch: @@ -24,14 +25,16 @@ jobs: - name: Get Postgres Versions id: get-postgres-versions run: | - # Postgres versions are stored in .circleci/config.yml file in "build-[pg-version] format. Below command - # extracts the versions and get the unique values. - pg_versions=`grep -Eo 'build-[[:digit:]]{2}' .circleci/config.yml|sed -e "s/^build-//"|sort|uniq|tr '\n' ','| head -c -1` + set -euxo pipefail + # Postgres versions are stored in .github/workflows/build_and_test.yml + # file in json strings with major and full keys. + # Below command extracts the versions and get the unique values. + pg_versions=$(cat .github/workflows/build_and_test.yml | grep -oE '"major": "[0-9]+", "full": "[0-9.]+"' | sed -E 's/"major": "([0-9]+)", "full": "([0-9.]+)"/\1/g' | sort | uniq | tr '\n', ',') pg_versions_array="[ ${pg_versions} ]" echo "Supported PG Versions: ${pg_versions_array}" # Below line is needed to set the output variable to be used in the next job echo "pg_versions=${pg_versions_array}" >> $GITHUB_OUTPUT - + shell: bash rpm_build_tests: name: rpm_build_tests needs: get_postgres_versions_from_file @@ -109,11 +112,6 @@ jobs: PACKAGING_DOCKER_IMAGE: ${{ matrix.packaging_docker_image }} run: | echo "Postgres version: ${POSTGRES_VERSION}" - - ## Install required packages to execute packaging tools for rpm based distros - yum install python3-pip python3-devel postgresql-devel -y - python3 -m pip install wheel - ./.github/packaging/validate_build_output.sh "rpm" deb_build_tests: @@ -189,9 +187,4 @@ jobs: PACKAGING_DOCKER_IMAGE: ${{ matrix.packaging_docker_image }} run: | echo "Postgres version: ${POSTGRES_VERSION}" - - apt-get update -y - ## Install required packages to execute packaging tools for deb based distros - apt-get install python3-dev python3-pip -y - apt-get purge -y python3-yaml ./.github/packaging/validate_build_output.sh "deb" diff --git a/.gitignore b/.gitignore index df447746a9d..e636392ac36 100644 --- a/.gitignore +++ b/.gitignore @@ -55,3 +55,6 @@ lib*.pc # style related temporary outputs *.uncrustify .venv + +# added output when modifying check_gucs_are_alphabetically_sorted.sh +guc.out diff --git a/CHANGELOG.md b/CHANGELOG.md index 02fc91d04dd..02156009972 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,165 @@ +### citus v12.1.3 (April 18, 2024) ### + +* Allows overwriting host name for all inter-node connections by + supporting "host" parameter in citus.node_conninfo (#7541) + +* Changes the order in which the locks are acquired for the target and + reference tables, when a modify request is initiated from a worker + node that is not the "FirstWorkerNode" (#7542) + +* Fixes a performance issue when distributing a table that depends on an + extension (#7574) + +* Fixes a performance issue when using "\d tablename" on a server with + many tables (#7577) + +* Fixes a crash caused by some form of ALTER TABLE ADD COLUMN + statements. When adding multiple columns, if one of the ADD COLUMN + statements contains a FOREIGN constraint omitting the referenced + columns in the statement, a SEGFAULT was occurring. 
(#7522) + +* Fixes a performance issue when creating distributed tables if many + already exist (#7575, #7579) + +* Fixes a bug when hostname in pg_dist_node resolves to multiple IPs + (#7377) + +* Fixes performance issue when tracking foreign key constraints on + systems with many constraints (#7578) + +* Fixes segmentation fault when using CASE WHEN in DO block within + functions. (#7554) + +* Fixes undefined behavior in master_disable_node due to argument + mismatch (#7492) + +* Fixes some potential bugs by correctly marking some variables as + volatile (#7570) + +* Logs username in the failed connection message (#7432) + +### citus v11.0.10 (February 15, 2024) ### + +* Removes pg_send_cancellation and all references (#7135) + +### citus v12.1.2 (February 12, 2024) ### + +* Fixes the incorrect column count after ALTER TABLE (#7379) + +### citus v12.0.1 (July 11, 2023) ### + +* Fixes incorrect default value assumption for VACUUM(PROCESS_TOAST) #7122) + +* Fixes a bug that causes an unexpected error when adding a column + with a NULL constraint (#7093) + +* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152) + +* Fixes a bug with deleting colocation groups (#6929) + +* Fixes memory and memory contexts leaks in Foreign Constraint Graphs (#7236) + +* Fixes shard size bug with too many shards (#7018) + +* Fixes the incorrect column count after ALTER TABLE (#7379) + +* Improves citus_tables view performance (#7050) + +* Makes sure to disallow creating a replicated distributed table + concurrently (#7219) + +* Removes pg_send_cancellation and all references (#7135) + +### citus v11.3.1 (February 12, 2024) ### + +* Disallows MERGE when the query prunes down to zero shards (#6946) + +* Fixes a bug related to non-existent objects in DDL commands (#6984) + +* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152) + +* Fixes a bug with deleting colocation groups (#6929) + +* Fixes incorrect results on fetching scrollable with hold cursors (#7014) + +* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236) + +* Fixes replicate reference tables task fail when user is superuser (#6930) + +* Fixes the incorrect column count after ALTER TABLE (#7379) + +* Improves citus_shard_sizes performance (#7050) + +* Makes sure to disallow creating a replicated distributed table + concurrently (#7219) + +* Removes pg_send_cancellation and all references (#7135) + +### citus v11.2.2 (February 12, 2024) ### + +* Fixes a bug in background shard rebalancer where the replicate + reference tables task fails if the current user is not a superuser (#6930) + +* Fixes a bug related to non-existent objects in DDL commands (#6984) + +* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152) + +* Fixes a bug with deleting colocation groups (#6929) + +* Fixes incorrect results on fetching scrollable with hold cursors (#7014) + +* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236) + +* Fixes the incorrect column count after ALTER TABLE (#7379) + +* Improves failure handling of distributed execution (#7090) + +* Makes sure to disallow creating a replicated distributed table + concurrently (#7219) + +* Removes pg_send_cancellation (#7135) + +### citus v11.1.7 (February 12, 2024) ### + +* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236) + +* Fixes a bug related to non-existent objects in DDL commands (#6984) + +* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152) + +* Fixes 
a bug with deleting colocation groups (#6929)
+
+* Fixes incorrect results on fetching scrollable with hold cursors (#7014)
+
+* Fixes the incorrect column count after ALTER TABLE (#7379)
+
+* Improves failure handling of distributed execution (#7090)
+
+* Makes sure to disallow creating a replicated distributed table
+  concurrently (#7219)
+
+* Removes pg_send_cancellation and all references (#7135)
+
+### citus v11.0.9 (February 15, 2024) ###
+
+* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152)
+
+* Fixes a bug with deleting colocation groups (#6929)
+
+* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236)
+
+* Fixes the incorrect column count after ALTER TABLE (#7462)
+
+* Improves failure handling of distributed execution (#7090)
+
+### citus v12.1.1 (November 9, 2023) ###
+
+* Fixes leaking of memory and memory contexts in Citus foreign key cache
+  (#7236)
+
+* Makes sure to disallow creating a replicated distributed table concurrently
+  (#7219)
+
 ### citus v12.1.0 (September 12, 2023) ###
 
 * Adds support for PostgreSQL 16.0 (#7173)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index ac1f600abfe..e1900642d3e 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -11,6 +11,30 @@ sign a Contributor License Agreement (CLA). For an explanation of why we ask
 this as well as instructions for how to proceed, see the
 [Microsoft CLA](https://cla.opensource.microsoft.com/).
 
+### Devcontainer / Github Codespaces
+
+The easiest way to start contributing is via our devcontainer. This container works both locally in visual studio code with docker-desktop/docker-for-mac and in [Github Codespaces](https://github.com/features/codespaces). To open the project in vscode you will need the [Dev Containers extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers). For codespaces you will need to [create a new codespace](https://codespace.new/citusdata/citus).
+
+With the extension installed you can run the following from the command palette to get started:
+
+```
+> Dev Containers: Clone Repository in Container Volume...
+```
+
+In the subsequent popup, paste the URL of the repo and hit enter.
+
+```
+https://github.com/citusdata/citus
+```
+
+This will create an isolated Workspace in vscode, complete with all tools required to build, test and run the Citus extension. We keep this container up to date with the supported postgres versions as well as the exact versions of tooling we use.
+
+To get started quickly we suggest splitting your terminal once to have two shells: the left one in `/workspaces/citus`, the right one changed to `/data`. The left terminal will be used to interact with the project, the right one with a testing cluster.
+
+To get citus installed from source we run `make install -s` in the first terminal. Once installed you can start a Citus cluster in the second terminal via `citus_dev make citus`. The cluster will run in the background and can be interacted with via `citus_dev`, which also gives an overview of the available commands.
+
+With the Citus cluster running you can connect to the coordinator in the first terminal via `psql -p9700`. Because the coordinator is the most common entrypoint, the `PGPORT` environment variable is set accordingly, so a simple `psql` will connect directly to the coordinator.
+
 ### Getting and building
 
 [PostgreSQL documentation](https://www.postgresql.org/support/versioning/) has a
@@ -151,7 +175,7 @@ that are missing in earlier minor versions.
### Following our coding conventions
 
-CircleCI will automatically reject any PRs which do not follow our coding
+The CI pipeline will automatically reject any PRs which do not follow our coding
 conventions. The easiest way to ensure your PR adheres to those conventions is
 to use the [citus_indent](https://github.com/citusdata/tools/tree/develop/uncrustify)
 tool. This tool uses `uncrustify` under the hood.
@@ -240,3 +264,9 @@ Any other SQL you can put directly in the main sql file, e.g.
 ### Running tests
 
 See [`src/test/regress/README.md`](https://github.com/citusdata/citus/blob/master/src/test/regress/README.md)
+
+### Documentation
+
+User-facing documentation is published on [docs.citusdata.com](https://docs.citusdata.com/). When adding a new feature, function, or setting, you can open a pull request or issue against the [Citus docs repo](https://github.com/citusdata/citus_docs/).
+
+Detailed descriptions of the implementation for Citus developers are provided in the [Citus Technical Documentation](src/backend/distributed/README.md). It is currently a single file for ease of searching. Please update the documentation if you make any changes that affect the design or add major new features.
diff --git a/DEVCONTAINER.md b/DEVCONTAINER.md
new file mode 100644
index 00000000000..d004e6f7122
--- /dev/null
+++ b/DEVCONTAINER.md
@@ -0,0 +1,43 @@
+# Devcontainer
+
+## Coredumps
+When postgres/citus crashes, there is the option to create a coredump. This is useful for debugging the issue. Coredumps are enabled in the devcontainer by default. However, not all environments are configured correctly out of the box. The most important configuration that is not standardized is the `core_pattern`. The configuration can be verified from the container; however, you cannot change this setting from inside the container, as the filesystem containing this setting is in read-only mode while inside the container.
+
+To verify whether corefiles are written, run the following command in a terminal. It shows the filename pattern with which the corefile will be written.
+```bash
+cat /proc/sys/kernel/core_pattern
+```
+
+This should be configured with a relative path or simply a filename, such as `core`. When your environment shows an absolute path you will need to change this setting. How to change this setting depends highly on the underlying system, as the setting needs to be changed on the kernel of the host running the container.
+
+You can put any pattern in `/proc/sys/kernel/core_pattern` as you see fit. For example, you can add the PID to the core pattern in one of two ways (see the example at the end of this section):
+- You either include `%p` in the core_pattern. This gets substituted with the PID of the crashing process.
+- Alternatively you could set `/proc/sys/kernel/core_uses_pid` to `1` in the same way as you set `core_pattern`. This will append the PID to the corefile if `%p` is not explicitly contained in the core_pattern.
+
+When a coredump is written you can use the debug/launch configuration `Open core file` which is preconfigured in the devcontainer. This will open a file prompt that lists all coredumps that are found in your workspace. When you want to debug coredumps from `citus_dev` that are run in your `/data` directory, you can add the data directory to your workspace. In the command palette of vscode you can run `>Workspace: Add Folder to Workspace...` and select the `/data` directory. This will allow you to open the coredumps from the `/data` directory in the `Open core file` debug configuration.
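+
+As a small sketch of the two options above (the values are illustrative; as noted, this has to be done on the host kernel, not from inside the container):
+
+```bash
+# Option 1: embed the PID directly in the core pattern via %p.
+echo "core.%p" > /proc/sys/kernel/core_pattern
+
+# Option 2: keep a plain pattern and let the kernel append the PID.
+echo "core" > /proc/sys/kernel/core_pattern
+echo 1 > /proc/sys/kernel/core_uses_pid
+```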
+
+### Windows (Docker Desktop)
+When running in Docker Desktop on Windows you will most likely need to change this setting. The Linux guest in WSL2 that runs your container is the `docker-desktop` environment. The easiest way to get onto the host, where you can change this setting, is to open a PowerShell window and verify you have the docker-desktop environment listed.
+
+```powershell
+wsl --list
+```
+
+Among others, this should list both `docker-desktop` and `docker-desktop-data`. You can then open a shell in the `docker-desktop` environment.
+
+```powershell
+wsl -d docker-desktop
+```
+
+Inside this shell you can verify that you have the right environment by running
+
+```bash
+cat /proc/sys/kernel/core_pattern
+```
+
+This should show the same configuration as the one you see inside the devcontainer. You can then change the setting by running the following command.
+This will change the setting for the current session. If you want to make the change permanent you will need to add this to a startup script.
+
+```bash
+echo "core" > /proc/sys/kernel/core_pattern
+```
diff --git a/Makefile b/Makefile
index e42d0ffd31a..77d8cad3ee6 100644
--- a/Makefile
+++ b/Makefile
@@ -61,6 +61,7 @@ check-style:
 
 # depend on install-all so that downgrade scripts are installed as well
 check: all install-all
-	$(MAKE) -C src/test/regress check-full
+	# explicitly does not use $(MAKE) to avoid parallelism
+	make -C src/test/regress check
 
 .PHONY: all check clean install install-downgrades install-all
diff --git a/README.md b/README.md
index fd4564189b0..2cf17098ffc 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@
 
-![Citus Banner](/citus-readme-banner.png)
+![Citus Banner](images/citus-readme-banner.png)
 
 [![Latest Docs](https://img.shields.io/badge/docs-latest-brightgreen.svg)](https://docs.citusdata.com/)
 [![Stack Overflow](https://img.shields.io/badge/Stack%20Overflow-%20-545353?logo=Stack%20Overflow)](https://stackoverflow.com/questions/tagged/citus)
@@ -31,7 +31,7 @@ You can use these Citus superpowers to make your Postgres database scale-out rea
 Our [SIGMOD '21](https://2021.sigmod.org/) paper [Citus: Distributed PostgreSQL for Data-Intensive Applications](https://doi.org/10.1145/3448016.3457551) gives a more detailed look into what Citus is, how it works, and why it works that way.
 
-![Citus scales out from a single node](/citus-scale-out.png)
+![Citus scales out from a single node](images/citus-scale-out.png)
 
 Since Citus is an extension to Postgres, you can use Citus with the latest Postgres versions. And Citus works seamlessly with the PostgreSQL tools and extensions you are already familiar with.
@@ -423,12 +423,14 @@ A Citus database cluster grows from a single PostgreSQL node into a cluster by a
 Data in distributed tables is stored in “shards”, which are actually just regular PostgreSQL tables on the worker nodes. When querying a distributed table on the coordinator node, Citus will send regular SQL queries to the worker nodes. That way, all the usual PostgreSQL optimizations and extensions can automatically be used with Citus.
 
-![Citus architecture](/citus-architecture.png)
+![Citus architecture](images/citus-architecture.png)
 
 When you send a query in which all (co-located) distributed tables have the same filter on the distribution column, Citus will automatically detect that and send the whole query to the worker node that stores the data. That way, arbitrarily complex queries are supported with minimal routing overhead, which is especially useful for scaling transactional workloads.
If queries do not have a specific filter, each shard is queried in parallel, which is especially useful in analytical workloads. The Citus distributed executor is adaptive and is designed to handle both query types at the same time on the same system under high concurrency, which enables large-scale mixed workloads.
 
 The schema and metadata of distributed tables and reference tables are automatically synchronized to all the nodes in the cluster. That way, you can connect to any node to run distributed queries. Schema changes and cluster administration still need to go through the coordinator.
 
+Detailed descriptions of the implementation for Citus developers are provided in the [Citus Technical Documentation](src/backend/distributed/README.md).
+
 ## When to use Citus
 
 Citus is uniquely capable of scaling both analytical and transactional workloads with up to petabytes of data. Use cases in which Citus is commonly used:
diff --git a/ci/README.md b/ci/README.md
index 37ef94f4f98..b8dad35acc4 100644
--- a/ci/README.md
+++ b/ci/README.md
@@ -385,3 +385,18 @@ definitions are in alphabetical order.
 ## `print_stack_trace.sh`
 
 This script prints stack traces for failed tests, if they left core files.
+
+## `sort_and_group_includes.sh`
+
+This script checks and fixes issues with include grouping and sorting in C files.
+
+Includes are grouped in the following groups:
+ - System includes (e.g. `#include <...>`)
+ - Postgres.h include (e.g. `#include "postgres.h"`)
+ - Toplevel postgres includes (includes not in a directory, e.g. `#include "miscadmin.h"`)
+ - Postgres includes in a directory (e.g. `#include "catalog/pg_type.h"`)
+ - Toplevel citus includes (includes not in a directory, e.g. `#include "pg_version_constants.h"`)
+ - Columnar includes (e.g. `#include "columnar/columnar.h"`)
+ - Distributed includes (e.g. `#include "distributed/maintenanced.h"`)
+
+Within every group the include lines are sorted alphabetically.
diff --git a/ci/build-citus.sh b/ci/build-citus.sh
index 49f92e69134..678fd515cc6 100755
--- a/ci/build-citus.sh
+++ b/ci/build-citus.sh
@@ -15,9 +15,6 @@ PG_MAJOR=${PG_MAJOR:?please provide the postgres major version}
 codename=${VERSION#*(}
 codename=${codename%)*}
 
-# get project from argument
-project="${CIRCLE_PROJECT_REPONAME}"
-
 # we'll do everything with absolute paths
 basedir="$(pwd)"
 
@@ -28,7 +25,7 @@ build_ext() {
   pg_major="$1"
 
   builddir="${basedir}/build-${pg_major}"
-  echo "Beginning build of ${project} for PostgreSQL ${pg_major}..." >&2
+  echo "Beginning build for PostgreSQL ${pg_major}..." >&2
 
   # do everything in a subdirectory to avoid clutter in current directory
   mkdir -p "${builddir}" && cd "${builddir}"
diff --git a/ci/check_all_ci_scripts_are_run.sh b/ci/check_all_ci_scripts_are_run.sh
index 0b7abb3e35a..12516f793d1 100755
--- a/ci/check_all_ci_scripts_are_run.sh
+++ b/ci/check_all_ci_scripts_are_run.sh
@@ -14,8 +14,8 @@ ci_scripts=$(
   grep -v -E '^(ci_helpers.sh|fix_style.sh)$'
 )
 
 for script in $ci_scripts; do
-    if ! grep "\\bci/$script\\b" .circleci/config.yml > /dev/null; then
-        echo "ERROR: CI script with name \"$script\" is not actually used in .circleci/config.yml"
+    if ! grep "\\bci/$script\\b" -r .github > /dev/null; then
+        echo "ERROR: CI script with name \"$script\" is not actually used in .github folder"
         exit 1
     fi
     if !
grep "^## \`$script\`\$" ci/README.md > /dev/null; then diff --git a/ci/check_enterprise_merge.sh b/ci/check_enterprise_merge.sh deleted file mode 100755 index d29ffcad89a..00000000000 --- a/ci/check_enterprise_merge.sh +++ /dev/null @@ -1,96 +0,0 @@ -#!/bin/bash - -# Testing this script locally requires you to set the following environment -# variables: -# CIRCLE_BRANCH, GIT_USERNAME and GIT_TOKEN - -# fail if trying to reference a variable that is not set. -set -u -# exit immediately if a command fails -set -e -# Fail on pipe failures -set -o pipefail - -PR_BRANCH="${CIRCLE_BRANCH}" -ENTERPRISE_REMOTE="https://${GIT_USERNAME}:${GIT_TOKEN}@github.com/citusdata/citus-enterprise" - -# shellcheck disable=SC1091 -source ci/ci_helpers.sh - -# List executed commands. This is done so debugging this script is easier when -# it fails. It's explicitly done after git remote add so username and password -# are not shown in CI output (even though it's also filtered out by CircleCI) -set -x - -check_compile () { - echo "INFO: checking if merged code can be compiled" - ./configure --without-libcurl - make -j10 -} - -# Clone current git repo (which should be community) to a temporary working -# directory and go there -GIT_DIR_ROOT="$(git rev-parse --show-toplevel)" -TMP_GIT_DIR="$(mktemp --directory -t citus-merge-check.XXXXXXXXX)" -git clone "$GIT_DIR_ROOT" "$TMP_GIT_DIR" -cd "$TMP_GIT_DIR" - -# Fails in CI without this -git config user.email "citus-bot@microsoft.com" -git config user.name "citus bot" - -# Disable "set -x" temporarily, because $ENTERPRISE_REMOTE contains passwords -{ set +x ; } 2> /dev/null -git remote add enterprise "$ENTERPRISE_REMOTE" -set -x - -git remote set-url --push enterprise no-pushing - -# Fetch enterprise-master -git fetch enterprise enterprise-master - - -git checkout "enterprise/enterprise-master" - -if git merge --no-commit "origin/$PR_BRANCH"; then - echo "INFO: community PR branch could be merged into enterprise-master" - # check that we can compile after the merge - if check_compile; then - exit 0 - fi - - echo "WARN: Failed to compile after community PR branch was merged into enterprise" -fi - -# undo partial merge -git merge --abort - -# If we have a conflict on enterprise merge on the master branch, we have a problem. -# Provide an error message to indicate that enterprise merge is needed to fix this check. -if [[ $PR_BRANCH = master ]]; then - echo "ERROR: Master branch has merge conflicts with enterprise-master." - echo "Try re-running this CI job after merging your changes into enterprise-master." - exit 1 -fi - -if ! git fetch enterprise "$PR_BRANCH" ; then - echo "ERROR: enterprise/$PR_BRANCH was not found and community PR branch could not be merged into enterprise-master" - exit 1 -fi - -# Show the top commit of the enterprise PR branch to make debugging easier -git log -n 1 "enterprise/$PR_BRANCH" - -# Check that this branch contains the top commit of the current community PR -# branch. If it does not it means it's not up to date with the current PR, so -# the enterprise branch should be updated. -if ! git merge-base --is-ancestor "origin/$PR_BRANCH" "enterprise/$PR_BRANCH" ; then - echo "ERROR: enterprise/$PR_BRANCH is not up to date with community PR branch" - exit 1 -fi - -# Now check if we can merge the enterprise PR into enterprise-master without -# issues. 
-git merge --no-commit "enterprise/$PR_BRANCH" -# check that we can compile after the merge -check_compile diff --git a/ci/check_gucs_are_alphabetically_sorted.sh b/ci/check_gucs_are_alphabetically_sorted.sh index a769ae4fb16..018fc7d35d4 100755 --- a/ci/check_gucs_are_alphabetically_sorted.sh +++ b/ci/check_gucs_are_alphabetically_sorted.sh @@ -4,7 +4,22 @@ set -euo pipefail # shellcheck disable=SC1091 source ci/ci_helpers.sh -# extract citus gucs in the form of "citus.X" -grep -o -E "(\.*\"citus.\w+\")," src/backend/distributed/shared_library_init.c > gucs.out -sort -c gucs.out +# Find the line that exactly matches "RegisterCitusConfigVariables(void)" in +# shared_library_init.c. grep command returns something like +# "934:RegisterCitusConfigVariables(void)" and we extract the line number +# with cut. +RegisterCitusConfigVariables_begin_linenumber=$(grep -n "^RegisterCitusConfigVariables(void)$" src/backend/distributed/shared_library_init.c | cut -d: -f1) + +# Consider the lines starting from $RegisterCitusConfigVariables_begin_linenumber, +# grep the first line that starts with "}" and extract the line number with cut +# as in the previous step. +RegisterCitusConfigVariables_length=$(tail -n +$RegisterCitusConfigVariables_begin_linenumber src/backend/distributed/shared_library_init.c | grep -n -m 1 "^}$" | cut -d: -f1) + +# extract the function definition of RegisterCitusConfigVariables into a temp file +tail -n +$RegisterCitusConfigVariables_begin_linenumber src/backend/distributed/shared_library_init.c | head -n $(($RegisterCitusConfigVariables_length)) > RegisterCitusConfigVariables_func_def.out + +# extract citus gucs in the form of "citus.X" +grep -P "^[\t][\t]\"citus\.[a-zA-Z_0-9]+\"" RegisterCitusConfigVariables_func_def.out > gucs.out +LC_COLLATE=C sort -c gucs.out rm gucs.out +rm RegisterCitusConfigVariables_func_def.out diff --git a/ci/fix_style.sh b/ci/fix_style.sh index 3d6e7ae83eb..bb78d5f5017 100755 --- a/ci/fix_style.sh +++ b/ci/fix_style.sh @@ -19,3 +19,4 @@ ci/disallow_long_changelog_entries.sh ci/normalize_expected.sh ci/fix_gitignore.sh ci/print_stack_trace.sh +ci/sort_and_group_includes.sh diff --git a/ci/include_grouping.py b/ci/include_grouping.py new file mode 100755 index 00000000000..4b1370d6196 --- /dev/null +++ b/ci/include_grouping.py @@ -0,0 +1,157 @@ +#!/usr/bin/env python3 +""" +easy command line to run against all citus-style checked files: + +$ git ls-files \ + | git check-attr --stdin citus-style \ + | grep 'citus-style: set' \ + | awk '{print $1}' \ + | cut -d':' -f1 \ + | xargs -n1 ./ci/include_grouping.py +""" + +import collections +import os +import sys + + +def main(args): + if len(args) < 2: + print("Usage: include_grouping.py ") + return + + file = args[1] + if not os.path.isfile(file): + sys.exit(f"File '{file}' does not exist") + + with open(file, "r") as in_file: + with open(file + ".tmp", "w") as out_file: + includes = [] + skipped_lines = [] + + # This calls print_sorted_includes on a set of consecutive #include lines. + # This implicitly keeps separation of any #include lines that are contained in + # an #ifdef, because it will order the #include lines inside and after the + # #ifdef completely separately. 
+ for line in in_file: + # if a line starts with #include we don't want to print it yet, instead we + # want to collect all consecutive #include lines + if line.startswith("#include"): + includes.append(line) + skipped_lines = [] + continue + + # if we have collected any #include lines, we want to print them sorted + # before printing the current line. However, if the current line is empty + # we want to perform a lookahead to see if the next line is an #include. + # To maintain any separation between #include lines and their subsequent + # lines we keep track of all lines we have skipped inbetween. + if len(includes) > 0: + if len(line.strip()) == 0: + skipped_lines.append(line) + continue + + # we have includes that need to be grouped before printing the current + # line. + print_sorted_includes(includes, file=out_file) + includes = [] + + # print any skipped lines + print("".join(skipped_lines), end="", file=out_file) + skipped_lines = [] + + print(line, end="", file=out_file) + + # move out_file to file + os.rename(file + ".tmp", file) + + +def print_sorted_includes(includes, file=sys.stdout): + default_group_key = 1 + groups = collections.defaultdict(set) + + # define the groups that we separate correctly. The matchers are tested in the order + # of their priority field. The first matcher that matches the include is used to + # assign the include to a group. + # The groups are printed in the order of their group_key. + matchers = [ + { + "name": "system includes", + "matcher": lambda x: x.startswith("<"), + "group_key": -2, + "priority": 0, + }, + { + "name": "toplevel postgres includes", + "matcher": lambda x: "/" not in x, + "group_key": 0, + "priority": 9, + }, + { + "name": "postgres.h", + "matcher": lambda x: x.strip() in ['"postgres.h"'], + "group_key": -1, + "priority": -1, + }, + { + "name": "toplevel citus inlcudes", + "matcher": lambda x: x.strip() + in [ + '"citus_version.h"', + '"pg_version_compat.h"', + '"pg_version_constants.h"', + ], + "group_key": 3, + "priority": 0, + }, + { + "name": "columnar includes", + "matcher": lambda x: x.startswith('"columnar/'), + "group_key": 4, + "priority": 1, + }, + { + "name": "distributed includes", + "matcher": lambda x: x.startswith('"distributed/'), + "group_key": 5, + "priority": 1, + }, + ] + matchers.sort(key=lambda x: x["priority"]) + + # throughout our codebase we have some includes where either postgres or citus + # includes are wrongfully included with the syntax for system includes. Before we + # try to match those we will change the <> to "" to make them match our system. This + # will also rewrite the include to the correct syntax. 
+ common_system_include_error_prefixes = [" 0: + print(file=file) + includes = group[1] + print("".join(sorted(includes)), end="", file=file) + + +if __name__ == "__main__": + main(sys.argv) diff --git a/ci/sort_and_group_includes.sh b/ci/sort_and_group_includes.sh new file mode 100755 index 00000000000..1c3a9145834 --- /dev/null +++ b/ci/sort_and_group_includes.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +set -euo pipefail +# shellcheck disable=SC1091 +source ci/ci_helpers.sh + +git ls-files \ + | git check-attr --stdin citus-style \ + | grep 'citus-style: set' \ + | awk '{print $1}' \ + | cut -d':' -f1 \ + | xargs -n1 ./ci/include_grouping.py diff --git a/images/2pc-recovery.png b/images/2pc-recovery.png new file mode 100755 index 00000000000..8fbe80124a2 Binary files /dev/null and b/images/2pc-recovery.png differ diff --git a/citus-architecture.png b/images/citus-architecture.png similarity index 100% rename from citus-architecture.png rename to images/citus-architecture.png diff --git a/citus-readme-banner.png b/images/citus-readme-banner.png similarity index 100% rename from citus-readme-banner.png rename to images/citus-readme-banner.png diff --git a/citus-scale-out.png b/images/citus-scale-out.png similarity index 100% rename from citus-scale-out.png rename to images/citus-scale-out.png diff --git a/images/coordinator_delegates_stored_procedure.png b/images/coordinator_delegates_stored_procedure.png new file mode 100644 index 00000000000..161bcecdd99 Binary files /dev/null and b/images/coordinator_delegates_stored_procedure.png differ diff --git a/images/deadlock-detection.png b/images/deadlock-detection.png new file mode 100755 index 00000000000..3a7eeb25ba6 Binary files /dev/null and b/images/deadlock-detection.png differ diff --git a/images/executor-connections.png b/images/executor-connections.png new file mode 100755 index 00000000000..064b43353d5 Binary files /dev/null and b/images/executor-connections.png differ diff --git a/images/executor-slow-start.png b/images/executor-slow-start.png new file mode 100755 index 00000000000..6ab32eb81b0 Binary files /dev/null and b/images/executor-slow-start.png differ diff --git a/images/insert-select-modes.png b/images/insert-select-modes.png new file mode 100755 index 00000000000..6d5de65dce8 Binary files /dev/null and b/images/insert-select-modes.png differ diff --git a/images/mx-dedicated-query-nodes.png b/images/mx-dedicated-query-nodes.png new file mode 100755 index 00000000000..d7db99c1e35 Binary files /dev/null and b/images/mx-dedicated-query-nodes.png differ diff --git a/images/single-repartition-join.png b/images/single-repartition-join.png new file mode 100755 index 00000000000..f4f7ddb5559 Binary files /dev/null and b/images/single-repartition-join.png differ diff --git a/src/backend/columnar/columnar.c b/src/backend/columnar/columnar.c index 85ec06d00f4..4914bbc3aa5 100644 --- a/src/backend/columnar/columnar.c +++ b/src/backend/columnar/columnar.c @@ -11,16 +11,18 @@ *------------------------------------------------------------------------- */ -#include "postgres.h" - #include #include +#include "postgres.h" + #include "miscadmin.h" + #include "utils/guc.h" #include "utils/rel.h" #include "citus_version.h" + #include "columnar/columnar.h" #include "columnar/columnar_tableam.h" diff --git a/src/backend/columnar/columnar_compression.c b/src/backend/columnar/columnar_compression.c index 98a175b06fd..2ff35da982c 100644 --- a/src/backend/columnar/columnar_compression.c +++ b/src/backend/columnar/columnar_compression.c @@ -13,12 +13,13 @@ */ 
#include "postgres.h" -#include "citus_version.h" #include "common/pg_lzcompress.h" #include "lib/stringinfo.h" +#include "citus_version.h" +#include "pg_version_constants.h" + #include "columnar/columnar_compression.h" -#include "distributed/pg_version_constants.h" #if HAVE_CITUS_LIBLZ4 #include diff --git a/src/backend/columnar/columnar_customscan.c b/src/backend/columnar/columnar_customscan.c index 4ea96a121e4..9ed82a5bfec 100644 --- a/src/backend/columnar/columnar_customscan.c +++ b/src/backend/columnar/columnar_customscan.c @@ -10,18 +10,17 @@ *------------------------------------------------------------------------- */ -#include "citus_version.h" +#include #include "postgres.h" -#include +#include "miscadmin.h" #include "access/amapi.h" #include "access/skey.h" #include "catalog/pg_am.h" #include "catalog/pg_statistic.h" #include "commands/defrem.h" -#include "miscadmin.h" #include "nodes/extensible.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" @@ -44,10 +43,13 @@ #include "utils/selfuncs.h" #include "utils/spccache.h" +#include "citus_version.h" + #include "columnar/columnar.h" #include "columnar/columnar_customscan.h" #include "columnar/columnar_metadata.h" #include "columnar/columnar_tableam.h" + #include "distributed/listutils.h" /* diff --git a/src/backend/columnar/columnar_debug.c b/src/backend/columnar/columnar_debug.c index cbb0d554fb3..bf12108a9af 100644 --- a/src/backend/columnar/columnar_debug.c +++ b/src/backend/columnar/columnar_debug.c @@ -11,12 +11,12 @@ #include "postgres.h" #include "funcapi.h" +#include "miscadmin.h" + #include "access/nbtree.h" #include "access/table.h" #include "catalog/pg_am.h" #include "catalog/pg_type.h" -#include "distributed/pg_version_constants.h" -#include "miscadmin.h" #include "storage/fd.h" #include "storage/smgr.h" #include "utils/guc.h" @@ -25,6 +25,8 @@ #include "utils/tuplestore.h" #include "pg_version_compat.h" +#include "pg_version_constants.h" + #include "columnar/columnar.h" #include "columnar/columnar_storage.h" #include "columnar/columnar_version_compat.h" diff --git a/src/backend/columnar/columnar_metadata.c b/src/backend/columnar/columnar_metadata.c index e7a6bfa9500..215f9609109 100644 --- a/src/backend/columnar/columnar_metadata.c +++ b/src/backend/columnar/columnar_metadata.c @@ -19,48 +19,51 @@ */ +#include + #include "postgres.h" +#include "miscadmin.h" +#include "port.h" #include "safe_lib.h" -#include "citus_version.h" -#include "columnar/columnar.h" -#include "columnar/columnar_storage.h" -#include "columnar/columnar_version_compat.h" -#include "distributed/listutils.h" - -#include #include "access/heapam.h" #include "access/htup_details.h" #include "access/nbtree.h" #include "access/xact.h" #include "catalog/indexing.h" -#include "catalog/pg_namespace.h" +#include "catalog/namespace.h" #include "catalog/pg_collation.h" +#include "catalog/pg_namespace.h" #include "catalog/pg_type.h" -#include "catalog/namespace.h" #include "commands/defrem.h" #include "commands/sequence.h" #include "commands/trigger.h" #include "executor/executor.h" #include "executor/spi.h" -#include "miscadmin.h" -#include "nodes/execnodes.h" #include "lib/stringinfo.h" -#if PG_VERSION_NUM >= PG_VERSION_16 -#include "parser/parse_relation.h" -#endif -#include "port.h" +#include "nodes/execnodes.h" #include "storage/fd.h" #include "storage/lmgr.h" #include "storage/procarray.h" #include "storage/smgr.h" #include "utils/builtins.h" #include "utils/fmgroids.h" -#include "utils/memutils.h" #include "utils/lsyscache.h" +#include 
"utils/memutils.h" #include "utils/rel.h" + +#include "citus_version.h" +#include "pg_version_constants.h" + +#include "columnar/columnar.h" +#include "columnar/columnar_storage.h" +#include "columnar/columnar_version_compat.h" + +#include "distributed/listutils.h" + #if PG_VERSION_NUM >= PG_VERSION_16 +#include "parser/parse_relation.h" #include "storage/relfilelocator.h" #include "utils/relfilenumbermap.h" #else diff --git a/src/backend/columnar/columnar_reader.c b/src/backend/columnar/columnar_reader.c index 526dd03cbf5..7ef0d15d7da 100644 --- a/src/backend/columnar/columnar_reader.c +++ b/src/backend/columnar/columnar_reader.c @@ -22,16 +22,15 @@ #include "access/xact.h" #include "catalog/pg_am.h" #include "commands/defrem.h" -#include "distributed/listutils.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" -#include "optimizer/optimizer.h" #include "optimizer/clauses.h" +#include "optimizer/optimizer.h" #include "optimizer/restrictinfo.h" #include "storage/fd.h" #include "utils/guc.h" -#include "utils/memutils.h" #include "utils/lsyscache.h" +#include "utils/memutils.h" #include "utils/rel.h" #include "columnar/columnar.h" @@ -39,6 +38,8 @@ #include "columnar/columnar_tableam.h" #include "columnar/columnar_version_compat.h" +#include "distributed/listutils.h" + #define UNEXPECTED_STRIPE_READ_ERR_MSG \ "attempted to read an unexpected stripe while reading columnar " \ "table %s, stripe with id=" UINT64_FORMAT " is not flushed" diff --git a/src/backend/columnar/columnar_storage.c b/src/backend/columnar/columnar_storage.c index 21aa7ab9c93..0ae6ccca3f0 100644 --- a/src/backend/columnar/columnar_storage.c +++ b/src/backend/columnar/columnar_storage.c @@ -36,11 +36,11 @@ #include "postgres.h" +#include "miscadmin.h" #include "safe_lib.h" #include "access/generic_xlog.h" #include "catalog/storage.h" -#include "miscadmin.h" #include "storage/bufmgr.h" #include "storage/lmgr.h" diff --git a/src/backend/columnar/columnar_tableam.c b/src/backend/columnar/columnar_tableam.c index dade931df27..ca3a5f4c4aa 100644 --- a/src/backend/columnar/columnar_tableam.c +++ b/src/backend/columnar/columnar_tableam.c @@ -1,42 +1,38 @@ -#include "citus_version.h" +#include #include "postgres.h" -#include - #include "miscadmin.h" +#include "pgstat.h" +#include "safe_lib.h" +#include "access/detoast.h" #include "access/genam.h" #include "access/heapam.h" #include "access/multixact.h" #include "access/rewriteheap.h" #include "access/tableam.h" #include "access/tsmapi.h" -#include "access/detoast.h" #include "access/xact.h" #include "catalog/catalog.h" #include "catalog/index.h" #include "catalog/namespace.h" #include "catalog/objectaccess.h" #include "catalog/pg_am.h" +#include "catalog/pg_extension.h" #include "catalog/pg_publication.h" #include "catalog/pg_trigger.h" -#include "catalog/pg_extension.h" #include "catalog/storage.h" #include "catalog/storage_xlog.h" #include "commands/defrem.h" +#include "commands/extension.h" #include "commands/progress.h" #include "commands/vacuum.h" -#include "commands/extension.h" #include "executor/executor.h" #include "nodes/makefuncs.h" #include "optimizer/plancat.h" -#include "pg_version_compat.h" -#include "pgstat.h" -#include "safe_lib.h" #include "storage/bufmgr.h" #include "storage/bufpage.h" -#include "storage/bufmgr.h" #include "storage/lmgr.h" #include "storage/predicate.h" #include "storage/procarray.h" @@ -44,17 +40,22 @@ #include "tcop/utility.h" #include "utils/builtins.h" #include "utils/fmgroids.h" +#include "utils/lsyscache.h" #include 
"utils/memutils.h" #include "utils/pg_rusage.h" #include "utils/rel.h" #include "utils/relcache.h" -#include "utils/lsyscache.h" #include "utils/syscache.h" + +#include "citus_version.h" +#include "pg_version_compat.h" + #include "columnar/columnar.h" #include "columnar/columnar_customscan.h" #include "columnar/columnar_storage.h" #include "columnar/columnar_tableam.h" #include "columnar/columnar_version_compat.h" + #include "distributed/listutils.h" /* @@ -2945,7 +2946,7 @@ MajorVersionsCompatibleColumnar(char *leftVersion, char *rightVersion) } else { - rightComparisionLimit = strlen(leftVersion); + rightComparisionLimit = strlen(rightVersion); } /* we can error out early if hypens are not in the same position */ diff --git a/src/backend/columnar/columnar_writer.c b/src/backend/columnar/columnar_writer.c index 3b510ce7460..1bdc612c1f2 100644 --- a/src/backend/columnar/columnar_writer.c +++ b/src/backend/columnar/columnar_writer.c @@ -16,18 +16,25 @@ #include "postgres.h" +#include "miscadmin.h" #include "safe_lib.h" #include "access/heapam.h" #include "access/nbtree.h" #include "catalog/pg_am.h" -#include "miscadmin.h" -#include "pg_version_compat.h" #include "storage/fd.h" #include "storage/smgr.h" #include "utils/guc.h" #include "utils/memutils.h" #include "utils/rel.h" + +#include "pg_version_compat.h" +#include "pg_version_constants.h" + +#include "columnar/columnar.h" +#include "columnar/columnar_storage.h" +#include "columnar/columnar_version_compat.h" + #if PG_VERSION_NUM >= PG_VERSION_16 #include "storage/relfilelocator.h" #include "utils/relfilenumbermap.h" @@ -35,10 +42,6 @@ #include "utils/relfilenodemap.h" #endif -#include "columnar/columnar.h" -#include "columnar/columnar_storage.h" -#include "columnar/columnar_version_compat.h" - struct ColumnarWriteState { TupleDesc tupleDescriptor; diff --git a/src/backend/columnar/write_state_management.c b/src/backend/columnar/write_state_management.c index 27d902e61f5..7f35c5dd179 100644 --- a/src/backend/columnar/write_state_management.c +++ b/src/backend/columnar/write_state_management.c @@ -1,21 +1,17 @@ -#include "citus_version.h" +#include #include "postgres.h" -#include "columnar/columnar.h" - - -#include #include "miscadmin.h" +#include "pgstat.h" #include "access/genam.h" #include "access/heapam.h" +#include "access/heaptoast.h" #include "access/multixact.h" #include "access/rewriteheap.h" #include "access/tsmapi.h" -#include "access/heaptoast.h" -#include "common/hashfn.h" #include "access/xact.h" #include "catalog/catalog.h" #include "catalog/index.h" @@ -26,14 +22,12 @@ #include "catalog/storage_xlog.h" #include "commands/progress.h" #include "commands/vacuum.h" +#include "common/hashfn.h" #include "executor/executor.h" #include "nodes/makefuncs.h" #include "optimizer/plancat.h" -#include "pg_version_compat.h" -#include "pgstat.h" #include "storage/bufmgr.h" #include "storage/bufpage.h" -#include "storage/bufmgr.h" #include "storage/lmgr.h" #include "storage/predicate.h" #include "storage/procarray.h" @@ -44,6 +38,10 @@ #include "utils/rel.h" #include "utils/syscache.h" +#include "citus_version.h" +#include "pg_version_compat.h" + +#include "columnar/columnar.h" #include "columnar/columnar_customscan.h" #include "columnar/columnar_tableam.h" #include "columnar/columnar_version_compat.h" diff --git a/src/backend/distributed/README.md b/src/backend/distributed/README.md new file mode 100644 index 00000000000..e3a9a7a33dd --- /dev/null +++ b/src/backend/distributed/README.md @@ -0,0 +1,2522 @@ + +# Citus Technical Documentation 
+ +The purpose of this document is to provide comprehensive technical documentation for Citus, in particular the distributed database implementation. + +# Table of Contents +- [Citus Concepts](#citus-concepts) + - [Principles](#principles) +- [Use of hooks](#use-of-hooks) +- [Query planner](#query-planner) + - [High-level design/flow:](#high-level-designflow) + - [Distributed Query Planning with Examples in Citus (as of Citus 12.1)](#distributed-query-planning-with-examples-in-citus-as-of-citus-121) + - [Logical Planner & Optimizer](#logical-planner--optimizer) + - [Combine query planner](#combine-query-planner) + - [Restriction Equivalence](#restriction-equivalence) + - [Recurring Tuples](#recurring-tuples) +- [Executor](#executor) + - [Custom scan](#custom-scan) + - [Function evaluation](#function-evaluation) + - [Prepared statements](#prepared-statements) + - [Adaptive executor](#adaptive-executor) + - [Local execution](#local-execution) + - [Subplans](#subplans) + - [Re-partitioning](#re-partitioning) + - [COPY .. FROM command](#copy--from-command) + - [COPY .. TO command](#copy--to-command) + - [INSERT..SELECT](#insertselect) + - [Merge command](#merge-command) +- [DDL](#ddl) + - [Object & dependency propagation](#object--dependency-propagation) + - [Foreign keys](#foreign-keys) + - [DROP Table](#drop-table) +- [Connection management](#connection-management) + - [Connection management](#connection-management-1) + - [Placement connection tracking](#placement-connection-tracking) + - [citus.max_cached_connections_per_worker](#citusmax_cached_connections_per_worker) + - [citus.max_shared_pool_size](#citusmax_shared_pool_size) +- [Transactions (2PC)](#transactions-2pc) + - [Single-node transactions](#single-node-transactions) + - [Multi-node transactions](#multi-node-transactions) + - [No distributed snapshot isolation](#no-distributed-snapshot-isolation) + - [Distributed Deadlocks](#distributed-deadlocks) +- [Locking](#locking) + - [Lock Levels](#lock-levels) + - [Lock Monitoring](#lock-monitoring) + - [Lock Types](#lock-types) +- [Rebalancing](#rebalancing) + - [Rebalancing algorithm](#rebalancing-algorithm) + - [Shard moves](#shard-moves) + - [Shard splits](#shard-splits) + - [Background tasks](#background-tasks) + - [Resource cleanup](#resource-cleanup) +- [Logical decoding / CDC](#logical-decoding--cdc) + - [CDC ordering](#cdc-ordering) +- [Global PID](#global-pid) +- [Function call delegation](#function-call-delegation) +- [Query from any node](#query-from-any-node) + - [Why didn’t we have dedicated Query Nodes and Data Nodes?](#why-didnt-we-have-dedicated-query-nodes-and-data-nodes) + - [Shard visibility](#shard-visibility) + +# Citus Concepts + +**Citus table**: A table managed by Citus through PostgreSQL hooks. Whenever the table is involved in a command, the command is handled by Citus. + +**Shell table**: The “original” (inaccessible) PostgreSQL table which has been converted to a Citus table. Every node has shell tables and the corresponding Citus metadata. + +**Metadata**: The Citus catalog tables, shell tables, and dependent objects that are replicated to all nodes to enable distributed queries from those nodes. + +**Metadata syncing**: The process of replicating metadata from the coordinator to other nodes, which happens when adding a node or any change to the metadata. + +There are several types of Citus tables: + +**Distributed tables** are created using `SELECT create_distributed_table('table_name', 'distribution_column')`. 
They have a distribution column and for each row the value in the distribution column determines to which shard the row is assigned. There are 3 different partitioning schemes for distributed tables, though only the first is supported: + +- Hash-distributed tables have a range of hash values in shardminvalue/shardmaxvalue in pg_dist_shard +- Range-distributed tables (deprecated) have shards with a distinct range of values in pg_dist_shard +- Append-distributed tables (deprecated) have shards with a range of values in pg_dist_shard, though the ranges can overlap. + +Hash-distributed tables can be **co-located** with each other, such that the shards with the same hash value range are always on the same node. From the planner point-of-view, range-distributed tables can also be colocated, but shards are not managed by Citus. + +Shards can be replicated (deprecated), in which case they have multiple shard placements. Writes to the placements are performed using 2PC and use aggressive locking to avoid diverging. + +**Reference tables** are created using SELECT create_reference_table(..). They have a single shard, which is replicated to all nodes. It is possible to have a node without reference table replicas, but in that case the reference tables are replicated before the next rebalance. Reference tables are always co-located with each other and have a common co-location ID. + +Writes to a reference table are performed using 2PC and use aggressive locking to avoid diverging. + +**Single shard tables** are a special type of distributed table without a distribution column and with a single shard. When using schema-based sharding, tables created in a distributed schema automatically become single shard tables. Single shard tables can be co-located with each other, but not replicated. Single shard tables can be explicitly created using `SELECT create_distributed_table('table_name', NULL);`, though are meant to be auto-generated by schema-based sharding. + +**Citus local tables**: A single shard table that can only be placed on the coordinator and are primarily used as a drop-in replacement for regular PostgreSQL tables when creating foreign keys to/from reference tables. All Citus local tables are implicitly co-located with each other, but do not have a co-location ID. Citus local tables can be explicitly created using `SELECT citus_add_local_table_to_metadata('table_name');`, though are meant to be auto-generated by foreign keys. + +All Citus table types have the notion of a “shard”, though in many cases there is only a single shard. + +**Shard**: The table that contains the actual data in a Citus table. Shards reside in the same schema as the Citus table and are named after the Citus table with the shard ID as a suffix. Shards are hidden from catalog queries unless you SET citus.override_table_visibility TO off. Hash-distributed tables have multiple shards, each of which has distinct shardminvalue/shardmaxvalue. + +**Colocation group**: A set of distributed tables that have the same co-location ID, such that their shards are always co-located. + +**Shard group**: A set of shards with the same shardminvalue/shardmaxvalue that are part of the same co-location group. Citus guarantees that shards from the same shard group are always placed on the same node group. + +**Shard placement**: The assignment of a shard to a node group. There can be multiple placements of the same shard if the table is replicated (e.g. reference tables). 
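+
+For illustration (a quick sketch assuming a hash-distributed table such as the `users_table` defined later in this document), the shards of a table and the nodes that host their placements can be inspected through the Citus metadata:
+
+```sql
+-- Shards of a hash-distributed table and their hash ranges
+SELECT shardid, shardminvalue, shardmaxvalue
+FROM pg_dist_shard
+WHERE logicalrelid = 'users_table'::regclass;
+
+-- Node that hosts each of those shard placements
+SELECT placement.shardid, placement.nodename, placement.nodeport
+FROM pg_dist_shard_placement placement
+JOIN pg_dist_shard shard USING (shardid)
+WHERE shard.logicalrelid = 'users_table'::regclass;
+```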
+ +**Shard group placement**: The assignment of a shard to a node group must be the same for all shards in a shard group, since those are always co-located. We’ll refer to the group of placements of a shard group as a shard group placement. + +**Node**: A single PostgreSQL/Citus server listed in pg_dist_node and added via SELECT citus_add_node(..). + +**Node group**: Each primary node can have 0 or more physical replicas in read replica clusters. Together the nodes form a node group identified by the groupid in pg_dist_node. Per convention, the coordinator(s) have group ID 0. Each node can know its own node groupid by reading it from pg_dist_local_group. + +**Coordinator**: The node with groupid 0, which can perform reads, writes, and administrative operations such as adding a node, rebalancing, and schema changes. + +**Worker nodes**: Nodes with groupid > 0, which can perform reads and writes, but not administrative operations. + +**Cluster**: The combination of worker nodes and coordinator is a cluster. When the cluster has a read replica, the nodes in the read replica are listed in pg_dist_node with a different nodecluster value, and the servers have a corresponding citus.cluster_name in their postgresql.conf. That way, nodes know which other nodes in pg_dist_node belong to their cluster, and they will ignore others. + +**Read replica cluster**: In a read replica cluster, every node is a physical replica of a node in a primary Citus cluster. The read replica has a distinct citus.cluster_name value and the nodes in the read replica cluster should be added to pg_dist_node on the primary coordinator with the corresponding cluster name. + +**Client connections**: Connections made by the client to any of the nodes in the cluster. Each client connection is backed by a postgres process/backend, which we sometimes refer to as a client session. + +**Internal connections**: Connections to other nodes made by a command running on a client session. Each internal connection is backed by a process, which we sometimes refer to as an internal session. In the code, you can use IsCitusInternalBackend() + +**Maintenance daemon**: A background worker that is started in each database that has the Citus extension. It performs distributed deadlock detection, 2PC recovery, synchronizing node metadata after citus_update_node, resource cleanup, and other tasks. + +In the query planner, we use the following terminology: + +**Distributed query**: A query sent by the client that involves a Citus table and is therefore handled by the distributed query planner. + +**Shard query**: An internal query on shards (at most 1 shard group). + +**Intermediate result**: A temporary file that contains the result of a subquery or CTE, created as a result of a broadcast or repartition operation. Intermediate results are automatically cleaned up on transaction end or restart. + +## Principles + +Use cases: + +- Multi-tenant apps are the primary use case for Citus, which we can scale through distributing and co-locating by tenant ID, or through schema-based sharding. Citus is reasonably complete for this use case, but there are still SQL and operational improvements that can be made. +- Real-time analytics is another popular use case due the combination of parallel distributed queries with indexes & in-database materialization (ETL). Improvement areas include automated time partitioning, better columnar storage (perf and update/delete), and incremental materialized views. 
+- Citus works well for CRUD use cases, but would be far easier to use if we introduced a load balancer, DDL from any node (no explicit coordinator), and by better use of connection pooling for better performance (e.g. outbound pgbouncers). +- Marketplace use cases could work well if we made it easier to distribute tables twice by different dimensions or made it easier to keep paired tables in sync. + +Schema management: + + - Our goal is for all DDL commands on Citus tables to work transparently, and for global DDL commands (e.g. CREATE TYPE) to be propagated to all nodes. Not all DDL is implemented yet and may either error or not propagate. + - Since we cannot define custom DDL commands for sharding operations, we use functions that are called from a SELECT query. + +Query layer: + +- No incompatibilities with PostgreSQL – any query on a Citus table is supported on an equivalent PostgreSQL table. +- We optimize for maximum pushdown (& performance) over complete compatibility, but our long-term goal is for all queries to be supported in all cases. +- For single-shard queries, it is useful to avoid detailed query analysis through the fast path planner (simple, single table) and router planner (co-located joins) layers. However, multi-shard queries can go through disparate code paths that were added out of expediency and should eventually be unified. + +Transactional semantics: + +- Transactions scoped to a single node follow the same semantics as PostgreSQL. +- Transactions across nodes are atomic, durable, and consistent, but do not have full snapshot isolation: A multi-shard query may see a concurrently committing transaction as committed on one node, but not yet committed on another node. +- Read-your-writes consistency should be preserved. +- Monotonic read consistency should be preserved for tables without replication, may not always be the case for replicated/reference tables. + +Replication model: + +- High availability is achieved through hot standby nodes managed by a control plane or PostgreSQL HA solution like Patroni or pg_auto_failover. +- Read replicas are Citus clusters in which each node is a physical replica of a node in another Citus cluster. +- Hot standby nodes are, at the time of writing, not in the metadata. Instead, the hostname/IP is replaced or rerouted at failover time. +- The deprecated “statement based” replication is (as of Citus 11.0+) only useful for providing read scalability, not for HA as all modifications are done via 2PC. Reference tables do use statement-based replication. + +# Use of hooks + +A PostgreSQL extension consists of two parts: a set of SQL objects (e.g. metadata tables, functions, types) and a shared library that is loaded into PostgreSQL and can alter the behavior of PostgreSQL by setting certain hooks. You can find a high-level description of these concepts in [this talk](https://learn.microsoft.com/en-us/events/azure-cosmos-db-liftoff/foundations-of-distributed-postgresql-with-citus). + +Citus uses the following hooks: + +**User-defined functions (UDFs)** are callable from SQL queries as part of a transaction, but have an implementation in C, and are primarily used to manipulate the Citus metadata and implement remote procedure calls between servers. + +**Planner and executor hooks** are global function pointers that allow an extension to provide an alternative query plan and execution method. After PostgreSQL parses a query, Citus checks if the query involves a Citus table. 
If so, Citus generates a plan tree that contains a CustomScan operator, which encapsulates distributed query plan. + +**CustomScan** is an operator in a PostgreSQL query plan that returns tuples via custom function pointers. The Citus CustomScan calls the distributed query executor, which sends queries to other servers and collects the results before returning them to the PostgreSQL executor. + +**Transaction callbacks** are called at critical points in the lifecycle of a transaction (e.g. pre-commit, post-commit, abort). Citus uses these to implement distributed transactions. + +**Utility hook** is called after parsing any command that does not go through the regular query planner. Citus uses this hook primarily to apply DDL and COPY commands that affect Citus tables. + +**Background workers** run user-supplied code in separate processes. Citus uses this API to run a maintenance daemon. This daemon performs distributed deadlock detection, 2PC prepared transaction recovery, and cleanup. + +Through these hooks, Citus can intercept any interaction between the client and the PostgreSQL engine that involves Citus tables. Citus can then replace or augment PostgreSQL's behavior. + +# Query planner + +Citus has a layered planner architecture that accommodates different workloads. There are several useful presentations/papers that are relevant to Citus’ distributed planner, below we try to categorize them: + +## High-level design/flow: + +- Distributing Queries the Citus Way: Marco’s PG Con presentation provides a good introduction: https://postgresconf.org/system/events/document/000/000/233/Distributing_Queries_the_Citus_Way.pdf + +- Another useful content on this topic is the Planner README.md: https://github.com/citusdata/citus/blob/main/src/backend/distributed/planner/README.md + +- Onder’s talk at CitusCon: https://www.youtube.com/watch?v=raw3Pwv0jb8 +- Citus paper: https://dl.acm.org/doi/pdf/10.1145/3448016.3457551 + +- Logical planner design - 1: https://speakerdeck.com/marcocitus/scaling-out-postgre-sql + +- Logical Planner design - 2: https://www.youtube.com/watch?v=xJghcPs0ibQ + +- Logical Planner based on the paper: Correctness of query execution strategies in distributed databases: https://dl.acm.org/doi/pdf/10.1145/319996.320009 + +## Distributed Query Planning with Examples in Citus (as of Citus 12.1) + +This part of the documentation aims to provide a comprehensive understanding of how Citus handles distributed query planning with examples. We will use a set of realistic tables to demonstrate various queries. Through these examples, we hope to offer a step-by-step guide on how Citus chooses to plan queries. + +Citus hooks into the PostgreSQL planner using the top-level planner_hook function pointer, which sees the query tree after parsing and analysis. If the query tree contains a Citus table, we go through several planner stages: fast path planner, router planner, recursive planning, logical planner & optimizer. Each stage can handle more complex queries than the previous, but also comes with more overhead. That way, we can handle a mixture of high throughput transactional workloads (without adding significant planning overhead), as well as more complex analytical queries (with more sophisticated distributed query execution). For specific types of queries (e.g. insert..select), we have separate planner code paths. 
+ +For a more comprehensive high-level overview of the planner, go to https://postgresconf.org/system/events/document/000/000/233/Distributing_Queries_the_Citus_Way.pdf + +### Table definitions used in this section + +```sql +-- Distributed Table: Users Table +CREATE TABLE users_table ( + user_id bigserial PRIMARY KEY, + username VARCHAR(50) NOT NULL, + email VARCHAR(50), + date_of_birth DATE, + country_code VARCHAR(3) +); +SELECT create_distributed_table('users_table', 'user_id'); + +-- Distributed Table: Orders Table +CREATE TABLE orders_table ( + order_id bigserial, + user_id BIGINT REFERENCES users_table(user_id), + product_id BIGINT, + order_date TIMESTAMPTZ, + status VARCHAR(20) +); +SELECT create_distributed_table('orders_table', 'user_id'); + + +-- Distributed Table: Products Table +CREATE TABLE products_table ( + product_id bigserial PRIMARY KEY, + product_name VARCHAR(100), + category_id INT, + price NUMERIC(10, 2) +); +SELECT create_distributed_table('products_table', 'product_id'); + +-- Reference Table: Country Codes +CREATE TABLE country_codes ( + country_code VARCHAR(3) PRIMARY KEY, + country_name VARCHAR(50) +); +SELECT create_reference_table('country_codes'); + +-- Reference Table: Order Status +CREATE TABLE order_status ( + status VARCHAR(20) PRIMARY KEY, + description TEXT +); +SELECT create_reference_table('order_status'); + +-- Reference Table: Product Categories +CREATE TABLE product_categories ( + category_id INT PRIMARY KEY, + category_name VARCHAR(50) +); +SELECT create_reference_table('product_categories'); +``` + +## Fast Path Router Planner + +The Fast Path Router Planner is specialized in optimizing queries that are both simple in structure and certain to touch a single shard. Importantly, it targets queries on a single shard distributed, citus local or reference tables. This does not mean the planner is restricted to trivial queries; it can handle complex SQL constructs like `GROUP BY`, `HAVING`, `DISTINCT`, etc., as long as these operate on a single table and involve an equality condition on the distribution key (`distribution_key = X`). The main SQL limitation for fast path distributed query planning is the subquery/CTE support. Those are left to the next planner: Router planner. + +The aim of this planner is to avoid relying on PostgreSQL's standard_planner() for planning, which performs unnecessary computations like cost estimation, irrelevant for distributed planning. Skipping the standard_planner has significant performance gains for OLTP workloads. By focusing on "shard-reachable" queries, the Fast Path Router Planner is able to bypass the need for more computationally expensive planning processes, thereby accelerating query execution. + +### Main C Functions Involved: + +- `FastPathPlanner()`: The primary function for creating the fast-path query plan. +- `FastPathRouterQuery()`: Validates if a query is eligible for fast-path routing by checking its structure and the WHERE clause. + +With set client_min_messages to debug4; you should see the following in the DEBUG messages: "DEBUG: Distributed planning for a fast-path router query" + +```sql +-- Fetches the count of users born in the same year, but only +-- for a single country, with a filter on the distribution column +-- Normally we have a single user with id = 15 because it's a PRIMARY KEY +-- this is just to demonstrate that fast-path can handle complex queries +-- with EXTRACT(), COUNT(), GROUP BY, HAVING, etc. 
+SELECT EXTRACT(YEAR FROM date_of_birth) as birth_year, COUNT(*) +FROM users_table +WHERE country_code = 'USA' AND user_id = 15 +GROUP BY birth_year +HAVING COUNT(*) > 10; +``` + +```sql +-- all INSERT commands are by definition fast path +-- router queries in the sense that they do not +-- need any information from Postgres' standard_planner() +INSERT INTO orders_table (user_id, product_id, order_date, status) +VALUES (42, 555, now(), 'NEW'); +``` + +```sql +-- UPDATE/DELETEs can also be qualified as fast path router +-- queries +UPDATE products_table SET price = price * 1.1 WHERE product_id = 555; +``` + +Fast path queries have another important characteristic named "deferredPruning." + +For regular queries, Citus does the shard pruning during the planning phase, meaning that the shards that the query touches are calculated during the planning phase. However, in an ideal world, the shard pruning should happen during the execution and, for a certain class of queries, we support that. In the code, that is denoted by "Job->deferredPruning" field. + +Given that fast path queries are performance critical, they can be planned with prepared statements. When this is done, "Job->deferredPruning" becomes "true". And, the meaning of that is Citus can support PREPARED statements as expected. The first 6 executions of the plan do distributed planning, the rest is cached similar to Postgres' plan caching, and the shard pruning is done during the execution phase. And, if you attach a debugger, you'd see that on the first 6 executions, the debugger will stop at distributed_planner() function, but on the rest, it will not. The shard pruning for the cached command will happen in CitusBeginScan() function. + +To see that in action, checkout the DEBUG messages: + +```sql +set client_min_messages to debug4; +PREPARE p1 (bigint) AS SELECT * FROM users_table WHERE user_id = $1; + +-- 1st execute +execute p1(1); +DEBUG: Deferred pruning for a fast-path router query +DEBUG: Creating router plan +.... +(0 rows) + +-- 2nd execute +execute p1(1); +DEBUG: Deferred pruning for a fast-path router query +DEBUG: Creating router plan +.... +(0 rows) +... +execute p1(1); +execute p1(1); +execute p1(1); +... + +-- 6th execute +execute p1(1); +DEBUG: Deferred pruning for a fast-path router query +DEBUG: Creating router plan +.... +(0 rows) + +-- now, on the 7th execute, you would **NOT** see the fast-path +-- planning anymore, because the query comes from Postgres' +-- query cache +execute p1(1); +DEBUG: constraint value: '1'::bigint +DEBUG: shard count after pruning for users_table: 1 +DEBUG: opening 1 new connections to localhost:9702 +DEBUG: established connection to localhost:9702 for session 8 in 9499 microseconds +DEBUG: task execution (0) for placement (46) on anchor shard (102049) finished in 1281 microseconds on worker node localhost:9702 +DEBUG: Total number of commands sent over the session 8: 1 to node localhost:9702 +(0 rows) +``` + + +## Router Planner in Citus + +### Overview + +The Router Planner plays a key role in Citus' query optimization landscape. While sharing some common traits with the Fast Path Router Planner, it offers unique capabilities as well. Router (and fast path router) planners are the bedrock for the multi-tenant use cases. + +#### Similarities with Fast Path Router Planner + +- **Single Node Routing**: Both planners send queries to a single node. 
Unlike the Fast Path Planner, the Router Planner can work with multiple colocated tables, provided they have filters on their distribution columns. + +- **Query Routing Mechanics**: Router Planner takes the query, verifies if it can be routed, and if so, it replaces original table names with their corresponding shard names, directing the query to the appropriate nodes. + +#### Differences + +- **Subqueries and CTEs**: The Router Planner can manage subqueries and Common Table Expressions (CTEs), routing the entire query to a single node as long as all involved tables have filters on their distribution columns. + +- **Standard Planner Reliance**: Router Planner relies on PostgreSQL's `standard_planner()` to learn the necessary filter restrictions on the tables. + +#### Main C Functions Involved + +- `PlanRouterQuery()`: Responsible for creating the router plan. +- `TargetShardIntervalsForRestrictInfo()`: Retrieves the shard intervals based on restrictions provided by PostgreSQL's `standard_planner()`. + +### Example Router Planner Queries + +```sql +-- Fetch user data and their respective orders for a given user_id +SELECT u.username, o.order_id +FROM users_table u, orders_table o +WHERE u.user_id = o.user_id AND u.user_id = 42; + +-- With Subqueries: +-- Fetch the username and their total order amount +-- for a specific user +SELECT u.username, + (SELECT COUNT(*) FROM orders_table o + WHERE o.user_id = 42 AND + o.user_id = u.user_id) +FROM users_table u +WHERE u.user_id = 42; + +-- Router planner works with CTEs (and UPDATE/DELETE Query): +-- Update the status of the most recent order for a specific user +WITH RecentOrder AS ( + SELECT MAX(order_id) as last_order_id + FROM orders_table + WHERE user_id = 42 +) +UPDATE orders_table +SET status = 'COMPLETED' +FROM RecentOrder +WHERE orders_table.user_id = 42 AND + orders_table.order_id = RecentOrder.last_order_id; +``` + + +## Query Pushdown Planning in Citus + +### Overview + +While Router and Fast-Path Router Planners are proficient at dealing with single-shard commands—making them ideal for multi-tenant and OLTP applications—Citus also excels in analytical use-cases. In these scenarios, a single query is broken down into multiple parallel sub-queries, which are run on various shards across multiple machines, thereby speeding up query execution times significantly. + +#### What is Query Pushdown Planning? + +Query Pushdown Planning is an extension of the Router Planning paradigm. Unlike the latter, which deals with single-shard, single-node queries, Query Pushdown can route a query to multiple shards across multiple nodes. Instead of verifying that all tables have the same filters, as in Router Planning, Query Pushdown ascertains that all tables are joined on their distribution keys. + +#### Core Functions + +The core C function responsible for this check is `RestrictionEquivalenceForPartitionKeys()`, which ensures that tables in the query are joined based on their distribution keys. Initially intended for subqueries, Query Pushdown has been extended to include other cases as well. The decision to utilize Query Pushdown is determined by the `ShouldUseSubqueryPushDown()` function. + +#### Understanding Query Pushdown + +Understanding Query Pushdown Planning and how it extends the simpler Router Planning can help you fully utilize Citus for your analytical workloads. 
+ +#### Key Characteristics of Query Pushdown + +- **High Parallelism**: The query is broken down into multiple sub-queries, leading to parallel execution on multiple shards and nodes. +- **Worker Subquery**: You will typically notice the alias `worker_subquery` in the SQL queries sent to the shards, indicating a pushdown operation. + +### Examples of query pushdown + +#### Basic Example + +```sql +-- Count of distinct product_ids where user_ids from two different tables match +SELECT count(DISTINCT product_id) +FROM ( + SELECT DISTINCT user_id as distinct_user_id + FROM users_table +) foo, orders_table +WHERE orders_table.user_id = distinct_user_id; +``` + +#### Subquery in Target List + +```sql +-- retrieves the most recent order date for each user +SELECT (SELECT MAX(order_date) FROM orders_table o WHERE o.user_id = u.user_id) FROM users_table u; +``` + +#### Subquery in WHERE Clause + +```sql +-- Number of distinct users who have placed an order +SELECT COUNT(DISTINCT u.user_id) +FROM users_table u +WHERE u.user_id IN ( + SELECT o.user_id + FROM orders_table o +); +``` + +#### More Examples + +```sql +-- Count of distinct products per user, with maximum order date from orders +-- as a subquery in the target list + SELECT + (SELECT MAX(o.order_date) FROM orders_table o WHERE o.user_id = u.user_id), + COUNT(DISTINCT o.product_id) +FROM orders_table o, users_table u +WHERE o.user_id = u.user_id +GROUP BY u.user_id; +``` + +#### UPDATE and DELETE with Query Pushdown + +```sql +-- Update status in orders_table for users whose email ends with '@example.com' +UPDATE orders_table o +SET status = 'DISCOUNTED' +FROM users_table u +WHERE o.user_id = u.user_id AND u.email LIKE '%@example.com'; +``` + +```sql +-- Delete orders for users who were born before '2000-01-01' +DELETE FROM orders_table o +USING users_table u +WHERE o.user_id = u.user_id AND u.date_of_birth < '2000-01-01'; +``` + + +## Recursive Planning + +Central to understanding Citus' approach to distributed query planning are two closely interrelated concepts: "Query Pushdown Planning" and "Recursive Planning." These dual strategies lay the foundation for Citus' capacity to manage complex query structures across multiple shards and nodes effectively. + +While Query Pushdown Planning optimizes queries by breaking them into smaller components that can run in parallel across multiple shards, Recursive Planning takes a more nuanced approach. It works its way through the query tree from the deepest level upwards, scrutinizing each subquery to determine its suitability for pushdown. + +The essence of recursive planning lies in treating each recursively planned query in isolation. This means correlated subqueries can't take advantage of recursive planning. However, (sub)queries on local tables can be done via recursive planning. + +This process is primarily executed in the `RecursivelyPlanSubqueryWalker()` C function. In this function, the engine goes to the innermost subquery and assesses whether it can safely be pushed down as a stand-alone query. If it can, the query engine simply moves on. However, if the subquery isn't suitable for pushdown, Citus generates a separate "sub-plan" for that subquery, substituting it with a `read_intermediate_result()` function call. These sub-plans are later executed as independent queries, a task overseen by the `ExecuteSubPlans()` function. + +The engine continues this way, moving upward through each level of subqueries, evaluating and, if needed, creating sub-plans until it reaches the top-level query. 
+ +### Intermediate Results as Reference Tables + +One of the key aspects of Recursive Planning is the use of "intermediate results." These are essentially the outcomes of subqueries that have been recursively planned and executed on worker nodes. Once these intermediate results are obtained, they are treated much like reference tables in the subsequent stages of query planning and execution. The key advantage here is that, like reference tables, these intermediate results can be joined with distributed tables on any column, not just the distribution key. + +### Full SQL Coverage via Recursive Planning + +The practice of recursively creating sub-plans and generating intermediate results offers a workaround for achieving full SQL coverage in Citus. If each subquery in a complex SQL query can be replaced with an intermediate result, then the entire query essentially becomes a query on a reference table. This feature is a crucial aspect for many users who require comprehensive SQL support in their distributed systems. + +### Trade-offs of using recursive planning + +While Recursive Planning brings a lot to the table, it's not without its drawbacks. First, the method inherently adds more network round-trips, as each recursively planned query is executed separately, and its results are pushed back to all worker nodes. Secondly, when functions like `read_intermediate_results` are used to fetch data from these intermediate results, it can confound the Postgres planner, particularly in the context of complex joins. As a result, query estimations may be inaccurate, leading to suboptimal execution plans. + +Understanding these facets of Recursive Planning can provide you with a comprehensive view of how Citus approaches distributed query planning, allowing you to better optimize your database operations. + +This may seem complex at first glance, but it's a bit like a step-by-step puzzle-solving process that the Citus query engine performs to optimize your database queries effectively. To help clarify these intricate mechanics, we'll present a series of examples. 
+ +#### Recursive Plan Example 1: + +In the simplest example, we'll have a single subquery which is NOT pushdown-safe due to LIMIT 1, hence creating a subplan + +```sql +SET client_min_messages TO DEBUG1; +SELECT count(*) FROM (SELECT * FROM users_table LIMIT 1) as foo; +SET +Time: 0.765 ms +DEBUG: push down of limit count: 1 +DEBUG: generating subplan 7_1 for subquery SELECT user_id, username, email, date_of_birth, country_code FROM public.users_table LIMIT 1 +DEBUG: Plan 7 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id, intermediate_result.username, intermediate_result.email, intermediate_result.date_of_birth, intermediate_result.country_code FROM read_intermediate_result('7_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id bigint, username character varying(50), email character varying(50), date_of_birth date, country_code character varying(3))) foo +``` + +#### Recursive Plan Example 2: + +Now, we have multiple subqueries in the same level which are NOT pushdown-safe due to LIMIT 1 and GROUP BY non distribution keys, hence creating a subplan + +```sql +SELECT count(*) FROM + (SELECT * FROM users_table LIMIT 1) as foo, + (SELECT count(*) FROM users_table GROUP BY country_code) as bar; +DEBUG: push down of limit count: 1 +DEBUG: generating subplan 9_1 for subquery SELECT user_id, username, email, date_of_birth, country_code FROM public.users_table LIMIT 1 +DEBUG: generating subplan 9_2 for subquery SELECT count(*) AS count FROM public.users_table GROUP BY country_code +DEBUG: Plan 9 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT intermediate_result.user_id, intermediate_result.username, intermediate_result.email, intermediate_result.date_of_birth, intermediate_result.country_code FROM read_intermediate_result('9_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id bigint, username character varying(50), email character varying(50), date_of_birth date, country_code character varying(3))) foo, (SELECT intermediate_result.count FROM read_intermediate_result('9_2'::text, 'binary'::citus_copy_format) intermediate_result(count bigint)) bar +``` + +#### Recursive Plan Example 3: + +We have a subquery foo that is NOT safe-to-pushdown but once that subquery is replaced with an intermediate result, the rest of the query becomes safe-to-pushdown + +```sql +SELECT count(*) FROM + (SELECT 1 FROM (SELECT user_id FROM users_table LIMIT 1) as foo, + (SELECT * FROM orders_table) as o1, + (SELECT * FROM users_table) as u2 + WHERE + foo.user_id = o1.user_id AND + o1.user_id = u2.user_id) as top_level_subquery; +DEBUG: push down of limit count: 1 +DEBUG: generating subplan 1_1 for subquery SELECT user_id FROM public.users_table LIMIT 1 +DEBUG: Plan 1 query after replacing subqueries and CTEs: SELECT count(*) AS count FROM (SELECT 1 AS "?column?" 
FROM (SELECT intermediate_result.user_id FROM read_intermediate_result('1_1'::text, 'binary'::citus_copy_format) intermediate_result(user_id bigint)) foo, (SELECT orders_table.order_id, orders_table.user_id, orders_table.product_id, orders_table.order_date, orders_table.status FROM public.orders_table) o1, (SELECT users_table.user_id, users_table.username, users_table.email, users_table.date_of_birth, users_table.country_code FROM public.users_table) u2 WHERE ((foo.user_id OPERATOR(pg_catalog.=) o1.user_id) AND (o1.user_id OPERATOR(pg_catalog.=) u2.user_id))) top_level_subquery +``` + + +### More advanced recursive planning constructs +In the previous parts of the recursive planning examples, we only dealt with a subquery at a time. However, recursive planning is capable of considering multiple subqueries in the same query level or converting tables to subqueries in the same level. In this part of the document, let's discuss these advanced query planning capabilities. + + +### Set operations via recursive planning (and query pushdown) + +Set operations like UNION, UNION ALL, and EXCEPT are essentially two subqueries in the same query level. + +> **Note:** The rules for set operation planning on Citus can be confusing and should be taken carefully. + +Citus is capable of "pushing down" certain set operations: UNION and UNION ALL. To allow this, two rules must be met, which are defined in the `SafeToPushdownUnionSubquery()` C code. + +1. The set operation cannot be on the top level; it should be wrapped into a subquery. This is purely an implementation limitation that can and should be eased. +2. For all subqueries, each leaf query should have a "distribution key" on the target list, and the ordinal positions of these "distribution keys" should match across all set operations. This second limitation is required to preserve correctness. + +#### Set operation query examples: + +```sql +-- safe to pushdown +SELECT * FROM (SELECT * FROM users_table UNION SELECT * FROM users_table) as foo; +``` + +```sql +-- not safe to pushdown because the set operation is NOT wrapped into a subquery. +-- Each leaf query is recursively planned. +SELECT * FROM users_table UNION SELECT * FROM users_table; +``` + +```sql +-- not safe to pushdown because the distribution columns do NOT match (e.g., not existing) +SELECT * FROM (SELECT username FROM users_table UNION SELECT username FROM users_table) as foo; +``` + +```sql +-- not safe to pushdown because the distribution columns do NOT match. +SELECT * FROM (SELECT user_id + 1 FROM users_table UNION SELECT user_id - 1 FROM users_table) as foo; +``` + +```sql +-- EXCEPT is never safe to pushdown +SELECT * FROM (SELECT * FROM users_table EXCEPT SELECT * FROM users_table) as foo; +``` + + + +### Set operations and joins + + +Although not very common, some users might have joins along with set operations. Example queries might look like: + +- `(SELECT .. t1 JOIN t2) UNION (SELECT t2 JOIN t3)` +- `(SELECT .. t1 UNION SELECT t2) JOIN t3 ..` +- `((SELECT .. t1 JOIN t2) UNION (SELECT t2 JOIN t3)) JOIN t4` + +For all these cases, similar rules apply: + +- JOINs should be made on the distribution keys. +- SET operations should satisfy the `SafeToPushdownUnionSubquery()` conditions. + +When combined, all conditions should match. + +#### Safe to Pushdown Examples: + +```sql +-- All joins are on the distribution key and all the unions have the distribution key in the same ordinal position. 
+SELECT * FROM ( + (SELECT user_id FROM users_table u1 JOIN users_table u2 USING (user_id)) + UNION + (SELECT user_id FROM users_table u1 JOIN users_table u2 USING (user_id)) +) as foo; +``` + +```sql +-- All joins are on the distribution key and all the unions have the distribution key in the same ordinal position. +SELECT * FROM + (SELECT user_id FROM users_table u1 UNION + SELECT user_id FROM users_table u2) as foo + JOIN + users_table u2 + USING (user_id); +``` + +### HAVING subqueries via recursive planning + + +Postgres allows the HAVING clause to contain subqueries. If the subqueries in the HAVING clause don't reference the outer query (i.e., not correlated), then it's possible to recursively plan the subquery in the HAVING clause. This involves using the `RecursivelyPlanAllSubqueries()` function specifically for the HAVING clause. + +#### Example: + +```sql +-- Find user_ids who have placed more orders than the average number of orders per user. +SELECT + u.user_id, + COUNT(o.order_id) AS total_orders +FROM + users_table u +JOIN + orders_table o ON u.user_id = o.user_id +GROUP BY + u.user_id +HAVING + COUNT(o.order_id) > (SELECT AVG(order_count) FROM ( + SELECT + user_id, + COUNT(order_id) AS order_count + FROM + orders_table + GROUP BY + user_id) AS subquery); +``` + + +### Non-colocated subqueries via recursive planning + +Assume that there are two subqueries; each subquery is individually joined on their distribution keys. However, when the two subqueries are joined on arbitrary keys, the non-colocated subquery join logic kicks in, as described in `RecursivelyPlanNonColocatedSubqueries()`. + +#### Non-colocated subquery Example 1: + +```sql +-- Find users who do not have orders with status 'shipped' and 'pending' +-- Sub1 and Sub2 are individually safe to pushdown. +-- The join condition between them is: sub1.user_id != sub2.user_id, which does not preserve distribution key equality. +-- Citus qualifies sub1 as the anchor subquery and checks whether all other subqueries are joined on the distribution key. +-- In this case, sub2 is not joined on the distribution key, so Citus decides to recursively plan the whole sub2. +SELECT sub1.user_id, sub2.user_id +FROM ( + SELECT u.user_id + FROM users_table u + JOIN orders_table o ON u.user_id = o.user_id + WHERE o.status = 'shipped' + GROUP BY u.user_id +) AS sub1 +JOIN ( + SELECT u.user_id + FROM users_table u + JOIN orders_table o ON u.user_id = o.user_id + WHERE o.status = 'pending' + GROUP BY u.user_id +) AS sub2 ON sub1.user_id != sub2.user_id; +``` + +#### Non-colocated subquery Example 2: + +```sql +-- Similar logic also applies for subqueries in the WHERE clause. +-- Both the query in the FROM clause and the subquery in the WHERE clause are individually safe to pushdown. +-- However, as a whole, the query is not safe to pushdown. +-- Therefore, Citus decides to recursively plan the subquery in the WHERE clause. +SELECT o1.order_id, o1.order_date +FROM orders_table o1, users_table u1 +WHERE o1.user_id = u1.user_id +AND o1.order_date IN ( + SELECT o2.order_date + FROM orders_table o2, users_table u2 + WHERE o2.user_id = u2.user_id AND o2.status = 'shipped' +); +``` + + +### Local table - distributed table JOINs via recursive planning + +In Citus, joins between a local table and a distributed table require special handling. The local table data resides on the Citus coordinator node, while the distributed table data is across multiple worker nodes. The `RecursivelyPlanLocalTableJoins()` C function handles this. 
+ +#### Performance Characteristics + +Local and distributed table joins have specific performance traits. They push down filters and projections, meaning only relevant data is pulled to the coordinator. See the `RequiredAttrNumbersForRelation()` and `ReplaceRTERelationWithRteSubquery()` functions for more details. + +#### How It Works + +1. Citus scans the query tree to find joins between local and distributed tables. +2. Upon finding such a join, Citus forms a sub-plan for the local table. +3. This sub-plan retrieves relevant data from the local table into an intermediate result and distributes it across worker nodes. +4. The original query is then rewritten, replacing the local table with these intermediate results. +5. Finally, this new query, now only involving distributed tables, is executed using Citus's standard query execution engine. + +#### Example 1 + +For example, consider a local table `local_users` and a distributed table `orders_table`. A query like this: + +```sql +SELECT * +FROM local_users l, orders_table o +WHERE l.user_id = o.user_id; +``` + +Would be internally transformed by Citus as follows: + +```sql +-- Create a temporary reference table and populate it with local table data +CREATE TEMP TABLE temp_local_users AS SELECT * FROM local_users; +SELECT create_reference_table('temp_local_users'); + +-- Replace the local table with the temporary distributed table in the original query +SELECT * +FROM temp_local_users t, orders_table o +WHERE t.user_id = o.user_id; +``` + + +#### Configuration Option + +By tweaking `citus.local_table_join_policy`, you can control how Citus behaves for queries involving local and distributed tables. The default behavior is to pull local table data to the coordinator, with exceptions for distributed tables filtered on primary key or unique index. + +#### Example 2 + +For instance, when the distributed table is guaranteed to return at most one row, Citus chooses to recursively plan the distributed table: + +```sql +SELECT * +FROM local_users l, orders_table o +WHERE l.user_id = o.user_id AND o.primary_key = 55; +``` + + + +### Ref table LEFT JOIN distributed table JOINs via recursive planning + +Very much like local-distributed table joins, Citus can't push down queries formatted as: +```sql +"... ref_table LEFT JOIN distributed_table ..." +``` +This is the case when the outer side is a recurring tuple (e.g., reference table, intermediate results, or set returning functions). + +In these situations, Citus recursively plans the "distributed" part of the join. Even though it may seem excessive to recursively plan a distributed table, remember that Citus pushes down the filters and projections. Functions involved here include `RequiredAttrNumbersForRelation()` and `ReplaceRTERelationWithRteSubquery()`. + +The core function handling this logic is `RecursivelyPlanRecurringTupleOuterJoinWalker()`. There are likely numerous optimizations possible (e.g., first pushing down an inner JOIN then an outer join), but these have not been implemented due to their complexity. 
+ +#### Example Query + +Here's an example that counts the number of orders for each status, including only statuses that also appear in the reference table: + +```sql +SELECT os.status, COUNT(o.order_id) +FROM order_status os +LEFT JOIN orders_table o ON os.status = o.status +GROUP BY os.status; +``` + +#### Debug Messages +``` +DEBUG: recursively planning right side of the left join since the outer side is a recurring rel +DEBUG: recursively planning distributed relation "orders_table" "o" since it is part of a distributed join node that is outer joined with a recurring rel +DEBUG: Wrapping relation "orders_table" "o" to a subquery +DEBUG: generating subplan 45_1 for subquery SELECT order_id, status FROM public.orders_table o WHERE true +``` + +### Recursive Planning When FROM Clause has Reference Table (or Recurring Tuples) + +This section discusses a specific scenario in Citus's recursive query planning: handling queries where the main query's `FROM` clause is recurring, but there are subqueries in the `SELECT` or `WHERE` clauses involving distributed tables. + +#### Definitions + +- **Recurring**: Here, "recurring" implies that the `FROM` clause doesn't contain any distributed tables. Instead, it may have reference tables, local tables, or set-returning functions. + +- **Subqueries in SELECT and WHERE**: In case the main query's `FROM` clause is recurring, then no distributed tables should be present in the `SELECT` and `WHERE` subqueries. + +#### Citus's Approach + +Citus solves this by recursively planning these problematic subqueries, effectively replacing them with calls to `read_intermediate_result()`. + +#### Handling the WHERE Clause + +For the `WHERE` clause, the function `RecursivelyPlanAllSubqueries` is called, transforming all subqueries within it. + +```sql +-- Main query FROM clause is recurring, but +-- WHERE clause contains a pushdownable subquery from +-- orders_table (distributed table) +SELECT country_name +FROM country_codes +WHERE country_code IN + (SELECT country_code FROM users_table WHERE user_id IN (SELECT user_id FROM orders_table)); +``` + +#### Handling the SELECT Clause + +Similarly, `RecursivelyPlanAllSubqueries` is called for the `SELECT` clause to replace any existing subqueries. + +```sql +-- Main query FROM clause is recurring, but SELECT clause contains a subquery from orders_table (distributed table) +SELECT + (SELECT COUNT(*) FROM orders_table WHERE status = 'shipped') AS shipped_orders, country_name +FROM country_codes; +``` + +In both examples, since the main query's `FROM` clause is recurring and involves subqueries on distributed tables in `WHERE` or `SELECT`, Citus uses `RecursivelyPlanAllSubqueries` to manage these subqueries. + +### Logical Planner & Optimizer + +At the high level, all multi-task queries go through the logical planner. However, when it comes to query pushdown or the recursive planner, the logical planner does very little. Most of its complexity deals with multi-shard queries that don't fall into these categories. Below, we are going to discuss those details. + +#### Simple Example + +The simplest example of a query processed by the logical planner would be: + +```sql +SELECT * FROM users_table; +``` + +#### Academic Background + +The logical planner implements the concepts from the paper: "Correctness of query execution strategies in distributed databases." The paper is available [here](https://dl.acm.org/doi/pdf/10.1145/319996.320009). 
+ +If you find the paper hard to read, Marco provides a good introduction to the same concepts in the following presentation: + +- [YouTube Video](https://www.youtube.com/watch?v=xJghcPs0ibQ) +- [Speaker Deck](https://speakerdeck.com/marcocitus/scaling-out-postgre-sql) + +#### Core Functions + +We assume you have either watched the video or read the paper. The core C functions involved are `MultiLogicalPlanCreate()`, `MultiNodeTree()`, and `MultiLogicalPlanOptimize()`. + +Citus has a rules-based optimizer. The core function `MultiLogicalPlanCreate()` maps the SQL query to a C structure (e.g., `MultiNode`). Then `MultiLogicalPlanOptimize()` applies available optimizations to the `MultiNode`. + +For instance, one simple optimization pushes the "filter" operation below the "MultiCollect." Such rules are defined in the function `Commutative()` in `multi_logical_optimizer.c`. + +The most interesting part of the optimizer is usually in the final stage, when handling the more complex operators (GROUP BY, DISTINCT, window functions, ORDER BY, aggregates). These operators are conjoined in a `MultiExtendedOpNode`. In many cases, they can only partially be pushed down into the worker nodes, which results in one `MultiExtendedOpNode` above the `MultiCollect` (which will run on the coordinator and aggregate across worker nodes), and another `MultiExtendedOpNode` below the `MultiCollect` (which will be pushed down to worker nodes). The bulk of the logic for generating the two nodes lives in `MasterExtendedOpNode()` and `WorkerExtendedOpNode()`, respectively. + +##### Aggregate functions + +[Aggregate functions](https://www.postgresql.org/docs/current/sql-createaggregate.html) can appear in the SELECT (target list) or HAVING clause of a query, often in the context of a `GROUP BY`. An aggregate primarily specifies a state function (`sfunc`), which is called for every row in the group, and an `stype`, the type in which the intermediate state is held, which may be `internal`. Many aggregates also have a `finalfunc`, which converts the last `stype` value to the final result of the aggregate function. + +Citus supports distributing aggregate functions in several ways described below, each with an example. + +**Aggregate functions in queries that group by the distribution column can be fully pushed down, since no cross-shard aggregation is needed**. This is mostly handled by the rules in `CanPushDownExpression`. + +Example: + +```sql +select x, avg(y) from test group by x; +DEBUG: combine query: SELECT x, avg FROM pg_catalog.citus_extradata_container(10, NULL::cstring(0), NULL::cstring(0), '(i 1)'::cstring(0)) remote_scan(x integer, avg numeric) +NOTICE: issuing SELECT x, avg(y) AS avg FROM public.test_102041 test WHERE true GROUP BY x +NOTICE: issuing SELECT x, avg(y) AS avg FROM public.test_102042 test WHERE true GROUP BY x +... +``` + +**Built-in, or well-known aggregate functions (based on their name) are distributed using custom rules**. An almost-complete list of aggregates that are handled in this way can be found in the `AggregateNames` variable. Examples are `avg`, `sum`, `count`, `min`, `max`. To distribute an aggregate function like `avg`, the optimizer implements rules such as injecting a `sum` and `count` aggregate in the worker target list, and doing a `sum(sum)/sum(count)` on the master target list. The logic is agnostic to types, so it will work for any custom type that implements aggregate functions with the same name.
+ +Example: + +```sql +select y, avg(x) from test group by y; +DEBUG: combine query: SELECT y, (pg_catalog.sum(avg) OPERATOR(pg_catalog./) pg_catalog.sum(avg_1)) AS avg FROM pg_catalog.citus_extradata_container(10, NULL::cstring(0), NULL::cstring(0), '(i 1)'::cstring(0)) remote_scan(y integer, avg bigint, avg_1 bigint) GROUP BY y +NOTICE: issuing SELECT y, sum(x) AS avg, count(x) AS avg FROM public.test_102041 test WHERE true GROUP BY y +NOTICE: issuing SELECT y, sum(x) AS avg, count(x) AS avg FROM public.test_102042 test WHERE true GROUP BY y +``` + +**Aggregates that specify a `combinefunc` and have a non-internal `stype` are distributed using generic aggregate functions**. The `worker_partial_agg` aggregate function is pushed down to the worker and runs the `sfunc` of the custom aggregate across the tuples of a shard without running the `finalfunc` (which should come after `combinefunc`). The `coord_combine_agg` aggregate function runs the `combinefunc` across the `stype` values returned by `worker_partial_agg` and runs the `finalfunc` to obtain the final result of the aggregate function. This approach currently does not support aggregates whose `stype` is `internal`. A reason for not handling `internal` is that it is not clear that such values can always be safely transferred to a different server, though that may be overly pedantic. + +Example: +```sql +select st_memunion(geo) from test; +DEBUG: combine query: SELECT coord_combine_agg('351463'::oid, st_memunion, NULL::postgis_public.geometry) AS st_memunion FROM pg_catalog.citus_extradata_container(10, NULL::cstring(0), NULL::cstring(0), '(i 1)'::cstring(0)) remote_scan(st_memunion cstring) +NOTICE: issuing SELECT worker_partial_agg('postgis_public.st_memunion(postgis_public.geometry)'::regprocedure, geo) AS st_memunion FROM public.test_102041 test WHERE true +NOTICE: issuing SELECT worker_partial_agg('postgis_public.st_memunion(postgis_public.geometry)'::regprocedure, geo) AS st_memunion FROM public.test_102042 test WHERE true +``` + +**Other aggregates are computed fully above the `MultiCollect` node, meaning the source data is pulled to the coordinator.** If this is undesirable due to the performance/load risk, it can be disabled using `citus.coordinator_aggregation_strategy = 'disabled'`, in which case such aggregate function calls result in an error. + +Example: +```sql +select st_union(geo) from test; +DEBUG: combine query: SELECT postgis_public.st_union(st_union) AS st_union FROM pg_catalog.citus_extradata_container(10, NULL::cstring(0), NULL::cstring(0), '(i 1)'::cstring(0)) remote_scan(st_union postgis_public.geometry) +NOTICE: issuing SELECT geo AS st_union FROM public.test_102041 test WHERE true +NOTICE: issuing SELECT geo AS st_union FROM public.test_102042 test WHERE true +``` + +### Multi Join Order + +**Context and Use Case**: +This query planning mechanism is primarily geared towards data warehouse-style query planning. It's worth noting that the Citus team has not actively pursued optimizations in this direction, resulting in some non-optimized code paths. + +**Join Order Optimization**: +In Citus' logical planner, the `JoinOrderList()` function serves to choose the most efficient join order possible. However, its primary focus has been on joins that require repartitioning, as well as some specific non-repartition joins. For example, joins on distribution keys that are not eligible for pushdown planning may pass through this code path, although no optimizations are made in those cases.
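+
+As a brief sketch (using the example tables defined earlier in this document), a join between two distributed tables on a column that is not the distribution key of one of them goes through this code path as a repartition join, provided `citus.enable_repartition_joins` is enabled:
+
+```sql
+SET citus.enable_repartition_joins TO on;
+
+-- orders_table is distributed on user_id and products_table on product_id,
+-- so this join is not on orders_table's distribution key and requires repartitioning.
+SELECT count(*)
+FROM orders_table o
+JOIN products_table p ON o.product_id = p.product_id;
+```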
+ +**Algorithm Simplicity**: +The current algorithm, encapsulated in the `BestJoinOrder()` function, is relatively naive. While it aims to minimize the number of repartition joins, it does not provide a performance evaluation for each of them. This function provides room for performance optimizations, especially when dealing with complex joins that necessitate repartitioning. + +**Control via GUCs**: +Two GUCs control the behavior of repartitioning in Citus: `citus.enable_single_hash_repartition_joins` and `citus.repartition_join_bucket_count_per_node`. + +- **citus.enable_single_hash_repartition_joins**: + The default value is "off". When "off", both tables involved in the join are repartitioned. When "on", if one table is already joined on its distribution key, only the other table is repartitioned. + +- **citus.repartition_join_bucket_count_per_node**: + This setting defines the level of parallelism during repartitioning. The reason for the "off" default is tied to this GUC. Opting for a fixed bucket count, rather than dynamically adjusting based on shard count, provides more stability and safety. If you ever consider changing these defaults, be cautious of the potential performance implications. + + +### Combine Query + +- **Overview**: + The multi-task SELECT queries pull results to the coordinator, and the tuples returned always go through the "combine query". + +- **Structure and Source**: + The `combineQuery` can be traced back through the `DistributedPlan->combineQuery` struct. This query is essentially constructed in the `CreatePhysicalDistributedPlan` function. However, the actual source comes from `MasterExtendedOpNode()` within the logical optimizer. For deeper insights into this logic, you can refer to the paper and video links shared under the "Logical Planner & Optimizer" section. + +- **Example**: + The simplest example is the following where Citus sends `count(*)` to the shards, and needs to do a `sum()` on top of the results collected from the workers. + ```sql + SET client_min_messages TO DEBUG4; + DEBUG: generated sql query for task 1 + DETAIL: query string: "SELECT count(*) AS count FROM public.users_table_102008 users_table WHERE true" + .... + DEBUG: combine query: SELECT COALESCE((pg_catalog.sum(count))::bigint, '0'::bigint) AS count FROM pg_catalog.citus_extradata_container(10, NULL::cstring(0), NULL::cstring(0), '(i 1)'::cstring(0)) remote_scan(count bigint) + D + ``` + + +### CTE Processing + +- **In Postgres 13 and Later Versions**: +In Postgres 13 and later versions, CTEs (Common Table Expressions) are almost like subqueries. Usually, these CTEs are transformed into subqueries during `standard_planner()`. Citus follows the same approach via `RecursivelyInlineCtesInQueryTree()`. + +- **Additional Consideration in Citus**: +For Citus, there's an additional consideration. CTEs that aren't inlined get materialized. In the Citus context, materialization converts these CTEs into intermediate results. Some users leverage this for achieving full-SQL coverage. + +- **Extra CTE Check in Citus**: + Citus includes an extra check before inlining CTEs, conducted by the function `TryCreateDistributedPlannedStmt`. Here, the planner first tries to inline all CTEs and then checks whether Citus can still plan the query. If not, the CTEs remain as is, leading to their materialization. If all CTEs are materialized (e.g., read_intermediate_result), then the query becomes equivalent of a query on reference table, hence full SQL. 
+ + **Examples for Better Understanding**: + I understand the logic might seem complex at first. Simple examples will be provided for better understanding. + +```sql +-- a CTE that is inlined as subquery, and does a query-pushdown +WITH cte_1 AS (SELECT DISTINCT user_id FROM orders_table) +SELECT * FROM cte_1; + +``` + +So, from Citus' query planning perspective + the above CTE is equivalent to the following subquery + + ```sql +SELECT * FROM + (SELECT DISTINCT user_id FROM orders_table) cte_1; +``` + +Once a CTE is inlined, then the rest of the query + planning logic kicks in +for example, below, the cte is inlined and then +because the subquery is NOT safe to pushdown +it is recursively planned +```sql +WITH cte_1 AS (SELECT DISTINCT product_id FROM orders_table) +SELECT * FROM cte_1; +.. +DEBUG: CTE cte_1 is going to be inlined via distributed planning +DEBUG: generating subplan 81_1 for subquery SELECT DISTINCT product_id FROM public.orders_table +DEBUG: Plan 81 query after replacing subqueries and CTEs: SELECT product_id FROM (SELECT intermediate_result.product_id FROM read_intermediate_result('81_1'::text, 'binary'::citus_copy_format) intermediate_result(product_id bigint)) cte_1; +``` + +- **Which CTEs Are Materialized**: + Citus follows the same rules as Postgres. See [Postgres documentation](https://www.postgresql.org/docs/current/queries-with.html#id-1.5.6.12.7). + +```sql +-- the same query as the first query +-- but due to MATERIALIZED keyword +-- Citus converts the CTE to intermediate result +WITH cte_1 AS MATERIALIZED (SELECT DISTINCT user_id FROM orders_table) +SELECT * FROM cte_1; + +-- the same query as the first query +-- but as the same cte used twice +-- Citus converts the CTE to intermediate result +WITH cte_1 AS (SELECT DISTINCT user_id FROM orders_table) +SELECT * FROM cte_1 as c1 + JOIN cte_1 as c2 USING (user_id); +``` + +- **Citus Specific Materialization**: + Citus first tries to inline the CTEs, but if it decides that after inlining the query cannot be supported due Citus' SQL limitations, it lets the CTE to be materialized. + +As of writing this document, Citus does NOT support + GROUPING SETs on distributed tables/subqueries. So, + when we inline the CTE, then Citus would try to plan + a query with GROUPING SETs on a distributed table, which + would fail. Then, citus would materialize the cte + and the final query would be GROUPING SET on an + intermediate result, hence can be supported + +```sql +WITH users_that_have_orders AS (SELECT users_table.* FROM users_table JOIN orders_table USING (user_id)) +SELECT max(date_of_birth) +FROM users_that_have_orders +GROUP BY GROUPING SETS (user_id, email); +... +DEBUG: CTE users_that_have_orders is going to be inlined via distributed planning +... +DEBUG: Planning after CTEs inlined failed with +message: could not run distributed query with GROUPING SETS, CUBE, or ROLLUP +hint: Consider using an equality filter on the distributed table''s partition column. +... +DEBUG: generating subplan 98_1 for CTE users_that_have_orders: SELECT users_table.user_id, users_table.username, users_table.email, users_table.date_of_birth, users_table.country_code FROM (public.users_table JOIN public.orders_table USING (user_id)) +``` + + +### INSERT Query Planning + + **At a High-Level Overview**: +- There are approximately 4 different ways that an INSERT command can be planned in Citus. The first one is the INSERT ... SELECT command, which will be discussed separately. 
+ + **INSERT with Sublink (Not Supported)**: +```sql +INSERT INTO users_table (user_id) VALUES ((SELECT count(8) FROM orders_table)); +ERROR: subqueries are not supported within INSERT queries +HINT: Try rewriting your queries with 'INSERT INTO ... SELECT' syntax. + +INSERT INTO users_table (user_id) VALUES (1) RETURNING (SELECT count(*) FROM users_table); +ERROR: subqueries are not supported within INSERT queries +HINT: Try rewriting your queries with 'INSERT INTO ... SELECT' syntax. +``` + + **Simple Inserts with a Single VALUES Clause**: +-- As noted in the "fast-path router planner", these INSERT commands are planned with fast-path planning. This does not require calling into `standard_planner()`, and the distribution key should be extracted from the query itself. +```sql +INSERT INTO users_table VALUES (1, 'onder', 'onderkalaci@gmail.com', now() - '5 years'::interval, 'TR'); +``` + + **Main Functions**: + The main functions involved in this path are `RegenerateTaskListForInsert()`, `FastPathRouterQuery()`, and `RouterInsertTaskList`. For single-row INSERT tasks, `Job->deferredPruning=true`, meaning we can always do the shard pruning during execution. + + **Multi-row INSERTs**: +For multi-row INSERTs, `RouterInsertTaskList()` becomes slightly more interesting. Citus groups rows by target shard. +```sql +INSERT INTO orders_table (order_id, user_id) VALUES + (1, 1), (2, 2), (3, 1), (4, 3), (5, 2); +``` + + **Debug Info**: + Debug information shows how the query is rebuilt for different user_ids. Here, the shard_count is 4. +```sql +-- for user_id: 1 +DEBUG: query after rebuilding: INSERT INTO public.orders_table_102041 AS citus_table_alias (order_id, user_id) VALUES ('1'::bigint,'1'::bigint), ('3'::bigint,'1'::bigint) + +-- for user_id: 3 +DEBUG: query after rebuilding: INSERT INTO public.orders_table_102055 AS citus_table_alias (order_id, user_id) VALUES ('4'::bigint,'3'::bigint) + +-- for user_id: 2 +DEBUG: query after rebuilding: INSERT INTO public.orders_table_102064 AS citus_table_alias (order_id, user_id) VALUES ('2'::bigint,'2'::bigint), ('5'::bigint,'2'::bigint) +``` + + + +### INSERT.. SELECT and MERGE Command Query Planning + + **Overview**: +-- This section discusses `INSERT .. SELECT` and `MERGE` commands, which share almost identical planning logic. + + **Planning Methods**: + Broadly, there are three methods to plan these commands: +1. Pushdown +2. Pull-to-coordinator +3. Repartition + + **Performance Considerations**: + When it comes to performance and resource utilization, pushdown is generally the most efficient. For handling large data sizes, the repartition method scales better than the pull-to-coordinator method. + + **Further Reading**: + For more detailed information on pushdown and repartition methods, refer to this [blog post](https://www.citusdata.com/blog/2023/07/27/how-citus-12-supports-postgres-merge/). The post focuses on the `MERGE` command but is also applicable to `INSERT .. SELECT`. + + **Examples**: + The following section will delve into examples, starting with simple ones and moving to more complex scenarios. + +### INSERT.. SELECT Query Planning + + **Overview**: + The `INSERT .. SELECT` pushdown logic builds upon the pushdown planning for `SELECT` commands. The key requirements include colocated tables and matching distribution columns. Relevant C functions are `CreateDistributedInsertSelectPlan`, `DistributedInsertSelectSupported()`, and `AllDistributionKeysInQueryAreEqual`. + + **Additional Conditions for INSERT .. 
SELECT pushdown**: +- The destination table's distribution keys should match the source query's distribution column. + + **Simplest INSERT .. SELECT Pushdown Example**: +```sql +INSERT INTO users_table SELECT * FROM users_table; +``` + + **INSERT .. SELECT with Subqueries/Joins**: + Provided subqueries can be pushed down, additional checks such as matching distribution columns are performed. +```sql +INSERT INTO users_table + SELECT users_table.* FROM users_table, + (SELECT user_id FROM users_table JOIN orders_table USING (user_id)) as foo + WHERE foo.user_id = users_table.user_id; +``` + + **Non-pushdownable Scenarios**: + + **Due to Distribution Key Mismatch**: + Citus opts for repartitioning since no "merge step" is needed for the `SELECT` query. The deciding function is `IsRedistributablePlan()`. + ```sql + INSERT INTO users_table (user_id) SELECT user_id + 1 FROM users_table; + ``` + + **Due to LIMIT**: + The `SELECT` query requires a "merge step" for the `LIMIT` clause. Citus uses the pull-to-coordinator strategy. + ```sql + INSERT INTO users_table SELECT * FROM users_table LIMIT 5; + ``` + + **Pull-to-Coordinator Details**: + Citus typically pulls `SELECT` results and initiates a `COPY` command to the destination table. See `NonPushableInsertSelectExecScan()`. + +**Special Cases**: + **ON CONFLICT or RETURNING**: + In these cases, a simple `COPY` is insufficient. Citus pushes results as "colocated intermediate files" on the workers, which are colocated with the target table's shards. Then, Citus performs an `INSERT .. SELECT` on these colocated intermediate results. See `ExecutePlanIntoColocatedIntermediateResults()` and `GenerateTaskListWithColocatedIntermediateResults()`. + + **Example: Pull-to-coordinator with COPY back to shards**: +```sql +INSERT INTO users_table SELECT * FROM users_table LIMIT 5; +``` + + **Example: Pull-to-coordinator with push as colocated intermediate results**: +```sql +INSERT INTO users_table SELECT * FROM users_table LIMIT 5 ON CONFLICT(user_id) DO NOTHING; +``` + + +### MERGE Command Query Planning + + **Overview**: + The `MERGE` command planning is similar to `INSERT .. SELECT`. The key difference is in the pull-to-coordinator strategy. `MERGE` always uses "colocated intermediate result" files, as the final executed command must be a `MERGE` command, not a `COPY`. The entry function in the code is `CreateMergePlan()`. + +**Further Reading**: + For more insights, check out this [blog post](https://www.citusdata.com/blog/2023/07/27/how-citus-12-supports-postgres-merge/). + + **Pushdown MERGE Example**: + The join is based on the distribution key. +```sql +MERGE INTO users_table u +USING orders_table o +ON (u.user_id = o.user_id) +WHEN MATCHED AND o.status = 'DONE' THEN DELETE; +``` + +**Pull-to-Coordinator MERGE Example**: + The source query requires a "merge step" on the coordinator. +```sql +MERGE INTO users_table u +USING (SELECT * FROM orders_table ORDER BY order_date LIMIT 50) o +ON (u.user_id = o.user_id) +WHEN MATCHED AND o.status = 'DONE' THEN DELETE; +``` + + **Repartition MERGE Example**: + The join is NOT on the distribution key, and the source query doesn't require a "merge step" on the coordinator. Note that this example is mostly hypothetical to illustrate the case. 
+```sql +MERGE INTO users_table u +USING (SELECT * FROM orders_table ORDER BY order_date) o +ON (u.user_id = o.product_id) +WHEN MATCHED AND o.status = 'DONE' THEN DELETE; +``` + +### UPDATE / DELETE Planning + + **Overview**: + The planning logic for UPDATE/DELETE queries is quite similar to what we've discussed for INSERT and MERGE commands. There are essentially four primary methods of planning: + + **1) Fast-Path Router Planning**: + Targets a single shard and filters on the distribution key in the WHERE clause. +```sql +UPDATE users_table SET email = 'new@email.com' WHERE user_id = 5; +``` + + + **2) Router Planning**: + Targets a single shard, but all the shards are on a single node and are colocated. +```sql +UPDATE users_table u + SET email = '' + FROM orders_table o + WHERE o.user_id = u.user_id AND + u.user_id = 5 AND + o.status = 'done'; +``` + + + **3) Pushdown Planning**: + The query can be pushed down to worker nodes, targeting multiple shards. Joins are also possible if they are on distribution keys. +```sql +UPDATE users_table SET email = 'new@email.com' +WHERE user_id IN (SELECT user_id FROM orders_table WHERE status = 'in progress'); +``` + + **Additional Example for Pushdown with Materialized CTE**: +```sql +WITH high_value_users AS ( + SELECT user_id FROM orders_table WHERE status = 'done' ORDER BY order_date LIMIT 50 +) +UPDATE users_table SET username = 'High Value' +WHERE user_id IN (SELECT user_id FROM high_value_users); +``` + + + **4) Recursive Planning**: +Used for more complex queries, like those with subqueries or joins that can't be pushed down. The queries are planned recursively. +```sql +DELETE FROM users_table WHERE user_id +IN (SELECT user_id FROM orders_table WHERE order_date < '2023-01-01' ORDER BY order_date LIMIT 5); +``` + +### Correlated/Lateral Subqueries in Planning + +**Overview**: +Correlated or LATERAL subqueries have special behavior in Citus. They can often be pushed down, especially when the join is on the distribution key. There are limitations for joins not on the distribution key. + + + **Key Code Details**: + For more information on the code, check the following functions: + `DeferErrorIfCannotPushdownSubquery()` -> + `ContainsReferencesToOuterQuery()`, `DeferErrorIfSubqueryRequiresMerge()`, `DeferredErrorIfUnsupportedLateralSubquery()`. LATERAL queries are different/unique: even if the subquery requires a merge step such as a `LIMIT`, if the correlation is on the distribution column, we can push it down. See [#4385](https://github.com/citusdata/citus/pull/4385). + + + + **Example 1**: Using LATERAL, where the join is on the distribution key. +```sql +SELECT u.*, o_sum.total +FROM users_table u, +LATERAL (SELECT count(DISTINCT status) as total FROM orders_table o WHERE o.user_id = u.user_id) o_sum; +``` + + + **Example 2**: Complex LATERAL with GROUP BY on a non-distribution key. It's pushdownable because the join is on the distribution key. +```sql +SELECT u.*, o_info.product, o_info.total +FROM users_table u, +LATERAL ( + SELECT o.product_id as product, count(DISTINCT o.status) as total + FROM orders_table o WHERE o.user_id = u.user_id + GROUP BY o.product_id +) o_info; +``` + + + + **Debug and Error Messages**: +When it's not possible to push down correlated subqueries, recursive planning also can't be used. 
+```sql +SELECT u.* +FROM users_table u, +LATERAL ( + SELECT o.product_id as product + FROM orders_table o WHERE o.user_id != u.user_id +) o_info; + +DEBUG: skipping recursive planning for the subquery since it contains references to outer queries +ERROR: complex joins are only supported when all distributed tables are joined on their distribution columns with equal operator +``` + + + + + + + + + +### Planning Methodologies in Citus: Compatibility and Incompatibility + + + +#### Compatibilities + + + +1. **Interleaving Recursive and Pushdown Planning**: + + - Recursive planning and pushdown planning can often be interleaved within a query. This allows for greater flexibility and optimized performance. + + + +2. **Router Queries in Recursive Planning**: + + - Subqueries in recursive planning can often be router queries. This includes both fast-path router and regular router queries. + + + +3. **Command Types**: + + - Command types like `UPDATE`, `DELETE`, `MERGE`, and `INSERT .. SELECT` can work well with both pushdown and recursive planning. + + + +#### Incompatibilities + +1. **Repartition Joins**: + + - Repartition joins are generally incompatible with both recursive and pushdown planning. If a query uses recursive planning, it can't also use repartition joins. However, re-partition joins can be in a CTE that is recursively planned. + + + +#### Examples of Compatibility and Incompatibility + +##### Recursive and Pushdown Planning + +```sql +-- Example 1: Recursive and Pushdown Planning Interleaved +-- subquery is recursively planned multi-shard command +WITH recent_orders AS ( + SELECT * FROM orders_table ORDER BY order_date LIMIT 10 +) +SELECT * FROM users_table WHERE user_id IN (SELECT user_id FROM recent_orders); +``` + +##### Router Queries in Recursive Planning + +```sql +-- Example 2: Subquery as Fast-Path Router Query is recursively planned +-- the rest is pushdown +WITH user_info AS ( + SELECT * FROM users_table WHERE user_id = 5 ORDER BY date_of_birth LIMIT 1 +) +SELECT * FROM orders_table WHERE user_id IN (SELECT user_id FROM user_info); +``` + +##### UPDATE Pushdown and Recursive Planning + +```sql +-- Example 3: UPDATE command with Pushdown, Router and Recursive Planning +-- recursively planned router query and the rest is pushdown +WITH high_value_users AS ( + SELECT user_id FROM orders_table WHERE user_id = 15 AND status = 'done' ORDER BY order_date LIMIT 50 +) +UPDATE users_table SET username = 'High Value' WHERE user_id IN (SELECT user_id FROM high_value_users); +``` + + + +#### Incompatibility with Repartition Joins + +```sql +-- Example 4: Incompatible Query involving Recursive Planning and Repartition Joins +-- This query will fail because it tries to use recursive planning for recent_orders +-- and trying to repartition joins between o2 and recent_orders +WITH recent_orders AS ( + SELECT * FROM orders_table WHERE order_date > '2023-01-01' LIMIT 10 +) +SELECT u.* +FROM users_table u +JOIN recent_orders o ON u.user_id = o.product_id +JOIN orders_table o2 ON o2.product_id = o.product_id; +ERROR: complex joins are only supported when all distributed tables are co-located and joined on their distribution columns +``` + +## Combine query planner + +The combine planner is the final stage of planning for multi-shard queries. The logical & physical planner path creates a combine query, which will run on the coordinator. 
The combine query contains a special function call (called the extra data container), which can be observed using debug messages emitted during planning:
+
+```
+SET client_min_messages TO debug4;
+SELECT count(*) FROM test;
+…
+DEBUG: combine query: SELECT COALESCE((pg_catalog.sum(count))::bigint, '0'::bigint) AS count FROM pg_catalog.citus_extradata_container(10, NULL::cstring(0), NULL::cstring(0), '(i 1)'::cstring(0)) remote_scan(count bigint)
+```
+
+The reason we use a special function call is simply that it lets us put custom information in query trees. We use the same approach to pass sharding information into the deparser.
+
+In the combine query planner, we run the combine query through standard_planner and use the set_rel_pathlist_hook to inject a CustomPath plan for the function call. The CustomPath translates into the Citus Custom Scan that runs a Job.
+
+## Restriction Equivalence
+
+In the PostgreSQL source code, an `EquivalenceClass` is a data structure used in query optimization. It is a way to represent a set of expressions in a query that are all equal. The PostgreSQL query planner uses this information to choose the most efficient execution plan for a query.
+
+For example, let's say you have a query like this:
+
+```sql
+SELECT * FROM table1, table2 WHERE table1.a = table2.b AND table1.a = 5;
+```
+
+Here, `table1.a`, `table2.b`, and `5` can all be considered to belong to the same equivalence class because they are equal. Knowing this, the query optimizer might choose to use an index on `table1.a` or `table2.b` to speed up the query, among other optimizations.
+
+One level beyond that, Postgres can also apply transitivity rules for joins:
+
+```sql
+SELECT * FROM table1, table2, table3 WHERE table1.a = table2.a AND table1.a = table3.a;
+```
+
+Here, `table1.a`, `table2.a`, and `table3.a` can all be considered to belong to the same equivalence class because they are (transitively) equal.
+
+
+Citus relies on this information as well, but Citus and Postgres have different structures. In Postgres, each (sub)query inside a big query is planned by itself. Citus tries to plan the whole big query at one time for performance reasons (see Query Pushdown Planning). This makes their use of Equivalence Classes different.
+
+In Postgres, each subquery has its own Equivalence Classes. But Citus needs Equivalence Classes for the whole big query. For example:
+
+```sql
+SELECT count(*) FROM (SELECT a FROM t1 JOIN t2 USING (a)) as foo
+JOIN (SELECT a FROM t3 JOIN t4 USING (a)) as bar USING (a);
+```
+
+For Postgres, it's enough to make Equivalence Classes for the subqueries `foo` and `bar`. Then make another one for the top-level query where `foo` and `bar` join. Postgres can plan joins this way.
+
+In Citus, we need to check if all tables in the big query (t1, t2, t3, t4) join on distribution columns. Citus makes Equivalence Classes for the whole big query. The logic is in the `AttributeEquivalenceClass` C struct. The function `GenerateAllAttributeEquivalences()` builds this structure in Citus. The idea is to simply merge all the Equivalence Classes of the different query levels into one Equivalence Class (an `AttributeEquivalenceClass`).
+
+Citus also introduces a new idea: RTEIdentity. Each table in the query gets a unique ID called RTEIdentity (see the `AssignRTEIdentity()` C function). This ID helps build a new type of Equivalence Class that works across all levels of the query. Without RTEIdentity, we can't tell tables apart in different levels of the query.
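+
+As an illustration of why RTEIdentity is needed (a hypothetical query reusing the tables above), consider the same table appearing at two different query levels. The relation OID alone cannot distinguish the two occurrences of `t1` when merging equivalence classes across levels, but their RTEIdentities can:
+
+```sql
+-- t1 appears both at the top level and inside the subquery; the two
+-- occurrences must be tracked separately while Citus checks that all
+-- relations are joined on the distribution column "a"
+SELECT count(*)
+FROM t1
+JOIN (SELECT a FROM t1 JOIN t2 USING (a)) AS sub USING (a);
+```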
We rely on a hack while assigning the RTEIdentities. We basically use a field in `RangeTblEntry` struct that we are sure it is not used for tables. In practice, this might break at some point. + + + +## Recurring Tuples + + + +The Recurring Tuples concept in Citus helps manage expressions that give the same set of results across multiple shards. This is mainly useful for JOIN operations. The idea is to understand and handle how some tables or functions behave the same way across different shards of a distributed table. This concept helps to provide accurate error messages if such recurring tuples are used in a way that might give wrong results. + +The `RecurringTuplesType` enum in the code helps categorize these recurring tuples into different types. The types include: + +- Reference Table +- Function +- Empty Join Tree +- Result Function +- Values + +The main point is that recurring tuples "recur" for each shard in a multi-shard query. + +For example, consider a JOIN between a distributed table and a reference table. The query on each shard would look something like this: + +```sql +SELECT ... FROM dist_table_shard_1 JOIN ref_table_shard_1; +SELECT ... FROM dist_table_shard_2 JOIN ref_table_shard_1; +... +SELECT ... FROM dist_table_shard_n JOIN ref_table_shard_1; +``` + +Here, `ref_table_shard_1` is a recurring tuple because it appears in each shard query of the distributed table (`dist_table_shard_X`). It "recurs" for each shard, making it a recurring tuple. + +In summary, the Recurring Tuples concept in Citus helps in managing and identifying expressions that behave the same way across different shards, mainly to ensure accurate query results and error handling. + +# Executor + +Citus primarily hooks into the PostgreSQL executor by producing a query plan with a CustomScan. The overall hierarchy of where Citus hooks into the executor looks like this: + +- PostgreSQL executor + - ExecutorRun_hook + - Subplans are executed before regular execution + - CustomScan functions are invoked as part of overall scan tree + - BeginCustomScan (which steps are included depends on the query) + - Function calls & distribution column parameters are evaluated + - Deferred shard pruning + - Lock shards to prevent concurrent move (write only) + - Find placements for shards + - ExecCustomScan + - Adaptive Executor executes a list of tasks and concatenates the results into a tuple store + - Re-partition jobs are executed + - Remote tasks are executed + - Local tasks are executed + +We describe each part in more detail below. + +## Custom scan + +The Custom scan is the main entry point for the executor into Citus. The whole query plan might be a single Custom Scan node (e.g. single shard queries), or it can be a leaf node in a query plan that aggregates results across shards. + +The BeginCustomScan function evaluates function calls, parameters, and performs deferred pruning, and local plan caching, which are described in the next few sections. The ExecCustomScan function runs the adaptive executor which executes a list of tasks across the worker nodes. + +We also use top-level executor hooks, but primarily to capture some execution time information. The one important thing we do in the top-level ExecutorRun hook is execute subplans. That is because we allow subqueries to appear in certain parts of the combine query, and in case of a subquery on a Citus table that subquery needs to be executed before the overall plan. 
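+
+For instance (a sketch reusing the example tables from the planner sections), a materialized CTE on a distributed table becomes a subplan: it is executed in the ExecutorRun hook and written to an intermediate result before the CustomScan for the main query runs:
+
+```sql
+-- the CTE is forced to materialize, so it runs as a subplan before the
+-- rest of the plan; the main query then reads the intermediate result
+WITH top_users AS MATERIALIZED (
+    SELECT user_id FROM users_table ORDER BY date_of_birth LIMIT 10
+)
+SELECT count(*) FROM orders_table WHERE user_id IN (SELECT user_id FROM top_users);
+```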
+
+We use separate custom scans for INSERT..SELECT and MERGE commands due to the specialized nature of these commands (multiple phases).
+
+![Diagram of CustomScan APIs](https://wiki.postgresql.org/images/0/05/CustomScan_Fig01.png)
+
+## Function evaluation
+
+It is often necessary to evaluate function calls on the coordinator, rather than pushing them down to the worker node. One example is evaluating `nextval('my_sequence')` in an insert, or stable functions like `now()` that should return the same value for the duration of the query. This is especially true for writes to replicated (reference) tables, since we cannot afford to push down function calls that might return different values on different nodes. We perform function evaluation on the “job query” of the distributed plan in `ExecuteCoordinatorEvaluableExpressions`, before deparsing the query.
+
+Whether a function call should be evaluated once on the coordinator, or many times (e.g. for every row) depends on the context in which the function call appears. For instance, a function call in a WHERE or SELECT clause might be evaluated many times, while a function call in a VALUES clause will only be evaluated once. On the other hand, stable & immutable functions are expected to return the same result for the same input for the whole query execution, so they should be evaluated once, unless their input can change (e.g. parameter is a column).
+
+So far, the function evaluation logic does not distinguish between different contexts within queries. Instead, we follow a simple policy:
+
+- For inserts, evaluate all function calls, including calls to volatile functions, but disallow stable/volatile functions in RETURNING
+- For update/delete, evaluate all function calls, but disallow volatile functions
+- For select, do not evaluate function calls on the coordinator (not entirely correct)
+
+When DML commands appear in a CTE, the restriction only applies to the CTE. In many cases, the CTE will in that case be planned and executed separately through recursive planning.
+
+A function call that takes a column (Var) as a parameter will not be evaluated on the coordinator, since it depends on data on the worker nodes and will need to be evaluated many times. However, if we did this on a replicated table, then stable/volatile functions might return different results on different nodes; in the context of an update/delete, that would cause replicas to diverge. That is one of the reasons why we disallow stable/volatile functions in update/delete statements, but we could permit them for regular tables with a single replica.
+
+The reason we also disallow volatile functions in regular update/delete is purely implementation related: Our current function evaluation logic does not know how to distinguish between stable & volatile functions. If we were to run it on a query that contains WHERE x > random(), it would evaluate the random() once, even though it’s supposed to be pushed down and re-evaluated for every row.
+
+## Prepared statements
+
+Prepared statements are a feature that lets clients send a query once and then execute it multiple times. Plans may be cached across executions. Prepared statements can be created explicitly via PREPARE/EXECUTE commands, via protocol messages (what most clients do), via PL/pgSQL, and via SPI.
+
+Citus has limited prepared statement support in the sense that they functionally work, but there are only a few cases in which plans are meaningfully cached across executions.
Despite the lack of meaningful optimization, prepared statements involve a lot of complexity and counterintuitive logic. Which parts are necessary and which parts are technical debt is left as an exercise to the reader. + +The plan of a prepared statement is only cached when the same prepared statement is executed 5 times by PostgreSQL (hard-coded value). The 5th time, the planner is called without supplying parameter values to obtain a “generic plan” and that plan is cached unless it is much costlier than using custom plan. Hence, the planner might be called twice on the 5th execution and if a generic plan is created then the planner may not be called again. + +There are a few important cases to distinguish in case of Citus: + +- Multi-shard queries vs. single shard (Fast path & router) +- Custom plan vs. Generic plan. +- Parameter in a filter on the distribution column vs. only on other columns +- Local vs. remote execution +- Combinations of parameters & function evaluation. + +Let’s start with the simplest case: Multi-shard queries. These queries have complex planning logic, and it would be even more complex if the planner did not know the values of parameters. Therefore, we dissuade PostgreSQL from using a generic plan by returning a mock PlannedStmt with an extremely high cost when asked for a generic plan (see `DissuadePlannerFromUsingPlan()`). That will cause PostgreSQL to keep using a custom plan with known parameter values. In addition, we replace any Params that appear in the query tree with their Const values in ResolveExternalParams before distributed planning, so the remaining planner logic does not need to concern itself with query parameters. + +For single shard queries, the story is a lot more complex. An important question is whether there is a parameter in the distribution column, and whether a query is single shard in the planner or not. A query like `SELECT * FROM table1 WHERE distcol = $1` will clearly go to a single shard, but for a query like `SELECT * FROM table1 WHERE distcol = $1 UNION ALL SELECT * FROM table2 WHERE distcol = $2` it may or may not be. + +We do not precisely distinguish all possible cases, but rather have a simple distinction: + +- Fast path queries are simple queries on a single table with a "distribution column" = "Param or Const" filter (or single row inserts). We know that they prune to at most 1 shard regardless of the parameter value. The case of “distcol = NULL” is false/null by definition (unlike “distcol IS NULL”) and therefore prunes to 0 shards. +- Router queries are arbitrarily complex queries that prune down to a single shard at planning time based on the RestrictInfo data structures obtained from postgres planner. + +We can only decide whether a query is a router query in the planner, because if it is not a router query, we need to fall back to the multi-shard query planning code path. Hence, we can only support generic router plans when all distribution column filters are constant, or there are only single shard/reference tables in the query. The router planner cannot prune based on unbound parameters and will therefore return a soft error. When the planner sees a soft error, we return a mock plan with a high cost, similar to multi-shard queries. + +Fast path queries prune to a single shard regardless of the parameter values. If the distribution column value is a parameter, we defer additional planning decisions, in particular “shard pruning” to the executor (deferredPruning flag in the Job). 
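+
+A minimal illustration of the fast path case, reusing `users_table` from the planner examples (the caching behavior follows the PostgreSQL rules described above):
+
+```sql
+-- fast-path query with a parameter on the distribution column
+PREPARE get_user(bigint) AS
+    SELECT * FROM users_table WHERE user_id = $1;
+
+-- the first executions use custom plans with known parameter values; after
+-- five executions PostgreSQL may switch to a cached generic plan, and with a
+-- parameter on the distribution column shard pruning is deferred to the
+-- executor (deferredPruning)
+EXECUTE get_user(1);
+EXECUTE get_user(2);
+EXECUTE get_user(3);
+EXECUTE get_user(4);
+EXECUTE get_user(5);
+EXECUTE get_user(6);
+```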
Currently, we resolve the parameters in `ExecuteCoordinatorEvaluableExpressions()` which replaces the Param nodes in the query tree, and then `TargetShardIntervalForFastPathQuery()` finds "distribution column" = "Const" filters in the WHERE clause. This could perhaps be optimized but keeps the logic consistent between parameters and non-parameterized queries. + +For both fast path queries and router queries, the job query tree for single shard queries still has all the parameters when we get to the executor. We resolve the parameters in the query tree before deparsing when: + +- pruning is deferred (has WHERE distcol = $1 …) +- the query is a DML that contains function calls that need to resolved + +The latter happens primarily because function evaluation also resolves parameters. Otherwise, it would not be able to resolve expressions like `stable_fn($1)`. If the parameters are not resolved in the executor, they are passed on to the worker node using the libpq functions that take parameters. + +Both fast path and router query plans can be stored in the PostgreSQL cache (plancache.c) if they are run at least five times. The way these plans are handled depends on whether or not the query includes a parameter on the distribution key. In the first case below, there is no parameter; in the second case, there is a parameter: + +- the query pruned to a single shard in the planner, the task is therefore static (always goes to the same shard group, with same query string) +- the query uses deferred pruning, the shard group is therefore decided in the executor (not cached, query string rebuilt) + +Both scenarios reduce compute cycles in terms of planning the distributed query, but the plan for the shard query is never cached, except in the local execution case, which is described in the next section. + +The current structure is “less than ideal”, but by now it is battle hardened and has extensive regression tests that cover all the cases. It should be improved, but with caution. Caching the wrong plan could easily lead to invalid results, and there are many subtle edge cases. + +### Local plan caching + +We currently only take advantage of plan caching for shard queries that access a single local shard group and use deferred pruning (described in the previous section). This avoids reparsing or replanning the query on the local shard. That works well in combination with smart clients that immediately connect to the right node, function call delegation, triggers, and Citus local tables. + +We can only know whether we are dealing with a local shard group after evaluating parameters and functions. Immediately after that, we plan the query on the local shard group and store the resulting (regular PG) plan in the distributed plan data structure (Job). The reason we store it in the distributed plan is that it is already cached by PostgreSQL, so anything we add to the plan will be cached along with it, with the correct lifecycle. We store a list of local plans, one for each shard plan. + +Local plan caching quite significantly improves performance for certain workloads, but it comes with a subtle caveat. For queries with deferred pruning, we only know whether the shard query is on a local shard query after evaluating parameters and function calls, which we do by replacing them in the query tree. However, to obtain a cacheable generic plan, we need to use the original query tree which still has the original function calls and parameters. 
That means re-execute those function calls when executing the shard query, which is unusual since we usually only execute them in the BeginCustomScan hook. Since we only do this for local execution, the function calls will still run in the same process and will therefore have the same effect, but it means we sometimes evaluate function calls twice. That is acceptable for stable functions, but not for volatile functions. We therefore skip caching when there are calls to volatile functions. + +## Adaptive executor + +Once function and parameter evaluation are completed and the final task list is ready, we call into the adaptive executor. The goal of the adaptive executor is to efficiently execute a list of tasks. A task is typically a shard query that is to be executed on 1 placement (read) or all placements (write). It can also be an arbitrary command unrelated to shards. Implementation-wise, its primary function is to concurrently execute multiple queries on multiple remote nodes using libpq in non-blocking mode with appropriate failure handling and adaptive connection pools. + +The adaptive executor tries to minimize network round trips for single shard statements (transactional workloads) by using a single, cached connection per node, and parallelize queries using multiple connections per node for multi-shard statements (analytical workloads, ETL, DDL). + +Historically, Citus executed single shard queries via a single connection per worker node (router executor), while it executed multi-shard queries via a connection per shard to parallelize across nodes and cores (real-time executor), but this approach had several limitations. + +**The executor must consider preceding writes and locks on shards in the transaction**. In the past, if the router executor performed 2 inserts on different shards over the same connection, then the real-time executor could no longer run. It is not valid to query those shards over two separate connections, since only one of them would see the inserts. The executor must ensure that after a write or lock on a shard group, all subsequent queries on the shard group use the same connection until transaction end. + +**The executor should consider fast vs. slow multi-shard commands**. We observed many cases in which multi-shard commands only took a few milliseconds (e.g. index lookups on a non-distribution column) and opening a connection per shard was excessive, since it could add tens or hundreds of milliseconds to a query that could otherwise finish in 10-20ms. _Whether parallelization is beneficial depends on the runtime of individual tasks._ Some tasks can also take much longer than others. + +**The executor should gracefully handle failures**. One of the more challenging parts of doing remote, concurrent query execution is handling a variety of failures, including timeouts, failed connections, and query errors. The handling can be different for reads and writes, since reads on replicated tables can fail over to a different placement. + +**The executor should consider replicated shards**. Writes to reference tables (or replicated shards) need to be sent to all nodes, while reads can fail over to other replicas. Update/delete can be performed in parallel due to the exclusive lock on the shard, while inserts need to run in a consistent order to avoid deadlocks in case of constraint violations. The executor also needs to consider that replicas may be on the local node and use local execution. 
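+
+As a small illustration of the first requirement above (using the example tables from the planner sections):
+
+```sql
+BEGIN;
+-- two inserts into different shard groups go over the same connection
+INSERT INTO users_table (user_id) VALUES (1);
+INSERT INTO users_table (user_id) VALUES (2);
+-- this multi-shard query must observe both inserts, so the tasks for both
+-- shard groups are assigned to the connection that performed the writes
+SELECT count(*) FROM users_table;
+COMMIT;
+```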
+ +To fulfill the first two requirements, the adaptive executor uses a (process-local) **pool of connections per node**, which typically starts at 1 connection, but can grow based on the runtime of the query. Queries on shard groups that were already modified are assigned to the connection that performed the modification(s), while remaining queries are assigned to the pool (to be parallelized at will). + +Adaptive executor pools + +**Both the pool and the session have a “pending queue” and a “ready queue”**. The pending queue is mainly used in case of replication (e.g. reference tables). In the case of reads, whether a (pending) task on placement 2 needs to run depends on whether the (ready) task on placement 1 succeeds. In case of inserts, we write to the placements in order, so the task on placement 2 runs only once placement 1 is done. + +**The main loop of the adaptive executor waits for IO on the overall list of connections using a WaitEventSet**. When a connection has IO events, it triggers the connection state machine logic (ConnectionStateMachine). When the connection is ready, it enters the transaction state machine logic (TransactionStateMachine) which is responsible for sending queries and processing their results. The executor is designed with state machines, and the code has an extensive comment describing the state machines, please refer there for the details + +When a connection is ready, we first send BEGIN if needed, and then take tasks from the session-level ready queue, and then tasks from the pool-level ready queue. We currently process one task at a time per connection. There are opportunities for optimization like pipelining/batching, though we need to be careful not to break parallelism. + +**Late binding of tasks to connections via the pool-level queue has nice emergent properties**. If there is a task list with one particularly slow task, then one connection will spend most of its time on that task, while other connections complete the shorter tasks. We can also easily increase the number of connections at runtime, which we do via a process called slow start (described below). Finally, we’re not dependent on a connection being successfully established. We can finish the query when some connections fail, and we finish the query if BEGIN never terminates on some connection, which might happen if we were connecting via outbound pgbouncers. + +**The pool expands via “slow start”, which grows the pool every ~10ms as long as tasks remain in the pool-level queue**. The name slow start is derived from the process in TCP which expands the window size (the amount of data TCP sends at once). As in the case of TCP, the name slow is a misnomer. While it starts very conservatively, namely with 1 connection, the _rate_ at which new connections open increases by 1 every 10ms, starting at 1. That means after 50ms, the executor is allowed to open 6 additional connections. In a very typical scenario of 16 shards per node, the executor would reach maximum parallelism after ~60ms. It will open at most as many additional connections as there are tasks in the ready queue. + +Adaptive executor slow start example + +The 10ms was chosen to be higher than a typical connection-establishment time, but low enough to quickly expand the pool when the runtime of the tasks is long enough to benefit from parallelism. The 10ms has mostly proven effective, but we have seen cases in which slow connection establishment due to Azure network latencies would justify a higher value. 
In addition, we found that workloads with many queries in the 20-60ms range would see a relatively high number of redundant connection attempts. To reduce that, we introduced “cost-based connection establishment”, which factors in the average task execution time compared to the average connection establishment time and thereby significantly reduced the number of redundant connections.
+
+**The citus.max_adaptive_executor_pool_size setting can be used to limit the per-process pool sizes**. The default behaviour of the adaptive executor is optimized for parallel query performance. In practice, we find that there is another factor besides runtime that users care about: memory. The memory usage of a query that uses 16 connections can be 16 times higher than the memory usage of a query that uses 1 connection. For that reason, users often prefer to limit the pool size to a lower number (e.g. 4) using citus.max_adaptive_executor_pool_size.
+
+**The citus.max_shared_pool_size setting can be used to limit the pool sizes globally**. It’s important to reiterate that the adaptive executor operates in the context of a single process. Each coordinating process has its own pools of connections to other nodes. This would lead to issues if e.g. the client makes 200 connections which each make 4 connections per node (800 total) concurrently while max_connections is 500. Therefore, there is a global limit on the number of connections configured by max_shared_pool_size. The citus.max_shared_pool_size is implemented in the connection management layer rather than the executor. Refer to the connection management section for details.
+
+**The comment on top of [adaptive_executor.c](executor/adaptive_executor.c) has a detailed description of the underlying data structures.** While these data structures are complex and this might look like an area of technical debt, the current data structures and algorithm have proven to be a relatively elegant and robust way to meet all the different requirements. It is worth noting that a significant part of the complexity comes from dealing with replication, and shard replication is mostly a deprecated feature, but keep in mind that reference tables are also replicated tables and most of the same logic applies.
+
+## Local execution
+
+When the adaptive executor completes all of its remote tasks, the final step is to perform local execution. We formally see this as part of the adaptive executor, though the code is largely separate (in local_executor.c). Local execution is essentially just executing the shard queries on local shards directly by invoking the planner & executor. In other words, no additional backends or connections are established for local execution.
+
+Some queries strictly require local execution. This applies in particular to queries that depend on objects (types, tables) created by the current transaction, and to joins between regular tables and Citus local or reference tables.
+
+In case of a multi-shard query, a downside of local execution is that there is no parallelism across shards. Therefore, the executor tries to avoid local execution for simple multi-shard queries outside of a transaction block. Instead, it will open multiple connections to localhost to run queries in parallel. In a multi-statement transaction, the executor always prefers local execution even for multi-shard queries, since the transaction might also perform operations that require local execution.
+
+Some queries cannot use local execution.
For instance, we cannot use CREATE INDEX CONCURRENTLY as part of a bigger transaction, and we have not implemented a local version of EXPLAIN ANALYZE. We also cannot perform replication commands like creating a subscription via local execution. For the most part, these commands are typically executed outside of a transaction block or as internal commands, so it does not significantly affect the user experience. + +The executor always does the local execution after remote execution. That way, if there are any problems with the remote execution, Citus can still switch back (e.g., failover) to local execution. + +## Subplans + +Execution of subplans (CTEs, subqueries that cannot be pushed down) is relatively straight-forward. The distributed plan has a list of subplans, which can be regular or distributed, and they are passed to the PostgreSQL executor sequentially. + +The result of each subplan is broadcast to all participating nodes via the COPY .. WITH (format ‘result’) command, which writes to an intermediate result. The intermediate results are read in subsequent shard queries via the read_intermediate_result function. + +A current downside of the read_intermediate_result function is that it first copies all the tuples into a tuple store, which may be flushed to disk. This could be fixed through a CustomScan, or in PostgreSQL itself. + +## Re-partitioning + +Re-partitioning happens when joining distributed tables on columns other than the distribution column, or when the tables are not co-located. In the distributed plan, a re-partitioning operation is generally expressed through a Job which has dependent Jobs. The dependent Jobs are a special type of subplan whose results are re-partitioned. + +Two stages are executed to resolve the dependent jobs: + +- Run a query on all shards using the worker_partition_query_result function, which writes the result of the query to a set of intermediate results based on a partition column and set of hash ranges +- Fetch the intermediate results to the target node using fetch_intermediate_result, for each source shard group & target hash range pair. + +These stages are run in parallel for all dependent jobs (read: all tables in a join) by building a combined task list and passing it to the adaptive executor. This logic primarily lives in ExecuteTasksInDependencyOrder. + +Once all dependent jobs are finished, the main Job is executed via the regular adaptive executor code path. The main job will include calls to read_intermediate_result that concatenate all the intermediate results for a particular hash range. + +Single hash re-partition join example + +Dependent jobs have similarities with subplans. A Job can only be a distributed query without a merge step, which is what allows the results to be repartitioned, while a subplan can be any type of plan, and is always broadcast. One could imagine a subplan also being repartitioned if it is subsequently used in a join with a distributed table. The difference between subplans and jobs in the distributed query plans is one of the most significant technical debts. + +## COPY .. FROM command + +The COPY .. FROM command is used to load a CSV (or TSV, or binary copy format) file or stream from the client into a table. The \copy command is a psql command that can load files from the client, and internally does COPY .. FROM STDIN and sends the file contents over the socket. 
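+
+A basic usage sketch (the file name is hypothetical; the table reuses the schema from earlier examples):
+
+```sql
+-- psql's \copy streams a client-side file over the connection as
+-- COPY .. FROM STDIN; Citus routes each row to the matching shard
+\copy users_table FROM 'users.csv' WITH (FORMAT csv)
+
+-- server-side variant for a file readable by the PostgreSQL server process
+COPY users_table FROM '/tmp/users.csv' WITH (FORMAT csv);
+```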
+ +**Citus supports COPY into distributed tables via the ProcessUtility_hook by internally doing a COPY to each shard.** We go through the regular COPY parsing logic in PostgreSQL (BeginCopyFrom & NextCopyFrom), which reads from the socket, parses the CSV, and returns a tuple. The tuple is passed through the CitusCopyDestReceiver API. Most of the relevant logic lives in CitusSendTupleToPlacements. + +**The CitusCopyDestReceiver inspects the value in the distribution column and finds the corresponding shard.** It opens a connection to the node(s) on which the shard is placed, starts a COPY into the shard, and forwards the tuple. For performance reasons, we use the binary copy format over the internal connections, when possible (e.g. all types have send/receive), even if the client used CSV. + +**The COPY protocol does not require immediate confirmation when sending a tuple**, which means we can continue parsing the next tuple without waiting for the previous tuple to be fully processed. This creates nice pipelining behaviour where tuples are effectively ingested in parallel and can improve performance over regular PostgreSQL, even though parsing runs at the same speed. This effect will be more pronounced when insertions are relatively heavy-weight due to triggers or heavy indexes. + +**COPY does not always use a connection per shard.** If there were already writes to multiple shards on a specific connection earlier in the transaction (e.g. consecutive inserts), then that connection must be used for the writes done by the COPY (e.g. to be able to check unique constraints). However, we can only COPY into one table at a time. In this case, the COPY logic maps multiple shards to the same connection and switches back-and-forth between shards through multiple COPY commands (which has overhead). If we get a tuple for a shard that is currently active, we forward immediately over the connection. Otherwise, we add the tuple to a per-shard buffer, or switch the connection if we already sent `citus.copy_switchover_threshold` bytes to the current shard. + +There is a caveat in the current COPY logic. Citus always uses non-blocking I/O, which means libpq keeps outgoing bytes in a buffer when the socket is busy. We only run the relevant libpq functions to flush the per-connection libpq buffer (or flush the per-shard buffer) when there is a tuple for a particular connection, or when reaching the end of the stream. Ideally, we keep getting tuples for all the different shards, such that all connections are flushed. In some cases, when many consecutive tuples are for the same shard, a large amount of data can remain buffered on the coordinator in libpq or the per-shard buffer when we come to the end of the stream (CitusCopyDestReceiverShutdown). The connections and per-shard buffers are then flushed one by one. There is room for optimization where the COPY loop and final flush behave more like the adaptive executor and uses a WaitEventSet to check for I/O events on all the sockets, and flush the libpq buffer. + +Another (smaller) caveat is that the libpq buffer can fill up if the outgoing connection to the worker cannot keep up with the rate at which the coordinator is receiving and parsing tuples. To bound the size of the buffer and thereby avoid running out of memory, we force a flush on a connection after every `citus.remote_copy_flush_threshold` bytes that are written to a connection. 
We do this regardless of whether the libpq buffer is becoming large, because we do not have direct insight into its current size. Fortunately, it will only cause a very short pause if the buffer is not large or empty. + +For local shards, COPY can also use local execution. We use local execution by default in transaction blocks, but try to use connections to the local node for a single statement COPY because we get more parallelism benefits. + +## COPY .. TO command + +The COPY .. TO command is used to dump the data from a table, or to get the output of a query in CSV format. The COPY (SELECT ..) TO syntax does not use any special logic. PostgreSQL’s implementation will plan and execute the query in the usual way, and Citus will intercept queries on distributed tables. That means these commands do not use COPY internally to query the shards. Instead, the results of the query are buffered in a tuple store and then converted to CSV. + +The COPY distributed_table TO .. syntax will typically return a lot of data and buffering it all in a tuplestore would cause issues. Therefore, Citus uses the process utility hook to propagate the COPY distributed_table TO .. command to each shard one by one. The output is forwarded directly to the client. If the user asked for a header, it is only requested from the first shard to avoid repeating it for each shard. + +## INSERT..SELECT + +The INSERT.. SELECT command inserts the result of a SELECT query into a target table. In real-time analytics use cases, INSERT..SELECT enables transformation of an incoming stream of data inside the database. A typical example is maintaining a rollup table or converting raw data into a more structured form and adding indexes. + +INSERT..SELECT modes + +Citus has three different methods of handling INSERT..SELECT commands that insert into a distributed table as shown in the figure above. We identify these methods as: (1) co-located, where shards for the source and destination tables are co-located; (2) repartitioning, where the source and destination tables are not co-located and the operation requires a distributed reshuffle; and (3) pull to coordinator, where neither of the previous two methods can be applied. These three approaches can process around 100M, 10M, and 1M rows per second, respectively, in a single command. + +Co-located INSERT..SELECT is executed in a similar fashion to multi-shard update/delete commands. There is a single list of tasks with one task for each shard group, which runs via the adaptive executor. + +INSERT..SELECT with re-partitioning is architecturally similar to re-partition joins, but it goes via separate code path and uses more optimizations. Empty files are skipped and files traveling between the same pair of nodes are batched in a single call to fetch_intermediate_results, which saves round trips. The final step in INSERT..SELECT with re-partitioning runs queries like INSERT INTO dist_table SELECT .. FROM read_intermediate_result(…) with optional ON CONFLICT and RETURNING clauses. In principle, we could do an additional GROUP BY in the final step when grouping by the target distribution column, but that is not currently implemented and instead falls back to pull to coordinator. + +INSERT..SELECT via the coordinator logic uses the COPY code path to write results of an arbitrary SELECT into multiple shards at the same time. In case of ON CONFLICT or RETURNING, they are first written to intermediate results that are co-located with the destination shards. 
Then a co-located INSERT..SELECT between the intermediate results and the target shards is performed, similar to the final step of re-partitioning.
+
+## Merge command
+
+The MERGE command follows the same principles as INSERT .. SELECT processing. However, due to the nature of distributed systems, there are a few additional limitations on top of the INSERT .. SELECT processing. The [MERGE blog post](https://www.citusdata.com/blog/2023/07/27/how-citus-12-supports-postgres-merge/) dives deep into this topic.
+
+# DDL
+
+DDL commands are primarily handled via the citus_ProcessUtility hook, which gets the parse tree of the DDL command. For supported DDL commands, we always follow the same sequence of steps:
+
+1. Qualify the table names in the parse tree (simplifies deparsing, avoids sensitivity to search_path changes)
+2. Pre-process logic
+3. Call the previous ProcessUtility to execute the command on the local shell table
+4. Post-process logic
+5. Execute command on all other nodes
+6. Execute command on shards (in case of table DDL)
+
+Either the pre-process or post-process step generates a "Distributed DDL Job", which contains a task list to run in steps 5 & 6 (via adaptive executor).
+
+In general, pre-process should:
+
+- Acquire any locks that are needed beyond the ones PostgreSQL will acquire in step 3
+- Perform upfront error checks (e.g. is this unique constraint allowed on a distributed table?)
+
+Post-process should:
+
+- Ensure dependencies of the current object exist on worker nodes (e.g. types used in parameters when creating a function)
+- Deparse the DDL parse tree to a string
+- Generate a task list using the deparsed DDL command
+
+The reason for handling dependencies and deparsing in the post-process step is that, in case of a CREATE/ALTER, the object exists in its intended form at that point. In case of a DROP, the opposite is true and pre-process should be used. Most commands have either a pre-process or post-process function. We have not been particularly careful about defining what should be done in pre-process vs. post-process, so the steps are not always the same across different commands.
+
+Not all table DDL is currently deparsed. In that case, the original command sent by the client is used. That is a shortcoming in our DDL logic that causes user-facing issues and should be addressed. We do not directly construct a separate DDL command for each shard. Instead, we call the `worker_apply_shard_ddl_command(shardid bigint, ddl_command text)` function which parses the DDL command, replaces the table names with shard names in the parse tree according to the shard ID, and then executes the command. That also has some shortcomings, because we cannot support more complex DDL commands in this manner (e.g. adding multiple foreign keys). Ideally, all DDL would be deparsed, and for table DDL the deparsed query string would have shard names, similar to regular queries.
+
+## Defining a new DDL command
+
+All commands that are propagated by Citus should be defined in the DistributeObjectOps struct. Below is a sample DistributeObjectOps for the ALTER DATABASE command, as defined in the [distribute_object_ops.c](commands/distribute_object_ops.c) file.
+
+```c
+static DistributeObjectOps Database_Alter = {
+	.deparse = DeparseAlterDatabaseStmt,
+	.qualify = NULL,
+	.preprocess = PreprocessAlterDatabaseStmt,
+	.postprocess = NULL,
+	.objectType = OBJECT_DATABASE,
+	.operationType = DIST_OPS_ALTER,
+	.address = NULL,
+	.markDistributed = false,
+};
+```
+
+Each field in the struct is documented in the comments within the `DistributeObjectOps` definition. When defining a new DDL command, follow these guidelines:
+
+- **Returning tasks for `preprocess` and `postprocess`**: Ensure that either `preprocess` or `postprocess` returns a list of "DDLJob"s. If both functions return non-empty lists, then you would get an assertion failure.
+
+- **Generic `preprocess` and `postprocess` methods**: The generic methods, `PreprocessAlterDistributedObjectStmt` and `PostprocessAlterDistributedObjectStmt`, serve as generic pre and post methods utilized for various statements. Both of these methods are used for distributed object operations.
+
+  - The `PreprocessAlterDistributedObjectStmt` method carries out the following operations:
+    - Performs a qualification operation.
+    - Deparses the statement and generates a task list.
+
+  - As for the `PostprocessAlterDistributedObjectStmt` method, it:
+    - Invokes the `EnsureAllObjectDependenciesExistOnAllNodes` function to propagate missing dependencies, both on the coordinator and the workers.
+
+  - Before defining new `preprocess` or `postprocess` methods, it is advisable to assess whether the generic methods can be employed in your specific case.
+
+
+- **`deparse`**: When propagating the command to worker nodes, make sure to define `deparse`. This is necessary because it generates a query string for each worker node.
+
+- **`markDistributed`**: Set this flag to true if you want to add a record to the `pg_dist_object` table. This is particularly important for `CREATE` statements when introducing a new object to the system.
+
+- **`address`**: If `markDistributed` is set to true, you must define the `address`. Failure to do so will result in a runtime error. The `address` is required to identify the fields that will be stored in the `pg_dist_object` table.
+
+- **`markDistributed` usage in `DROP` Statements**: Please note that `markDistributed` does not apply to `DROP` statements. For `DROP` statements, you instead need to call `UnmarkObjectDistributed()` for the object either in `preprocess` or `postprocess`. Otherwise, stale records in the `pg_dist_object` table will cause errors in UDF calls such as `citus_add_node()`, which will try to copy the non-existent database object.
+
+- **`qualify`**: The `qualify` function is used to qualify the objects based on their schemas in the parse tree. It is employed to prevent sensitivity to changes in the `search_path` on worker nodes. Note that it is not mandatory to define this function for all DDL commands. It is only required for commands that involve objects that are bound to schemas, such as tables, types, functions, and so on.
+
+After defining the `DistributeObjectOps` structure, this structure should be registered in the `GetDistributeObjectOps()` function as shown below:
+
+```c
+// Example implementation in C code
+const DistributeObjectOps *
+GetDistributeObjectOps(Node *node)
+{
+	switch (nodeTag(node))
+	{
+		case T_AlterDatabaseStmt:
+		{
+			return &Database_Alter;
+		}
+...
+``` + +Finally, when adding support for propagation of a new DDL command, you also need to make sure that: +* Use `quote_identifier()` or `quote_literal_cstr()` for the fields that might need escaping some characters or bare quotes when deparsing a DDL command. +* The code is tolerant to nullable fields within given `Stmt *` object, i.e., the ones that Postgres allows not specifying at all. +* You register the object into `pg_dist_object` if it's a CREATE command and you delete the object from `pg_dist_object` if it's a DROP command. +* Node activation (e.g., `citus_add_node()`) properly propagates the object and its dependencies to new nodes. +* Add tests cases for all the scenarios noted above. +* Add test cases for different options that can be specified for the settings. For example, `CREATE DATABASE .. IS_TEMPLATE = TRUE` and `CREATE DATABASE .. IS_TEMPLATE = FALSE` should be tested separately. + +## Object & dependency propagation + +These two topics are closely related, so we'll discuss them together. You can start the topic by reading [Nils' blog](https://www.citusdata.com/blog/2020/06/25/using-custom-types-with-citus-and-postgres/) on the topic. + +### The concept of "Dependency" for Postgres/Citus + +Starting with the basics, Postgres already understands object dependencies. For instance, it won't allow you to execute `DROP SCHEMA` without the `CASCADE` option if tables exist within the schema. In this case, the table is a `dependent object`, and the schema serves as the `referenced object`. + +```sql +CREATE SCHEMA sc1; +CREATE TABLE sc1.test(a int); +DROP SCHEMA sc1; +ERROR: cannot drop schema sc1 because other objects depend on it +DETAIL: table sc1.test depends on schema sc1 +HINT: Use DROP ... CASCADE to drop the dependent objects too. +``` + +The information about these dependencies is stored in a specific Postgres catalog table, known as [`pg_depend`](https://www.postgresql.org/docs/current/catalog-pg-depend.html). You can inspect the aforementioned dependency within this catalog using the following query: + +```sql +SELECT + pg_identify_object_as_address(classid, objid, objsubid) as dependent_object, + pg_identify_object_as_address(refclassid, refobjid, refobjsubid) as referenced_object +FROM + pg_depend +WHERE + (classid, objid, objsubid) + IN + (SELECT classid, objid, objsubid FROM pg_get_object_address('table', '{sc1,test}', '{}')); + +┌─────────────────────────┬───────────────────┐ +│ dependent_object │ referenced_object │ +├─────────────────────────┼───────────────────┤ +│ (table,"{sc1,test}",{}) │ (schema,{sc1},{}) │ +└─────────────────────────┴───────────────────┘ +(1 row) +``` + +### Citus' Approach to Object Creation and Dependency Tracking: `pg_dist_object` + +Citus employs its own catalog table called `pg_dist_object`. This table keeps records of all objects that need to be created on every node in the cluster. These objects are commonly referred to as `Distributed Objects`. + +When adding a new node to the cluster using `citus_add_node()`, Citus must ensure the creation of all dependent objects even before moving data to the new node. For instance, if a table relies on a custom type or an extension, these objects need to be created before any table is set up. In short, Citus is responsible for setting up all the dependent objects related to the tables. + +Similarly, when creating a new Citus table, Citus must confirm that all dependent objects, such as custom types, already exist before the shell table or shards are set up on the worker nodes. 
Note that this applies not just to tables; all distributed objects follow the same pattern. + +Here is a brief overview of `pg_dist_object`, which has a similar structure to `pg_depend` in terms of overlapping columns like `classid, objid, objsubid`: + +```sql +CREATE SCHEMA sc1; +CREATE TABLE sc1.test(a int); +SELECT create_distributed_table('sc1.test', 'a'); + +SELECT + pg_identify_object_as_address(classid, objid, objsubid) as distributed_object +FROM + pg_dist_object; +┌─────────────────────────────┐ +│ distributed_object │ +├─────────────────────────────┤ +│ (role,{onderkalaci},{}) │ +│ (database,{onderkalaci},{}) │ +│ (schema,{sc1},{}) │ +│ (table,"{sc1,test}",{}) │ +└─────────────────────────────┘ +(4 rows) +``` + + +### When Is `pg_dist_object` Populated? + +Generally, the process is straightforward: When a new object is created, Citus adds a record to `pg_dist_object`. The C functions responsible for this are `MarkObjectDistributed()` and `MarkObjectDistributedViaSuperuser()`. We'll discuss the difference between them in the next section. + +Citus employs a universal strategy for dealing with objects. Every object creation, alteration, or deletion event (like custom types, tables, or extensions) is represented by the C struct `DistributeObjectOps`. You can find a list of all supported object types in [`distribute_object_ops.c`](https://github.com/citusdata/citus/blob/2c190d068918d1c457894adf97f550e5b3739184/src/backend/distributed/commands/distribute_object_ops.c#L4). As of Citus 12.1, most Postgres objects are supported, although there are a few exceptions. + +Whenever `DistributeObjectOps->markDistributed` is set to true—usually during `CREATE` operations—Citus calls `MarkObjectDistributed()`. Citus also labels the same objects as distributed across all nodes via the `citus_internal.add_object_metadata()` UDF. + +Here's a simple example: + +```sql +-- Citus automatically creates the object on all nodes +CREATE TYPE type_test AS (a int, b int); +... +NOTICE: issuing SELECT worker_create_or_replace_object('CREATE TYPE public.type_test AS (a integer, b integer);'); +.... + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + ... + +-- Then, check pg_dist_object. This should be consistent across all nodes. +SELECT + pg_identify_object_as_address(classid, objid, objsubid) as distributed_object +FROM + pg_dist_object +WHERE + (classid, objid, objsubid) + IN + (SELECT classid, objid, objsubid FROM pg_get_object_address('type', '{type_test}', '{}')); +┌──────────────────────────────┐ +│ distributed_object │ +├──────────────────────────────┤ +│ (type,{public.type_test},{}) │ +└──────────────────────────────┘ +(1 row) +``` + +In rare cases, `pg_dist_object` is updated during Citus version upgrades. If you upgrade Citus from `version X` to `version Y`, and a certain object becomes a supported distributed object in `version Y` but wasn't in `version X`, Citus marks it as such during the `ALTER EXTENSION citus` command. The details can be found in the C function `PostprocessAlterExtensionCitusUpdateStmt()`. + +### How Are `pg_dist_object` and `pg_depend` Related? + +In the prior sections, we focused on standalone objects, but in reality, most objects have dependencies. 
That's where `pg_depend` and also `pg_shdepend` become crucial. Any mention of `pg_depend` in this section also applies to `pg_shdepend`. + +When Citus creates an object, it scans the `pg_depend` table to identify all dependencies and then automatically generates these dependencies as distributed objects. + +The core C function in this process is `EnsureDependenciesExistOnAllNodes()`. This function takes the object as an argument and conducts a depth-first search (DFS) on `pg_depend` and `pg_shdepend` tables. The DFS sequence is crucial because dependencies must be established in a specific order. For instance, if a table depends on a type `t1`, and `t1` relies on another type `t2`, then `t2` must be created before `t1`. The DFS ensures that this order is maintained. + +If Citus encounters a dependency it can't support, it will throw an error instead of silently allowing it. The rationale behind this approach is to avoid subtle issues later, especially when adding new nodes. For instance, Citus currently throws an error for circular dependencies. The main function performing these checks is `EnsureDependenciesCanBeDistributed()`. + +During the DFS, Citus might need to extend its search, especially for complex dependencies like array types that don't have a straightforward dependency on their element types. Citus expands the traversal to account for such cases. The main function responsible for this is `ExpandCitusSupportedTypes()`, which has extensive comments explaining the specific rules. + +### Current User vs `superuser` for Object Propagation: + +The difference between `MarkObjectDistributed()` and `MarkObjectDistributedViaSuperuser()` is important here. Generally, Citus tries to minimize the use of `superuser` operations for security reasons. However, there are cases where it's necessary. We employ `superuser` permissions primarily when marking the dependencies of an object we are working on. This is because creating dependencies might require higher-level privileges that the current user might not have. For example, if a schema depends on a role, and the current user doesn't have the privilege to create roles, an error will be thrown. To avoid this, we use `superuser` for creating dependencies. + +However, there's an exception. If the dependency is created within the same transaction, we use the current user. This prevents visibility issues and is mainly relevant for `serial` columns. More details can be found in [Citus GitHub PR 7028](https://github.com/citusdata/citus/pull/7028). + +### When Are the Objects in `pg_dist_object` Used? + +There are three main scenarios: + ++ When adding a new node—or more precisely, activating it—Citus reads all objects listed in `pg_dist_object`, sorts them by dependency, and then creates those objects on the new node. The core C function for this is `SendDependencyCreationCommands()`, and the sorting is done by `OrderObjectAddressListInDependencyOrder()`. + ++ When Citus creates a new object and processes its dependencies, any dependencies already marked as distributed are skipped. This is handled in `FollowNewSupportedDependencies()`, where dependencies are bypassed if `IsAnyObjectDistributed()` returns true. + ++ When user modifies an object, Citus acts only when the object is distributed. For non-distributed object, Citus gives the control back to Postgres. + +## Foreign keys + +Citus relies fully on Postgres to enforce foreign keys. To provide that, Citus requires the relevant shards to be colocated. 
That’s also why the foreign keys between distributed tables should always include the distribution key. When reference tables / citus local tables involved, Citus can relax some of the restrictions. [Onder’s talk Demystifying Postgres Foreign Key Constraints on Citus](https://www.youtube.com/watch?v=xReWGcSg7sc) at CitusCon discusses all the supported foreign key combinations within Citus. + +There is one tricky behavior regarding transactions when there is a foreign key from a distributed table to a reference table. If a statement in a transaction modifies the reference table, then Postgres acquires row locks on the referencing tables (e.g., shards of the distributed table) within the internal connection that modified the reference table. After that point, Citus cannot access the shards of the distributed table in parallel anymore. Otherwise, the multiple internal connections that would be opened via parallel command might compete to acquire the same locks, leading to a (self) distributed deadlock. To prevent these scenarios, Citus switches to sequential execution. The relevant function is `RecordParallelRelationAccessForTaskList()`, which documents the possible scenarios. The regression test file [foreign_key_restriction_enforcement](https://github.com/citusdata/citus/blob/2c190d068918d1c457894adf97f550e5b3739184/src/test/regress/sql/foreign_key_restriction_enforcement.sql) has lots of nice examples of this behavior. + + +## DROP TABLE + +Citus' handling of `DROP TABLE` is slightly different than other DDL operations. In this section, we aim to highlight the key differences and their reasoning. + +Citus implements an event trigger, [`citus_drop_trigger()`](https://github.com/citusdata/citus/blob/main/src/backend/distributed/sql/udfs/citus_drop_trigger/latest.sql). The trigger is defined as: +```sql + select * from pg_event_trigger ; +┌───────┬────────────────────────────┬──────────┬──────────┬─────────┬────────────┬─────────┐ +│ oid │ evtname │ evtevent │ evtowner │ evtfoid │ evtenabled │ evttags │ +├───────┼────────────────────────────┼──────────┼──────────┼─────────┼────────────┼─────────┤ +│ 16676 │ citus_cascade_to_partition │ sql_drop │ 10 │ 16675 │ O │ │ +└───────┴────────────────────────────┴──────────┴──────────┴─────────┴────────────┴─────────┘ +(1 row) +``` + +The drop trigger proves useful for capturing all tables affected by the `CASCADE` operation. For instance, if you delete a parent table, Postgres will automatically execute a `DROP TABLE` command for its partitions. Citus can then seamlessly apply the same operation to all cascaded tables, eliminating the need for manual identification of tables that would be affected by the cascade. + +Another reason for utilizing a trigger for `DROP` processing is that after executing `standardProcess_Utility`, the `oid` of the table being dropped is eliminated from Postgres' catalog tables. This makes it more challenging to manage a dropped table in `PostProcessUtility`, as is customary for many `DDL` commands. Instead, we depend on the event trigger to supply the `oid` of the table that has been dropped. This allows us to delete all related metadata, such as entries in `pg_dist_partition` or `pg_dist_shard` from Citus' catalog tables. Additionally, we eliminate all relevant metadata from every node in the cluster. Ultimately, this enables us to remove the shard placements linked to the dropped Citus table. 
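+
+As a rough sketch of the end result (the table name below is just for illustration), dropping a distributed table also removes its rows from the Citus catalogs:
+
+```sql
+CREATE TABLE drop_example (key int, value text);
+SELECT create_distributed_table('drop_example', 'key');
+
+-- the table is registered in the Citus metadata
+SELECT count(*) FROM pg_dist_partition
+WHERE logicalrelid = 'drop_example'::regclass;   -- returns 1
+
+DROP TABLE drop_example;
+
+-- the drop trigger removed the metadata (and the shard placements) on all nodes
+SELECT count(*) FROM pg_dist_partition
+WHERE logicalrelid::text = 'drop_example';       -- returns 0
+```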
+ +Also, if we were to rely on `standardProcess_Utility`, we'd need to handle all sorts of `DROP` commands that could cascade into `DROP TABLE`. With drop trigger, Postgres handles that and calls Citus' drop trigger. + +Generally speaking, there isn't a compelling reason to avoid using `PostProcessUtility` for managing `DROP TABLE` commands. Theoretically, one could implement all the same logic within `PostProcessUtility`. However, the drop trigger offers a convenient approach. + + Additional functionalities are present in [`PreprocessDropTableStmt()`](https://github.com/citusdata/citus/blob/c323f49e8378b5e8ce95457c845659b5fc14ccb1/src/backend/distributed/commands/table.c#L146), particularly concerning the handling of partitioned tables and colocation locking. These aspects are well-documented in the code, so for further details, please refer to the documentation there. + +# Connection management + +Each client session makes “internal” connections to other nodes in the cluster. Connection management is an important part of our overall execution logic. The design largely comes from the need to achieve a few different goals: + +- Cache and reuse connections for low latency. +- Parallelize commands over multiple connections per node, to use multiple cores. +- Multi-statement transactions have locks and uncommitted state that is only visible over a particular connection. We therefore need to make sure that: + - After a write to a shard, any access to that shard group should use the same connection as the write. We need to cover the whole shard group because writes and locks can cascade to other shards in the shard group via foreign keys, and they might be used together in a join. + - After a write to a reference tables, any subsequent read of a reference table, including joins between distributed table shards and reference tables, should use the same connection as the write. +- Metadata and global object changes should always use the same connection. +- We should not overload worker nodes with parallel connections. + +In some cases, these goals conflict. For instance, if a multi-statement transaction performs a parallel delete on a distributed table, and then inserts into a reference table, and then attempts to join the distributed table with the reference table, then there is no way to complete that transaction correctly, since there is no single connection that can see both the reference table update and all the updates to distributed table shards. The command that reaches the conflict will error out: + +```sql +-- may fail if delete is parallelized +begin; +delete from dist_table; +insert into reference_table values (1,2); +select * from dist_table join reference_table on (x = a); +ERROR: cannot perform query with placements that were modified over multiple connections +abort; + +``` + +The workaround is to `set citus.multi_shard_modify_mode TO 'sequential';` before or at the start of the transaction, which forces the delete (multi-shard modification) command to use a single connection, such that the insert and select can use the same connection. + +The primary goal of the connection management layer is not to solve all these problems, but to detect them and prevent any form of incorrectness, such as not seeing preceding changes in the transaction and self-deadlocks. 
A lot of important error-checking logic lives in FindPlacementListConnection, which attempts to find a suitable connection given a list of shard placements out of the connections that are already open, and also checks if the intent of the caller would lead to a conflict. + +The connection management logic is divided into two parts: + +- **connection_management.c** tracks all connections for various purposes and concerns itself with connection establishment, caching, and error handling. +- **placement_connections.c** concerns itself with finding the right connection for a given shard placement access based on preceding commands in the transaction. + +## Connection management + +Connection management tracks all the connections made by the current backend to other worker nodes. The connections can exist for the lifetime of the transaction, or longer when they are cached. The connections are kept in a hash that is keyed by hostname, port, user, database, and replication (yes/no). Each hash entry has a list of connections, since there can be multiple when the executor decides to parallelize a multi-shard query. + +Citus operations that need a connection call `StartNodeUserDatabaseConnection` (or a wrapper), which either returns an existing connection or a new one. the caller should wait for the connection to be fully established. + +When a Citus operation needs a connection to a worker node (hostname, port, user, database, replication), it can ask for it in a few different ways via flags: + +- Pick any connection (default), open a new one if none exists +- Force a new connection (FORCE_NEW_CONNECTION), even if connections already exist +- Pick a connection outside of a transaction (OUTSIDE_TRANSACTION), or open a new one if none exists +- Pick the connection used for metadata syncing (REQUIRE_METADATA_CONNECTION), or open a new one if none exists and mark it for metadata syncing + +In addition, the caller can claim a connection exclusively, in which case it will not be returned until it is unclaimed (or transaction end). For instance, the adaptive executor claims connections it uses exclusively. When it calls `StartNodeUserDatabaseConnection` again, it will always get a new connection that it can use to parallelize the query. + +It is important that global commands like creating a type, or a function, or changing Citus metadata, all use the same connection. Otherwise, we might end up creating a type over one connection, and a function that depends on it over another. The use of the REQUIRE_METADATA_CONNECTION flag prevents this. + +The FORCE_NEW_CONNECTION and OUTSIDE_TRANSACTION flags can BOTH be used to execute (and commit) commands outside of the current transaction. Many usages of the older FORCE_NEW_CONNECTION flag could perhaps be replaced by OUTSIDE_TRANSACTION. A benefit of FORCE_NEW_CONNECTION is that it can provide a more intuitive way to parallelize commands than claiming connections exclusively. For instance, the `run_command_on_shards` uses FORCE_NEW_CONNECTION for this purpose. + +It is worth noting that Citus currently always opens a new connection when switching to a different user (e.g. via SET ROLE), rather than propagating the SET ROLE command. That can lead to some inconsistent behaviour (e.g. cannot see uncommitted writes after SET ROLE). + +## Placement connection tracking + +The placement connection tracking logic stores which shard group placements were accessed over which connections during the current transactions, and whether they performed a SELECT, DML, or DDL. 
It considers whether to use the same connection for accesses to the same shard group placement in the following cases:
+
+- SELECT after SELECT – can use a different connection
+- DML after SELECT – can use a different connection
+- All other cases – must use the same connection
+
+The key function that deals with this logic is `FindPlacementListConnection` in [placement_connection.c](/src/backend/distributed/connection/placement_connection.c), which is called via `GetConnectionIfPlacementAccessedInXact` by the adaptive executor.
+
+We sometimes allow the same shard group placement to be accessed from different connections (first two cases). Consider a transaction that does a query on a reference table followed by a join between a distributed table and a reference table. Currently Citus would parallelize the second query, but that implicitly causes the reference table to be accessed from multiple connections. After that, we can still perform writes on the reference table (second case), because they do not conflict with the reads. However, we cannot perform most DDL commands involving the reference table because the locks would conflict with the reads, such that it would self-deadlock (blocked waiting for itself). We throw an error to prevent the self-deadlock and suggest setting `citus.multi_shard_modify_mode` to 'sequential'. Probably some DDL commands that take weaker locks would still be permissible, but we currently treat them all the same way.
+
+A downside of the current placement connection tracking logic is that it does not consider foreign keys to reference tables, and the fact that writes and locks can cascade from a write to a reference table. We have a separate subsystem for error checking those scenarios (relation_access_tracking.c), but it would be nice if they could be unified.
+
+## citus.max_cached_connections_per_worker
+
+An important part of the connection management is caching at least 1 outgoing connection per worker node in the session. Establishing a new connection for every query is quite expensive due to SSL establishment, forking a process on the worker node, and rebuilding caches. Transactional workloads that have a high rate of short-running queries benefit a lot from caching connections. For analytical queries that take hundreds of milliseconds or more, the relative benefit is smaller, but often still noticeable.
+
+At the end of a transaction, the connection management logic decides which connections to keep. It keeps at most `citus.max_cached_connections_per_worker` regular connections that are in a healthy state, unless they are open for more than `citus.max_cached_connection_lifetime` (10 minutes by default). For workloads with a high rate of multi-shard queries, it can be beneficial to increase `citus.max_cached_connections_per_worker`.
+
+## citus.max_shared_pool_size
+
+**The `citus.max_shared_pool_size` setting can be used to limit the number of outgoing connections across processes.** Each session has its own set of connections to other nodes. We often make multiple connections to the same worker node from the same session to parallelize analytical queries, but if all sessions are doing that we might overload the worker nodes with connections. That is prevented by setting `citus.max_shared_pool_size`, which should be at least `citus.max_client_connections` on the coordinator node, and at most `max_connections - citus.max_client_connections` on the worker node.
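+
+For illustration, these GUCs might be adjusted as follows; the values below are placeholders rather than recommendations, and the right numbers depend on `max_connections` and the workload:
+
+```sql
+-- keep a couple of idle connections per worker between transactions
+ALTER SYSTEM SET citus.max_cached_connections_per_worker = 2;
+
+-- cap the total number of outgoing connections across all backends on this node
+ALTER SYSTEM SET citus.max_shared_pool_size = 300;
+
+SELECT pg_reload_conf();
+```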
+ +The principle behind `citus.max_shared_pool_size` is that under high concurrency (all client connections used) it converges to each process having 1 connection per node. To do so, we distinguish between “optional” and “required” connections. When the executor asks the connection management layer for a connection, the first connection to a node is always required, and other connections are optional. If all connection slots are in use, the connection manager blocks until one is available when asking for a required connection, or returns NULL when asking for an optional connection. That signals to the executor that it cannot currently expand its pool. It may try again later. Most Citus code paths are tweaked to be able to complete their operation with 1 connection per node, and use local execution for local shards. + +Note that `citus.max_shared_pool_size` can only control the number of outgoing connections on a single node. When there are many nodes, the number of possible inbound internal connections is the sum of the `citus.max_shared_pool_size` on all other nodes. To ensure this does not exceed max_connections, we recommend that `sum(citus.max_client_connections) < max_connections`. + +# Transactions (2PC) + +Citus uses the transaction callbacks in PostgreSQL for pre-commit, post-commit, and abort to implement distributed transactions. In general, distributed transactions comprise a transaction on the coordinator and one or more transactions on worker nodes. For transactions that only involve a single worker node, Citus delegates responsibility to the worker node. For transactions that involve multiple nodes, Citus uses two-phase commit for atomicity and implements distributed deadlock detection. + +## Single-node transactions + +Most multi-tenant and high-performance CRUD workloads only involve transactions that access a single worker node (or rather, a single shard group). For example, multi-tenant applications typically distribute and co-locate tables by tenant and transactions typically only involve a single tenant. When all statements in a transaction are routed to the same worker node, the coordinator simply sends commit/abort commands to that worker node from the commit/abort callbacks. In this case, the transaction is effectively delegated to that worker node. The worker node, by definition, provides the same guarantees as a single PostgreSQL server. + +## Multi-node transactions + +For transactions that write to multiple nodes, Citus uses the built-in two-phase commit (2PC) machinery in PostgreSQL. In the pre-commit callback, a “prepare transaction” command is sent over all connections to worker nodes with open transaction blocks, then a commit record is stored on the coordinator. In the post-commit callback, “commit prepared” commands are sent to commit on the worker nodes. The maintenance daemon takes care of recovering failed 2PC transactions by comparing the commit records on the coordinator to the list of pending prepared transactions on the worker. The presence of a record implies the transaction was committed, while the absence implies it was aborted. Pending prepared transactions are moved forward accordingly. + +2PC recovery + +Nice animation at: [How Citus Executes Distributed Transactions on Postgres](https://www.citusdata.com/blog/2017/11/22/how-citus-executes-distributed-transactions/) + +## No distributed snapshot isolation + +Multi-node transactions provide atomicity, consistency, and durability guarantees. 
Since the prepared transactions commit at different times, they do not provide distributed snapshot isolation guarantees.
+
+An example anomaly can occur between two concurrent distributed transactions:
+
+(a) Two inserts in a transaction block into two different shards
+
+```sql
+BEGIN;
+INSERT INTO test (key, value) VALUES (1,2);
+INSERT INTO test (key, value) VALUES (2,2);
+END;
+```
+
+(b) An update across shards
+
+```sql
+UPDATE test SET value = 3 WHERE value = 2;
+```
+
+If Citus provided serializability, there could only be 2 outcomes (a happens first or b happens first). However, it can have at least 4 outcomes, because the update depends on the inserts, and it might see only one of the inserts as committed.
+
+This can happen because the inserts commit using a 2PC if the shards are on different nodes, and therefore they might not become visible at exactly the same time. Since the commits happen in parallel, there are no guarantees w.r.t. which insert becomes visible first. The update could see either insert as committed, or none, or both, depending on exact timings. Hence, there is no well-defined order between a and b; they are intertwined.
+
+If the inserts depend on the update, there may be even more possible outcomes. For instance, if there is a unique constraint on (key, value), and we do upserts concurrently with the multi-shard update:
+
+```sql
+BEGIN;
+INSERT INTO test (key, value) VALUES (1,2) ON CONFLICT DO NOTHING;
+INSERT INTO test (key, value) VALUES (2,2) ON CONFLICT DO NOTHING;
+END;
+```
+
+Now, whether each insert proceeds or does nothing depends on whether the update is already committed or not. Hence, this scenario has 6 possible outcomes.
+
+It is hard for users to understand these semantics and their implications. Therefore, many database researchers and engineers have a strong preference for serializability. Having fewer possible outcomes means less potential for bugs and unintended situations. On the other hand, the performance impacts of snapshot isolation are generally significant, and we have not seen a lot of problems due to the lack of snapshot isolation in practice. The types of transactional workloads that scale well and therefore benefit from Citus are the types of workloads that scope their transactions to a single node and therefore get all the usual PostgreSQL guarantees.
+
+Our long-term goal is to provide snapshot isolation as an optional feature, with at least read committed guarantees (the default in PostgreSQL).
+
+## Distributed Deadlocks
+
+Deadlocks are inevitable in a database system that supports transactions, and Citus is no exception. Marco wrote a useful blog post on locks, deadlocks, and distributed deadlocks; please read it first: https://www.citusdata.com/blog/2017/08/31/databases-and-distributed-deadlocks-a-faq/ Another good introduction to distributed deadlocks can be found here: https://www.citusdata.com/blog/2017/11/22/how-citus-executes-distributed-transactions/
+
+At a high level, applications should try to avoid patterns that could cause (distributed) deadlocks. If those patterns are unavoidable, the database can still detect and resolve the deadlocks.
+
+### Distributed Deadlock Detection Implementation
+
+Citus heavily relies on PostgreSQL's internal locking infrastructure for detecting distributed deadlocks.
The entry function for distributed deadlock detection is `CheckForDistributedDeadlocks()`. Distributed deadlock detection runs in the background as part of the maintenance daemon.
+
+At a high level, Citus assigns "distributed transaction ids" to all backends running distributed transactions that might be part of a distributed deadlock (e.g., a transaction block such as `BEGIN; command;`, or any multi-shard command). See the `assign_distributed_transaction_id()` function:
+```sql
+
+BEGIN;
+
+DELETE FROM test WHERE a = 1;
+
+NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(0, 5, '2023-09-14 17:39:21.263272+03');
+
+```
+
+Then, Citus periodically (every 2 seconds by default) pulls all the "lock graphs" from all the nodes and combines them. If it finds a cycle in the combined graph, Citus concludes that there is a deadlock and kills one of the (distributed) processes involved in the graph. To give an example, assume we have the following case:
+
++ Transaction 1 waits for Transaction 2 on Node-1
++ Transaction 2 waits for Transaction 1 on Node-2
+
+In this case, none of the transactions can continue. This is a distributed deadlock: neither `Node-1` nor `Node-2` can detect it, as there are no local deadlocks. To find distributed deadlocks, we pull the lock graphs from each node and combine them.
+
+Let's dive a little deeper. A node-local lock graph is created in the C function `BuildLocalWaitGraph()`. The lock graph includes only distributed transactions that are assigned via the `assign_distributed_transaction_id` UDF. As noted, Citus heavily relies on Postgres to determine which backends are waiting for others; for details, see the `AddEdgesForLockWaits()` and `AddEdgesForWaitQueue()` C functions.
+
+Once each local lock graph is created, the results are combined on the coordinator. Then, for each node in the graph, we do a DFS (depth-first search) to check if there is a cycle involving that node. If there is a cycle, we conclude that there is a distributed deadlock.
+
+While doing the DFS, we also keep track of the other backends that are involved in the cycle. Citus picks the `youngest` transaction as the candidate to cancel (e.g., sends SIGINT). The idea is to let longer-running transactions continue, such as a long-running DDL.
+
+If there is a cycle in the local graph, typically Postgres' deadlock detection kicks in before Citus' deadlock detection and breaks the cycle. There is a benign race condition between Citus' deadlock detection and Postgres' deadlock detection. Even if the race happens, the worst-case scenario is that multiple backends from the same cycle are cancelled. In practice, we do not see this much, because Citus' deadlock detection runs `2x` slower (see `citus.distributed_deadlock_detection_factor`) than Postgres' deadlock detection.
+
+Deadlock detection
+
+For debugging purposes, you can enable logging for distributed deadlock detection via `citus.log_distributed_deadlock_detection`.
+
+With query from any node, we run the deadlock detection from all nodes. However, each node only tries to find deadlocks among the backends that were initiated on it. This helps to scale the deadlock detection workload across all nodes.
+
+When there are too many active backends (>1000) creating lots of waiting activity (e.g., blocked on the same locks, not necessarily involved in deadlocks), the deadlock detection process might become a bottleneck. There are probably some opportunities to optimize the code for these kinds of workloads.
As a workaround, we suggest increasing `citus.distributed_deadlock_detection_factor`.
+
+The mapping between distributed transaction IDs and backends/PIDs is maintained in the `BackendData` structure. For every Postgres backend, Citus keeps a `BackendData` structure. Each backend's state is preserved in the `MyBackendData` C structure. Assigning (and removing) a distributed transaction ID to/from a backend means updating this structure.
+
+If we were to implement distributed deadlock detection today, we would probably try to build it on top of the `Global PID` concept instead of the `distributed transaction id`. But, before changing that, we should make sure the `Global PID` is robust enough. Today, the `Global PID` is mostly used for observing the cluster. We should slowly put more emphasis on the `Global PID` and, once we feel comfortable with it, we can consider using it for distributed deadlock detection as well.
+
+# Locking
+
+Locks in a database like Postgres (and Citus) make sure that only one user can change a piece of data at a time. This helps to keep the data correct and safe. If two users try to change the same data at the same time, it could create problems or errors.
+
+Citus, a distributed database, needs extra locks because it stores data across multiple servers. These extra locks help make sure that even when many servers are involved, the data stays correct and safe during changes made by multiple users.
+
+In PostgreSQL and Citus, there are several types of locks that serve different purposes. In this section, we'd like to go over these different types of locks and explain when/how they are useful.
+
+## Lock Levels
+
+In database management, locking mechanisms are crucial for maintaining data integrity during concurrent operations. However, not all locks are created equal. As a building block, PostgreSQL allows different levels/modes of locks. This is probably different from the textbook model where, if a lock is held, everyone else has to wait: in PostgreSQL, some lock modes do not conflict with each other, whereas others do. This flexibility allows Postgres (and Citus) to implement sophisticated concurrency scenarios.
+
+For details of lock levels, please refer to the PostgreSQL docs: https://www.postgresql.org/docs/current/explicit-locking.html
+
+Understanding these lock types and their levels of restrictiveness can help you better manage concurrent operations and ensure data integrity.
+
+## Lock Monitoring
+
+Both PostgreSQL and Citus provide comprehensive views for monitoring the locks held (or waited on) by each backend. `pg_locks`: https://www.postgresql.org/docs/current/view-pg-locks.html
+
+In Citus, we have the same view, but collected from all nodes in the cluster: `citus_locks`.
+
+You can find lots of examples of how `pg_locks` (and `citus_locks`) can be used when debugging systems. One of the good ones is from PostgreSQL's wiki, `Combination of blocked and blocking activity`: https://wiki.postgresql.org/wiki/Lock_Monitoring The same query is also implemented within Citus for the distributed cluster, under the name `citus_lock_waits`: https://github.com/citusdata/citus/blob/4e46708789478d6deccd3d121f2b4da7f631ebe3/src/backend/distributed/sql/udfs/citus_lock_waits/11.0-1.sql#L4
+
+These simple monitoring tools are essential for understanding concurrency in PostgreSQL and Citus.
+
+## Lock Types
+
+1. **Table-level Locks**:
+
+Table-level locks in PostgreSQL range from less restrictive to more restrictive, allowing various levels of concurrent access.
They can lock an entire table to ensure data integrity during operations like adding columns. You can also acquire any of these locks explicitly with the `LOCK` command. Two transactions cannot hold locks of conflicting modes on the same table at the same time. (However, a transaction never conflicts with itself. For example, it might acquire an `ACCESS EXCLUSIVE` lock and later acquire an `ACCESS SHARE` lock on the same table.)
+
+Marco's blog post on locks provides a nice read on this topic: https://www.citusdata.com/blog/2018/02/15/when-postgresql-blocks/
+
+As a rule, in most cases, Citus relies on PostgreSQL to acquire the table-level locks. For example:
+
++ When a DDL command is executed on a Citus table, Citus first executes Postgres' `standardProcess_Utility()` function. One of the key reasons is that Postgres acquires the table-level lock there, so Citus provides concurrency behavior on Citus tables similar to Postgres. If an `ALTER TABLE .. ADD COLUMN` is running on a distributed table, no `SELECT` command can run concurrently due to the table-level locks.
+
++ When regular commands like `INSERT`/`UPDATE`/`DELETE`/`SELECT` are executed, Citus again relies on Postgres to acquire the table-level locks. PostgreSQL acquires the table-level locks during the parsing of the statements, which makes life simple for Citus, as parsing happens even before any Citus logic kicks in. If the command doesn't require parsing, such as prepared statements, then Postgres still acquires the same locks before using the cached plan. So, from Citus' perspective, there is mostly nothing to do for acquiring the table-level locks.
+
+ There is only one exception to this rule: Citus' _local plan caching_. When Citus caches a query plan itself, Citus acquires the relevant table-level locks. See `ExecuteLocalTaskListExtended()` as the relevant C function.
+
+Citus additionally uses table-level locks for certain table management operations. With all these operations, Citus aims to fit into the same concurrency behaviors as Postgres. For example, when `create_distributed_table()` is executed, Citus acquires an `ExclusiveLock` on the table. We do that because we want to block writes on the table – which acquire `RowExclusiveLock` – but let read-only queries continue – which acquire `AccessShareLock`. An additional benefit of this approach is that no two concurrent `create_distributed_table` calls can run on the same table.
+
+Another use case for table-level locks in Citus is the table-level locks acquired on the Citus metadata tables. Citus uses table-level locks on the metadata tables to define the concurrency behavior of certain operations. For example, while creating a new table or moving shards, it is common to acquire a `ShareLock` on the `pg_dist_node` table, while the `citus_add_node` function acquires an `ExclusiveLock` on the same metadata table. The latter signals the rest of the backends that the node metadata is about to change, so they are not allowed to rely on the current state of `pg_dist_node` (or, vice versa, `citus_add_node` should wait until the rebalance finishes).
+
+The main C function involved in table-level locking is `LockRelationOid()`. So, you can put a breakpoint on this function and see when/how Citus and Postgres acquire table-level locks.
+
+2. **Row-level Locks**:
+
+Row-level locks are more granular and lock only specific rows within a table. This allows multiple users to read and update different rows simultaneously, which can improve performance for multi-user scenarios.
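+
+For example, a plain `SELECT .. FOR UPDATE` takes row-level locks on the matching rows in the relevant shard, just as it would on a regular Postgres table (the table and values below are hypothetical):
+
+```sql
+BEGIN;
+-- locks only the row(s) with key = 1, in the shard that stores them
+SELECT * FROM items WHERE key = 1 FOR UPDATE;
+-- a concurrent UPDATE of key = 1 blocks until COMMIT; other keys are unaffected
+UPDATE items SET value = value + 1 WHERE key = 1;
+COMMIT;
+```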
+
+Citus is NOT involved in row-level locks; it fully relies on Postgres to acquire the locks on the shards. Marco's blog post gives a nice overview of row-level locks: https://www.citusdata.com/blog/2018/02/15/when-postgresql-blocks/
+
+3. **Advisory Locks:**
+
+Advisory locks are a special type of lock in PostgreSQL that give you more control over database operations. Unlike standard table or row-level locks that automatically lock database objects, advisory locks serve as flexible markers or flags. Developers can implement these custom locks to define their own rules for managing concurrency, making them particularly useful for extensions and custom workflows.
+
+#### Importance of Advisory locks for Citus
+
+In a distributed system like Citus, advisory locks take on an even more critical role. Because Citus spreads data across multiple nodes, managing concurrent operations becomes a complex task. Citus heavily relies on advisory locks for a variety of essential operations. Whether it's handling queries from any node, moving/splitting shards, preventing deadlocks, or managing colocation of related data, advisory locks serve as a powerful tool for ensuring smooth operation and data integrity.
+
+By employing advisory locks, Citus effectively deals with the complexities that come with distributed databases. They allow the system to implement sophisticated concurrency scenarios, ensuring that data remains consistent and operations are efficient across all nodes in the cluster.
+
+Below, we list some of the crucial advisory locks that Citus relies on:
+
+#### Citus Advisory Locks 1: Distributed Execution locks
+
+The relevant C function is `AcquireExecutorShardLocksForExecution()`, which has an extensive comment describing the specific rules. The function's main goal is to take advisory locks on shard IDs to make sure data stays safe and consistent across different operations. These locks are sometimes referred to as `ShardResourceLock`s in the code.
+
+In the context of distributed databases like Citus, "safe" generally means avoiding situations where different nodes in the cluster are trying to modify the same data at the same time in a way that could lead to errors, inconsistencies, or even deadlocks. This is critical when data is replicated across nodes (e.g., multiple copies of the same shard, like reference tables) or when a single operation affects multiple shards (e.g., a multi-shard update).
+
+The "consistency" here primarily refers to two things:
+
+1. **Order of Operations on Replicated Tables**: In a replicated table setup, the same data exists on multiple nodes. The function aims to make sure that any updates, deletes, or inserts happen in the same order on all copies of the data (replicas). This way, all the replicas stay in sync.
+
+2. **Preventing Distributed Deadlocks**: When you're running multi-shard operations that update the data, you can run into distributed deadlocks if operations on different nodes lock shards in a different order. This function ensures that the locks are acquired in a specific, consistent order, thus minimizing the risk of deadlocks.
+
+There are also options (`GUCs`) to relax these locking mechanisms based on the user's needs, but they come with the trade-off of potentially reduced consistency or increased risk of deadlocks.
+
+So, in summary, this function is about acquiring the right kind of lock based on what the operation is doing and what kind of table it's affecting, all to ensure that the data stays consistent and that operations are executed safely.
+
+#### Citus Advisory Locks 2: Metadata locks
+
+The second class of advisory locks is referred to as `metadata locks` in the code. See the `AcquireMetadataLocks()` and `LockShardDistributionMetadata()` functions.
+
+The main goal is to prevent concurrent placement changes and query executions. Essentially, they ensure that query execution always works on accurate placement metadata (e.g., shard placements).
+
+Citus always acquires metadata locks for shard moves and shard splits, irrespective of blocking vs. non-blocking operations. For blocking operations, the locks are held from the start of the operation, whereas for non-blocking operations the locks are held briefly at the end, right before the metadata is updated.
+
+Citus always acquires metadata locks for modification queries, in `CitusBeginModifyScan`, such that it serializes query modifications with placement metadata changes. A modification query always sees up-to-date metadata for the placements involved. Otherwise, the modification might get lost.
+
+Citus does not acquire metadata locks for SELECT queries. The main reason is that SELECTs are often long-running and would hold up the move. Instead, we allow SELECT commands to operate on the old placements in case of a concurrent shard move. The SELECT commands would already see the snapshot of the shard(s) when the SELECT started. So, there is no difference in terms of query correctness. We then later drop the old placements via "deferred drop" (see Resource cleanup).
+
+#### Citus Advisory Locks 3: Query from any node
+
+When users are allowed to run queries from any node, then in certain cases we need synchronization that involves multiple nodes. Advisory locks are a convenient tool for achieving these types of goals.
+
+ Citus exposes a few UDFs like `lock_shard_resources()` and `lock_shard_metadata()`, which are simple wrappers around the metadata and executor locks we discussed above.
+
+ When there are nodes with metadata, Citus acquires some of the advisory locks on all nodes, for example:
+
+ ```sql
+ select citus_move_shard_placement(102008, 'localhost', 9701, 'localhost', 9702, shard_transfer_mode:='force_logical');
+ ....
+
+ DETAIL: on server onderkalaci@localhost:9702 connectionId: 2
+NOTICE: issuing SELECT lock_shard_metadata(7, ARRAY[102008])
+DETAIL: on server onderkalaci@localhost:9701 connectionId: 1
+NOTICE: issuing SELECT lock_shard_metadata(7, ARRAY[102008])
+DETAIL: on server onderkalaci@localhost:9702 connectionId: 2
+...
+```
+
+Another useful application of advisory locks in query from any node is modifications to reference tables. Reference tables (or, in general, replicated tables) have multiple placements for a given shard, so modifications to the placements of the same shard must be serialized. We cannot allow multiple modification commands to modify the placements in different orders; that could cause the contents of the placements to diverge.
+
+The coordinator already serializes the modifications to reference tables (or, in general, all replicated tables) via the `LockShardResource()` C function. When there are other nodes in the cluster, Citus sends a similar command to the first worker node in the cluster.
In general, Citus aims to serialize operations on reference tables by acquiring advisory locks on the first worker node; see the `SerializeNonCommutativeWrites()` and `LockShardListResourcesOnFirstWorker()` C functions. We use the `first worker node` as an optimization. Instead of acquiring the locks on all the nodes, each node sorts the worker nodes deterministically and acquires the lock on the first node. Whichever distributed transaction acquires the lock has the authority to continue its transaction. Any other transactions are blocked until the first distributed transaction finishes, as they would be in a coordinator-only configuration. We used the first worker node, as opposed to the coordinator, for two reasons. First, the coordinator might already be getting lots of client queries, and we don't want to create additional load on the coordinator. Second, in some Citus deployments, the coordinator may **not** be in the metadata; hence, the other nodes might not know about the coordinator.
+
+Getting back to the basic flow, the outcome of modifying a reference table (or replicated table) is the following, where `lock_shard_resources` is acquired on the first node:
+
+```sql
+ BEGIN;
+-- 9701 is the first worker node in the metadata
+insert into reference_table VALUES (1);
+NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(0, 8, '2023-09-18 16:30:18.715259+03');
+DETAIL: on server onderkalaci@localhost:9701 connectionId: 1
+NOTICE: issuing SELECT lock_shard_resources(3, ARRAY[102040])
+DETAIL: on server onderkalaci@localhost:9701 connectionId: 1
+NOTICE: issuing INSERT INTO public.reference_table_102040 (a) VALUES (1)
+
+```
+
+#### Citus Advisory Locks 4: Colocation locks
+
+Although not ideal, there are currently multiple advisory locks that deal with colocation: `AcquireRebalanceColocationLock()` and `AcquirePlacementColocationLock()`.
+
+The former has simple logic: it prevents concurrent shard moves of the same colocated shards. So, basically, it prevents users from running `citus_move_shard_placement()` concurrently for the same colocated shards.
+
+The latter is a bit more interesting. It aims to ensure that the placements of the same colocation group never diverge regarding where the placements reside. This might sound like a given in Citus. However, with concurrent `create_distributed_table` and shard move/splits, there might be some race conditions. The former operation might use the view of the placements before the shard move/split, whereas the latter changes this. As a result, the new table might have a placement that is not colocated with the other placements anymore.
+
+To prevent that, Citus acquires the lock via `AcquirePlacementColocationLock()` while the placement metadata is changed/read. This lock was introduced to fix a user-reported bug: https://github.com/citusdata/citus/issues/6050
+
+4. **Low-level Locks (SpinLocks and LWLocks)**:
+
+SpinLocks and LWLocks are low-level locking mechanisms often used internally by the database system.
+Citus uses `LWLocks` and `SpinLocks` as described in Postgres' source code: https://github.com/postgres/postgres/tree/master/src/backend/storage/lmgr
+
++ **Spinlocks:** Spinlocks are used for very short-term locking and are meant to be held only for a few instructions. They don't have features like deadlock detection or automatic release, and waiting processes keep checking ("busy-loop") until they can acquire the lock.
+
+The lack of "automatic release" can be very critical. For other lock types, when the transaction finishes, the locks are released by Postgres automatically. It means that if, say, a `palloc` fails due to lack of memory, we rely on Postgres to release all the locks. However, this is **NOT** true for spinlocks. So, do not even allocate any memory while holding a spinlock; only do very simple assignments etc. Otherwise, the lock might be held until a restart of the server.
+
+In the past we had bugs where a `palloc` failure while holding a `SpinLock` prevented the lock from being released. So, be extra careful when using `SpinLock`. See this bugfix as a reference: https://github.com/citusdata/citus/pull/2568/files
+
++ **Lightweight Locks (LWLocks):** These locks are mainly used for controlling access to shared memory data structures and support both exclusive and shared lock modes. While they also lack deadlock detection, they do automatically release when errors are raised, and waiting processes block on a SysV semaphore, conserving CPU time.
+
+# Rebalancing
+
+A high-level overview of the rebalancer is given in [this rebalancer blog post](https://www.citusdata.com/blog/2021/03/13/scaling-out-postgres-with-citus-open-source-shard-rebalancer/).
+
+## Rebalancing algorithm
+
+## Shard moves
+
+Shard moves move a shard group placement to a different node (group). Moves are orchestrated by the `citus_move_shard_placement` UDF, which is also the function that the rebalancer runs to move a shard.
+
+We implement blocking and non-blocking shard moves. Non-blocking shard moves use logical replication, which has an important limitation. If the (distributed) table does not have a replica identity (usually the primary key), then update/delete commands will error out once we create a publication. That means using a non-blocking move without a replica identity does incur some downtime. Since a blocking move is generally faster (in part because it forces out regular work), it may be less invasive. We therefore force the user to choose when trying to move a shard group that includes a table without a replica identity by supplying `shard_transfer_mode := 'force_logical'` or `shard_transfer_mode := 'block_writes'`.
+
+The blocking move is mostly a simplified variant of the non-blocking move (with locks taken upfront). A non-blocking move involves the following steps:
+
+- **Create the new shard group placement on the target node**. We also create constraints that do not involve an index and set up ownership and access control.
+- **Create publication(s) on the source node**. We create publications containing the shards in the source shard group placement. We create one publication per table owner, mainly because we need one subscription per table owner to prevent privilege escalation issues on older versions of PostgreSQL (15 and below).
+- **Create replication slot(s) and export snapshot(s)**. We create a slot per table owner because we use a separate subscription per table owner, similar to publications. Subscriptions can create the replication slot, but we (nowadays) copy the data outside of the subscription because we apply several optimizations.
+- **Create subscription(s) in disabled state**. We create subscriptions upfront in case there are any errors (e.g. hitting resource limits, connectivity issues). We create one subscription per table owner, and we set the subscription owner to the table owner.
The logical replication will happen with the permissions of the subscription owner. +- **Copy the data from the source to target** by calling `worker_shard_copy` function for each source shard placement via the executor. The `worker_shard_copy` function makes a single pass over the source shard and pushes it into the target shard via `COPY`. We found this to be faster than using the `copy_data` option in the subscription because we can benefit from binary copy, optimizations in the adaptive executor, uses fewer replication slots, and it simplifies the flow. Some of these optimizations might be obsolete as of PostgreSQL 16. +- **Enable subscription(s)**, which starts the replication of writes that happened on the source shard group placement during the data copy. +- **Wait for subscription(s) to catch up to the current source LSN**. This can take a while since many writes could have happened during the data copy. +- **Create indexes, unique/exclusion constraints, statistics, etc.**. For efficiency, we create these objects after copying the data and catching up to concurrent writes. +- **Wait for subscription(s) to catch up to the current source LSN**. This can take a while since many writes could have happened during the index creation. +- **Block writes to the split parent by acquiring metadata locks globally**. At this point, we wait for any ongoing INSERT/UPDATE/DELETE/COPY/MERGE to finish and block new ones. Once we acquire the locks we try to quickly finalize the split. +- **Wait for subscription(s) to catch up to the current source LSN**. Some writes could still have happened before acquiring locks, we wait for those writes to be replicated. +- **Update the metadata**. We globally update the `pg_dist_placement` record to point to the new node. Writes tactically acquire metadata locks just before reading from `pg_dist_placement`, so they will see the new placement as soon as we commit. +- **Create foreign keys on the target shard group placement**. Creating foreign keys is deferred until the replication is fully done, because we used multiple subscriptions for tables with different owners and this is the first time that the data is guaranteed to be consistent between shards. We avoid rechecking foreign keys by using the `citus.skip_constraint_validation` setting on the session. +- **Final cleanup of connections, resources**. We primarily lean on "Resource cleanup" to drop publications, replication slots, subscriptions, which ensures they are removed both in case of success and failure. The source shard group placement is dropped once all ongoing (read-only) queries are done, by repeatedly dropping with a short lock timeout until it succeeds. + +It is worth noting that the final commit happens in a 2PC, with all the characteristics of a 2PC. If the commit phase fails on one of the nodes, writes on the shell table remain blocked on that node until the prepared transaction is recovered, after which they will see the updated placement. The data movement generally happens outside of the 2PC, so the 2PC failing on the target node does not necessarily prevent access to the shard. + +A similar operation to shard moves is `citus_copy_shard_placement`, which can be used to add a replica to a shard group. We also use this function to replicate reference tables without blocking. The main difference is that dropping the old shard group placement is skipped. 
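+
+To tie the steps above to the user-facing entry point, a move is typically initiated as sketched below (the shard ID, host names, and ports are placeholders), mirroring the earlier advisory-lock example:
+
+```sql
+-- non-blocking move using logical replication
+SELECT citus_move_shard_placement(102008, 'worker-1', 5432, 'worker-2', 5432,
+                                  shard_transfer_mode := 'force_logical');
+
+-- blocking variant, e.g. for tables without a replica identity
+SELECT citus_move_shard_placement(102008, 'worker-1', 5432, 'worker-2', 5432,
+                                  shard_transfer_mode := 'block_writes');
+```
+
+`citus_copy_shard_placement` mentioned above is invoked with a similar set of source/target arguments and the same `shard_transfer_mode` choice.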
+ +A workaround for the replica identity problem is to always assign REPLICA IDENTITY FULL to distributed tables / shards if they have no other replica identity. However, prior to PostgreSQL 16, replication of updates and deletes to a table with replica identity full could be extremely slow (it does a sequential scan per tuple). As of PostgreSQL 16, the logical replication worker can use a regular btree index to find a matching tuple (if one exists). Even for distributed tables without any indexes, and without a replica identity, we could tactically set REPLICA IDENTITY FULL on the shards, and create a suitable index on the target shard group placement for the duration of the move. Once we implement this, we could avoid erroring for distributed tables without a replica identity. + +## Shard splits + +Shard splits convert one shard group ("split parent") into two or more shard groups ("split children") by splitting the hash range. The new shard groups can be placed on the source node itself, or on other nodes. We implement blocking and non-blocking shard splits. The blocking variant is mostly a simplified version of non-blocking, so we only cover non-blocking here. Shard splits have many similarities to shard moves, and have the same `shard_transfer_mode` choice. + +The shard split is a lengthy process performed by the `NonBlockingShardSplit` function, supported by a custom output plugin to handle writes that happen during the split. There are a few different entry-points into this logic, namely: `citus_split_shard_by_split_points`, `create_distributed_table_concurrently`, and `isolate_tenant_to_node`. + +We currently do not build a separate .so file for the output plug-in, so it is part of citus.so and therefore the name of the output plug-in is "citus". The purpose of the output plug-in is to translate changes to the original shard group into changes to the split children, and emit them in pgoutput format (by calling the original pgoutput plug-in). In some cases, the schema of the split parent can be subtly different from the split children. In particular, some columns may have been dropped on the parent. Dropped columns remain in the metadata and remaining columns are not renumbered, but when we create the split children we only create them with the current columns. When this scenario occurs, we convert the tuple in `GetTupleForTargetSchema`. + +A split involves the following steps: + +- **Create the new shard groups ("split children") on the target node(s)**. We also create constraints that do not involve an index and set up ownership and access control. +- **Create "dummy" shard groups on the source node**, unless the split child is also on the source node. The reason for creating the dummy shards is primarily to make the pgoutput output plug-in happy. Our output plug-in maps changes to the split parent into changes to the split children before calling pgoutput, and those tables need to exist for pgoutput to be able to interpret and emit the change, even when that table is not actually used on the source node. +- **Create replica identities on dummy shards**. This is also needed to keep pgoutput happy, because for updates and deletes it emits the values in the replica identity columns, so it needs to know what the replica identity is. +- **Create publication(s) on the source node**, which include both the parent and children. We add the split parent for our own output plug-in to recognize which shard group it should split, and we add the split children for pgoutput to recognize that it should emit them.
We might create multiple publications per shard group because we use a separate publication and subscription per table owner, to prevent privilege escalation issues on older versions of PostgreSQL (15 and below). +- **Set up the shard split output plug-in**. We configure our output plug-in on the source node via `worker_split_shard_replication_setup`, which sets up a dynamic shared memory (DSM) segment that the output plug-in will read from. We currently only have one DSM segment, which would need to be changed to support concurrent splits from the same node. +- **Create replication slot(s) and export snapshot(s)**. We cannot perform any write to the database before this step, because this step waits for all transactions that perform writes to finish. We create multiple slots because we use a separate slot per table owner, similar to publications. +- **Create subscription(s) in disabled state**. We create subscriptions upfront in case there are any errors (e.g. hitting resource limits, connectivity issues). We create one subscription per table owner, similar to publications and replication slots. The logical replication will happen with the permissions of the subscription owner. +- **Split the data in the split parent into the split children** using `worker_split_copy` with the exported snapshot. The `worker_split_copy` function makes a single pass over the parent shard and pushes it into the split children via `COPY`, either via a connection to another node or by invoking the COPY logic locally when the split children are on the same node. Internally, it uses the DestReceiver APIs and effectively layers the DestReceiver used in re-partition operations on top of the DestReceiver used by `worker_shard_copy` in shard moves. We run a separate `worker_split_copy` task for every shard in the shard group and execute them via the adaptive executor, which may elect to parallelize them. +- **Enable subscription(s)**, which starts the replication of writes that happened on the split parent during the data copy into the split children. +- **Wait for subscription(s) to catch up to the current source LSN**. This can take a while since many writes could have happened during the data copy. +- **Create indexes, unique/exclusion constraints, statistics, etc.** For efficiency, we create these objects after copying the data and catching up to concurrent writes. +- **Wait for subscription(s) to catch up to the current source LSN**. This can take a while since many writes could have happened during the index creation. +- **Block writes to the split parent by acquiring metadata locks globally**. At this point, we wait for any ongoing INSERT/UPDATE/DELETE/COPY/MERGE to finish and block new ones. Once we acquire the locks we try to quickly finalize the split. +- **Wait for subscription(s) to catch up to the current source LSN**. Some writes could still have happened before acquiring the locks; we wait for those writes to be replicated. +- **Update the metadata**. We globally delete the metadata of the split parent, and insert the metadata of the split children. In case of `create_distributed_table_concurrently` we also update `pg_dist_partition` and `pg_dist_colocation`. +- **Create partitioning hierarchy and foreign keys on the split children**. Creating these relationships is deferred until the replication is fully done, because we used multiple subscriptions for tables with different owners and this is the first time that the data is guaranteed to be consistent between shards.
We avoid rechecking foreign keys by using the `citus.skip_constraint_validation` setting on the session. +- **Final cleanup of DSM, connections, resources**. We clean up all the resources we created, such as publications, subscriptions, replication slots, and dummy shards, via "Resource cleanup", as well as the split parent (deferred, in case of success) or the split children (in case of failure). We currently do not clean up the DSM in case of failure, but we always idempotently reset it when doing another split. + +A difference between splits and moves is that the old shard ID disappears. In case of a move, only the placement changes, and for writes we always look up the placement in the executor after acquiring locks that conflict with moves (wait until the move is done). In case of a split, the query changes in more fundamental ways, and a single-shard query might actually become a multi-shard query if it were replanned. When a write gets to the executor, after acquiring locks that conflict with the shard split (wait until the split is done), we check whether the shard still exists _in the metadata_, and in case of fast path queries (which are strictly single shard), we try to reroute in `TryToRerouteFastPathModifyQuery`. Otherwise, we error in `EnsureAnchorShardsInJobExist`. In case of reads, we lean on the deferred drop logic to let the read proceed on the old shard placement. + +## Background tasks + +## Resource cleanup + +During a shard move/split, some PostgreSQL objects can be created that live outside of the scope of any transaction or are committed early. We need to make sure those objects are dropped once the shard move ends, either through failure or success. For instance, subscriptions and publications used for logical replication need to be dropped in case of failure, but also the target shard (in case of failure) and source shard (in case of success). + +To achieve that, we write records to pg_dist_cleanup before creating an object to remember that we need to clean it. We distinguish between a few scenarios: + +**Cleanup-on-failure**: Cleanup should only happen if the operation fails. The main example is the target shard of a move/split. We achieve cleanup-on-failure by writing pg_dist_cleanup records in a subtransaction (a transaction on a localhost connection that commits immediately) and deleting them in the outer transaction that performs the move/split. That way, they remain in pg_dist_cleanup in case of failure, but disappear in case of success. + +**Cleanup-deferred-on-success**: Cleanup should only happen after the operation (move/split) succeeds. We use this to clean the source shards of a shard move. We previously dropped shards immediately as part of the transaction, but this frequently led to deadlocks at the end of a shard move. We achieve cleanup-deferred-on-success by writing pg_dist_cleanup records as part of the outer transaction that performs the move/split. + +**Cleanup-always**: For most resources that require cleanup records, cleanup should happen regardless of whether the operation succeeds or fails. For instance, subscriptions and publications should always be dropped. We achieve cleanup-always by writing pg_dist_cleanup records in a subtransaction, and at the end of the operation we try to clean up the object immediately and, if that succeeds, delete the record. If cleanup fails, we do not fail the whole operation, but instead leave the pg_dist_cleanup record in place for the maintenance daemon.
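+ +A minimal way to observe this machinery (a sketch, assuming a recent Citus version where the `pg_dist_cleanup` catalog and the `citus_cleanup_orphaned_resources()` procedure exist) is to inspect the cleanup queue and trigger deferred cleanup by hand: + +```sql +-- Pending cleanup records; each row carries the operation ID and the cleanup +-- policy (cleanup-on-failure, cleanup-deferred-on-success, or cleanup-always). +SELECT * FROM pg_dist_cleanup; + +-- Process leftover records immediately instead of waiting for the +-- maintenance daemon. +CALL citus_cleanup_orphaned_resources(); +```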
+ +The resource cleaner (currently shard_cleaner.c) is part of the maintenance daemon and periodically checks pg_dist_cleanup for cleanup tasks. It’s important to prevent cleanup of operations that are still running. Therefore, each operation has a unique operation ID (from a sequence) and takes an advisory lock on the operation ID. The resource cleaner learns the operation ID from pg_dist_cleanup and attempts to acquire this lock. If it cannot acquire the lock, the operation is not done and cleanup is skipped. If it can, the operation is done, and the resource cleaner rechecks whether the record still exists, since it could have been deleted by the operation. + +Cleanup records always need to be committed before creating the actual object. It’s also important for the cleanup operation to be idempotent, since the server might crash immediately after committing a cleanup record, but before actually creating the object. Hence, the object might not exist when trying to clean it up. In that case, the cleanup is seen as successful, and the cleanup record is removed. + +# Logical decoding / CDC + +PostgreSQL supports change data capture (CDC) via the logical decoding interface. The basic idea behind logical decoding is that you make a replication connection (a special type of postgres connection), start replication, and then the backend process reads through the WAL, decodes the WAL records, and emits them over the wire in a format defined by the output plugin. If we were to use regular logical decoding on the nodes of a Citus cluster, we would see the name of the shard in each write, and internal data transfers such as shard moves would result in inserts being emitted. We use several techniques to avoid this. + +All writes in PostgreSQL are marked with a replication origin (0 by default) and the decoder can make decisions on whether to emit the change based on the replication origin. We use this to filter out internal data transfers. If `citus.enable_change_data_capture` is enabled, all internal data transfers are marked with the special DoNotReplicateId replication origin by calling the `citus_internal.start_replication_origin_tracking()` UDF before writing the data. This replication origin ID is special in the sense that it does not need to be created (which prevents locking issues, especially when dropping replication origins). It is still up to the output plugin to decide what to do with changes marked as DoNotReplicateId. + +We have very minimal control over replication commands like `CREATE_REPLICATION_SLOT`, since there are no direct hooks, and decoder names (e.g. “pgoutput”) are typically hard-coded in the client. The only method we found of overriding logical decoding behaviour is to overload the output plugin name in the dynamic library path. + +For specific output plugins, we supply a wrapper .so that has the same name as the existing output plugin, but in a separate directory in the PostgreSQL installation folder, and we automatically prefix the `dynamic_library_path` with this folder such that PostgreSQL will load our wrapper.
The wrapper internally loads the original output plugin, and calls the original output plugin functions after applying two transformations: + +- Shard OIDs are mapped to distributed table OIDs +- Changes marked with DoNotReplicateId are skipped + +Mapping the shard OIDs to distributed table OIDs not only makes the output understandable for users, but also simplifies our implementation of the CREATE PUBLICATION command, which is used to configure the pgoutput plugin (used by logical replication). We create the same publication on all nodes using the same distributed table names. Since the original pgoutput plugin only sees changes to distributed tables, it can relate those to the set of distributed tables in the publication. + +We have to build a .so for each wrapper separately. We currently build wrappers for [pgoutput and wal2json](https://github.com/citusdata/citus/blob/main/src/backend/distributed/cdc/Makefile#L8). + +This approach fulfills our main requirements, though we currently have a small correctness issue. Logical decoding always deals with a situation in the past, and to do so it builds a historical snapshot of the PostgreSQL catalogs. Tables may have been renamed or dropped since the change happened, but the historical snapshot shows the schema as it was at the time of the change. However, we cannot build a historical snapshot of the Citus catalogs, and we therefore rely on the present values. The main issue that can arise is that the shard may have been dropped, in which case the change might be emitted using the original shard name, since that name is no longer recognized as a shard name. In many cases, this issue is avoided by caching the Citus catalogs. + +An open issue with CDC is that there is no good way to get a consistent snapshot followed by a change stream that starts from the snapshot. One way to resolve this is to allow reading only from local shards using an exported snapshot. That way, clients can create a replication slot and export a snapshot from each node, pull a subset of the data from each node using the snapshots, and then start replication on each node from the snapshot LSN. + +## CDC ordering + +When you have a multi-node cluster, clients should connect to each node and combine the changes. It is important to note that there are no guarantees with regard to when and in what order changes will be emitted between nodes. It is especially important to understand that changes cannot be reordered (e.g. based on timestamp or transaction ID), because only the node-level order is correct. The lack of distributed snapshot isolation in Citus means that changes can be interleaved (a happens before b on node 1, b happens before a on node 2). The node-level decoder output will reflect that as it happened. + +_Do not reorder changes based on timestamp or distributed transaction ID or anything else that is not guaranteed to preserve node-level order. It is never correct._ + +# Global PID + +The global PID (gpid) is used to give each client connection to the cluster a unique process identifier, and to understand which internal connections belong to a specific client connection. A gpid consists of the combination of the node ID and the PID of the coordinating process (i.e. the process serving a client connection). It can be seen in various monitoring views: + +```sql +SELECT citus_backend_gpid(); +SELECT * FROM citus_stat_activity; +SELECT * FROM citus_lock_waits; +SELECT * FROM citus_locks; +``` + +The gpid is passed over internal connections via the application_name setting.
This is one of the few settings that pgbouncer automatically propagates when reusing a server connection for a different client connection. Hence, gpids are robust to having pgbouncers in between nodes, but it means that internal sessions might switch between gpids. + +Additional details: Monitor distributed Postgres activity with citus_stat_activity & citus_lock_waits (citusdata.com) + +# Function call delegation + +One of the downsides of multi-statement transactions in a distributed database is the extra network round trips involved in each individual statement, even when each statement goes to the same worker node. In Citus this can be solved by marking a function or stored procedure as distributed. A distributed function has a distribution argument and can be co-located with distributed tables. It can be created using: + +```sql +SELECT create_distributed_function('delivery(int,int)', '$1'); +``` + +When a distributed function is called, the argument is treated as a distribution column filter on a co-located distributed table and the call is delegated to the worker node that stores the corresponding shards. Ideally, every statement in the function uses the distribution argument as a distribution column filter and only accesses co-located tables, such that the transaction remains local to the worker node. Otherwise, the worker assumes the role of coordinator and performs a distributed transaction. Function call delegation is especially useful in multi-tenant applications that involve complex transactions, as those transactions can be handled in a single network round-trip and with almost no overhead on the coordinator. + + +Citus coordinator delegates stored procedure calls to worker nodes + +We’ve implemented function call delegation through early-stage checks in the planner hook and the utility hook (for CALL, in case of procedures). If the query matches the simple form shown in the figure above, and the function is marked as distributed, then the function call will be propagated to the right node based on the sharding metadata of a co-located table. + +On the target node, the function is executed as usual using the distributed tables (shell tables + metadata) on that node. The target node will hopefully find that most of the queries are on local shards, use only local execution, and take advantage of _local plan caching_. + + + +# Query from any node + +Some Citus users have remarkably high query throughputs (>500k/s). A single-coordinator architecture can sometimes become a bottleneck for scaling such applications. To avoid that, Citus supports querying the database from any node in the cluster. + +In the past, this feature was referred to as Citus MX. We currently refer to it as Query From Any Node. The idea is simple: synchronize all the metadata (including the shell tables and pg_dist_XXX tables) on all the nodes. We do this for all DDL commands as well as when a new node is added to the cluster. In essence, all the nodes in the cluster always have the same metadata through 2PC. + +Once the metadata is synced, each node can act as the coordinator, capable of doing the distributed query processing. We also provide some monitoring tools such that from the user’s perspective, it should not matter which node the client is connected to.
The user should be able to monitor / cancel all the activity in the cluster, using the infrastructure described here: https://www.citusdata.com/blog/2022/07/21/citus-stat-activity-views-for-distributed-postgres/ + +One of the challenges with query from any node is the total number of connections in the cluster. In a single-coordinator world, only the coordinator establishes connections to each node. Now, every node connects to every other node. Hence, the user should adjust the connection-related settings; see here for details: https://www.citusdata.com/updates/v11-0/#connection-management + +By default, Citus hides all the shards from the applications because it confused many users: https://www.citusdata.com/updates/v11-0/#shard-visibility We do it in a slightly hacky way. Read the `HideShardsFromSomeApplications()` C function for the details. + + + +Another important piece of query from any node is that the managed service should provide a single connection string and do the load balancing for the user. It is impractical to have multiple connection strings to the database from any application’s perspective. As of writing this document (Sept. 2023), the managed service did not provide this infrastructure. Another future improvement area for query from any node is the “smart client”. A smart “pgbouncer” type of client might be able to route the client queries to the worker node with the relevant data. This could eliminate the need for additional query routing in case the query does not hit the worker node with the relevant data. + +Another future improvement is to allow running DDLs from any node. Currently, DDLs (including ALTER TABLE, create_distributed_table, etc.) must all go through the coordinator. + +## Why didn’t we have dedicated Query Nodes and Data Nodes? + +Some distributed databases distinguish between Query Nodes and Data Nodes. As the names imply, Query Nodes would only do the query processing, whereas Data Nodes would only hold the data. In Citus, we decided not to follow that route, mostly because our initial benchmarks showed that combined nodes performed better in terms of price/performance. Still, some people argued that it might be better to have different classes of nodes such that they can be tuned / scaled up or out differently based on the load for a given application. + +Dedicated query nodes benchmarks + +If this discussion comes up again, we suggest running some more benchmarks and ensuring the performance characteristics do not change dramatically. We do not foresee any architectural problems with that. It mostly comes down to price, performance, and product discussions. Note that you can quickly test this by disallowing certain nodes to have shards on them. You should also consider whether reference tables should still be present on query nodes, and whether there are any behavioural differences between query nodes and the coordinator. + +## Shard visibility + +Shards live in the same schema as the distributed table they belong to, so you might expect to see them when connecting to a worker node and running `\d`. While this was previously the case, it caused confusion among users and also broke tools like `pg_dump`. Therefore, we now aggressively hide the shards by default from any query on `pg_class`. We do this by injecting a `relation_is_a_known_shard(oid)` filter in the query tree via the planner hook when we encounter a RangeTblEntry for `pg_class`.
The fact that shards are hidden from `pg_class` does not affect queries on the shards, since PostgreSQL internals will not go through the query planner when accessing `pg_class`. + +Shards can be revealed via two settings: + +- `citus.override_shard_visibility = off` disables shard hiding entirely +- `citus.show_shards_for_app_name_prefixes`= 'pgAdmin,psql'` disables shard hiding only for specific application_name values, by prefix diff --git a/src/backend/distributed/cdc/cdc_decoder.c b/src/backend/distributed/cdc/cdc_decoder.c index 2beb2777248..cf9f4963b72 100644 --- a/src/backend/distributed/cdc/cdc_decoder.c +++ b/src/backend/distributed/cdc/cdc_decoder.c @@ -8,8 +8,9 @@ *------------------------------------------------------------------------- */ -#include "cdc_decoder_utils.h" #include "postgres.h" + +#include "cdc_decoder_utils.h" #include "fmgr.h" #include "access/genam.h" diff --git a/src/backend/distributed/cdc/cdc_decoder_utils.c b/src/backend/distributed/cdc/cdc_decoder_utils.c index f5b23aa12d2..b571d18b93c 100644 --- a/src/backend/distributed/cdc/cdc_decoder_utils.c +++ b/src/backend/distributed/cdc/cdc_decoder_utils.c @@ -8,18 +8,21 @@ *------------------------------------------------------------------------- */ #include "postgres.h" -#include "commands/extension.h" + +#include "cdc_decoder_utils.h" #include "fmgr.h" #include "miscadmin.h" + #include "access/genam.h" #include "access/heapam.h" +#include "catalog/pg_namespace.h" +#include "commands/extension.h" #include "common/hashfn.h" #include "common/string.h" #include "utils/fmgroids.h" -#include "utils/typcache.h" #include "utils/lsyscache.h" -#include "catalog/pg_namespace.h" -#include "cdc_decoder_utils.h" +#include "utils/typcache.h" + #include "distributed/pg_dist_partition.h" #include "distributed/pg_dist_shard.h" #include "distributed/relay_utility.h" diff --git a/src/backend/distributed/cdc/cdc_decoder_utils.h b/src/backend/distributed/cdc/cdc_decoder_utils.h index 46d1e4ae569..8b9cb298bde 100644 --- a/src/backend/distributed/cdc/cdc_decoder_utils.h +++ b/src/backend/distributed/cdc/cdc_decoder_utils.h @@ -12,9 +12,11 @@ #define CITUS_CDC_DECODER_H #include "postgres.h" + +#include "c.h" #include "fmgr.h" + #include "replication/logical.h" -#include "c.h" #define InvalidRepOriginId 0 #define INVALID_SHARD_ID 0 diff --git a/src/backend/distributed/clock/causal_clock.c b/src/backend/distributed/clock/causal_clock.c index 74c87bad46b..eb4b8d9d362 100644 --- a/src/backend/distributed/clock/causal_clock.c +++ b/src/backend/distributed/clock/causal_clock.c @@ -11,36 +11,37 @@ #include #include "postgres.h" -#include "miscadmin.h" + #include "fmgr.h" #include "funcapi.h" #include "libpq-fe.h" +#include "miscadmin.h" -#include "utils/builtins.h" -#include "utils/datum.h" -#include "utils/numeric.h" -#include "utils/typcache.h" -#include "nodes/pg_list.h" #include "catalog/namespace.h" #include "commands/extension.h" #include "commands/sequence.h" #include "executor/spi.h" +#include "nodes/pg_list.h" #include "postmaster/postmaster.h" #include "storage/ipc.h" #include "storage/lwlock.h" +#include "storage/s_lock.h" #include "storage/shmem.h" #include "storage/spin.h" -#include "storage/s_lock.h" +#include "utils/builtins.h" +#include "utils/datum.h" +#include "utils/numeric.h" +#include "utils/typcache.h" #include "distributed/causal_clock.h" +#include "distributed/citus_safe_lib.h" +#include "distributed/coordinator_protocol.h" #include "distributed/listutils.h" -#include "distributed/lock_graph.h" #include 
"distributed/local_executor.h" +#include "distributed/lock_graph.h" #include "distributed/metadata_cache.h" -#include "distributed/remote_commands.h" #include "distributed/placement_connection.h" -#include "distributed/coordinator_protocol.h" -#include "distributed/citus_safe_lib.h" +#include "distributed/remote_commands.h" #define SAVE_AND_PERSIST(c) \ do { \ @@ -396,7 +397,7 @@ AdjustClocksToTransactionHighest(List *nodeConnectionList, /* Set the clock value on participating worker nodes */ appendStringInfo(queryToSend, - "SELECT pg_catalog.citus_internal_adjust_local_clock_to_remote" + "SELECT citus_internal.adjust_local_clock_to_remote" "('(%lu, %u)'::pg_catalog.cluster_clock);", transactionClockValue->logical, transactionClockValue->counter); diff --git a/src/backend/distributed/commands/alter_table.c b/src/backend/distributed/commands/alter_table.c index 8c2736a28db..030dbbe7869 100644 --- a/src/backend/distributed/commands/alter_table.c +++ b/src/backend/distributed/commands/alter_table.c @@ -34,9 +34,16 @@ #include "catalog/pg_am.h" #include "catalog/pg_depend.h" #include "catalog/pg_rewrite_d.h" +#include "commands/defrem.h" +#include "executor/spi.h" +#include "nodes/pg_list.h" +#include "utils/builtins.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" + #include "columnar/columnar.h" #include "columnar/columnar_tableam.h" -#include "commands/defrem.h" + #include "distributed/colocation_utils.h" #include "distributed/commands.h" #include "distributed/commands/utility_hook.h" @@ -57,16 +64,11 @@ #include "distributed/reference_table_utils.h" #include "distributed/relation_access_tracking.h" #include "distributed/replication_origin_session_utils.h" -#include "distributed/shared_library_init.h" #include "distributed/shard_utils.h" +#include "distributed/shared_library_init.h" #include "distributed/tenant_schema_metadata.h" #include "distributed/worker_protocol.h" #include "distributed/worker_transaction.h" -#include "executor/spi.h" -#include "nodes/pg_list.h" -#include "utils/builtins.h" -#include "utils/lsyscache.h" -#include "utils/syscache.h" /* Table Conversion Types */ @@ -207,12 +209,9 @@ static void ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommand static bool HasAnyGeneratedStoredColumns(Oid relationId); static List * GetNonGeneratedStoredColumnNameList(Oid relationId); static void CheckAlterDistributedTableConversionParameters(TableConversionState *con); -static char * CreateWorkerChangeSequenceDependencyCommand(char *sequenceSchemaName, - char *sequenceName, - char *sourceSchemaName, - char *sourceName, - char *targetSchemaName, - char *targetName); +static char * CreateWorkerChangeSequenceDependencyCommand(char *qualifiedSequeceName, + char *qualifiedSourceName, + char *qualifiedTargetName); static void ErrorIfMatViewSizeExceedsTheLimit(Oid matViewOid); static char * CreateMaterializedViewDDLCommand(Oid matViewOid); static char * GetAccessMethodForMatViewIfExists(Oid viewOid); @@ -789,13 +788,15 @@ ConvertTableInternal(TableConversionState *con) justBeforeDropCommands = lappend(justBeforeDropCommands, detachFromParentCommand); } + char *qualifiedRelationName = quote_qualified_identifier(con->schemaName, + con->relationName); + if (PartitionedTable(con->relationId)) { if (!con->suppressNoticeMessages) { ereport(NOTICE, (errmsg("converting the partitions of %s", - quote_qualified_identifier(con->schemaName, - con->relationName)))); + qualifiedRelationName))); } List *partitionList = PartitionList(con->relationId); @@ -868,9 +869,7 @@ 
ConvertTableInternal(TableConversionState *con) if (!con->suppressNoticeMessages) { - ereport(NOTICE, (errmsg("creating a new table for %s", - quote_qualified_identifier(con->schemaName, - con->relationName)))); + ereport(NOTICE, (errmsg("creating a new table for %s", qualifiedRelationName))); } TableDDLCommand *tableCreationCommand = NULL; @@ -997,8 +996,6 @@ ConvertTableInternal(TableConversionState *con) { continue; } - char *qualifiedRelationName = quote_qualified_identifier(con->schemaName, - con->relationName); TableConversionParameters cascadeParam = { .relationId = colocatedTableId, @@ -1748,9 +1745,7 @@ CreateMaterializedViewDDLCommand(Oid matViewOid) { StringInfo query = makeStringInfo(); - char *viewName = get_rel_name(matViewOid); - char *schemaName = get_namespace_name(get_rel_namespace(matViewOid)); - char *qualifiedViewName = quote_qualified_identifier(schemaName, viewName); + char *qualifiedViewName = generate_qualified_relation_name(matViewOid); /* here we need to get the access method of the view to recreate it */ char *accessMethodName = GetAccessMethodForMatViewIfExists(matViewOid); @@ -1799,9 +1794,8 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands, bool suppressNoticeMessages) { char *sourceName = get_rel_name(sourceId); - char *targetName = get_rel_name(targetId); - Oid schemaId = get_rel_namespace(sourceId); - char *schemaName = get_namespace_name(schemaId); + char *qualifiedSourceName = generate_qualified_relation_name(sourceId); + char *qualifiedTargetName = generate_qualified_relation_name(targetId); StringInfo query = makeStringInfo(); @@ -1809,8 +1803,7 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands, { if (!suppressNoticeMessages) { - ereport(NOTICE, (errmsg("moving the data of %s", - quote_qualified_identifier(schemaName, sourceName)))); + ereport(NOTICE, (errmsg("moving the data of %s", qualifiedSourceName))); } if (!HasAnyGeneratedStoredColumns(sourceId)) @@ -1820,8 +1813,7 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands, * "INSERT INTO .. SELECT *"". 
*/ appendStringInfo(query, "INSERT INTO %s SELECT * FROM %s", - quote_qualified_identifier(schemaName, targetName), - quote_qualified_identifier(schemaName, sourceName)); + qualifiedTargetName, qualifiedSourceName); } else { @@ -1836,9 +1828,8 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands, char *insertColumnString = StringJoin(nonStoredColumnNameList, ','); appendStringInfo(query, "INSERT INTO %s (%s) OVERRIDING SYSTEM VALUE SELECT %s FROM %s", - quote_qualified_identifier(schemaName, targetName), - insertColumnString, insertColumnString, - quote_qualified_identifier(schemaName, sourceName)); + qualifiedTargetName, insertColumnString, + insertColumnString, qualifiedSourceName); } ExecuteQueryViaSPI(query->data, SPI_OK_INSERT); @@ -1862,14 +1853,11 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands, */ if (ShouldSyncTableMetadata(targetId)) { - Oid sequenceSchemaOid = get_rel_namespace(sequenceOid); - char *sequenceSchemaName = get_namespace_name(sequenceSchemaOid); - char *sequenceName = get_rel_name(sequenceOid); + char *qualifiedSequenceName = generate_qualified_relation_name(sequenceOid); char *workerChangeSequenceDependencyCommand = - CreateWorkerChangeSequenceDependencyCommand(sequenceSchemaName, - sequenceName, - schemaName, sourceName, - schemaName, targetName); + CreateWorkerChangeSequenceDependencyCommand(qualifiedSequenceName, + qualifiedSourceName, + qualifiedTargetName); SendCommandToWorkersWithMetadata(workerChangeSequenceDependencyCommand); } else if (ShouldSyncTableMetadata(sourceId)) @@ -1892,25 +1880,23 @@ ReplaceTable(Oid sourceId, Oid targetId, List *justBeforeDropCommands, if (!suppressNoticeMessages) { - ereport(NOTICE, (errmsg("dropping the old %s", - quote_qualified_identifier(schemaName, sourceName)))); + ereport(NOTICE, (errmsg("dropping the old %s", qualifiedSourceName))); } resetStringInfo(query); appendStringInfo(query, "DROP %sTABLE %s CASCADE", IsForeignTable(sourceId) ? "FOREIGN " : "", - quote_qualified_identifier(schemaName, sourceName)); + qualifiedSourceName); ExecuteQueryViaSPI(query->data, SPI_OK_UTILITY); if (!suppressNoticeMessages) { - ereport(NOTICE, (errmsg("renaming the new table to %s", - quote_qualified_identifier(schemaName, sourceName)))); + ereport(NOTICE, (errmsg("renaming the new table to %s", qualifiedSourceName))); } resetStringInfo(query); appendStringInfo(query, "ALTER TABLE %s RENAME TO %s", - quote_qualified_identifier(schemaName, targetName), + qualifiedTargetName, quote_identifier(sourceName)); ExecuteQueryViaSPI(query->data, SPI_OK_UTILITY); } @@ -2170,18 +2156,13 @@ CheckAlterDistributedTableConversionParameters(TableConversionState *con) * worker_change_sequence_dependency query with the parameters. 
*/ static char * -CreateWorkerChangeSequenceDependencyCommand(char *sequenceSchemaName, char *sequenceName, - char *sourceSchemaName, char *sourceName, - char *targetSchemaName, char *targetName) +CreateWorkerChangeSequenceDependencyCommand(char *qualifiedSequeceName, + char *qualifiedSourceName, + char *qualifiedTargetName) { - char *qualifiedSchemaName = quote_qualified_identifier(sequenceSchemaName, - sequenceName); - char *qualifiedSourceName = quote_qualified_identifier(sourceSchemaName, sourceName); - char *qualifiedTargetName = quote_qualified_identifier(targetSchemaName, targetName); - StringInfo query = makeStringInfo(); appendStringInfo(query, "SELECT worker_change_sequence_dependency(%s, %s, %s)", - quote_literal_cstr(qualifiedSchemaName), + quote_literal_cstr(qualifiedSequeceName), quote_literal_cstr(qualifiedSourceName), quote_literal_cstr(qualifiedTargetName)); diff --git a/src/backend/distributed/commands/begin.c b/src/backend/distributed/commands/begin.c index 3ff28ac20da..b19b044849d 100644 --- a/src/backend/distributed/commands/begin.c +++ b/src/backend/distributed/commands/begin.c @@ -9,12 +9,14 @@ */ #include "postgres.h" + #include "c.h" +#include "nodes/parsenodes.h" + #include "distributed/commands.h" #include "distributed/listutils.h" #include "distributed/transaction_management.h" -#include "nodes/parsenodes.h" /* diff --git a/src/backend/distributed/commands/call.c b/src/backend/distributed/commands/call.c index b2f0bfca13f..9e54513c694 100644 --- a/src/backend/distributed/commands/call.c +++ b/src/backend/distributed/commands/call.c @@ -11,12 +11,23 @@ */ #include "postgres.h" -#include "funcapi.h" -#include "distributed/pg_version_constants.h" +#include "funcapi.h" +#include "miscadmin.h" #include "catalog/pg_proc.h" #include "commands/defrem.h" +#include "nodes/nodeFuncs.h" +#include "nodes/parsenodes.h" +#include "nodes/primnodes.h" +#include "optimizer/clauses.h" +#include "tcop/dest.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" + +#include "pg_version_constants.h" + +#include "distributed/adaptive_executor.h" #include "distributed/backend_data.h" #include "distributed/citus_ruleutils.h" #include "distributed/colocation_utils.h" @@ -26,27 +37,17 @@ #include "distributed/connection_management.h" #include "distributed/deparse_shard_query.h" #include "distributed/function_call_delegation.h" -#include "distributed/metadata_utility.h" #include "distributed/metadata_cache.h" +#include "distributed/metadata_utility.h" #include "distributed/multi_executor.h" #include "distributed/multi_physical_planner.h" -#include "distributed/adaptive_executor.h" #include "distributed/reference_table_utils.h" #include "distributed/remote_commands.h" -#include "distributed/reference_table_utils.h" #include "distributed/shard_pruning.h" #include "distributed/tuple_destination.h" #include "distributed/version_compat.h" -#include "distributed/worker_manager.h" #include "distributed/worker_log_messages.h" -#include "optimizer/clauses.h" -#include "nodes/nodeFuncs.h" -#include "nodes/parsenodes.h" -#include "nodes/primnodes.h" -#include "miscadmin.h" -#include "tcop/dest.h" -#include "utils/lsyscache.h" -#include "utils/syscache.h" +#include "distributed/worker_manager.h" /* global variable tracking whether we are in a delegated procedure call */ diff --git a/src/backend/distributed/commands/cascade_table_operation_for_connected_relations.c b/src/backend/distributed/commands/cascade_table_operation_for_connected_relations.c index 1102a3a51f5..c88367462bd 100644 --- 
a/src/backend/distributed/commands/cascade_table_operation_for_connected_relations.c +++ b/src/backend/distributed/commands/cascade_table_operation_for_connected_relations.c @@ -12,12 +12,19 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "miscadmin.h" #include "access/xact.h" #include "catalog/pg_constraint.h" -#include "distributed/commands/utility_hook.h" +#include "executor/spi.h" +#include "utils/builtins.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" + +#include "pg_version_constants.h" + #include "distributed/commands.h" +#include "distributed/commands/utility_hook.h" #include "distributed/foreign_key_relationship.h" #include "distributed/listutils.h" #include "distributed/local_executor.h" @@ -26,11 +33,6 @@ #include "distributed/reference_table_utils.h" #include "distributed/relation_access_tracking.h" #include "distributed/worker_protocol.h" -#include "executor/spi.h" -#include "miscadmin.h" -#include "utils/builtins.h" -#include "utils/lsyscache.h" -#include "utils/syscache.h" static void EnsureSequentialModeForCitusTableCascadeFunction(List *relationIdList); diff --git a/src/backend/distributed/commands/citus_add_local_table_to_metadata.c b/src/backend/distributed/commands/citus_add_local_table_to_metadata.c index c713ce099c7..93f1e7d28ec 100644 --- a/src/backend/distributed/commands/citus_add_local_table_to_metadata.c +++ b/src/backend/distributed/commands/citus_add_local_table_to_metadata.c @@ -18,6 +18,7 @@ */ #include "postgres.h" + #include "miscadmin.h" #include "access/genam.h" @@ -25,29 +26,30 @@ #include "catalog/pg_constraint.h" #include "catalog/pg_statistic_ext.h" #include "catalog/pg_trigger.h" -#include "distributed/coordinator_protocol.h" +#include "foreign/foreign.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/lsyscache.h" +#include "utils/ruleutils.h" +#include "utils/syscache.h" + #include "distributed/citus_ruleutils.h" #include "distributed/colocation_utils.h" #include "distributed/commands.h" #include "distributed/commands/sequence.h" #include "distributed/commands/utility_hook.h" -#include "distributed/metadata/distobject.h" -#include "distributed/metadata/dependency.h" +#include "distributed/coordinator_protocol.h" #include "distributed/foreign_key_relationship.h" #include "distributed/listutils.h" #include "distributed/local_executor.h" +#include "distributed/metadata/dependency.h" +#include "distributed/metadata/distobject.h" #include "distributed/metadata_sync.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/namespace_utils.h" #include "distributed/reference_table_utils.h" #include "distributed/worker_protocol.h" #include "distributed/worker_shard_visibility.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/lsyscache.h" -#include "utils/ruleutils.h" -#include "utils/syscache.h" -#include "foreign/foreign.h" /* @@ -1158,9 +1160,7 @@ DropIdentitiesOnTable(Oid relationId) if (attributeForm->attidentity) { - char *tableName = get_rel_name(relationId); - char *schemaName = get_namespace_name(get_rel_namespace(relationId)); - char *qualifiedTableName = quote_qualified_identifier(schemaName, tableName); + char *qualifiedTableName = generate_qualified_relation_name(relationId); StringInfo dropCommand = makeStringInfo(); @@ -1220,9 +1220,7 @@ DropViewsOnTable(Oid relationId) Oid viewId = InvalidOid; foreach_oid(viewId, reverseOrderedViews) { - char *viewName = get_rel_name(viewId); - char *schemaName = 
get_namespace_name(get_rel_namespace(viewId)); - char *qualifiedViewName = quote_qualified_identifier(schemaName, viewName); + char *qualifiedViewName = generate_qualified_relation_name(viewId); StringInfo dropCommand = makeStringInfo(); appendStringInfo(dropCommand, "DROP %sVIEW IF EXISTS %s", diff --git a/src/backend/distributed/commands/citus_global_signal.c b/src/backend/distributed/commands/citus_global_signal.c index 8183d66735e..23df2d0c19d 100644 --- a/src/backend/distributed/commands/citus_global_signal.c +++ b/src/backend/distributed/commands/citus_global_signal.c @@ -11,14 +11,16 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "signal.h" + +#include "lib/stringinfo.h" + +#include "pg_version_constants.h" #include "distributed/backend_data.h" #include "distributed/metadata_cache.h" #include "distributed/remote_commands.h" #include "distributed/worker_manager.h" -#include "lib/stringinfo.h" -#include "signal.h" static bool CitusSignalBackend(uint64 globalPID, uint64 timeout, int sig); diff --git a/src/backend/distributed/commands/cluster.c b/src/backend/distributed/commands/cluster.c index 92fcb3ec642..7a1dac30256 100644 --- a/src/backend/distributed/commands/cluster.c +++ b/src/backend/distributed/commands/cluster.c @@ -10,11 +10,11 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" - +#include "catalog/namespace.h" #include "commands/defrem.h" -#include "catalog/namespace.h" +#include "pg_version_constants.h" + #include "distributed/commands.h" #include "distributed/commands/utility_hook.h" #include "distributed/listutils.h" diff --git a/src/backend/distributed/commands/collation.c b/src/backend/distributed/commands/collation.c index 023197e158d..5ce3d1436cc 100644 --- a/src/backend/distributed/commands/collation.c +++ b/src/backend/distributed/commands/collation.c @@ -10,30 +10,32 @@ */ #include "postgres.h" -#include "pg_version_compat.h" +#include "miscadmin.h" #include "access/htup_details.h" #include "access/xact.h" #include "catalog/pg_collation.h" +#include "parser/parse_type.h" +#include "utils/builtins.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" + +#include "pg_version_compat.h" +#include "pg_version_constants.h" + #include "distributed/citus_safe_lib.h" -#include "distributed/commands/utility_hook.h" #include "distributed/commands.h" +#include "distributed/commands/utility_hook.h" #include "distributed/deparser.h" #include "distributed/listutils.h" -#include "distributed/metadata_utility.h" #include "distributed/metadata/dependency.h" #include "distributed/metadata/distobject.h" #include "distributed/metadata_sync.h" +#include "distributed/metadata_utility.h" #include "distributed/multi_executor.h" #include "distributed/relation_access_tracking.h" #include "distributed/worker_create_or_replace.h" -#include "distributed/pg_version_constants.h" #include "distributed/worker_manager.h" -#include "parser/parse_type.h" -#include "utils/builtins.h" -#include "utils/lsyscache.h" -#include "utils/syscache.h" -#include "miscadmin.h" static char * CreateCollationDDLInternal(Oid collationId, Oid *collowner, diff --git a/src/backend/distributed/commands/comment.c b/src/backend/distributed/commands/comment.c new file mode 100644 index 00000000000..e18a5c5ccc3 --- /dev/null +++ b/src/backend/distributed/commands/comment.c @@ -0,0 +1,131 @@ +/*------------------------------------------------------------------------- + * + * comment.c + * Commands to interact with the comments for all database + * object types. 
+ * + * Copyright (c) Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "access/genam.h" +#include "access/htup_details.h" +#include "access/table.h" +#include "catalog/pg_shdescription.h" +#include "nodes/parsenodes.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/rel.h" + +#include "distributed/comment.h" + +static char * GetCommentForObject(Oid classOid, Oid objectOid); + + +List * +GetCommentPropagationCommands(Oid classOid, Oid objOoid, char *objectName, ObjectType + objectType) +{ + List *commands = NIL; + + StringInfo commentStmt = makeStringInfo(); + + /* Get the comment for the database */ + char *comment = GetCommentForObject(classOid, objOoid); + char const *commentObjectType = ObjectTypeNames[objectType]; + + /* Create the SQL command to propagate the comment to other nodes */ + if (comment != NULL) + { + appendStringInfo(commentStmt, "COMMENT ON %s %s IS %s;", commentObjectType, + quote_identifier(objectName), + quote_literal_cstr(comment)); + } + + + /* Add the command to the list */ + if (commentStmt->len > 0) + { + commands = list_make1(commentStmt->data); + } + + return commands; +} + + +static char * +GetCommentForObject(Oid classOid, Oid objectOid) +{ + HeapTuple tuple; + char *comment = NULL; + + /* Open pg_shdescription catalog */ + Relation shdescRelation = table_open(SharedDescriptionRelationId, AccessShareLock); + + /* Scan the table */ + ScanKeyData scanKey[2]; + + ScanKeyInit(&scanKey[0], + Anum_pg_shdescription_objoid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(objectOid)); + ScanKeyInit(&scanKey[1], + Anum_pg_shdescription_classoid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(classOid)); + bool indexOk = true; + int scanKeyCount = 2; + SysScanDesc scan = systable_beginscan(shdescRelation, SharedDescriptionObjIndexId, + indexOk, NULL, scanKeyCount, + scanKey); + if ((tuple = systable_getnext(scan)) != NULL) + { + bool isNull = false; + + TupleDesc tupdesc = RelationGetDescr(shdescRelation); + + Datum descDatum = heap_getattr(tuple, Anum_pg_shdescription_description, tupdesc, + &isNull); + + + /* Add the command to the list */ + if (!isNull) + { + comment = TextDatumGetCString(descDatum); + } + else + { + comment = NULL; + } + } + + /* End the scan and close the catalog */ + systable_endscan(scan); + table_close(shdescRelation, AccessShareLock); + + return comment; +} + + +/* + * CommentObjectAddress resolves the ObjectAddress for the object + * on which the comment is placed. Optionally errors if the object does not + * exist based on the missing_ok flag passed in by the caller. 
+ */ +List * +CommentObjectAddress(Node *node, bool missing_ok, bool isPostprocess) +{ + CommentStmt *stmt = castNode(CommentStmt, node); + Relation relation; + + ObjectAddress objectAddress = get_object_address(stmt->objtype, stmt->object, + &relation, AccessExclusiveLock, + missing_ok); + + ObjectAddress *objectAddressCopy = palloc0(sizeof(ObjectAddress)); + *objectAddressCopy = objectAddress; + return list_make1(objectAddressCopy); +} diff --git a/src/backend/distributed/commands/common.c b/src/backend/distributed/commands/common.c index 797981d47d3..347a99e8af4 100644 --- a/src/backend/distributed/commands/common.c +++ b/src/backend/distributed/commands/common.c @@ -23,9 +23,9 @@ #include "distributed/commands/utility_hook.h" #include "distributed/deparser.h" #include "distributed/listutils.h" -#include "distributed/metadata_sync.h" #include "distributed/metadata/dependency.h" #include "distributed/metadata/distobject.h" +#include "distributed/metadata_sync.h" #include "distributed/multi_executor.h" #include "distributed/worker_transaction.h" diff --git a/src/backend/distributed/commands/create_distributed_table.c b/src/backend/distributed/commands/create_distributed_table.c index 1e89c6b933a..8c59aa19908 100644 --- a/src/backend/distributed/commands/create_distributed_table.c +++ b/src/backend/distributed/commands/create_distributed_table.c @@ -9,10 +9,8 @@ */ #include "postgres.h" -#include "miscadmin.h" -#include "distributed/pg_version_constants.h" -#include "distributed/commands/utility_hook.h" +#include "miscadmin.h" #include "access/genam.h" #include "access/hash.h" @@ -24,6 +22,7 @@ #include "catalog/dependency.h" #include "catalog/index.h" #include "catalog/pg_am.h" +#include "catalog/pg_attrdef.h" #include "catalog/pg_attribute.h" #include "catalog/pg_enum.h" #include "catalog/pg_extension.h" @@ -37,21 +36,47 @@ #include "commands/sequence.h" #include "commands/tablecmds.h" #include "commands/trigger.h" -#include "distributed/commands/multi_copy.h" +#include "executor/executor.h" +#include "executor/spi.h" +#include "nodes/execnodes.h" +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" +#include "nodes/pg_list.h" +#include "parser/parse_expr.h" +#include "parser/parse_node.h" +#include "parser/parse_relation.h" +#include "parser/parser.h" +#include "postmaster/postmaster.h" +#include "storage/lmgr.h" +#include "tcop/pquery.h" +#include "tcop/tcopprot.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/inval.h" +#include "utils/lsyscache.h" +#include "utils/memutils.h" +#include "utils/rel.h" +#include "utils/snapmgr.h" +#include "utils/syscache.h" + +#include "pg_version_constants.h" + #include "distributed/citus_ruleutils.h" #include "distributed/colocation_utils.h" #include "distributed/commands.h" +#include "distributed/commands/multi_copy.h" +#include "distributed/commands/utility_hook.h" +#include "distributed/coordinator_protocol.h" #include "distributed/deparser.h" #include "distributed/distributed_execution_locks.h" #include "distributed/distribution_column.h" #include "distributed/listutils.h" #include "distributed/local_executor.h" -#include "distributed/metadata_utility.h" -#include "distributed/coordinator_protocol.h" #include "distributed/metadata/dependency.h" #include "distributed/metadata/distobject.h" #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" +#include "distributed/metadata_utility.h" #include "distributed/multi_executor.h" #include "distributed/multi_logical_planner.h" #include 
"distributed/multi_partitioning_utils.h" @@ -67,33 +92,11 @@ #include "distributed/shard_split.h" #include "distributed/shard_transfer.h" #include "distributed/shared_library_init.h" -#include "distributed/shard_rebalancer.h" +#include "distributed/utils/distribution_column_map.h" +#include "distributed/version_compat.h" #include "distributed/worker_protocol.h" #include "distributed/worker_shard_visibility.h" #include "distributed/worker_transaction.h" -#include "distributed/utils/distribution_column_map.h" -#include "distributed/version_compat.h" -#include "executor/executor.h" -#include "executor/spi.h" -#include "nodes/execnodes.h" -#include "nodes/makefuncs.h" -#include "nodes/nodeFuncs.h" -#include "nodes/pg_list.h" -#include "parser/parse_expr.h" -#include "parser/parse_node.h" -#include "parser/parse_relation.h" -#include "parser/parser.h" -#include "postmaster/postmaster.h" -#include "storage/lmgr.h" -#include "tcop/pquery.h" -#include "tcop/tcopprot.h" -#include "utils/builtins.h" -#include "utils/lsyscache.h" -#include "utils/memutils.h" -#include "utils/rel.h" -#include "utils/snapmgr.h" -#include "utils/syscache.h" -#include "utils/inval.h" /* common params that apply to all Citus table types */ @@ -1322,10 +1325,7 @@ CreateCitusTable(Oid relationId, CitusTableType tableType, { List *partitionList = PartitionList(relationId); Oid partitionRelationId = InvalidOid; - Oid namespaceId = get_rel_namespace(relationId); - char *schemaName = get_namespace_name(namespaceId); - char *relationName = get_rel_name(relationId); - char *parentRelationName = quote_qualified_identifier(schemaName, relationName); + char *parentRelationName = generate_qualified_relation_name(relationId); /* * when there are many partitions, each call to CreateDistributedTable @@ -1698,52 +1698,39 @@ PropagatePrerequisiteObjectsForDistributedTable(Oid relationId) void EnsureSequenceTypeSupported(Oid seqOid, Oid attributeTypeId, Oid ownerRelationId) { - List *citusTableIdList = CitusTableTypeIdList(ANY_CITUS_TABLE_TYPE); - citusTableIdList = list_append_unique_oid(citusTableIdList, ownerRelationId); + Oid attrDefOid; + List *attrDefOids = GetAttrDefsFromSequence(seqOid); - Oid citusTableId = InvalidOid; - foreach_oid(citusTableId, citusTableIdList) + foreach_oid(attrDefOid, attrDefOids) { - List *seqInfoList = NIL; - GetDependentSequencesWithRelation(citusTableId, &seqInfoList, 0, DEPENDENCY_AUTO); + ObjectAddress columnAddress = GetAttrDefaultColumnAddress(attrDefOid); - SequenceInfo *seqInfo = NULL; - foreach_ptr(seqInfo, seqInfoList) + /* + * If another distributed table is using the same sequence + * in one of its column defaults, make sure the types of the + * columns match. + * + * We skip non-distributed tables, but we need to check the current + * table as it might reference the same sequence multiple times. + */ + if (columnAddress.objectId != ownerRelationId && + !IsCitusTable(columnAddress.objectId)) { - AttrNumber currentAttnum = seqInfo->attributeNumber; - Oid currentSeqOid = seqInfo->sequenceOid; - - if (!seqInfo->isNextValDefault) - { - /* - * If a sequence is not on the nextval, we don't need any check. - * This is a dependent sequence via ALTER SEQUENCE .. 
OWNED BY col - */ - continue; - } - - /* - * If another distributed table is using the same sequence - * in one of its column defaults, make sure the types of the - * columns match - */ - if (currentSeqOid == seqOid) - { - Oid currentAttributeTypId = GetAttributeTypeOid(citusTableId, - currentAttnum); - if (attributeTypeId != currentAttributeTypId) - { - char *sequenceName = generate_qualified_relation_name( - seqOid); - char *citusTableName = - generate_qualified_relation_name(citusTableId); - ereport(ERROR, (errmsg( - "The sequence %s is already used for a different" - " type in column %d of the table %s", - sequenceName, currentAttnum, - citusTableName))); - } - } + continue; + } + Oid currentAttributeTypId = GetAttributeTypeOid(columnAddress.objectId, + columnAddress.objectSubId); + if (attributeTypeId != currentAttributeTypId) + { + char *sequenceName = generate_qualified_relation_name( + seqOid); + char *citusTableName = + generate_qualified_relation_name(columnAddress.objectId); + ereport(ERROR, (errmsg( + "The sequence %s is already used for a different" + " type in column %d of the table %s", + sequenceName, columnAddress.objectSubId, + citusTableName))); } } } diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index 944ff627d4e..5479a59edcf 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -11,33 +11,104 @@ #include "postgres.h" +#include "miscadmin.h" + +#include "access/genam.h" +#include "access/heapam.h" #include "access/htup_details.h" +#include "access/table.h" #include "access/xact.h" #include "catalog/objectaddress.h" +#include "catalog/pg_collation.h" #include "catalog/pg_database.h" +#include "catalog/pg_database_d.h" +#include "catalog/pg_tablespace.h" #include "commands/dbcommands.h" -#include "miscadmin.h" +#include "commands/defrem.h" #include "nodes/parsenodes.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/lsyscache.h" +#include "utils/rel.h" +#include "utils/relcache.h" #include "utils/syscache.h" +#include "distributed/adaptive_executor.h" #include "distributed/commands.h" #include "distributed/commands/utility_hook.h" +#include "distributed/comment.h" +#include "distributed/deparse_shard_query.h" #include "distributed/deparser.h" +#include "distributed/listutils.h" +#include "distributed/local_executor.h" +#include "distributed/metadata/distobject.h" #include "distributed/metadata_sync.h" #include "distributed/metadata_utility.h" #include "distributed/multi_executor.h" #include "distributed/relation_access_tracking.h" +#include "distributed/serialize_distributed_ddls.h" +#include "distributed/shard_cleaner.h" +#include "distributed/worker_protocol.h" #include "distributed/worker_transaction.h" + +/* + * Used to save original name of the database before it is replaced with a + * temporary name for failure handling purposes in PreprocessCreateDatabaseStmt(). + */ +static char *CreateDatabaseCommandOriginalDbName = NULL; + + +/* + * The format string used when creating a temporary databases for failure + * handling purposes. + * + * The fields are as follows to ensure using a unique name for each temporary + * database: + * - operationId: The operation id returned by RegisterOperationNeedingCleanup(). + * - groupId: The group id of the worker node where CREATE DATABASE command + * is issued from. 
+ */ +#define TEMP_DATABASE_NAME_FMT "citus_temp_database_%lu_%d" + + +/* + * DatabaseCollationInfo is used to store collation related information of a database. + */ +typedef struct DatabaseCollationInfo +{ + char *datcollate; + char *datctype; + +#if PG_VERSION_NUM >= PG_VERSION_15 + char *daticulocale; + char *datcollversion; +#endif + +#if PG_VERSION_NUM >= PG_VERSION_16 + char *daticurules; +#endif +} DatabaseCollationInfo; + +static char * GenerateCreateDatabaseStatementFromPgDatabase(Form_pg_database + databaseForm); +static DatabaseCollationInfo GetDatabaseCollation(Oid dbOid); static AlterOwnerStmt * RecreateAlterDatabaseOwnerStmt(Oid databaseOid); -static Oid get_database_owner(Oid db_oid); -List * PreprocessGrantOnDatabaseStmt(Node *node, const char *queryString, - ProcessUtilityContext processUtilityContext); +#if PG_VERSION_NUM >= PG_VERSION_15 +static char * GetLocaleProviderString(char datlocprovider); +#endif +static char * GetTablespaceName(Oid tablespaceOid); +static ObjectAddress * GetDatabaseAddressFromDatabaseName(char *databaseName, + bool missingOk); + +static List * FilterDistributedDatabases(List *databases); +static Oid get_database_owner(Oid dbId); + /* controlled via GUC */ +bool EnableCreateDatabasePropagation = false; bool EnableAlterDatabaseOwner = true; - /* * AlterDatabaseOwnerObjectAddress returns the ObjectAddress of the database that is the * object of the AlterOwnerStmt. Errors if missing_ok is false. @@ -94,13 +165,13 @@ RecreateAlterDatabaseOwnerStmt(Oid databaseOid) * get_database_owner returns the Oid of the role owning the database */ static Oid -get_database_owner(Oid db_oid) +get_database_owner(Oid dbId) { - HeapTuple tuple = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(db_oid)); + HeapTuple tuple = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(dbId)); if (!HeapTupleIsValid(tuple)) { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_DATABASE), - errmsg("database with OID %u does not exist", db_oid))); + errmsg("database with OID %u does not exist", dbId))); } Oid dba = ((Form_pg_database) GETSTRUCT(tuple))->datdba; @@ -130,17 +201,23 @@ PreprocessGrantOnDatabaseStmt(Node *node, const char *queryString, GrantStmt *stmt = castNode(GrantStmt, node); Assert(stmt->objtype == OBJECT_DATABASE); - List *databaseList = stmt->objects; + List *distributedDatabases = FilterDistributedDatabases(stmt->objects); - if (list_length(databaseList) == 0) + if (list_length(distributedDatabases) == 0) { return NIL; } EnsureCoordinator(); + List *originalObjects = stmt->objects; + + stmt->objects = distributedDatabases; + char *sql = DeparseTreeNode((Node *) stmt); + stmt->objects = originalObjects; + List *commands = list_make3(DISABLE_DDL_PROPAGATION, (void *) sql, ENABLE_DDL_PROPAGATION); @@ -149,57 +226,128 @@ PreprocessGrantOnDatabaseStmt(Node *node, const char *queryString, } +/* + * FilterDistributedDatabases filters the database list and returns the distributed ones, + * as a list. + */ +static List * +FilterDistributedDatabases(List *databases) +{ + List *distributedDatabases = NIL; + String *databaseName = NULL; + foreach_ptr(databaseName, databases) + { + bool missingOk = true; + ObjectAddress *dbAddress = + GetDatabaseAddressFromDatabaseName(strVal(databaseName), missingOk); + if (IsAnyObjectDistributed(list_make1(dbAddress))) + { + distributedDatabases = lappend(distributedDatabases, databaseName); + } + } + + return distributedDatabases; +} + + +/* + * IsSetTablespaceStatement returns true if the statement is a SET TABLESPACE statement, + * false otherwise. 
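A minimal standalone sketch of how a temporary database name following TEMP_DATABASE_NAME_FMT could be composed; plain snprintf stands in for psprintf, and the operation id and group id are made-up sample values rather than the results of RegisterOperationNeedingCleanup() and GetLocalGroupId().

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* mirrors TEMP_DATABASE_NAME_FMT; PRIu64 replaces %lu for portability */
#define SKETCH_TEMP_DATABASE_NAME_FMT "citus_temp_database_%" PRIu64 "_%d"

int main(void)
{
	uint64_t operationId = 7;   /* sample cleanup operation id */
	int groupId = 2;            /* sample group id of the originating node */

	char tempName[64];
	snprintf(tempName, sizeof(tempName), SKETCH_TEMP_DATABASE_NAME_FMT,
			 operationId, groupId);

	/* prints: citus_temp_database_7_2 */
	printf("%s\n", tempName);
	return 0;
}

Since the operation id is unique per cleanup registration and the group id identifies the originating node, the composed name should not collide with the temporary database of another in-flight CREATE DATABASE.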
+ */ +static bool +IsSetTablespaceStatement(AlterDatabaseStmt *stmt) +{ + DefElem *def = NULL; + foreach_ptr(def, stmt->options) + { + if (strcmp(def->defname, "tablespace") == 0) + { + return true; + } + } + return false; +} + + /* * PreprocessAlterDatabaseStmt is executed before the statement is applied to the local * postgres instance. * * In this stage we can prepare the commands that need to be run on all workers to grant * on databases. + * + * We also serialize database commands globally by acquiring a Citus specific advisory + * lock based on OCLASS_DATABASE on the first primary worker node. */ List * PreprocessAlterDatabaseStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext) { - if (!ShouldPropagate()) + bool missingOk = false; + AlterDatabaseStmt *stmt = castNode(AlterDatabaseStmt, node); + ObjectAddress *dbAddress = GetDatabaseAddressFromDatabaseName(stmt->dbname, + missingOk); + + if (!ShouldPropagate() || !IsAnyObjectDistributed(list_make1(dbAddress))) { return NIL; } - AlterDatabaseStmt *stmt = castNode(AlterDatabaseStmt, node); - EnsureCoordinator(); + SerializeDistributedDDLsOnObjectClassObject(OCLASS_DATABASE, stmt->dbname); char *sql = DeparseTreeNode((Node *) stmt); List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, + sql, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); + if (IsSetTablespaceStatement(stmt)) + { + /* + * Set tablespace does not work inside a transaction.Therefore, we need to use + * NontransactionalNodeDDLTask to run the command on the workers outside + * the transaction block. + */ + bool warnForPartialFailure = true; + return NontransactionalNodeDDLTaskList(NON_COORDINATOR_NODES, commands, + warnForPartialFailure); + } + else + { + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); + } } #if PG_VERSION_NUM >= PG_VERSION_15 /* - * PreprocessAlterDatabaseSetStmt is executed before the statement is applied to the local - * postgres instance. + * PreprocessAlterDatabaseRefreshCollStmt is executed before the statement is applied to + * the local postgres instance. * * In this stage we can prepare the commands that need to be run on all workers to grant * on databases. + * + * We also serialize database commands globally by acquiring a Citus specific advisory + * lock based on OCLASS_DATABASE on the first primary worker node. */ List * PreprocessAlterDatabaseRefreshCollStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext) { - if (!ShouldPropagate()) + bool missingOk = true; + AlterDatabaseRefreshCollStmt *stmt = castNode(AlterDatabaseRefreshCollStmt, node); + ObjectAddress *dbAddress = GetDatabaseAddressFromDatabaseName(stmt->dbname, + missingOk); + + if (!ShouldPropagate() || !IsAnyObjectDistributed(list_make1(dbAddress))) { return NIL; } - AlterDatabaseRefreshCollStmt *stmt = castNode(AlterDatabaseRefreshCollStmt, node); - EnsureCoordinator(); + SerializeDistributedDDLsOnObjectClassObject(OCLASS_DATABASE, stmt->dbname); char *sql = DeparseTreeNode((Node *) stmt); @@ -214,25 +362,104 @@ PreprocessAlterDatabaseRefreshCollStmt(Node *node, const char *queryString, #endif +/* + * PreprocessAlterDatabaseRenameStmt is executed before the statement is applied to + * the local postgres instance. + * + * We also serialize database commands globally by acquiring a Citus specific advisory + * lock based on OCLASS_DATABASE on the first primary worker node. 
+ * + * We acquire this lock here instead of PostprocessAlterDatabaseRenameStmt because the + * command renames the database and SerializeDistributedDDLsOnObjectClass resolves the + * object on workers based on database name. For this reason, we need to acquire the lock + * before the command is applied to the local postgres instance. + */ +List * +PreprocessAlterDatabaseRenameStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + bool missingOk = true; + RenameStmt *stmt = castNode(RenameStmt, node); + ObjectAddress *dbAddress = GetDatabaseAddressFromDatabaseName(stmt->subname, + missingOk); + + if (!ShouldPropagate() || !IsAnyObjectDistributed(list_make1(dbAddress))) + { + return NIL; + } + + EnsureCoordinator(); + + /* + * Different than other ALTER DATABASE commands, we first acquire a lock + * by providing InvalidOid because we want ALTER TABLE .. RENAME TO .. + * commands to block not only with ALTER DATABASE operations but also + * with CREATE DATABASE operations because they might cause name conflicts + * and that could also cause deadlocks too. + */ + SerializeDistributedDDLsOnObjectClass(OCLASS_DATABASE); + SerializeDistributedDDLsOnObjectClassObject(OCLASS_DATABASE, stmt->subname); + + return NIL; +} + + +/* + * PostprocessAlterDatabaseRenameStmt is executed after the statement is applied to the local + * postgres instance. In this stage we prepare ALTER DATABASE RENAME statement to be run on + * all workers. + */ +List * +PostprocessAlterDatabaseRenameStmt(Node *node, const char *queryString) +{ + bool missingOk = false; + RenameStmt *stmt = castNode(RenameStmt, node); + ObjectAddress *dbAddress = GetDatabaseAddressFromDatabaseName(stmt->newname, + missingOk); + + if (!ShouldPropagate() || !IsAnyObjectDistributed(list_make1(dbAddress))) + { + return NIL; + } + + EnsureCoordinator(); + + char *sql = DeparseTreeNode((Node *) stmt); + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) sql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + /* * PreprocessAlterDatabaseSetStmt is executed before the statement is applied to the local * postgres instance. * * In this stage we can prepare the commands that need to be run on all workers to grant * on databases. + * + * We also serialize database commands globally by acquiring a Citus specific advisory + * lock based on OCLASS_DATABASE on the first primary worker node. */ List * PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext) { - if (!ShouldPropagate()) + AlterDatabaseSetStmt *stmt = castNode(AlterDatabaseSetStmt, node); + + bool missingOk = true; + ObjectAddress *dbAddress = GetDatabaseAddressFromDatabaseName(stmt->dbname, + missingOk); + if (!ShouldPropagate() || !IsAnyObjectDistributed(list_make1(dbAddress))) { return NIL; } - AlterDatabaseSetStmt *stmt = castNode(AlterDatabaseSetStmt, node); - EnsureCoordinator(); + SerializeDistributedDDLsOnObjectClassObject(OCLASS_DATABASE, stmt->dbname); char *sql = DeparseTreeNode((Node *) stmt); @@ -242,3 +469,552 @@ PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString, return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); } + + +/* + * PreprocessCreateDatabaseStmt is executed before the statement is applied to the local + * Postgres instance. 
+ * + * In this stage, we perform validations that we want to ensure before delegating to + * previous utility hooks because it might not be convenient to throw an error in an + * implicit transaction that creates a database. Also in this stage, we save the original + * database name and replace dbname field with a temporary name for failure handling + * purposes. We let Postgres create the database with the temporary name, insert a cleanup + * record for the temporary database name on all nodes and let PostprocessCreateDatabaseStmt() + * to return the distributed DDL job that both creates the database with the temporary name + * and then renames it back to its original name. + * + * We also serialize database commands globally by acquiring a Citus specific advisory + * lock based on OCLASS_DATABASE on the first primary worker node. + */ +List * +PreprocessCreateDatabaseStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + if (!EnableCreateDatabasePropagation || !ShouldPropagate()) + { + return NIL; + } + + EnsureCoordinatorIsInMetadata(); + + CreatedbStmt *stmt = castNode(CreatedbStmt, node); + EnsureSupportedCreateDatabaseCommand(stmt); + + SerializeDistributedDDLsOnObjectClass(OCLASS_DATABASE); + + OperationId operationId = RegisterOperationNeedingCleanup(); + + char *tempDatabaseName = psprintf(TEMP_DATABASE_NAME_FMT, + operationId, GetLocalGroupId()); + + List *remoteNodes = TargetWorkerSetNodeList(ALL_SHARD_NODES, RowShareLock); + WorkerNode *remoteNode = NULL; + foreach_ptr(remoteNode, remoteNodes) + { + InsertCleanupRecordOutsideTransaction( + CLEANUP_OBJECT_DATABASE, + pstrdup(quote_identifier(tempDatabaseName)), + remoteNode->groupId, + CLEANUP_ON_FAILURE + ); + } + + CreateDatabaseCommandOriginalDbName = stmt->dbname; + stmt->dbname = tempDatabaseName; + + /* + * Delete cleanup records in the same transaction so that if the current + * transactions fails for some reason, then the cleanup records won't be + * deleted. In the happy path, we will delete the cleanup records without + * deferring them to the background worker. + */ + FinalizeOperationNeedingCleanupOnSuccess("create database"); + + return NIL; +} + + +/* + * PostprocessCreateDatabaseStmt is executed after the statement is applied to the local + * postgres instance. + * + * In this stage, we first rename the temporary database back to its original name for + * local node and then return a list of distributed DDL jobs to create the database with + * the temporary name and then to rename it back to its original name. That way, if CREATE + * DATABASE fails on any of the nodes, the temporary database will be cleaned up by the + * cleanup records that we inserted in PreprocessCreateDatabaseStmt() and in case of a + * failure, we won't leak any databases called as the name that user intended to use for + * the database. + */ +List * +PostprocessCreateDatabaseStmt(Node *node, const char *queryString) +{ + if (!EnableCreateDatabasePropagation || !ShouldPropagate()) + { + return NIL; + } + + EnsurePropagationToCoordinator(); + + /* + * Given that CREATE DATABASE doesn't support "IF NOT EXISTS" and we're + * in the post-process, database must exist, hence missingOk = false. 
+ */ + bool missingOk = false; + bool isPostProcess = true; + List *addresses = GetObjectAddressListFromParseTree(node, missingOk, + isPostProcess); + EnsureAllObjectDependenciesExistOnAllNodes(addresses); + + char *createDatabaseCommand = DeparseTreeNode(node); + + List *createDatabaseCommands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) createDatabaseCommand, + ENABLE_DDL_PROPAGATION); + + /* + * Since the CREATE DATABASE statements cannot be executed in a transaction + * block, we need to use NontransactionalNodeDDLTaskList() to send the CREATE + * DATABASE statement to the workers. + */ + bool warnForPartialFailure = false; + List *createDatabaseDDLJobList = + NontransactionalNodeDDLTaskList(REMOTE_NODES, createDatabaseCommands, + warnForPartialFailure); + + CreatedbStmt *stmt = castNode(CreatedbStmt, node); + + char *renameDatabaseCommand = + psprintf("ALTER DATABASE %s RENAME TO %s", + quote_identifier(stmt->dbname), + quote_identifier(CreateDatabaseCommandOriginalDbName)); + + List *renameDatabaseCommands = list_make3(DISABLE_DDL_PROPAGATION, + renameDatabaseCommand, + ENABLE_DDL_PROPAGATION); + + /* + * We use NodeDDLTaskList() to send the RENAME DATABASE statement to the + * workers because we want to execute it in a coordinated transaction. + */ + List *renameDatabaseDDLJobList = + NodeDDLTaskList(REMOTE_NODES, renameDatabaseCommands); + + /* + * Temporarily disable citus.enable_ddl_propagation before issuing + * rename command locally because we don't want to execute it on remote + * nodes yet. We will execute it on remote nodes by returning it as a + * distributed DDL job. + * + * The reason why we don't want to execute it on remote nodes yet is that + * the database is not created on remote nodes yet. + */ + int saveNestLevel = NewGUCNestLevel(); + set_config_option("citus.enable_ddl_propagation", "off", + (superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION, + GUC_ACTION_LOCAL, true, 0, false); + + ExecuteUtilityCommand(renameDatabaseCommand); + + AtEOXact_GUC(true, saveNestLevel); + + /* + * Restore the original database name because MarkObjectDistributed() + * resolves oid of the object based on the database name and is called + * after executing the distributed DDL job that renames temporary database. + */ + stmt->dbname = CreateDatabaseCommandOriginalDbName; + + return list_concat(createDatabaseDDLJobList, renameDatabaseDDLJobList); +} + + +/* + * PreprocessDropDatabaseStmt is executed before the statement is applied to the local + * postgres instance. In this stage we can prepare the commands that need to be run on + * all workers to drop the database. + * + * We also serialize database commands globally by acquiring a Citus specific advisory + * lock based on OCLASS_DATABASE on the first primary worker node. 
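A standalone sketch of the rename command assembled in PostprocessCreateDatabaseStmt(); quote_identifier() is omitted, so both names are assumed to need no quoting, and the sample values are illustrative only.

#include <stdio.h>

int main(void)
{
	const char *tempDbName = "citus_temp_database_7_2";  /* stmt->dbname at this point */
	const char *originalDbName = "mydb";                 /* saved original name */

	char command[256];
	snprintf(command, sizeof(command), "ALTER DATABASE %s RENAME TO %s",
			 tempDbName, originalDbName);

	/* prints: ALTER DATABASE citus_temp_database_7_2 RENAME TO mydb */
	printf("%s\n", command);
	return 0;
}

The same rename string is executed locally with DDL propagation switched off and also shipped to the remote nodes as a coordinated DDL job.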
+ */ +List * +PreprocessDropDatabaseStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext) +{ + if (!EnableCreateDatabasePropagation || !ShouldPropagate()) + { + return NIL; + } + + EnsurePropagationToCoordinator(); + + DropdbStmt *stmt = (DropdbStmt *) node; + + bool isPostProcess = false; + List *addresses = GetObjectAddressListFromParseTree(node, stmt->missing_ok, + isPostProcess); + + if (list_length(addresses) != 1) + { + ereport(ERROR, (errmsg("unexpected number of objects found when " + "executing DROP DATABASE command"))); + } + + ObjectAddress *address = (ObjectAddress *) linitial(addresses); + if (address->objectId == InvalidOid || !IsAnyObjectDistributed(list_make1(address))) + { + return NIL; + } + + SerializeDistributedDDLsOnObjectClassObject(OCLASS_DATABASE, stmt->dbname); + + char *dropDatabaseCommand = DeparseTreeNode(node); + + List *dropDatabaseCommands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) dropDatabaseCommand, + ENABLE_DDL_PROPAGATION); + + /* + * Due to same reason stated in PostprocessCreateDatabaseStmt(), we need to + * use NontransactionalNodeDDLTaskList() to send the DROP DATABASE statement + * to the workers. + */ + bool warnForPartialFailure = true; + List *dropDatabaseDDLJobList = + NontransactionalNodeDDLTaskList(REMOTE_NODES, dropDatabaseCommands, + warnForPartialFailure); + return dropDatabaseDDLJobList; +} + + +/* + * DropDatabaseStmtObjectAddress gets the ObjectAddress of the database that is the + * object of the DropdbStmt. + */ +List * +DropDatabaseStmtObjectAddress(Node *node, bool missingOk, bool isPostprocess) +{ + DropdbStmt *stmt = castNode(DropdbStmt, node); + ObjectAddress *dbAddress = GetDatabaseAddressFromDatabaseName(stmt->dbname, + missingOk); + return list_make1(dbAddress); +} + + +/* + * CreateDatabaseStmtObjectAddress gets the ObjectAddress of the database that is the + * object of the CreatedbStmt. + */ +List * +CreateDatabaseStmtObjectAddress(Node *node, bool missingOk, bool isPostprocess) +{ + CreatedbStmt *stmt = castNode(CreatedbStmt, node); + ObjectAddress *dbAddress = GetDatabaseAddressFromDatabaseName(stmt->dbname, + missingOk); + return list_make1(dbAddress); +} + + +/* + * EnsureSupportedCreateDatabaseCommand validates the options provided for the CREATE + * DATABASE command. + * + * Parameters: + * stmt: A CreatedbStmt struct representing a CREATE DATABASE command. + * The options field is a list of DefElem structs, each representing an option. + * + * Currently, this function checks for the following: + * - The "oid" option is not supported. + * - The "template" option is only supported with the value "template1". + * - The "strategy" option is only supported with the value "wal_log". 
+ */ +void +EnsureSupportedCreateDatabaseCommand(CreatedbStmt *stmt) +{ + DefElem *option = NULL; + foreach_ptr(option, stmt->options) + { + if (strcmp(option->defname, "oid") == 0) + { + ereport(ERROR, + errmsg("CREATE DATABASE option \"%s\" is not supported", + option->defname)); + } + + char *optionValue = defGetString(option); + + if (strcmp(option->defname, "template") == 0 && + strcmp(optionValue, "template1") != 0) + { + ereport(ERROR, errmsg("Only template1 is supported as template " + "parameter for CREATE DATABASE")); + } + + if (strcmp(option->defname, "strategy") == 0 && + strcmp(optionValue, "wal_log") != 0) + { + ereport(ERROR, errmsg("Only wal_log is supported as strategy " + "parameter for CREATE DATABASE")); + } + } +} + + +/* + * GetDatabaseAddressFromDatabaseName gets the database name and returns the ObjectAddress + * of the database. + */ +static ObjectAddress * +GetDatabaseAddressFromDatabaseName(char *databaseName, bool missingOk) +{ + Oid databaseOid = get_database_oid(databaseName, missingOk); + ObjectAddress *dbObjectAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*dbObjectAddress, DatabaseRelationId, databaseOid); + return dbObjectAddress; +} + + +/* + * GetTablespaceName gets the tablespace oid and returns the tablespace name. + */ +static char * +GetTablespaceName(Oid tablespaceOid) +{ + HeapTuple tuple = SearchSysCache1(TABLESPACEOID, ObjectIdGetDatum(tablespaceOid)); + if (!HeapTupleIsValid(tuple)) + { + return NULL; + } + + Form_pg_tablespace tablespaceForm = (Form_pg_tablespace) GETSTRUCT(tuple); + char *tablespaceName = pstrdup(NameStr(tablespaceForm->spcname)); + + ReleaseSysCache(tuple); + + return tablespaceName; +} + + +/* + * GetDatabaseMetadataSyncCommands returns a list of sql statements + * for the given database id. The list contains the database ddl command, + * grant commands and comment propagation commands. + */ +List * +GetDatabaseMetadataSyncCommands(Oid dbOid) +{ + char *databaseName = get_database_name(dbOid); + char *databaseDDLCommand = CreateDatabaseDDLCommand(dbOid); + + List *ddlCommands = list_make1(databaseDDLCommand); + + List *grantDDLCommands = GrantOnDatabaseDDLCommands(dbOid); + List *commentDDLCommands = GetCommentPropagationCommands(DatabaseRelationId, dbOid, + databaseName, + OBJECT_DATABASE); + + ddlCommands = list_concat(ddlCommands, grantDDLCommands); + ddlCommands = list_concat(ddlCommands, commentDDLCommands); + + return ddlCommands; +} + + +/* + * GetDatabaseCollation gets oid of a database and returns all the collation related information + * We need this method since collation related info in Form_pg_database is not accessible. 
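The checks in EnsureSupportedCreateDatabaseCommand() boil down to a small table of allowed option values. Below is a standalone model of that validation; DbOption and CheckCreateDatabaseOption are illustrative stand-ins for the DefElem handling above.

#include <stdio.h>
#include <string.h>

/* simplified stand-in for a DefElem name/value pair */
typedef struct
{
	const char *name;
	const char *value;
} DbOption;

/* returns NULL when the option is supported, otherwise an error message */
static const char *
CheckCreateDatabaseOption(const DbOption *option)
{
	if (strcmp(option->name, "oid") == 0)
	{
		return "CREATE DATABASE option \"oid\" is not supported";
	}
	if (strcmp(option->name, "template") == 0 &&
		strcmp(option->value, "template1") != 0)
	{
		return "Only template1 is supported as template parameter for CREATE DATABASE";
	}
	if (strcmp(option->name, "strategy") == 0 &&
		strcmp(option->value, "wal_log") != 0)
	{
		return "Only wal_log is supported as strategy parameter for CREATE DATABASE";
	}
	return NULL;
}

int main(void)
{
	DbOption options[] = {
		{ "template", "template1" },   /* accepted */
		{ "strategy", "file_copy" },   /* rejected */
	};

	for (size_t i = 0; i < sizeof(options) / sizeof(options[0]); i++)
	{
		const char *error = CheckCreateDatabaseOption(&options[i]);
		printf("%s=%s -> %s\n", options[i].name, options[i].value,
			   error ? error : "ok");
	}
	return 0;
}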
+ */ +static DatabaseCollationInfo +GetDatabaseCollation(Oid dbOid) +{ + DatabaseCollationInfo info; + memset(&info, 0, sizeof(DatabaseCollationInfo)); + + Relation rel = table_open(DatabaseRelationId, AccessShareLock); + HeapTuple tup = get_catalog_object_by_oid(rel, Anum_pg_database_oid, dbOid); + if (!HeapTupleIsValid(tup)) + { + elog(ERROR, "cache lookup failed for database %u", dbOid); + } + + bool isNull = false; + + TupleDesc tupdesc = RelationGetDescr(rel); + + Datum collationDatum = heap_getattr(tup, Anum_pg_database_datcollate, tupdesc, + &isNull); + info.datcollate = TextDatumGetCString(collationDatum); + + Datum ctypeDatum = heap_getattr(tup, Anum_pg_database_datctype, tupdesc, &isNull); + info.datctype = TextDatumGetCString(ctypeDatum); + +#if PG_VERSION_NUM >= PG_VERSION_15 + + Datum icuLocaleDatum = heap_getattr(tup, Anum_pg_database_daticulocale, tupdesc, + &isNull); + if (!isNull) + { + info.daticulocale = TextDatumGetCString(icuLocaleDatum); + } + + Datum collverDatum = heap_getattr(tup, Anum_pg_database_datcollversion, tupdesc, + &isNull); + if (!isNull) + { + info.datcollversion = TextDatumGetCString(collverDatum); + } +#endif + +#if PG_VERSION_NUM >= PG_VERSION_16 + Datum icurulesDatum = heap_getattr(tup, Anum_pg_database_daticurules, tupdesc, + &isNull); + if (!isNull) + { + info.daticurules = TextDatumGetCString(icurulesDatum); + } +#endif + + table_close(rel, AccessShareLock); + heap_freetuple(tup); + + return info; +} + + +#if PG_VERSION_NUM >= PG_VERSION_15 + +/* + * GetLocaleProviderString gets the datlocprovider stored in pg_database + * and returns the string representation of the datlocprovider + */ +static char * +GetLocaleProviderString(char datlocprovider) +{ + switch (datlocprovider) + { + case 'c': + { + return "libc"; + } + + case 'i': + { + return "icu"; + } + + default: + { + ereport(ERROR, (errmsg("unexpected datlocprovider value: %c", + datlocprovider))); + } + } +} + + +#endif + + +/* + * GenerateCreateDatabaseStatementFromPgDatabase gets the pg_database tuple and returns the + * CREATE DATABASE statement that can be used to create given database. + * + * Note that this doesn't deparse OID of the database and this is not a + * problem as we anyway don't allow specifying custom OIDs for databases + * when creating them. + */ +static char * +GenerateCreateDatabaseStatementFromPgDatabase(Form_pg_database databaseForm) +{ + DatabaseCollationInfo collInfo = GetDatabaseCollation(databaseForm->oid); + + StringInfoData str; + initStringInfo(&str); + + appendStringInfo(&str, "CREATE DATABASE %s", + quote_identifier(NameStr(databaseForm->datname))); + + appendStringInfo(&str, " CONNECTION LIMIT %d", databaseForm->datconnlimit); + + appendStringInfo(&str, " ALLOW_CONNECTIONS = %s", + quote_literal_cstr(databaseForm->datallowconn ? "true" : "false")); + + appendStringInfo(&str, " IS_TEMPLATE = %s", + quote_literal_cstr(databaseForm->datistemplate ? 
"true" : "false")); + + appendStringInfo(&str, " LC_COLLATE = %s", + quote_literal_cstr(collInfo.datcollate)); + + appendStringInfo(&str, " LC_CTYPE = %s", quote_literal_cstr(collInfo.datctype)); + + appendStringInfo(&str, " OWNER = %s", + quote_identifier(GetUserNameFromId(databaseForm->datdba, false))); + + appendStringInfo(&str, " TABLESPACE = %s", + quote_identifier(GetTablespaceName(databaseForm->dattablespace))); + + appendStringInfo(&str, " ENCODING = %s", + quote_literal_cstr(pg_encoding_to_char(databaseForm->encoding))); + +#if PG_VERSION_NUM >= PG_VERSION_15 + if (collInfo.datcollversion != NULL) + { + appendStringInfo(&str, " COLLATION_VERSION = %s", + quote_identifier(collInfo.datcollversion)); + } + + if (collInfo.daticulocale != NULL) + { + appendStringInfo(&str, " ICU_LOCALE = %s", quote_identifier( + collInfo.daticulocale)); + } + + appendStringInfo(&str, " LOCALE_PROVIDER = %s", + quote_identifier(GetLocaleProviderString( + databaseForm->datlocprovider))); +#endif + +#if PG_VERSION_NUM >= PG_VERSION_16 + if (collInfo.daticurules != NULL) + { + appendStringInfo(&str, " ICU_RULES = %s", quote_identifier( + collInfo.daticurules)); + } +#endif + + return str.data; +} + + +/* + * CreateDatabaseDDLCommand returns a CREATE DATABASE command to create given + * database + * + * Command is wrapped by citus_internal_database_command() UDF + * to avoid from transaction block restrictions that apply to database commands. + */ +char * +CreateDatabaseDDLCommand(Oid dbId) +{ + HeapTuple tuple = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(dbId)); + if (!HeapTupleIsValid(tuple)) + { + ereport(ERROR, (errcode(ERRCODE_UNDEFINED_DATABASE), + errmsg("database with OID %u does not exist", dbId))); + } + + Form_pg_database databaseForm = (Form_pg_database) GETSTRUCT(tuple); + + char *createStmt = GenerateCreateDatabaseStatementFromPgDatabase(databaseForm); + + StringInfo outerDbStmt = makeStringInfo(); + + /* Generate the CREATE DATABASE statement */ + appendStringInfo(outerDbStmt, + "SELECT citus_internal.database_command(%s)", + quote_literal_cstr(createStmt)); + + ReleaseSysCache(tuple); + + return outerDbStmt->data; +} diff --git a/src/backend/distributed/commands/dependencies.c b/src/backend/distributed/commands/dependencies.c index 977efb14589..c7de5d874b7 100644 --- a/src/backend/distributed/commands/dependencies.c +++ b/src/backend/distributed/commands/dependencies.c @@ -10,9 +10,14 @@ #include "postgres.h" +#include "miscadmin.h" + #include "catalog/dependency.h" #include "catalog/objectaddress.h" #include "commands/extension.h" +#include "storage/lmgr.h" +#include "utils/lsyscache.h" + #include "distributed/commands.h" #include "distributed/commands/utility_hook.h" #include "distributed/connection_management.h" @@ -25,57 +30,147 @@ #include "distributed/remote_commands.h" #include "distributed/worker_manager.h" #include "distributed/worker_transaction.h" -#include "miscadmin.h" -#include "storage/lmgr.h" -#include "utils/lsyscache.h" + +typedef enum RequiredObjectSet +{ + REQUIRE_ONLY_DEPENDENCIES = 1, + REQUIRE_OBJECT_AND_DEPENDENCIES = 2, +} RequiredObjectSet; static void EnsureDependenciesCanBeDistributed(const ObjectAddress *relationAddress); static void ErrorIfCircularDependencyExists(const ObjectAddress *objectAddress); static int ObjectAddressComparator(const void *a, const void *b); static void EnsureDependenciesExistOnAllNodes(const ObjectAddress *target); +static void EnsureRequiredObjectSetExistOnAllNodes(const ObjectAddress *target, + RequiredObjectSet requiredObjectSet); 
static List * GetDependencyCreateDDLCommands(const ObjectAddress *dependency); static bool ShouldPropagateObject(const ObjectAddress *address); static char * DropTableIfExistsCommand(Oid relationId); /* - * EnsureDependenciesExistOnAllNodes finds all the dependencies that we support and makes - * sure these are available on all workers. If not available they will be created on the - * workers via a separate session that will be committed directly so that the objects are - * visible to potentially multiple sessions creating the shards. + * EnsureObjectAndDependenciesExistOnAllNodes is a wrapper around + * EnsureRequiredObjectSetExistOnAllNodes to ensure the "object itself" (together + * with its dependencies) is available on all nodes. + * + * Different than EnsureDependenciesExistOnAllNodes, we return early if the + * target object is distributed already. + * + * The reason why we don't do the same in EnsureDependenciesExistOnAllNodes + * is that it's is used when altering an object too and hence the target object + * may instantly have a dependency that needs to be propagated now. For example, + * when "⁠GRANT non_dist_role TO dist_role" is executed, we need to propagate + * "non_dist_role" to all nodes before propagating the "GRANT" command itself. + * For this reason, we call EnsureDependenciesExistOnAllNodes for "dist_role" + * and it would automatically discover that "non_dist_role" is a dependency of + * "dist_role" and propagate it beforehand. + * + * However, when we're requested to create the target object itself (and + * implicitly its dependencies), we're sure that we're not altering the target + * object itself, hence we can return early if the target object is already + * distributed. This is the case, for example, when + * "REASSIGN OWNED BY dist_role TO non_dist_role" is executed. In that case, + * "non_dist_role" is not a dependency of "dist_role" but we want to distribute + * "non_dist_role" beforehand and we call this function for "non_dist_role", + * not for "dist_role". + * + * See EnsureRequiredObjectExistOnAllNodes to learn more about how this + * function deals with an object created within the same transaction. + */ +void +EnsureObjectAndDependenciesExistOnAllNodes(const ObjectAddress *target) +{ + if (IsAnyObjectDistributed(list_make1((ObjectAddress *) target))) + { + return; + } + EnsureRequiredObjectSetExistOnAllNodes(target, REQUIRE_OBJECT_AND_DEPENDENCIES); +} + + +/* + * EnsureDependenciesExistOnAllNodes is a wrapper around + * EnsureRequiredObjectSetExistOnAllNodes to ensure "all dependencies" of given + * object --but not the object itself-- are available on all nodes. + * + * See EnsureRequiredObjectSetExistOnAllNodes to learn more about how this + * function deals with an object created within the same transaction. + */ +static void +EnsureDependenciesExistOnAllNodes(const ObjectAddress *target) +{ + EnsureRequiredObjectSetExistOnAllNodes(target, REQUIRE_ONLY_DEPENDENCIES); +} + + +/* + * EnsureRequiredObjectSetExistOnAllNodes finds all the dependencies that we support and makes + * sure these are available on all nodes if required object set is REQUIRE_ONLY_DEPENDENCIES. + * Otherwise, i.e., if required object set is REQUIRE_OBJECT_AND_DEPENDENCIES, then this + * function creates the object itself on all nodes too. 
This function ensures that each + * of the dependencies are supported by Citus but doesn't check the same for the target + * object itself (when REQUIRE_OBJECT_AND_DEPENDENCIES) is provided because we assume that + * callers don't call this function for an unsupported function at all. + * + * If not available, they will be created on the nodes via a separate session that will be + * committed directly so that the objects are visible to potentially multiple sessions creating + * the shards. * * Note; only the actual objects are created via a separate session, the records to * pg_dist_object are created in this session. As a side effect the objects could be - * created on the workers without a catalog entry. Updates to the objects on the coordinator - * are not propagated to the workers until the record is visible on the coordinator. + * created on the nodes without a catalog entry. Updates to the objects on local node + * are not propagated to the remote nodes until the record is visible on local node. * * This is solved by creating the dependencies in an idempotent manner, either via * postgres native CREATE IF NOT EXISTS, or citus helper functions. */ static void -EnsureDependenciesExistOnAllNodes(const ObjectAddress *target) +EnsureRequiredObjectSetExistOnAllNodes(const ObjectAddress *target, + RequiredObjectSet requiredObjectSet) { - List *dependenciesWithCommands = NIL; + Assert(requiredObjectSet == REQUIRE_ONLY_DEPENDENCIES || + requiredObjectSet == REQUIRE_OBJECT_AND_DEPENDENCIES); + + + List *objectsWithCommands = NIL; List *ddlCommands = NULL; /* * If there is any unsupported dependency or circular dependency exists, Citus can * not ensure dependencies will exist on all nodes. + * + * Note that we don't check whether "target" is distributable (in case + * REQUIRE_OBJECT_AND_DEPENDENCIES is provided) because we expect callers + * to not even call this function if Citus doesn't know how to propagate + * "target" object itself. */ EnsureDependenciesCanBeDistributed(target); /* collect all dependencies in creation order and get their ddl commands */ - List *dependencies = GetDependenciesForObject(target); - ObjectAddress *dependency = NULL; - foreach_ptr(dependency, dependencies) + List *objectsToBeCreated = GetDependenciesForObject(target); + + /* + * Append the target object to make sure that it's created after its + * dependencies are created, if requested. + */ + if (requiredObjectSet == REQUIRE_OBJECT_AND_DEPENDENCIES) { - List *dependencyCommands = GetDependencyCreateDDLCommands(dependency); + ObjectAddress *targetCopy = palloc(sizeof(ObjectAddress)); + *targetCopy = *target; + + objectsToBeCreated = lappend(objectsToBeCreated, targetCopy); + } + + ObjectAddress *object = NULL; + foreach_ptr(object, objectsToBeCreated) + { + List *dependencyCommands = GetDependencyCreateDDLCommands(object); ddlCommands = list_concat(ddlCommands, dependencyCommands); - /* create a new list with dependencies that actually created commands */ + /* create a new list with objects that actually created commands */ if (list_length(dependencyCommands) > 0) { - dependenciesWithCommands = lappend(dependenciesWithCommands, dependency); + objectsWithCommands = lappend(objectsWithCommands, object); } } if (list_length(ddlCommands) <= 0) @@ -95,29 +190,31 @@ EnsureDependenciesExistOnAllNodes(const ObjectAddress *target) * either get it now, or get it in citus_add_node after this transaction finishes and * the pg_dist_object record becomes visible. 
*/ - List *workerNodeList = ActivePrimaryNonCoordinatorNodeList(RowShareLock); + List *remoteNodeList = ActivePrimaryRemoteNodeList(RowShareLock); /* - * Lock dependent objects explicitly to make sure same DDL command won't be sent + * Lock objects to be created explicitly to make sure same DDL command won't be sent * multiple times from parallel sessions. * - * Sort dependencies that will be created on workers to not to have any deadlock + * Sort the objects that will be created on workers to not to have any deadlock * issue if different sessions are creating different objects. */ - List *addressSortedDependencies = SortList(dependenciesWithCommands, + List *addressSortedDependencies = SortList(objectsWithCommands, ObjectAddressComparator); - foreach_ptr(dependency, addressSortedDependencies) + foreach_ptr(object, addressSortedDependencies) { - LockDatabaseObject(dependency->classId, dependency->objectId, - dependency->objectSubId, ExclusiveLock); + LockDatabaseObject(object->classId, object->objectId, + object->objectSubId, ExclusiveLock); } /* - * We need to propagate dependencies via the current user's metadata connection if - * any dependency for the target is created in the current transaction. Our assumption - * is that if we rely on a dependency created in the current transaction, then the - * current user, most probably, has permissions to create the target object as well. + * We need to propagate objects via the current user's metadata connection if + * any of the objects that we're interested in are created in the current transaction. + * Our assumption is that if we rely on an object created in the current transaction, + * then the current user, most probably, has permissions to create the target object + * as well. + * * Note that, user still may not be able to create the target due to no permissions * for any of its dependencies. But this is ok since it should be rare. * @@ -125,14 +222,25 @@ EnsureDependenciesExistOnAllNodes(const ObjectAddress *target) * have visibility issues since propagated dependencies would be invisible to * the separate connection until we locally commit. */ - if (HasAnyDependencyInPropagatedObjects(target)) + List *createdObjectList = GetAllSupportedDependenciesForObject(target); + + /* consider target as well if we're requested to create it too */ + if (requiredObjectSet == REQUIRE_OBJECT_AND_DEPENDENCIES) + { + ObjectAddress *targetCopy = palloc(sizeof(ObjectAddress)); + *targetCopy = *target; + + createdObjectList = lappend(createdObjectList, targetCopy); + } + + if (HasAnyObjectInPropagatedObjects(createdObjectList)) { - SendCommandListToWorkersWithMetadata(ddlCommands); + SendCommandListToRemoteNodesWithMetadata(ddlCommands); } else { WorkerNode *workerNode = NULL; - foreach_ptr(workerNode, workerNodeList) + foreach_ptr(workerNode, remoteNodeList) { const char *nodeName = workerNode->workerName; uint32 nodePort = workerNode->workerPort; @@ -144,11 +252,11 @@ EnsureDependenciesExistOnAllNodes(const ObjectAddress *target) } /* - * We do this after creating the objects on the workers, we make sure - * that objects have been created on worker nodes before marking them + * We do this after creating the objects on remote nodes, we make sure + * that objects have been created on remote nodes before marking them * distributed, so MarkObjectDistributed wouldn't fail. 
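The deadlock-avoidance argument above relies on every session acquiring the object locks in the same order. A minimal standalone model of that ordering, using a simplified ObjectAddress stand-in and made-up ids, is sketched below.

#include <stdio.h>
#include <stdlib.h>

/* simplified stand-in for ObjectAddress, for illustration only */
typedef struct
{
	unsigned int classId;
	unsigned int objectId;
	int objectSubId;
} Addr;

/* orders addresses deterministically so all sessions lock in the same sequence */
static int
AddrComparator(const void *a, const void *b)
{
	const Addr *left = (const Addr *) a;
	const Addr *right = (const Addr *) b;

	if (left->classId != right->classId)
	{
		return left->classId < right->classId ? -1 : 1;
	}
	if (left->objectId != right->objectId)
	{
		return left->objectId < right->objectId ? -1 : 1;
	}
	if (left->objectSubId != right->objectSubId)
	{
		return left->objectSubId < right->objectSubId ? -1 : 1;
	}
	return 0;
}

int main(void)
{
	Addr addresses[] = { { 20, 900, 0 }, { 10, 500, 0 }, { 20, 100, 0 } };

	qsort(addresses, 3, sizeof(Addr), AddrComparator);

	/* every session now requests locks in the order (10,500) (20,100) (20,900) */
	for (int i = 0; i < 3; i++)
	{
		printf("(%u, %u, %d)\n", addresses[i].classId,
			   addresses[i].objectId, addresses[i].objectSubId);
	}
	return 0;
}

With a field-by-field comparison like this, two sessions creating overlapping object sets always acquire the shared locks in an identical sequence, so neither can end up waiting on the other in a cycle.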
*/ - foreach_ptr(dependency, dependenciesWithCommands) + foreach_ptr(object, objectsWithCommands) { /* * pg_dist_object entries must be propagated with the super user, since @@ -158,7 +266,7 @@ EnsureDependenciesExistOnAllNodes(const ObjectAddress *target) * Only dependent object's metadata should be propagated with super user. * Metadata of the table itself must be propagated with the current user. */ - MarkObjectDistributedViaSuperUser(dependency); + MarkObjectDistributedViaSuperUser(object); } } @@ -457,16 +565,29 @@ GetDependencyCreateDDLCommands(const ObjectAddress *dependency) case OCLASS_DATABASE: { - List *databaseDDLCommands = NIL; + /* + * For the database where Citus is installed, only propagate the ownership of the + * database, only when the feature is on. + * + * This is because this database must exist on all nodes already so we shouldn't + * need to "CREATE" it on other nodes. However, we still need to correctly reflect + * its owner on other nodes too. + */ + if (dependency->objectId == MyDatabaseId && EnableAlterDatabaseOwner) + { + return DatabaseOwnerDDLCommands(dependency); + } - /* only propagate the ownership of the database when the feature is on */ - if (EnableAlterDatabaseOwner) + /* + * For the other databases, create the database on all nodes, only when the feature + * is on. + */ + if (dependency->objectId != MyDatabaseId && EnableCreateDatabasePropagation) { - List *ownerDDLCommands = DatabaseOwnerDDLCommands(dependency); - databaseDDLCommands = list_concat(databaseDDLCommands, ownerDDLCommands); + return GetDatabaseMetadataSyncCommands(dependency->objectId); } - return databaseDDLCommands; + return NIL; } case OCLASS_PROC: diff --git a/src/backend/distributed/commands/distribute_object_ops.c b/src/backend/distributed/commands/distribute_object_ops.c index a0a306e8d15..5a62dd2c868 100644 --- a/src/backend/distributed/commands/distribute_object_ops.c +++ b/src/backend/distributed/commands/distribute_object_ops.c @@ -12,11 +12,13 @@ #include "postgres.h" +#include "pg_version_constants.h" + #include "distributed/commands.h" +#include "distributed/commands/utility_hook.h" +#include "distributed/comment.h" #include "distributed/deparser.h" -#include "distributed/pg_version_constants.h" #include "distributed/version_compat.h" -#include "distributed/commands/utility_hook.h" static DistributeObjectOps NoDistributeOps = { .deparse = NULL, @@ -274,6 +276,17 @@ static DistributeObjectOps Any_CreateRole = { .address = CreateRoleStmtObjectAddress, .markDistributed = true, }; + +static DistributeObjectOps Any_ReassignOwned = { + .deparse = DeparseReassignOwnedStmt, + .qualify = NULL, + .preprocess = NULL, + .postprocess = PostprocessReassignOwnedStmt, + .operationType = DIST_OPS_ALTER, + .address = NULL, + .markDistributed = false, +}; + static DistributeObjectOps Any_DropOwned = { .deparse = DeparseDropOwnedStmt, .qualify = NULL, @@ -292,6 +305,17 @@ static DistributeObjectOps Any_DropRole = { .address = NULL, .markDistributed = false, }; + +static DistributeObjectOps Role_Comment = { + .deparse = DeparseCommentStmt, + .qualify = NULL, + .preprocess = PreprocessAlterDistributedObjectStmt, + .postprocess = NULL, + .objectType = OBJECT_DATABASE, + .operationType = DIST_OPS_ALTER, + .address = CommentObjectAddress, + .markDistributed = false, +}; static DistributeObjectOps Any_CreateForeignServer = { .deparse = DeparseCreateForeignServerStmt, .qualify = NULL, @@ -374,6 +398,15 @@ static DistributeObjectOps Any_Rename = { .address = NULL, .markDistributed = false, }; +static 
DistributeObjectOps Any_SecLabel = { + .deparse = DeparseSecLabelStmt, + .qualify = NULL, + .preprocess = NULL, + .postprocess = PostprocessSecLabelStmt, + .operationType = DIST_OPS_ALTER, + .address = SecLabelStmtObjectAddress, + .markDistributed = false, +}; static DistributeObjectOps Attribute_Rename = { .deparse = DeparseRenameAttributeStmt, .qualify = QualifyRenameAttributeStmt, @@ -466,6 +499,28 @@ static DistributeObjectOps Database_Alter = { .markDistributed = false, }; +static DistributeObjectOps Database_Create = { + .deparse = DeparseCreateDatabaseStmt, + .qualify = NULL, + .preprocess = PreprocessCreateDatabaseStmt, + .postprocess = PostprocessCreateDatabaseStmt, + .objectType = OBJECT_DATABASE, + .operationType = DIST_OPS_CREATE, + .address = CreateDatabaseStmtObjectAddress, + .markDistributed = true, +}; + +static DistributeObjectOps Database_Drop = { + .deparse = DeparseDropDatabaseStmt, + .qualify = NULL, + .preprocess = PreprocessDropDatabaseStmt, + .postprocess = NULL, + .objectType = OBJECT_DATABASE, + .operationType = DIST_OPS_DROP, + .address = DropDatabaseStmtObjectAddress, + .markDistributed = false, +}; + #if PG_VERSION_NUM >= PG_VERSION_15 static DistributeObjectOps Database_RefreshColl = { .deparse = DeparseAlterDatabaseRefreshCollStmt, @@ -490,6 +545,27 @@ static DistributeObjectOps Database_Set = { .markDistributed = false, }; +static DistributeObjectOps Database_Comment = { + .deparse = DeparseCommentStmt, + .qualify = NULL, + .preprocess = PreprocessAlterDistributedObjectStmt, + .postprocess = NULL, + .objectType = OBJECT_DATABASE, + .operationType = DIST_OPS_ALTER, + .address = CommentObjectAddress, + .markDistributed = false, +}; + +static DistributeObjectOps Database_Rename = { + .deparse = DeparseAlterDatabaseRenameStmt, + .qualify = NULL, + .preprocess = PreprocessAlterDatabaseRenameStmt, + .postprocess = PostprocessAlterDatabaseRenameStmt, + .objectType = OBJECT_DATABASE, + .operationType = DIST_OPS_ALTER, + .address = NULL, + .markDistributed = false, +}; static DistributeObjectOps Domain_Alter = { .deparse = DeparseAlterDomainStmt, @@ -919,13 +995,18 @@ static DistributeObjectOps TextSearchConfig_AlterOwner = { .markDistributed = false, }; static DistributeObjectOps TextSearchConfig_Comment = { - .deparse = DeparseTextSearchConfigurationCommentStmt, + .deparse = DeparseCommentStmt, + + /* TODO: When adding new comment types we should create an abstracted + * qualify function, just like we have an abstract deparse + * and adress function + */ .qualify = QualifyTextSearchConfigurationCommentStmt, .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, .objectType = OBJECT_TSCONFIGURATION, .operationType = DIST_OPS_ALTER, - .address = TextSearchConfigurationCommentObjectAddress, + .address = CommentObjectAddress, .markDistributed = false, }; static DistributeObjectOps TextSearchConfig_Define = { @@ -988,13 +1069,13 @@ static DistributeObjectOps TextSearchDict_AlterOwner = { .markDistributed = false, }; static DistributeObjectOps TextSearchDict_Comment = { - .deparse = DeparseTextSearchDictionaryCommentStmt, + .deparse = DeparseCommentStmt, .qualify = QualifyTextSearchDictionaryCommentStmt, .preprocess = PreprocessAlterDistributedObjectStmt, .postprocess = NULL, .objectType = OBJECT_TSDICTIONARY, .operationType = DIST_OPS_ALTER, - .address = TextSearchDictCommentObjectAddress, + .address = CommentObjectAddress, .markDistributed = false, }; static DistributeObjectOps TextSearchDict_Define = { @@ -1334,6 +1415,16 @@ 
GetDistributeObjectOps(Node *node) return &Database_Alter; } + case T_CreatedbStmt: + { + return &Database_Create; + } + + case T_DropdbStmt: + { + return &Database_Drop; + } + #if PG_VERSION_NUM >= PG_VERSION_15 case T_AlterDatabaseRefreshCollStmt: { @@ -1717,6 +1808,16 @@ GetDistributeObjectOps(Node *node) return &TextSearchDict_Comment; } + case OBJECT_DATABASE: + { + return &Database_Comment; + } + + case OBJECT_ROLE: + { + return &Role_Comment; + } + default: { return &NoDistributeOps; @@ -1826,6 +1927,11 @@ GetDistributeObjectOps(Node *node) return &Any_DropOwned; } + case T_ReassignOwnedStmt: + { + return &Any_ReassignOwned; + } + case T_DropStmt: { DropStmt *stmt = castNode(DropStmt, node); @@ -2020,6 +2126,11 @@ GetDistributeObjectOps(Node *node) return &Vacuum_Analyze; } + case T_SecLabelStmt: + { + return &Any_SecLabel; + } + case T_RenameStmt: { RenameStmt *stmt = castNode(RenameStmt, node); @@ -2040,6 +2151,11 @@ GetDistributeObjectOps(Node *node) return &Collation_Rename; } + case OBJECT_DATABASE: + { + return &Database_Rename; + } + case OBJECT_DOMAIN: { return &Domain_Rename; diff --git a/src/backend/distributed/commands/drop_distributed_table.c b/src/backend/distributed/commands/drop_distributed_table.c index 26579cd60ea..c3d488b09bf 100644 --- a/src/backend/distributed/commands/drop_distributed_table.c +++ b/src/backend/distributed/commands/drop_distributed_table.c @@ -9,20 +9,21 @@ */ #include "postgres.h" + #include "miscadmin.h" +#include "utils/builtins.h" +#include "utils/lsyscache.h" #include "distributed/colocation_utils.h" -#include "distributed/commands/utility_hook.h" #include "distributed/commands.h" -#include "distributed/metadata_utility.h" +#include "distributed/commands/utility_hook.h" #include "distributed/coordinator_protocol.h" #include "distributed/metadata_sync.h" +#include "distributed/metadata_utility.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/tenant_schema_metadata.h" #include "distributed/worker_transaction.h" -#include "utils/builtins.h" -#include "utils/lsyscache.h" /* local function forward declarations */ diff --git a/src/backend/distributed/commands/extension.c b/src/backend/distributed/commands/extension.c index 5bddf1ede4e..8d4c6431b77 100644 --- a/src/backend/distributed/commands/extension.c +++ b/src/backend/distributed/commands/extension.c @@ -12,32 +12,35 @@ #include "access/genam.h" #include "access/xact.h" -#include "citus_version.h" #include "catalog/dependency.h" #include "catalog/pg_depend.h" #include "catalog/pg_extension_d.h" -#include "columnar/columnar.h" #include "catalog/pg_foreign_data_wrapper.h" #include "commands/defrem.h" #include "commands/extension.h" +#include "foreign/foreign.h" +#include "nodes/makefuncs.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" + +#include "citus_version.h" + +#include "columnar/columnar.h" + #include "distributed/citus_ruleutils.h" #include "distributed/commands.h" #include "distributed/commands/utility_hook.h" +#include "distributed/coordinator_protocol.h" #include "distributed/deparser.h" #include "distributed/listutils.h" -#include "distributed/coordinator_protocol.h" -#include "distributed/metadata_sync.h" #include "distributed/metadata/dependency.h" #include "distributed/metadata/distobject.h" +#include "distributed/metadata_sync.h" #include "distributed/multi_executor.h" #include "distributed/relation_access_tracking.h" #include "distributed/transaction_management.h" -#include 
"foreign/foreign.h" -#include "nodes/makefuncs.h" -#include "utils/lsyscache.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/syscache.h" /* Local functions forward declarations for helper functions */ @@ -773,7 +776,7 @@ PreprocessCreateExtensionStmtForCitusColumnar(Node *parsetree) /*create extension citus version xxx*/ if (newVersionValue) { - char *newVersion = strdup(defGetString(newVersionValue)); + char *newVersion = pstrdup(defGetString(newVersionValue)); versionNumber = GetExtensionVersionNumber(newVersion); } @@ -793,7 +796,7 @@ PreprocessCreateExtensionStmtForCitusColumnar(Node *parsetree) Oid citusOid = get_extension_oid("citus", true); if (citusOid != InvalidOid) { - char *curCitusVersion = strdup(get_extension_version(citusOid)); + char *curCitusVersion = pstrdup(get_extension_version(citusOid)); int curCitusVersionNum = GetExtensionVersionNumber(curCitusVersion); if (curCitusVersionNum < 1110) { @@ -888,7 +891,7 @@ PreprocessAlterExtensionCitusStmtForCitusColumnar(Node *parseTree) if (newVersionValue) { char *newVersion = defGetString(newVersionValue); - double newVersionNumber = GetExtensionVersionNumber(strdup(newVersion)); + double newVersionNumber = GetExtensionVersionNumber(pstrdup(newVersion)); /*alter extension citus update to version >= 11.1-1, and no citus_columnar installed */ if (newVersionNumber >= 1110 && citusColumnarOid == InvalidOid) @@ -932,7 +935,7 @@ PostprocessAlterExtensionCitusStmtForCitusColumnar(Node *parseTree) if (newVersionValue) { char *newVersion = defGetString(newVersionValue); - double newVersionNumber = GetExtensionVersionNumber(strdup(newVersion)); + double newVersionNumber = GetExtensionVersionNumber(pstrdup(newVersion)); if (newVersionNumber >= 1110 && citusColumnarOid != InvalidOid) { /*upgrade citus, after "ALTER EXTENSION citus update to xxx" updates citus_columnar Y to version Z. 
*/ @@ -1090,33 +1093,26 @@ List * GetDependentFDWsToExtension(Oid extensionId) { List *extensionFDWs = NIL; - ScanKeyData key[3]; - int scanKeyCount = 3; + ScanKeyData key[1]; HeapTuple tup; Relation pgDepend = table_open(DependRelationId, AccessShareLock); ScanKeyInit(&key[0], - Anum_pg_depend_refclassid, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(ExtensionRelationId)); - ScanKeyInit(&key[1], - Anum_pg_depend_refobjid, - BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(extensionId)); - ScanKeyInit(&key[2], Anum_pg_depend_classid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(ForeignDataWrapperRelationId)); - SysScanDesc scan = systable_beginscan(pgDepend, InvalidOid, false, - NULL, scanKeyCount, key); + SysScanDesc scan = systable_beginscan(pgDepend, DependDependerIndexId, true, + NULL, lengthof(key), key); while (HeapTupleIsValid(tup = systable_getnext(scan))) { Form_pg_depend pgDependEntry = (Form_pg_depend) GETSTRUCT(tup); - if (pgDependEntry->deptype == DEPENDENCY_EXTENSION) + if (pgDependEntry->deptype == DEPENDENCY_EXTENSION && + pgDependEntry->refclassid == ExtensionRelationId && + pgDependEntry->refobjid == extensionId) { extensionFDWs = lappend_oid(extensionFDWs, pgDependEntry->objid); } diff --git a/src/backend/distributed/commands/foreign_constraint.c b/src/backend/distributed/commands/foreign_constraint.c index 7c2d50f44bc..2f60c3fb11f 100644 --- a/src/backend/distributed/commands/foreign_constraint.c +++ b/src/backend/distributed/commands/foreign_constraint.c @@ -12,35 +12,38 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "miscadmin.h" +#include "access/genam.h" #include "access/htup_details.h" #include "access/sysattr.h" #include "access/xact.h" #include "catalog/namespace.h" #include "catalog/pg_constraint.h" -#include "access/genam.h" +#include "catalog/pg_depend.h" #include "catalog/pg_type.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/inval.h" +#include "utils/lsyscache.h" +#include "utils/rel.h" +#include "utils/relcache.h" +#include "utils/ruleutils.h" +#include "utils/syscache.h" + +#include "pg_version_constants.h" + #include "distributed/colocation_utils.h" #include "distributed/commands.h" #include "distributed/commands/sequence.h" #include "distributed/coordinator_protocol.h" +#include "distributed/hash_helpers.h" #include "distributed/listutils.h" -#include "distributed/coordinator_protocol.h" #include "distributed/multi_join_order.h" #include "distributed/namespace_utils.h" #include "distributed/reference_table_utils.h" #include "distributed/utils/array_type.h" #include "distributed/version_compat.h" -#include "miscadmin.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/inval.h" -#include "utils/lsyscache.h" -#include "utils/rel.h" -#include "utils/relcache.h" -#include "utils/ruleutils.h" -#include "utils/syscache.h" #define BehaviorIsRestrictOrNoAction(x) \ @@ -1197,6 +1200,114 @@ TableHasExternalForeignKeys(Oid relationId) } +/* + * ForeignConstraintMatchesFlags is a function with logic that's very specific + * to GetForeignKeyOids. There's no reason to use it in any other context. 
+ */ +static bool +ForeignConstraintMatchesFlags(Form_pg_constraint constraintForm, + int flags) +{ + if (constraintForm->contype != CONSTRAINT_FOREIGN) + { + return false; + } + + bool inheritedConstraint = OidIsValid(constraintForm->conparentid); + if (inheritedConstraint) + { + /* + * We only consider the constraints that are explicitly created on + * the table as we already process the constraints from parent tables + * implicitly when a command is issued + */ + return false; + } + + bool excludeSelfReference = (flags & EXCLUDE_SELF_REFERENCES); + bool isSelfReference = (constraintForm->conrelid == constraintForm->confrelid); + if (excludeSelfReference && isSelfReference) + { + return false; + } + + Oid otherTableId = InvalidOid; + if (flags & INCLUDE_REFERENCING_CONSTRAINTS) + { + otherTableId = constraintForm->confrelid; + } + else + { + otherTableId = constraintForm->conrelid; + } + + return IsTableTypeIncluded(otherTableId, flags); +} + + +/* + * GetForeignKeyOidsForReferencedTable returns a list of foreign key OIDs that + * reference the relationId and match the given flags. + * + * This is separated from GetForeignKeyOids because we need to scan pg_depend + * instead of pg_constraint directly. The reason for this is that there is no + * index on the confrelid of pg_constraint, so searching by that column + * requires a seqscan. + */ +static List * +GetForeignKeyOidsForReferencedTable(Oid relationId, int flags) +{ + HTAB *foreignKeyOidsSet = CreateSimpleHashSetWithName( + Oid, "ReferencingForeignKeyOidsSet"); + List *foreignKeyOidsList = NIL; + ScanKeyData key[2]; + HeapTuple dependTup; + Relation depRel = table_open(DependRelationId, AccessShareLock); + + ScanKeyInit(&key[0], + Anum_pg_depend_refclassid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(RelationRelationId)); + ScanKeyInit(&key[1], + Anum_pg_depend_refobjid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(relationId)); + SysScanDesc scan = systable_beginscan(depRel, DependReferenceIndexId, true, + NULL, lengthof(key), key); + while (HeapTupleIsValid(dependTup = systable_getnext(scan))) + { + Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(dependTup); + + if (deprec->classid != ConstraintRelationId || + deprec->deptype != DEPENDENCY_NORMAL || + hash_search(foreignKeyOidsSet, &deprec->objid, HASH_FIND, NULL)) + { + continue; + } + + + HeapTuple constraintTup = SearchSysCache1(CONSTROID, ObjectIdGetDatum( + deprec->objid)); + if (!HeapTupleIsValid(constraintTup)) /* can happen during DROP TABLE */ + { + continue; + } + + Form_pg_constraint constraint = (Form_pg_constraint) GETSTRUCT(constraintTup); + if (constraint->confrelid == relationId && + ForeignConstraintMatchesFlags(constraint, flags)) + { + foreignKeyOidsList = lappend_oid(foreignKeyOidsList, constraint->oid); + hash_search(foreignKeyOidsSet, &constraint->oid, HASH_ENTER, NULL); + } + ReleaseSysCache(constraintTup); + } + systable_endscan(scan); + table_close(depRel, AccessShareLock); + return foreignKeyOidsList; +} + + /* * GetForeignKeyOids takes in a relationId, and returns a list of OIDs for * foreign constraints that the relation with relationId is involved according @@ -1206,9 +1317,8 @@ TableHasExternalForeignKeys(Oid relationId) List * GetForeignKeyOids(Oid relationId, int flags) { - AttrNumber pgConstraintTargetAttrNumber = InvalidAttrNumber; - - bool extractReferencing = (flags & INCLUDE_REFERENCING_CONSTRAINTS); + bool extractReferencing PG_USED_FOR_ASSERTS_ONLY = (flags & + INCLUDE_REFERENCING_CONSTRAINTS); bool extractReferenced = 
(flags & INCLUDE_REFERENCED_CONSTRAINTS); /* @@ -1219,85 +1329,33 @@ GetForeignKeyOids(Oid relationId, int flags) Assert(!(extractReferencing && extractReferenced)); Assert(extractReferencing || extractReferenced); - bool useIndex = false; - Oid indexOid = InvalidOid; - - if (extractReferencing) - { - pgConstraintTargetAttrNumber = Anum_pg_constraint_conrelid; - - useIndex = true; - indexOid = ConstraintRelidTypidNameIndexId; - } - else if (extractReferenced) + if (extractReferenced) { - pgConstraintTargetAttrNumber = Anum_pg_constraint_confrelid; + return GetForeignKeyOidsForReferencedTable(relationId, flags); } - bool excludeSelfReference = (flags & EXCLUDE_SELF_REFERENCES); - List *foreignKeyOids = NIL; ScanKeyData scanKey[1]; int scanKeyCount = 1; Relation pgConstraint = table_open(ConstraintRelationId, AccessShareLock); - ScanKeyInit(&scanKey[0], pgConstraintTargetAttrNumber, + ScanKeyInit(&scanKey[0], Anum_pg_constraint_conrelid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(relationId)); - SysScanDesc scanDescriptor = systable_beginscan(pgConstraint, indexOid, useIndex, + + SysScanDesc scanDescriptor = systable_beginscan(pgConstraint, + ConstraintRelidTypidNameIndexId, true, NULL, scanKeyCount, scanKey); - HeapTuple heapTuple = systable_getnext(scanDescriptor); - while (HeapTupleIsValid(heapTuple)) + HeapTuple heapTuple; + while (HeapTupleIsValid(heapTuple = systable_getnext(scanDescriptor))) { Form_pg_constraint constraintForm = (Form_pg_constraint) GETSTRUCT(heapTuple); - if (constraintForm->contype != CONSTRAINT_FOREIGN) + if (ForeignConstraintMatchesFlags(constraintForm, flags)) { - heapTuple = systable_getnext(scanDescriptor); - continue; - } - - bool inheritedConstraint = OidIsValid(constraintForm->conparentid); - if (inheritedConstraint) - { - /* - * We only consider the constraints that are explicitly created on - * the table as we already process the constraints from parent tables - * implicitly when a command is issued - */ - heapTuple = systable_getnext(scanDescriptor); - continue; - } - - Oid constraintId = constraintForm->oid; - - bool isSelfReference = (constraintForm->conrelid == constraintForm->confrelid); - if (excludeSelfReference && isSelfReference) - { - heapTuple = systable_getnext(scanDescriptor); - continue; + foreignKeyOids = lappend_oid(foreignKeyOids, constraintForm->oid); } - - Oid otherTableId = InvalidOid; - if (extractReferencing) - { - otherTableId = constraintForm->confrelid; - } - else if (extractReferenced) - { - otherTableId = constraintForm->conrelid; - } - - if (!IsTableTypeIncluded(otherTableId, flags)) - { - heapTuple = systable_getnext(scanDescriptor); - continue; - } - - foreignKeyOids = lappend_oid(foreignKeyOids, constraintId); - - heapTuple = systable_getnext(scanDescriptor); } systable_endscan(scanDescriptor); diff --git a/src/backend/distributed/commands/foreign_data_wrapper.c b/src/backend/distributed/commands/foreign_data_wrapper.c index c9a08c41a7e..a181e63a733 100644 --- a/src/backend/distributed/commands/foreign_data_wrapper.c +++ b/src/backend/distributed/commands/foreign_data_wrapper.c @@ -11,17 +11,18 @@ #include "postgres.h" #include "catalog/pg_foreign_data_wrapper.h" -#include "distributed/commands/utility_hook.h" -#include "distributed/commands.h" -#include "distributed/deparser.h" -#include "distributed/listutils.h" -#include "distributed/metadata_sync.h" -#include "distributed/metadata/distobject.h" #include "foreign/foreign.h" #include "nodes/makefuncs.h" #include "nodes/parsenodes.h" #include "utils/syscache.h" 
+#include "distributed/commands.h" +#include "distributed/commands/utility_hook.h" +#include "distributed/deparser.h" +#include "distributed/listutils.h" +#include "distributed/metadata/distobject.h" +#include "distributed/metadata_sync.h" + static bool NameListHasFDWOwnedByDistributedExtension(List *FDWNames); static ObjectAddress GetObjectAddressByFDWName(char *FDWName, bool missing_ok); diff --git a/src/backend/distributed/commands/foreign_server.c b/src/backend/distributed/commands/foreign_server.c index 7d19f9336e9..d2e5755643d 100644 --- a/src/backend/distributed/commands/foreign_server.c +++ b/src/backend/distributed/commands/foreign_server.c @@ -9,11 +9,18 @@ */ #include "postgres.h" + #include "miscadmin.h" #include "catalog/pg_foreign_server.h" -#include "distributed/commands/utility_hook.h" +#include "foreign/foreign.h" +#include "nodes/makefuncs.h" +#include "nodes/parsenodes.h" +#include "nodes/primnodes.h" +#include "utils/builtins.h" + #include "distributed/commands.h" +#include "distributed/commands/utility_hook.h" #include "distributed/deparser.h" #include "distributed/listutils.h" #include "distributed/log_utils.h" @@ -21,11 +28,6 @@ #include "distributed/metadata_sync.h" #include "distributed/multi_executor.h" #include "distributed/worker_transaction.h" -#include "foreign/foreign.h" -#include "nodes/makefuncs.h" -#include "nodes/parsenodes.h" -#include "nodes/primnodes.h" -#include "utils/builtins.h" static char * GetForeignServerAlterOwnerCommand(Oid serverId); static Node * RecreateForeignServerStmt(Oid serverId); diff --git a/src/backend/distributed/commands/function.c b/src/backend/distributed/commands/function.c index 01911677d76..6d2dd0ba975 100644 --- a/src/backend/distributed/commands/function.c +++ b/src/backend/distributed/commands/function.c @@ -18,35 +18,47 @@ */ #include "postgres.h" -#include "miscadmin.h" -#include "funcapi.h" -#include "distributed/pg_version_constants.h" +#include "funcapi.h" +#include "miscadmin.h" #include "access/genam.h" #include "access/htup_details.h" #include "access/xact.h" -#include "catalog/pg_aggregate.h" #include "catalog/dependency.h" #include "catalog/namespace.h" +#include "catalog/pg_aggregate.h" #include "catalog/pg_proc.h" #include "catalog/pg_type.h" #include "commands/extension.h" +#include "nodes/makefuncs.h" +#include "parser/parse_coerce.h" +#include "parser/parse_type.h" +#include "storage/lmgr.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/fmgrprotos.h" +#include "utils/lsyscache.h" +#include "utils/regproc.h" +#include "utils/syscache.h" + +#include "pg_version_constants.h" + #include "distributed/citus_depended_object.h" #include "distributed/citus_ruleutils.h" #include "distributed/citus_safe_lib.h" #include "distributed/colocation_utils.h" #include "distributed/commands.h" #include "distributed/commands/utility_hook.h" +#include "distributed/coordinator_protocol.h" #include "distributed/deparser.h" #include "distributed/listutils.h" #include "distributed/maintenanced.h" -#include "distributed/metadata_utility.h" #include "distributed/metadata/dependency.h" -#include "distributed/coordinator_protocol.h" #include "distributed/metadata/distobject.h" #include "distributed/metadata/pg_dist_object.h" #include "distributed/metadata_sync.h" +#include "distributed/metadata_utility.h" #include "distributed/multi_executor.h" #include "distributed/namespace_utils.h" #include "distributed/pg_dist_node.h" @@ -55,16 +67,6 @@ #include "distributed/version_compat.h" #include 
"distributed/worker_create_or_replace.h" #include "distributed/worker_transaction.h" -#include "nodes/makefuncs.h" -#include "parser/parse_coerce.h" -#include "parser/parse_type.h" -#include "storage/lmgr.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/fmgrprotos.h" -#include "utils/lsyscache.h" -#include "utils/syscache.h" -#include "utils/regproc.h" #define DISABLE_LOCAL_CHECK_FUNCTION_BODIES "SET LOCAL check_function_bodies TO off;" #define RESET_CHECK_FUNCTION_BODIES "RESET check_function_bodies;" @@ -883,6 +885,7 @@ UpdateFunctionDistributionInfo(const ObjectAddress *distAddress, char *workerPgDistObjectUpdateCommand = MarkObjectsDistributedCreateCommand(objectAddressList, + NIL, distArgumentIndexList, colocationIdList, forceDelegationList); @@ -978,7 +981,6 @@ GetAggregateDDLCommand(const RegProcedure funcOid, bool useCreateOrReplace) char *argmodes = NULL; int insertorderbyat = -1; int argsprinted = 0; - int inputargno = 0; HeapTuple proctup = SearchSysCache1(PROCOID, funcOid); if (!HeapTupleIsValid(proctup)) @@ -1058,7 +1060,6 @@ GetAggregateDDLCommand(const RegProcedure funcOid, bool useCreateOrReplace) } } - inputargno++; /* this is a 1-based counter */ if (argsprinted == insertorderbyat) { appendStringInfoString(&buf, " ORDER BY "); diff --git a/src/backend/distributed/commands/grant.c b/src/backend/distributed/commands/grant.c index c7861060ab2..c4278cee1bd 100644 --- a/src/backend/distributed/commands/grant.c +++ b/src/backend/distributed/commands/grant.c @@ -10,15 +10,16 @@ #include "postgres.h" +#include "lib/stringinfo.h" +#include "nodes/parsenodes.h" +#include "utils/lsyscache.h" + #include "distributed/citus_ruleutils.h" #include "distributed/commands.h" #include "distributed/commands/utility_hook.h" #include "distributed/metadata/distobject.h" #include "distributed/metadata_cache.h" #include "distributed/version_compat.h" -#include "lib/stringinfo.h" -#include "nodes/parsenodes.h" -#include "utils/lsyscache.h" /* Local functions forward declarations for helper functions */ diff --git a/src/backend/distributed/commands/index.c b/src/backend/distributed/commands/index.c index 8271cc4f464..e97312df271 100644 --- a/src/backend/distributed/commands/index.c +++ b/src/backend/distributed/commands/index.c @@ -10,7 +10,8 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "miscadmin.h" + #include "access/genam.h" #include "access/htup_details.h" #include "access/xact.h" @@ -18,40 +19,43 @@ #include "catalog/index.h" #include "catalog/namespace.h" #include "catalog/pg_class.h" -#if PG_VERSION_NUM >= PG_VERSION_16 -#include "catalog/pg_namespace.h" -#endif #include "commands/defrem.h" #include "commands/tablecmds.h" +#include "lib/stringinfo.h" +#include "nodes/parsenodes.h" +#include "parser/parse_utilcmd.h" +#include "storage/lmgr.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/inval.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" + +#include "pg_version_constants.h" + #include "distributed/citus_ruleutils.h" #include "distributed/commands.h" #include "distributed/commands/utility_hook.h" +#include "distributed/coordinator_protocol.h" #include "distributed/deparse_shard_query.h" #include "distributed/deparser.h" #include "distributed/distributed_planner.h" #include "distributed/listutils.h" #include "distributed/local_executor.h" -#include "distributed/coordinator_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/multi_executor.h" -#include 
"distributed/multi_physical_planner.h" #include "distributed/multi_partitioning_utils.h" +#include "distributed/multi_physical_planner.h" #include "distributed/namespace_utils.h" -#include "distributed/resource_lock.h" #include "distributed/relation_access_tracking.h" #include "distributed/relation_utils.h" +#include "distributed/resource_lock.h" #include "distributed/version_compat.h" #include "distributed/worker_manager.h" -#include "lib/stringinfo.h" -#include "miscadmin.h" -#include "nodes/parsenodes.h" -#include "parser/parse_utilcmd.h" -#include "storage/lmgr.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/inval.h" -#include "utils/lsyscache.h" -#include "utils/syscache.h" + +#if PG_VERSION_NUM >= PG_VERSION_16 +#include "catalog/pg_namespace.h" +#endif /* Local functions forward declarations for helper functions */ @@ -180,6 +184,8 @@ PreprocessIndexStmt(Node *node, const char *createIndexCommand, return NIL; } + EnsureCoordinator(); + if (createIndexStatement->idxname == NULL) { /* @@ -487,6 +493,7 @@ GenerateCreateIndexDDLJob(IndexStmt *createIndexStatement, const char *createInd ddlJob->startNewTransaction = createIndexStatement->concurrent; ddlJob->metadataSyncCommand = createIndexCommand; ddlJob->taskList = CreateIndexTaskList(createIndexStatement); + ddlJob->warnForPartialFailure = true; return ddlJob; } @@ -646,6 +653,7 @@ PreprocessReindexStmt(Node *node, const char *reindexCommand, "concurrently"); ddlJob->metadataSyncCommand = reindexCommand; ddlJob->taskList = CreateReindexTaskList(relationId, reindexStatement); + ddlJob->warnForPartialFailure = true; ddlJobs = list_make1(ddlJob); } @@ -774,6 +782,7 @@ PreprocessDropIndexStmt(Node *node, const char *dropIndexCommand, ddlJob->metadataSyncCommand = dropIndexCommand; ddlJob->taskList = DropIndexTaskList(distributedRelationId, distributedIndexId, dropIndexStatement); + ddlJob->warnForPartialFailure = true; ddlJobs = list_make1(ddlJob); } @@ -938,7 +947,7 @@ CreateIndexTaskList(IndexStmt *indexStmt) task->dependentTaskList = NULL; task->anchorShardId = shardId; task->taskPlacementList = ActiveShardPlacementList(shardId); - task->cannotBeExecutedInTransction = indexStmt->concurrent; + task->cannotBeExecutedInTransaction = indexStmt->concurrent; taskList = lappend(taskList, task); @@ -983,7 +992,7 @@ CreateReindexTaskList(Oid relationId, ReindexStmt *reindexStmt) task->dependentTaskList = NULL; task->anchorShardId = shardId; task->taskPlacementList = ActiveShardPlacementList(shardId); - task->cannotBeExecutedInTransction = + task->cannotBeExecutedInTransaction = IsReindexWithParam_compat(reindexStmt, "concurrently"); taskList = lappend(taskList, task); @@ -1309,7 +1318,7 @@ DropIndexTaskList(Oid relationId, Oid indexId, DropStmt *dropStmt) task->dependentTaskList = NULL; task->anchorShardId = shardId; task->taskPlacementList = ActiveShardPlacementList(shardId); - task->cannotBeExecutedInTransction = dropStmt->concurrent; + task->cannotBeExecutedInTransaction = dropStmt->concurrent; taskList = lappend(taskList, task); diff --git a/src/backend/distributed/commands/local_multi_copy.c b/src/backend/distributed/commands/local_multi_copy.c index 7dbf0ae3626..13ff8835336 100644 --- a/src/backend/distributed/commands/local_multi_copy.c +++ b/src/backend/distributed/commands/local_multi_copy.c @@ -19,24 +19,27 @@ *------------------------------------------------------------------------- */ +#include /* for htons */ + #include "postgres.h" -#include "commands/copy.h" + +#include "safe_lib.h" + #include 
"catalog/namespace.h" +#include "commands/copy.h" +#include "nodes/makefuncs.h" #include "parser/parse_relation.h" #include "utils/lsyscache.h" -#include "nodes/makefuncs.h" -#include "safe_lib.h" -#include /* for htons */ -#include "distributed/transmit.h" #include "distributed/commands/multi_copy.h" #include "distributed/intermediate_results.h" -#include "distributed/multi_partitioning_utils.h" #include "distributed/local_executor.h" #include "distributed/local_multi_copy.h" +#include "distributed/multi_partitioning_utils.h" +#include "distributed/replication_origin_session_utils.h" #include "distributed/shard_utils.h" +#include "distributed/transmit.h" #include "distributed/version_compat.h" -#include "distributed/replication_origin_session_utils.h" /* managed via GUC, default is 512 kB */ int LocalCopyFlushThresholdByte = 512 * 1024; diff --git a/src/backend/distributed/commands/multi_copy.c b/src/backend/distributed/commands/multi_copy.c index a684d06cc3f..23847ac0150 100644 --- a/src/backend/distributed/commands/multi_copy.c +++ b/src/backend/distributed/commands/multi_copy.c @@ -43,19 +43,18 @@ *------------------------------------------------------------------------- */ -#include "postgres.h" -#include "libpq-fe.h" -#include "miscadmin.h" -#include "pgstat.h" - #include /* for htons */ #include /* for htons */ #include -#include "distributed/pg_version_constants.h" +#include "postgres.h" + +#include "libpq-fe.h" +#include "miscadmin.h" +#include "pgstat.h" -#include "access/htup_details.h" #include "access/htup.h" +#include "access/htup_details.h" #include "access/sdir.h" #include "access/sysattr.h" #include "access/xact.h" @@ -65,54 +64,55 @@ #include "commands/copy.h" #include "commands/defrem.h" #include "commands/progress.h" +#include "executor/executor.h" +#include "foreign/foreign.h" +#include "libpq/libpq.h" +#include "libpq/pqformat.h" +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" +#include "parser/parse_func.h" +#include "parser/parse_type.h" +#include "tcop/cmdtag.h" +#include "tsearch/ts_locale.h" +#include "utils/builtins.h" +#include "utils/lsyscache.h" +#include "utils/memutils.h" +#include "utils/rel.h" +#include "utils/syscache.h" + +#include "pg_version_constants.h" + #include "distributed/citus_safe_lib.h" #include "distributed/commands/multi_copy.h" #include "distributed/commands/utility_hook.h" +#include "distributed/coordinator_protocol.h" +#include "distributed/hash_helpers.h" #include "distributed/intermediate_results.h" #include "distributed/listutils.h" #include "distributed/local_executor.h" +#include "distributed/local_multi_copy.h" +#include "distributed/locally_reserved_shared_connections.h" #include "distributed/log_utils.h" -#include "distributed/coordinator_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/multi_executor.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/multi_physical_planner.h" #include "distributed/multi_router_planner.h" -#include "distributed/multi_executor.h" -#include "distributed/listutils.h" -#include "distributed/locally_reserved_shared_connections.h" #include "distributed/placement_connection.h" #include "distributed/relation_access_tracking.h" -#if PG_VERSION_NUM >= PG_VERSION_16 -#include "distributed/relation_utils.h" -#endif #include "distributed/remote_commands.h" #include "distributed/remote_transaction.h" #include "distributed/replication_origin_session_utils.h" #include "distributed/resource_lock.h" #include "distributed/shard_pruning.h" #include 
"distributed/shared_connection_stats.h" +#include "distributed/transmit.h" #include "distributed/version_compat.h" #include "distributed/worker_protocol.h" -#include "distributed/local_multi_copy.h" -#include "distributed/hash_helpers.h" -#include "distributed/transmit.h" -#include "executor/executor.h" -#include "foreign/foreign.h" -#include "libpq/libpq.h" -#include "libpq/pqformat.h" -#include "nodes/makefuncs.h" -#include "nodes/nodeFuncs.h" -#include "parser/parse_func.h" -#include "parser/parse_type.h" -#include "tcop/cmdtag.h" -#include "tsearch/ts_locale.h" -#include "utils/builtins.h" -#include "utils/lsyscache.h" -#include "utils/rel.h" -#include "utils/syscache.h" -#include "utils/memutils.h" +#if PG_VERSION_NUM >= PG_VERSION_16 +#include "distributed/relation_utils.h" +#endif /* constant used in binary protocol */ @@ -2547,12 +2547,8 @@ ShardIdForTuple(CitusCopyDestReceiver *copyDest, Datum *columnValues, bool *colu if (columnNulls[partitionColumnIndex]) { - Oid relationId = copyDest->distributedRelationId; - char *relationName = get_rel_name(relationId); - Oid schemaOid = get_rel_namespace(relationId); - char *schemaName = get_namespace_name(schemaOid); - char *qualifiedTableName = quote_qualified_identifier(schemaName, - relationName); + char *qualifiedTableName = generate_qualified_relation_name( + copyDest->distributedRelationId); ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("the partition column of table %s cannot be NULL", @@ -2667,7 +2663,6 @@ CreateLocalColocatedIntermediateFile(CitusCopyDestReceiver *copyDest, CreateIntermediateResultsDirectory(); const int fileFlags = (O_CREAT | O_RDWR | O_TRUNC); - const int fileMode = (S_IRUSR | S_IWUSR); StringInfo filePath = makeStringInfo(); appendStringInfo(filePath, "%s_%ld", copyDest->colocatedIntermediateResultIdPrefix, @@ -2675,7 +2670,7 @@ CreateLocalColocatedIntermediateFile(CitusCopyDestReceiver *copyDest, const char *fileName = QueryResultFileName(filePath->data); shardState->fileDest = - FileCompatFromFileStart(FileOpenForTransmit(fileName, fileFlags, fileMode)); + FileCompatFromFileStart(FileOpenForTransmit(fileName, fileFlags)); CopyOutState localFileCopyOutState = shardState->copyOutState; bool isBinaryCopy = localFileCopyOutState->binary; diff --git a/src/backend/distributed/commands/non_main_db_distribute_object_ops.c b/src/backend/distributed/commands/non_main_db_distribute_object_ops.c new file mode 100644 index 00000000000..b777936d3e4 --- /dev/null +++ b/src/backend/distributed/commands/non_main_db_distribute_object_ops.c @@ -0,0 +1,351 @@ +/*------------------------------------------------------------------------- + * + * non_main_db_distribute_object_ops.c + * + * Routines to support node-wide object management commands from non-main + * databases. + * + * RunPreprocessNonMainDBCommand and RunPostprocessNonMainDBCommand are + * the entrypoints for this module. These functions are called from + * utility_hook.c to support some of the node-wide object management + * commands from non-main databases. + * + * To add support for a new command type, one needs to define a new + * NonMainDbDistributeObjectOps object within OperationArray. Also, if + * the command requires marking or unmarking some objects as distributed, + * the necessary operations can be implemented in + * RunPreprocessNonMainDBCommand and RunPostprocessNonMainDBCommand. 
+ * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "access/xact.h" +#include "catalog/pg_authid_d.h" +#include "nodes/nodes.h" +#include "nodes/parsenodes.h" +#include "utils/builtins.h" + +#include "distributed/commands.h" +#include "distributed/deparser.h" +#include "distributed/listutils.h" +#include "distributed/metadata_cache.h" +#include "distributed/remote_transaction.h" + + +#define EXECUTE_COMMAND_ON_REMOTE_NODES_AS_USER \ + "SELECT citus_internal.execute_command_on_remote_nodes_as_user(%s, %s)" +#define START_MANAGEMENT_TRANSACTION \ + "SELECT citus_internal.start_management_transaction('%lu')" +#define MARK_OBJECT_DISTRIBUTED \ + "SELECT citus_internal.mark_object_distributed(%d, %s, %d, %s)" +#define UNMARK_OBJECT_DISTRIBUTED \ + "SELECT pg_catalog.citus_unmark_object_distributed(%d, %d, %d, %s)" + + +/* + * NonMainDbDistributeObjectOps contains the necessary callbacks / flags to + * support node-wide object management commands from non-main databases. + * + * cannotBeExecutedInTransaction: + * Indicates whether the statement cannot be executed in a transaction. If + * this is set to true, the statement will be executed directly on the main + * database because there are no transactional visibility issues for such + * commands. + * + * checkSupportedObjectType: + * Callback function that checks whether the type of the object referred to by + * the given statement is supported. Can be NULL if not applicable for the + * statement type. + */ +typedef struct NonMainDbDistributeObjectOps +{ + bool cannotBeExecutedInTransaction; + bool (*checkSupportedObjectType)(Node *parsetree); +} NonMainDbDistributeObjectOps; + + +/* + * checkSupportedObjectType callbacks for OperationArray. + */ +static bool CreateDbStmtCheckSupportedObjectType(Node *node); +static bool DropDbStmtCheckSupportedObjectType(Node *node); +static bool GrantStmtCheckSupportedObjectType(Node *node); +static bool SecLabelStmtCheckSupportedObjectType(Node *node); + +/* + * OperationArray that holds NonMainDbDistributeObjectOps for different command types.
+ */ +static const NonMainDbDistributeObjectOps *const OperationArray[] = { + [T_CreateRoleStmt] = &(NonMainDbDistributeObjectOps) { + .cannotBeExecutedInTransaction = false, + .checkSupportedObjectType = NULL + }, + [T_DropRoleStmt] = &(NonMainDbDistributeObjectOps) { + .cannotBeExecutedInTransaction = false, + .checkSupportedObjectType = NULL + }, + [T_AlterRoleStmt] = &(NonMainDbDistributeObjectOps) { + .cannotBeExecutedInTransaction = false, + .checkSupportedObjectType = NULL + }, + [T_GrantRoleStmt] = &(NonMainDbDistributeObjectOps) { + .cannotBeExecutedInTransaction = false, + .checkSupportedObjectType = NULL + }, + [T_CreatedbStmt] = &(NonMainDbDistributeObjectOps) { + .cannotBeExecutedInTransaction = true, + .checkSupportedObjectType = CreateDbStmtCheckSupportedObjectType + }, + [T_DropdbStmt] = &(NonMainDbDistributeObjectOps) { + .cannotBeExecutedInTransaction = true, + .checkSupportedObjectType = DropDbStmtCheckSupportedObjectType + }, + [T_GrantStmt] = &(NonMainDbDistributeObjectOps) { + .cannotBeExecutedInTransaction = false, + .checkSupportedObjectType = GrantStmtCheckSupportedObjectType + }, + [T_SecLabelStmt] = &(NonMainDbDistributeObjectOps) { + .cannotBeExecutedInTransaction = false, + .checkSupportedObjectType = SecLabelStmtCheckSupportedObjectType + }, +}; + + +/* other static function declarations */ +const NonMainDbDistributeObjectOps * GetNonMainDbDistributeObjectOps(Node *parsetree); +static void CreateRoleStmtMarkDistGloballyOnMainDbs(CreateRoleStmt *createRoleStmt); +static void DropRoleStmtUnmarkDistOnLocalMainDb(DropRoleStmt *dropRoleStmt); +static void MarkObjectDistributedGloballyOnMainDbs(Oid catalogRelId, Oid objectId, + char *objectName); +static void UnmarkObjectDistributedOnLocalMainDb(uint16 catalogRelId, Oid objectId); + + +/* + * RunPreprocessNonMainDBCommand runs the necessary commands for a query, in main + * database before query is run on the local node with PrevProcessUtility. + * + * Returns true if previous utility hook needs to be skipped after completing + * preprocess phase. + */ +bool +RunPreprocessNonMainDBCommand(Node *parsetree) +{ + if (IsMainDB) + { + return false; + } + + const NonMainDbDistributeObjectOps *ops = GetNonMainDbDistributeObjectOps(parsetree); + if (!ops) + { + return false; + } + + char *queryString = DeparseTreeNode(parsetree); + + /* + * For the commands that cannot be executed in a transaction, there are no + * transactional visibility issues. We directly route them to main database + * so that we only have to consider one code-path for such commands. + */ + if (ops->cannotBeExecutedInTransaction) + { + IsMainDBCommandInXact = false; + RunCitusMainDBQuery((char *) queryString); + return true; + } + + IsMainDBCommandInXact = true; + + StringInfo mainDBQuery = makeStringInfo(); + appendStringInfo(mainDBQuery, + START_MANAGEMENT_TRANSACTION, + GetCurrentFullTransactionId().value); + RunCitusMainDBQuery(mainDBQuery->data); + + mainDBQuery = makeStringInfo(); + appendStringInfo(mainDBQuery, + EXECUTE_COMMAND_ON_REMOTE_NODES_AS_USER, + quote_literal_cstr(queryString), + quote_literal_cstr(CurrentUserName())); + RunCitusMainDBQuery(mainDBQuery->data); + + if (IsA(parsetree, DropRoleStmt)) + { + DropRoleStmtUnmarkDistOnLocalMainDb((DropRoleStmt *) parsetree); + } + + return false; +} + + +/* + * RunPostprocessNonMainDBCommand runs the necessary commands for a query, in main + * database after query is run on the local node with PrevProcessUtility. 
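 *
 * [Editor's aside, not part of the original patch.] Together with
 * RunPreprocessNonMainDBCommand above, these two entry points imply roughly the
 * following call pattern in utility_hook.c, where a true return value from the
 * preprocess step means the command was already routed to the main database:
 *
 *     if (RunPreprocessNonMainDBCommand(parsetree))
 *     {
 *         return;
 *     }
 *     PrevProcessUtility( ... );
 *     RunPostprocessNonMainDBCommand(parsetree);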
+ */ +void +RunPostprocessNonMainDBCommand(Node *parsetree) +{ + if (IsMainDB || !GetNonMainDbDistributeObjectOps(parsetree)) + { + return; + } + + if (IsA(parsetree, CreateRoleStmt)) + { + CreateRoleStmtMarkDistGloballyOnMainDbs((CreateRoleStmt *) parsetree); + } +} + + +/* + * GetNonMainDbDistributeObjectOps returns the NonMainDbDistributeObjectOps for given + * command if it's node-wide object management command that's supported from non-main + * databases. + */ +const NonMainDbDistributeObjectOps * +GetNonMainDbDistributeObjectOps(Node *parsetree) +{ + NodeTag tag = nodeTag(parsetree); + if (tag >= lengthof(OperationArray)) + { + return NULL; + } + + const NonMainDbDistributeObjectOps *ops = OperationArray[tag]; + + if (ops == NULL) + { + return NULL; + } + + if (!ops->checkSupportedObjectType || + ops->checkSupportedObjectType(parsetree)) + { + return ops; + } + + return NULL; +} + + +/* + * CreateRoleStmtMarkDistGloballyOnMainDbs marks the role as + * distributed on all main databases globally. + */ +static void +CreateRoleStmtMarkDistGloballyOnMainDbs(CreateRoleStmt *createRoleStmt) +{ + /* object must exist as we've just created it */ + bool missingOk = false; + Oid roleId = get_role_oid(createRoleStmt->role, missingOk); + + MarkObjectDistributedGloballyOnMainDbs(AuthIdRelationId, roleId, + createRoleStmt->role); +} + + +/* + * DropRoleStmtUnmarkDistOnLocalMainDb unmarks the roles as + * distributed on the local main database. + */ +static void +DropRoleStmtUnmarkDistOnLocalMainDb(DropRoleStmt *dropRoleStmt) +{ + RoleSpec *roleSpec = NULL; + foreach_ptr(roleSpec, dropRoleStmt->roles) + { + Oid roleOid = get_role_oid(roleSpec->rolename, + dropRoleStmt->missing_ok); + if (roleOid == InvalidOid) + { + continue; + } + + UnmarkObjectDistributedOnLocalMainDb(AuthIdRelationId, roleOid); + } +} + + +/* + * MarkObjectDistributedGloballyOnMainDbs marks an object as + * distributed on all main databases globally. + */ +static void +MarkObjectDistributedGloballyOnMainDbs(Oid catalogRelId, Oid objectId, char *objectName) +{ + StringInfo mainDBQuery = makeStringInfo(); + appendStringInfo(mainDBQuery, + MARK_OBJECT_DISTRIBUTED, + catalogRelId, + quote_literal_cstr(objectName), + objectId, + quote_literal_cstr(CurrentUserName())); + RunCitusMainDBQuery(mainDBQuery->data); +} + + +/* + * UnmarkObjectDistributedOnLocalMainDb unmarks an object as + * distributed on the local main database. + */ +static void +UnmarkObjectDistributedOnLocalMainDb(uint16 catalogRelId, Oid objectId) +{ + const int subObjectId = 0; + const char *checkObjectExistence = "false"; + + StringInfo query = makeStringInfo(); + appendStringInfo(query, + UNMARK_OBJECT_DISTRIBUTED, + catalogRelId, objectId, + subObjectId, checkObjectExistence); + RunCitusMainDBQuery(query->data); +} + + +/* + * checkSupportedObjectTypes callbacks for OperationArray lie below. + */ +static bool +CreateDbStmtCheckSupportedObjectType(Node *node) +{ + /* + * We don't try to send the query to the main database if the CREATE + * DATABASE command is for the main database itself, this is a very + * rare case but it's exercised by our test suite. + */ + CreatedbStmt *stmt = castNode(CreatedbStmt, node); + return strcmp(stmt->dbname, MainDb) != 0; +} + + +static bool +DropDbStmtCheckSupportedObjectType(Node *node) +{ + /* + * We don't try to send the query to the main database if the DROP + * DATABASE command is for the main database itself, this is a very + * rare case but it's exercised by our test suite. 
+ */ + DropdbStmt *stmt = castNode(DropdbStmt, node); + return strcmp(stmt->dbname, MainDb) != 0; +} + + +static bool +GrantStmtCheckSupportedObjectType(Node *node) +{ + GrantStmt *stmt = castNode(GrantStmt, node); + return stmt->objtype == OBJECT_DATABASE; +} + + +static bool +SecLabelStmtCheckSupportedObjectType(Node *node) +{ + SecLabelStmt *stmt = castNode(SecLabelStmt, node); + return stmt->objtype == OBJECT_ROLE; +} diff --git a/src/backend/distributed/commands/owned.c b/src/backend/distributed/commands/owned.c index c8f6a4bbe09..30374ce26a9 100644 --- a/src/backend/distributed/commands/owned.c +++ b/src/backend/distributed/commands/owned.c @@ -10,42 +10,46 @@ #include "postgres.h" +#include "miscadmin.h" + +#include "access/genam.h" #include "access/heapam.h" #include "access/htup_details.h" -#include "access/genam.h" #include "access/table.h" #include "access/xact.h" #include "catalog/catalog.h" +#include "catalog/objectaddress.h" #include "catalog/pg_auth_members.h" #include "catalog/pg_authid.h" #include "catalog/pg_db_role_setting.h" #include "catalog/pg_type.h" -#include "catalog/objectaddress.h" #include "commands/dbcommands.h" +#include "nodes/makefuncs.h" +#include "nodes/parsenodes.h" +#include "nodes/pg_list.h" +#include "parser/scansup.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/rel.h" +#include "utils/syscache.h" +#include "utils/varlena.h" + #include "distributed/citus_ruleutils.h" #include "distributed/citus_safe_lib.h" #include "distributed/commands.h" #include "distributed/commands/utility_hook.h" +#include "distributed/coordinator_protocol.h" #include "distributed/deparser.h" #include "distributed/listutils.h" -#include "distributed/coordinator_protocol.h" #include "distributed/metadata/distobject.h" #include "distributed/metadata_sync.h" -#include "distributed/metadata/distobject.h" #include "distributed/multi_executor.h" #include "distributed/relation_access_tracking.h" #include "distributed/version_compat.h" #include "distributed/worker_transaction.h" -#include "miscadmin.h" -#include "nodes/makefuncs.h" -#include "nodes/parsenodes.h" -#include "nodes/pg_list.h" -#include "parser/scansup.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/rel.h" -#include "utils/varlena.h" -#include "utils/syscache.h" + + +static ObjectAddress * GetNewRoleAddress(ReassignOwnedStmt *stmt); /* * PreprocessDropOwnedStmt finds the distributed role out of the ones @@ -88,3 +92,81 @@ PreprocessDropOwnedStmt(Node *node, const char *queryString, return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); } + + +/* + * PostprocessReassignOwnedStmt takes a Node pointer representing a REASSIGN + * OWNED statement and performs any necessary post-processing after the statement + * has been executed locally. + * + * We filter out local roles in OWNED BY clause before deparsing the command, + * meaning that we skip reassigning what is owned by local roles. However, + * if the role specified in TO clause is local, we automatically distribute + * it before deparsing the command. 
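 *
 * [Editor's illustration, not part of the original patch; the role names are
 * made up.] For a command such as
 *
 *     REASSIGN OWNED BY distributed_role, local_role TO target_role;
 *
 * only "REASSIGN OWNED BY distributed_role TO target_role" is deparsed and sent
 * to the other nodes, and target_role is distributed first if it is not already.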
+ */ +List * +PostprocessReassignOwnedStmt(Node *node, const char *queryString) +{ + ReassignOwnedStmt *stmt = castNode(ReassignOwnedStmt, node); + List *allReassignRoles = stmt->roles; + + List *distributedReassignRoles = FilterDistributedRoles(allReassignRoles); + + if (list_length(distributedReassignRoles) <= 0) + { + return NIL; + } + + if (!ShouldPropagate()) + { + return NIL; + } + + EnsureCoordinator(); + + stmt->roles = distributedReassignRoles; + char *sql = DeparseTreeNode((Node *) stmt); + stmt->roles = allReassignRoles; + + ObjectAddress *newRoleAddress = GetNewRoleAddress(stmt); + + /* + * We temporarily enable create / alter role propagation to properly + * propagate the role specified in TO clause. + */ + int saveNestLevel = NewGUCNestLevel(); + set_config_option("citus.enable_create_role_propagation", "on", + (superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION, + GUC_ACTION_LOCAL, true, 0, false); + set_config_option("citus.enable_alter_role_propagation", "on", + (superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION, + GUC_ACTION_LOCAL, true, 0, false); + + set_config_option("citus.enable_alter_role_set_propagation", "on", + (superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION, + GUC_ACTION_LOCAL, true, 0, false); + + EnsureObjectAndDependenciesExistOnAllNodes(newRoleAddress); + + /* rollback GUCs to the state before this session */ + AtEOXact_GUC(true, saveNestLevel); + + List *commands = list_make3(DISABLE_DDL_PROPAGATION, + sql, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); +} + + +/* + * GetNewRoleAddress returns the ObjectAddress of the new role + */ +static ObjectAddress * +GetNewRoleAddress(ReassignOwnedStmt *stmt) +{ + Oid roleOid = get_role_oid(stmt->newrole->rolename, false); + ObjectAddress *address = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*address, AuthIdRelationId, roleOid); + return address; +} diff --git a/src/backend/distributed/commands/policy.c b/src/backend/distributed/commands/policy.c index 0d66e150e86..a2a926b6677 100644 --- a/src/backend/distributed/commands/policy.c +++ b/src/backend/distributed/commands/policy.c @@ -10,15 +10,10 @@ */ #include "postgres.h" +#include "miscadmin.h" + #include "catalog/namespace.h" #include "commands/policy.h" -#include "distributed/citus_ruleutils.h" -#include "distributed/commands.h" -#include "distributed/commands/utility_hook.h" -#include "distributed/coordinator_protocol.h" -#include "distributed/listutils.h" -#include "distributed/metadata_cache.h" -#include "miscadmin.h" #include "nodes/makefuncs.h" #include "parser/parse_clause.h" #include "parser/parse_relation.h" @@ -27,6 +22,13 @@ #include "utils/builtins.h" #include "utils/ruleutils.h" +#include "distributed/citus_ruleutils.h" +#include "distributed/commands.h" +#include "distributed/commands/utility_hook.h" +#include "distributed/coordinator_protocol.h" +#include "distributed/listutils.h" +#include "distributed/metadata_cache.h" + static const char * unparse_policy_command(const char aclchar); static RowSecurityPolicy * GetPolicyByName(Oid relationId, const char *policyName); diff --git a/src/backend/distributed/commands/publication.c b/src/backend/distributed/commands/publication.c index 581f7f874f1..c1cfd5e7732 100644 --- a/src/backend/distributed/commands/publication.c +++ b/src/backend/distributed/commands/publication.c @@ -9,18 +9,11 @@ */ #include "postgres.h" + #include "miscadmin.h" #include "catalog/pg_publication.h" #include "catalog/pg_publication_rel.h" -#include 
"distributed/commands.h" -#include "distributed/deparser.h" -#include "distributed/listutils.h" -#include "distributed/metadata_utility.h" -#include "distributed/metadata_sync.h" -#include "distributed/metadata/distobject.h" -#include "distributed/reference_table_utils.h" -#include "distributed/worker_create_or_replace.h" #include "nodes/makefuncs.h" #include "nodes/parsenodes.h" #include "utils/builtins.h" @@ -29,6 +22,15 @@ #include "pg_version_compat.h" +#include "distributed/commands.h" +#include "distributed/deparser.h" +#include "distributed/listutils.h" +#include "distributed/metadata/distobject.h" +#include "distributed/metadata_sync.h" +#include "distributed/metadata_utility.h" +#include "distributed/reference_table_utils.h" +#include "distributed/worker_create_or_replace.h" + static CreatePublicationStmt * BuildCreatePublicationStmt(Oid publicationId); #if (PG_VERSION_NUM >= PG_VERSION_15) @@ -175,7 +177,6 @@ BuildCreatePublicationStmt(Oid publicationId) PUBLICATION_PART_ROOT : PUBLICATION_PART_LEAF); Oid relationId = InvalidOid; - int citusTableCount PG_USED_FOR_ASSERTS_ONLY = 0; /* mainly for consistent ordering in test output */ relationIds = SortList(relationIds, CompareOids); @@ -199,11 +200,6 @@ BuildCreatePublicationStmt(Oid publicationId) createPubStmt->tables = lappend(createPubStmt->tables, rangeVar); #endif - - if (IsCitusTable(relationId)) - { - citusTableCount++; - } } /* WITH (publish_via_partition_root = true) option */ diff --git a/src/backend/distributed/commands/rename.c b/src/backend/distributed/commands/rename.c index 5e313d68c3a..362fc57bb1b 100644 --- a/src/backend/distributed/commands/rename.c +++ b/src/backend/distributed/commands/rename.c @@ -12,11 +12,12 @@ #include "catalog/index.h" #include "catalog/namespace.h" +#include "nodes/parsenodes.h" +#include "utils/lsyscache.h" + #include "distributed/commands.h" #include "distributed/commands/utility_hook.h" #include "distributed/metadata_cache.h" -#include "nodes/parsenodes.h" -#include "utils/lsyscache.h" /* diff --git a/src/backend/distributed/commands/role.c b/src/backend/distributed/commands/role.c index 754be1a2ba7..7f5f697f2b1 100644 --- a/src/backend/distributed/commands/role.c +++ b/src/backend/distributed/commands/role.c @@ -10,49 +10,51 @@ #include "postgres.h" -#include "pg_version_compat.h" - -#include "distributed/pg_version_constants.h" +#include "miscadmin.h" +#include "access/genam.h" #include "access/heapam.h" #include "access/htup_details.h" -#include "access/genam.h" #include "access/table.h" #include "access/xact.h" #include "catalog/catalog.h" +#include "catalog/objectaddress.h" #include "catalog/pg_auth_members.h" #include "catalog/pg_authid.h" #include "catalog/pg_db_role_setting.h" +#include "catalog/pg_shseclabel.h" #include "catalog/pg_type.h" -#include "catalog/objectaddress.h" #include "commands/dbcommands.h" +#include "nodes/makefuncs.h" +#include "nodes/parsenodes.h" +#include "nodes/pg_list.h" +#include "parser/scansup.h" +#include "utils/acl.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/guc.h" +#include "utils/guc_tables.h" +#include "utils/rel.h" +#include "utils/syscache.h" +#include "utils/varlena.h" + +#include "pg_version_compat.h" +#include "pg_version_constants.h" + #include "distributed/citus_ruleutils.h" #include "distributed/citus_safe_lib.h" #include "distributed/commands.h" #include "distributed/commands/utility_hook.h" +#include "distributed/comment.h" +#include "distributed/coordinator_protocol.h" #include 
"distributed/deparser.h" #include "distributed/listutils.h" -#include "distributed/coordinator_protocol.h" #include "distributed/metadata/distobject.h" #include "distributed/metadata_sync.h" -#include "distributed/metadata/distobject.h" #include "distributed/multi_executor.h" #include "distributed/relation_access_tracking.h" #include "distributed/version_compat.h" #include "distributed/worker_transaction.h" -#include "miscadmin.h" -#include "nodes/makefuncs.h" -#include "nodes/parsenodes.h" -#include "nodes/pg_list.h" -#include "parser/scansup.h" -#include "utils/acl.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/guc_tables.h" -#include "utils/guc.h" -#include "utils/rel.h" -#include "utils/varlena.h" -#include "utils/syscache.h" static const char * ExtractEncryptedPassword(Oid roleOid); static const char * CreateAlterRoleIfExistsCommand(AlterRoleStmt *stmt); @@ -65,6 +67,7 @@ static DefElem * makeDefElemBool(char *name, bool value); static List * GenerateRoleOptionsList(HeapTuple tuple); static List * GenerateGrantRoleStmtsFromOptions(RoleSpec *roleSpec, List *options); static List * GenerateGrantRoleStmtsOfRole(Oid roleid); +static List * GenerateSecLabelOnRoleStmts(Oid roleid, char *rolename); static void EnsureSequentialModeForRoleDDL(void); static char * GetRoleNameFromDbRoleSetting(HeapTuple tuple, @@ -78,7 +81,6 @@ static const char * WrapQueryInAlterRoleIfExistsCall(const char *query, RoleSpec static VariableSetStmt * MakeVariableSetStmt(const char *config); static int ConfigGenericNameCompare(const void *lhs, const void *rhs); static List * RoleSpecToObjectAddress(RoleSpec *role, bool missing_ok); -static bool IsGrantRoleWithInheritOrSetOption(GrantRoleStmt *stmt); /* controlled via GUC */ bool EnableCreateRolePropagation = true; @@ -156,7 +158,7 @@ PostprocessAlterRoleStmt(Node *node, const char *queryString) return NIL; } - EnsureCoordinator(); + EnsurePropagationToCoordinator(); AlterRoleStmt *stmt = castNode(AlterRoleStmt, node); @@ -185,7 +187,7 @@ PostprocessAlterRoleStmt(Node *node, const char *queryString) (void *) CreateAlterRoleIfExistsCommand(stmt), ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); + return NodeDDLTaskList(REMOTE_NODES, commands); } @@ -231,7 +233,7 @@ PreprocessAlterRoleSetStmt(Node *node, const char *queryString, return NIL; } - EnsureCoordinator(); + EnsurePropagationToCoordinator(); QualifyTreeNode((Node *) stmt); const char *sql = DeparseTreeNode((Node *) stmt); @@ -240,7 +242,7 @@ PreprocessAlterRoleSetStmt(Node *node, const char *queryString, (void *) sql, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_NODES, commandList); + return NodeDDLTaskList(REMOTE_NODES, commandList); } @@ -489,18 +491,17 @@ GenerateRoleOptionsList(HeapTuple tuple) options = lappend(options, makeDefElem("password", NULL, -1)); } - /* load valid unitl data from the heap tuple, use default of infinity if not set */ + /* load valid until data from the heap tuple */ Datum rolValidUntilDatum = SysCacheGetAttr(AUTHNAME, tuple, Anum_pg_authid_rolvaliduntil, &isNull); - char *rolValidUntil = "infinity"; if (!isNull) { - rolValidUntil = pstrdup((char *) timestamptz_to_str(rolValidUntilDatum)); - } + char *rolValidUntil = pstrdup((char *) timestamptz_to_str(rolValidUntilDatum)); - Node *validUntilStringNode = (Node *) makeString(rolValidUntil); - DefElem *validUntilOption = makeDefElem("validUntil", validUntilStringNode, -1); - options = lappend(options, validUntilOption); + Node 
*validUntilStringNode = (Node *) makeString(rolValidUntil); + DefElem *validUntilOption = makeDefElem("validUntil", validUntilStringNode, -1); + options = lappend(options, validUntilOption); + } return options; } @@ -515,13 +516,14 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid) { HeapTuple roleTuple = SearchSysCache1(AUTHOID, ObjectIdGetDatum(roleOid)); Form_pg_authid role = ((Form_pg_authid) GETSTRUCT(roleTuple)); + char *rolename = pstrdup(NameStr(role->rolname)); CreateRoleStmt *createRoleStmt = NULL; if (EnableCreateRolePropagation) { createRoleStmt = makeNode(CreateRoleStmt); createRoleStmt->stmt_type = ROLESTMT_ROLE; - createRoleStmt->role = pstrdup(NameStr(role->rolname)); + createRoleStmt->role = rolename; createRoleStmt->options = GenerateRoleOptionsList(roleTuple); } @@ -532,7 +534,7 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid) alterRoleStmt->role = makeNode(RoleSpec); alterRoleStmt->role->roletype = ROLESPEC_CSTRING; alterRoleStmt->role->location = -1; - alterRoleStmt->role->rolename = pstrdup(NameStr(role->rolname)); + alterRoleStmt->role->rolename = rolename; alterRoleStmt->action = 1; alterRoleStmt->options = GenerateRoleOptionsList(roleTuple); } @@ -544,7 +546,7 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid) { /* add a worker_create_or_alter_role command if any of them are set */ char *createOrAlterRoleQuery = CreateCreateOrAlterRoleCommand( - pstrdup(NameStr(role->rolname)), + rolename, createRoleStmt, alterRoleStmt); @@ -566,6 +568,31 @@ GenerateCreateOrAlterRoleCommand(Oid roleOid) { completeRoleList = lappend(completeRoleList, DeparseTreeNode(stmt)); } + + /* + * append SECURITY LABEL ON ROLE commands for this specific user + * When we propagate user creation, we also want to make sure that we propagate + * all the security labels it has been given. For this, we check pg_shseclabel + * for the ROLE entry corresponding to roleOid, and generate the relevant + * SecLabel stmts to be run in the new node. + */ + List *secLabelOnRoleStmts = GenerateSecLabelOnRoleStmts(roleOid, rolename); + stmt = NULL; + foreach_ptr(stmt, secLabelOnRoleStmts) + { + completeRoleList = lappend(completeRoleList, DeparseTreeNode(stmt)); + } + + /* + * append COMMENT ON ROLE commands for this specific user + * When we propagate user creation, we also want to make sure that we propagate + * all the comments it has been given. For this, we check pg_shdescription + * for the ROLE entry corresponding to roleOid, and generate the relevant + * Comment stmts to be run in the new node. 
+ */ + List *commentStmts = GetCommentPropagationCommands(AuthIdRelationId, roleOid, + rolename, OBJECT_ROLE); + completeRoleList = list_concat(completeRoleList, commentStmts); } return completeRoleList; @@ -858,6 +885,14 @@ GenerateGrantRoleStmtsOfRole(Oid roleid) { Form_pg_auth_members membership = (Form_pg_auth_members) GETSTRUCT(tuple); + ObjectAddress *roleAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*roleAddress, AuthIdRelationId, membership->grantor); + if (!IsAnyObjectDistributed(list_make1(roleAddress))) + { + /* we only need to propagate the grant if the grantor is distributed */ + continue; + } + GrantRoleStmt *grantRoleStmt = makeNode(GrantRoleStmt); grantRoleStmt->is_grant = true; @@ -873,13 +908,38 @@ GenerateGrantRoleStmtsOfRole(Oid roleid) granteeRole->rolename = GetUserNameFromId(membership->member, true); grantRoleStmt->grantee_roles = list_make1(granteeRole); - grantRoleStmt->grantor = NULL; + RoleSpec *grantorRole = makeNode(RoleSpec); + grantorRole->roletype = ROLESPEC_CSTRING; + grantorRole->location = -1; + grantorRole->rolename = GetUserNameFromId(membership->grantor, false); + grantRoleStmt->grantor = grantorRole; #if PG_VERSION_NUM >= PG_VERSION_16 + + /* inherit option is always included */ + DefElem *inherit_opt; + if (membership->inherit_option) + { + inherit_opt = makeDefElem("inherit", (Node *) makeBoolean(true), -1); + } + else + { + inherit_opt = makeDefElem("inherit", (Node *) makeBoolean(false), -1); + } + grantRoleStmt->opt = list_make1(inherit_opt); + + /* admin option is false by default, only include true case */ if (membership->admin_option) { - DefElem *opt = makeDefElem("admin", (Node *) makeBoolean(true), -1); - grantRoleStmt->opt = list_make1(opt); + DefElem *admin_opt = makeDefElem("admin", (Node *) makeBoolean(true), -1); + grantRoleStmt->opt = lappend(grantRoleStmt->opt, admin_opt); + } + + /* set option is true by default, only include false case */ + if (!membership->set_option) + { + DefElem *set_opt = makeDefElem("set", (Node *) makeBoolean(false), -1); + grantRoleStmt->opt = lappend(grantRoleStmt->opt, set_opt); } #else grantRoleStmt->admin_opt = membership->admin_option; @@ -895,6 +955,54 @@ GenerateGrantRoleStmtsOfRole(Oid roleid) } +/* + * GenerateSecLabelOnRoleStmts generates the SecLabelStmts for the role + * whose oid is roleid. + */ +static List * +GenerateSecLabelOnRoleStmts(Oid roleid, char *rolename) +{ + List *secLabelStmts = NIL; + + /* + * Note that roles are shared database objects, therefore their + * security labels are stored in pg_shseclabel instead of pg_seclabel. 
+ */ + Relation pg_shseclabel = table_open(SharedSecLabelRelationId, AccessShareLock); + ScanKeyData skey[1]; + ScanKeyInit(&skey[0], Anum_pg_shseclabel_objoid, BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(roleid)); + SysScanDesc scan = systable_beginscan(pg_shseclabel, SharedSecLabelObjectIndexId, + true, NULL, 1, &skey[0]); + + HeapTuple tuple = NULL; + while (HeapTupleIsValid(tuple = systable_getnext(scan))) + { + SecLabelStmt *secLabelStmt = makeNode(SecLabelStmt); + secLabelStmt->objtype = OBJECT_ROLE; + secLabelStmt->object = (Node *) makeString(pstrdup(rolename)); + + Datum datumArray[Natts_pg_shseclabel]; + bool isNullArray[Natts_pg_shseclabel]; + + heap_deform_tuple(tuple, RelationGetDescr(pg_shseclabel), datumArray, + isNullArray); + + secLabelStmt->provider = TextDatumGetCString( + datumArray[Anum_pg_shseclabel_provider - 1]); + secLabelStmt->label = TextDatumGetCString( + datumArray[Anum_pg_shseclabel_label - 1]); + + secLabelStmts = lappend(secLabelStmts, secLabelStmt); + } + + systable_endscan(scan); + table_close(pg_shseclabel, AccessShareLock); + + return secLabelStmts; +} + + /* * PreprocessCreateRoleStmt creates a worker_create_or_alter_role query for the * role that is being created. With that query we can create the role in the @@ -910,7 +1018,8 @@ PreprocessCreateRoleStmt(Node *node, const char *queryString, return NIL; } - EnsureCoordinator(); + EnsurePropagationToCoordinator(); + EnsureSequentialModeForRoleDDL(); LockRelationOid(DistNodeRelationId(), RowShareLock); @@ -945,7 +1054,7 @@ PreprocessCreateRoleStmt(Node *node, const char *queryString, commands = lappend(commands, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); + return NodeDDLTaskList(REMOTE_NODES, commands); } @@ -1041,7 +1150,8 @@ PreprocessDropRoleStmt(Node *node, const char *queryString, return NIL; } - EnsureCoordinator(); + EnsurePropagationToCoordinator(); + EnsureSequentialModeForRoleDDL(); @@ -1053,7 +1163,7 @@ PreprocessDropRoleStmt(Node *node, const char *queryString, sql, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); + return NodeDDLTaskList(REMOTE_NODES, commands); } @@ -1130,7 +1240,7 @@ PreprocessGrantRoleStmt(Node *node, const char *queryString, return NIL; } - EnsureCoordinator(); + EnsurePropagationToCoordinator(); GrantRoleStmt *stmt = castNode(GrantRoleStmt, node); List *allGranteeRoles = stmt->grantee_roles; @@ -1142,25 +1252,6 @@ PreprocessGrantRoleStmt(Node *node, const char *queryString, return NIL; } - if (IsGrantRoleWithInheritOrSetOption(stmt)) - { - if (EnableUnsupportedFeatureMessages) - { - ereport(NOTICE, (errmsg("not propagating GRANT/REVOKE commands with specified" - " INHERIT/SET options to worker nodes"), - errhint( - "Connect to worker nodes directly to manually run the same" - " GRANT/REVOKE command after disabling DDL propagation."))); - } - return NIL; - } - - /* - * Postgres don't seem to use the grantor. Even dropping the grantor doesn't - * seem to affect the membership. If this changes, we might need to add grantors - * to the dependency resolution too. For now we just don't propagate it. 
- */ - stmt->grantor = NULL; stmt->grantee_roles = distributedGranteeRoles; char *sql = DeparseTreeNode((Node *) stmt); stmt->grantee_roles = allGranteeRoles; @@ -1170,7 +1261,7 @@ PreprocessGrantRoleStmt(Node *node, const char *queryString, sql, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); + return NodeDDLTaskList(REMOTE_NODES, commands); } @@ -1181,11 +1272,13 @@ PreprocessGrantRoleStmt(Node *node, const char *queryString, List * PostprocessGrantRoleStmt(Node *node, const char *queryString) { - if (!EnableCreateRolePropagation || !IsCoordinator() || !ShouldPropagate()) + if (!EnableCreateRolePropagation || !ShouldPropagate()) { return NIL; } + EnsurePropagationToCoordinator(); + GrantRoleStmt *stmt = castNode(GrantRoleStmt, node); RoleSpec *role = NULL; @@ -1204,27 +1297,6 @@ PostprocessGrantRoleStmt(Node *node, const char *queryString) } -/* - * IsGrantRoleWithInheritOrSetOption returns true if the given - * GrantRoleStmt has inherit or set option specified in its options - */ -static bool -IsGrantRoleWithInheritOrSetOption(GrantRoleStmt *stmt) -{ -#if PG_VERSION_NUM >= PG_VERSION_16 - DefElem *opt = NULL; - foreach_ptr(opt, stmt->opt) - { - if (strcmp(opt->defname, "inherit") == 0 || strcmp(opt->defname, "set") == 0) - { - return true; - } - } -#endif - return false; -} - - /* * ConfigGenericNameCompare compares two config_generic structs based on their * name fields. If the name fields contain the same strings two structs are @@ -1333,7 +1405,7 @@ PreprocessAlterRoleRenameStmt(Node *node, const char *queryString, Assert(stmt->renameType == OBJECT_ROLE); - EnsureCoordinator(); + EnsurePropagationToCoordinator(); char *sql = DeparseTreeNode((Node *) stmt); @@ -1341,7 +1413,7 @@ PreprocessAlterRoleRenameStmt(Node *node, const char *queryString, (void *) sql, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_NODES, commands); + return NodeDDLTaskList(REMOTE_NODES, commands); } diff --git a/src/backend/distributed/commands/schema.c b/src/backend/distributed/commands/schema.c index d48a73647b8..7f79897faed 100644 --- a/src/backend/distributed/commands/schema.c +++ b/src/backend/distributed/commands/schema.c @@ -19,27 +19,27 @@ #include "catalog/namespace.h" #include "catalog/pg_class.h" #include "catalog/pg_namespace.h" +#include "nodes/parsenodes.h" +#include "utils/fmgroids.h" +#include "utils/lsyscache.h" +#include "utils/relcache.h" + #include "distributed/colocation_utils.h" #include "distributed/commands.h" -#include #include "distributed/commands/utility_hook.h" +#include "distributed/connection_management.h" #include "distributed/deparser.h" #include "distributed/listutils.h" #include "distributed/metadata/distobject.h" #include "distributed/metadata_cache.h" -#include +#include "distributed/metadata_sync.h" #include "distributed/multi_executor.h" #include "distributed/reference_table_utils.h" #include "distributed/relation_access_tracking.h" +#include "distributed/remote_commands.h" #include "distributed/resource_lock.h" -#include -#include #include "distributed/tenant_schema_metadata.h" #include "distributed/version_compat.h" -#include "nodes/parsenodes.h" -#include "utils/fmgroids.h" -#include "utils/lsyscache.h" -#include "utils/relcache.h" static List * GetObjectAddressBySchemaName(char *schemaName, bool missing_ok); diff --git a/src/backend/distributed/commands/schema_based_sharding.c b/src/backend/distributed/commands/schema_based_sharding.c index 65d2b812726..7cde9698295 100644 --- 
a/src/backend/distributed/commands/schema_based_sharding.c +++ b/src/backend/distributed/commands/schema_based_sharding.c @@ -7,27 +7,29 @@ */ #include "postgres.h" + #include "miscadmin.h" #include "access/genam.h" #include "catalog/catalog.h" #include "catalog/pg_namespace_d.h" #include "commands/extension.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" + #include "distributed/argutils.h" #include "distributed/backend_data.h" #include "distributed/colocation_utils.h" #include "distributed/commands.h" #include "distributed/listutils.h" -#include "distributed/metadata_sync.h" #include "distributed/metadata/distobject.h" +#include "distributed/metadata_sync.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/shard_transfer.h" #include "distributed/tenant_schema_metadata.h" #include "distributed/worker_shard_visibility.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/lsyscache.h" -#include "utils/syscache.h" /* return value of CreateCitusMoveSchemaParams() */ diff --git a/src/backend/distributed/commands/seclabel.c b/src/backend/distributed/commands/seclabel.c new file mode 100644 index 00000000000..1d274a05627 --- /dev/null +++ b/src/backend/distributed/commands/seclabel.c @@ -0,0 +1,121 @@ +/*------------------------------------------------------------------------- + * + * seclabel.c + * + * This file contains the logic of SECURITY LABEL statement propagation. + * + * Copyright (c) Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "distributed/commands.h" +#include "distributed/commands/utility_hook.h" +#include "distributed/coordinator_protocol.h" +#include "distributed/deparser.h" +#include "distributed/log_utils.h" +#include "distributed/metadata/distobject.h" +#include "distributed/metadata_sync.h" + + +/* + * PostprocessSecLabelStmt prepares the commands that need to be run on all workers to assign + * security labels on distributed objects, currently supporting just Role objects. + * It also ensures that all object dependencies exist on all + * nodes for the object in the SecLabelStmt. 
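 *
 * [Editor's illustration, not part of the original patch; the provider and
 * label names are made up.] For example, once
 *
 *     SECURITY LABEL FOR "some_provider" ON ROLE some_distributed_role
 *         IS 'some_label';
 *
 * has run locally, the same deparsed command is scheduled for the remote nodes,
 * wrapped in DISABLE_DDL_PROPAGATION and ENABLE_DDL_PROPAGATION.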
+ */ +List * +PostprocessSecLabelStmt(Node *node, const char *queryString) +{ + if (!EnableAlterRolePropagation || !ShouldPropagate()) + { + return NIL; + } + + SecLabelStmt *secLabelStmt = castNode(SecLabelStmt, node); + + List *objectAddresses = GetObjectAddressListFromParseTree(node, false, true); + if (!IsAnyObjectDistributed(objectAddresses)) + { + return NIL; + } + + if (secLabelStmt->objtype != OBJECT_ROLE) + { + /* + * If we are not in the coordinator, we don't want to interrupt the security + * label command with notices, the user expects that from the worker node + * the command will not be propagated + */ + if (EnableUnsupportedFeatureMessages && IsCoordinator()) + { + ereport(NOTICE, (errmsg("not propagating SECURITY LABEL commands whose " + "object type is not role"), + errhint("Connect to worker nodes directly to manually " + "run the same SECURITY LABEL command."))); + } + return NIL; + } + + + EnsurePropagationToCoordinator(); + EnsureAllObjectDependenciesExistOnAllNodes(objectAddresses); + + const char *secLabelCommands = DeparseTreeNode((Node *) secLabelStmt); + + List *commandList = list_make3(DISABLE_DDL_PROPAGATION, + (void *) secLabelCommands, + ENABLE_DDL_PROPAGATION); + + return NodeDDLTaskList(REMOTE_NODES, commandList); +} + + +/* + * SecLabelStmtObjectAddress returns the object address of the object on + * which this statement operates (secLabelStmt->object). Note that it has no limitation + * on the object type being OBJECT_ROLE. This is intentionally implemented like this + * since it is fairly simple to implement and we might extend SECURITY LABEL propagation + * in the future to include more object types. + */ +List * +SecLabelStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess) +{ + SecLabelStmt *secLabelStmt = castNode(SecLabelStmt, node); + + Relation rel = NULL; + ObjectAddress address = get_object_address(secLabelStmt->objtype, + secLabelStmt->object, &rel, + AccessShareLock, missing_ok); + if (rel != NULL) + { + relation_close(rel, AccessShareLock); + } + + ObjectAddress *addressPtr = palloc0(sizeof(ObjectAddress)); + *addressPtr = address; + return list_make1(addressPtr); +} + + +/* + * citus_test_object_relabel is a dummy function for check_object_relabel_type hook. 
+ * It is meant to be used in tests combined with citus_test_register_label_provider + */ +void +citus_test_object_relabel(const ObjectAddress *object, const char *seclabel) +{ + if (seclabel == NULL || + strcmp(seclabel, "citus_unclassified") == 0 || + strcmp(seclabel, "citus_classified") == 0 || + strcmp(seclabel, "citus '!unclassified") == 0) + { + return; + } + + ereport(ERROR, + (errcode(ERRCODE_INVALID_NAME), + errmsg("'%s' is not a valid security label for Citus tests.", seclabel))); +} diff --git a/src/backend/distributed/commands/sequence.c b/src/backend/distributed/commands/sequence.c index 9ff586c8c2d..cfb55faf740 100644 --- a/src/backend/distributed/commands/sequence.c +++ b/src/backend/distributed/commands/sequence.c @@ -14,8 +14,15 @@ #include "access/xact.h" #include "catalog/dependency.h" #include "catalog/namespace.h" +#include "catalog/pg_attrdef.h" #include "commands/defrem.h" #include "commands/extension.h" +#include "nodes/makefuncs.h" +#include "nodes/parsenodes.h" +#include "rewrite/rewriteHandler.h" +#include "utils/builtins.h" +#include "utils/lsyscache.h" + #include "distributed/commands.h" #include "distributed/commands/sequence.h" #include "distributed/commands/utility_hook.h" @@ -24,12 +31,7 @@ #include "distributed/metadata/distobject.h" #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" -#include "nodes/makefuncs.h" #include "distributed/worker_create_or_replace.h" -#include "nodes/parsenodes.h" -#include "rewrite/rewriteHandler.h" -#include "utils/builtins.h" -#include "utils/lsyscache.h" /* Local functions forward declarations for helper functions */ static bool OptionsSpecifyOwnedBy(List *optionList, Oid *ownedByTableId); @@ -506,22 +508,14 @@ PreprocessAlterSequenceStmt(Node *node, const char *queryString, static Oid SequenceUsedInDistributedTable(const ObjectAddress *sequenceAddress, char depType) { - List *citusTableIdList = CitusTableTypeIdList(ANY_CITUS_TABLE_TYPE); - Oid citusTableId = InvalidOid; - foreach_oid(citusTableId, citusTableIdList) + Oid relationId; + List *relations = GetDependentRelationsWithSequence(sequenceAddress->objectId, + depType); + foreach_oid(relationId, relations) { - List *seqInfoList = NIL; - GetDependentSequencesWithRelation(citusTableId, &seqInfoList, 0, depType); - SequenceInfo *seqInfo = NULL; - foreach_ptr(seqInfo, seqInfoList) + if (IsCitusTable(relationId)) { - /* - * This sequence is used in a distributed table - */ - if (seqInfo->sequenceOid == sequenceAddress->objectId) - { - return citusTableId; - } + return relationId; } } diff --git a/src/backend/distributed/commands/statistics.c b/src/backend/distributed/commands/statistics.c index dae72ada9c8..5fac767fd70 100644 --- a/src/backend/distributed/commands/statistics.c +++ b/src/backend/distributed/commands/statistics.c @@ -19,6 +19,8 @@ #include "postgres.h" +#include "miscadmin.h" + #include "access/genam.h" #include "access/htup_details.h" #include "access/xact.h" @@ -26,8 +28,16 @@ #include "catalog/pg_namespace.h" #include "catalog/pg_statistic_ext.h" #include "catalog/pg_type.h" -#include "distributed/commands/utility_hook.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/fmgrprotos.h" +#include "utils/lsyscache.h" +#include "utils/relcache.h" +#include "utils/ruleutils.h" +#include "utils/syscache.h" + #include "distributed/commands.h" +#include "distributed/commands/utility_hook.h" #include "distributed/deparse_shard_query.h" #include "distributed/deparser.h" #include "distributed/listutils.h" @@ -37,14 
+47,6 @@ #include "distributed/relation_access_tracking.h" #include "distributed/resource_lock.h" #include "distributed/worker_transaction.h" -#include "miscadmin.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/fmgrprotos.h" -#include "utils/lsyscache.h" -#include "utils/relcache.h" -#include "utils/ruleutils.h" -#include "utils/syscache.h" #define DEFAULT_STATISTICS_TARGET -1 #define ALTER_INDEX_COLUMN_SET_STATS_COMMAND \ diff --git a/src/backend/distributed/commands/subscription.c b/src/backend/distributed/commands/subscription.c index 59603b559d3..f5f80d17a68 100644 --- a/src/backend/distributed/commands/subscription.c +++ b/src/backend/distributed/commands/subscription.c @@ -8,20 +8,22 @@ *------------------------------------------------------------------------- */ +#include + #include "postgres.h" +#include "libpq-fe.h" #include "safe_lib.h" -#include - #include "commands/defrem.h" +#include "nodes/parsenodes.h" +#include "utils/builtins.h" + +#include "pg_version_constants.h" + #include "distributed/commands.h" #include "distributed/connection_management.h" -#include "distributed/pg_version_constants.h" #include "distributed/version_compat.h" -#include "libpq-fe.h" -#include "nodes/parsenodes.h" -#include "utils/builtins.h" static char * GenerateConninfoWithAuth(char *conninfo); diff --git a/src/backend/distributed/commands/table.c b/src/backend/distributed/commands/table.c index 500c6f3f2e9..30b028b79b1 100644 --- a/src/backend/distributed/commands/table.c +++ b/src/backend/distributed/commands/table.c @@ -9,7 +9,7 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" + #include "access/genam.h" #include "access/htup_details.h" #include "access/xact.h" @@ -20,38 +20,41 @@ #include "catalog/pg_depend.h" #include "catalog/pg_type.h" #include "commands/tablecmds.h" +#include "foreign/foreign.h" +#include "lib/stringinfo.h" +#include "nodes/parsenodes.h" +#include "parser/parse_expr.h" +#include "parser/parse_type.h" +#include "storage/lmgr.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" + +#include "pg_version_constants.h" + #include "distributed/citus_ruleutils.h" #include "distributed/colocation_utils.h" #include "distributed/commands.h" #include "distributed/commands/utility_hook.h" #include "distributed/coordinator_protocol.h" -#include "distributed/deparser.h" #include "distributed/deparse_shard_query.h" +#include "distributed/deparser.h" #include "distributed/distribution_column.h" #include "distributed/foreign_key_relationship.h" -#include "distributed/local_executor.h" #include "distributed/listutils.h" -#include "distributed/metadata_sync.h" +#include "distributed/local_executor.h" #include "distributed/metadata/dependency.h" #include "distributed/metadata/distobject.h" +#include "distributed/metadata_sync.h" #include "distributed/multi_executor.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/reference_table_utils.h" #include "distributed/relation_access_tracking.h" #include "distributed/resource_lock.h" +#include "distributed/tenant_schema_metadata.h" #include "distributed/version_compat.h" #include "distributed/worker_shard_visibility.h" -#include "distributed/tenant_schema_metadata.h" -#include "foreign/foreign.h" -#include "lib/stringinfo.h" -#include "nodes/parsenodes.h" -#include "parser/parse_expr.h" -#include "parser/parse_type.h" -#include "storage/lmgr.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" 
-#include "utils/lsyscache.h" -#include "utils/syscache.h" /* controlled via GUC, should be accessed via GetEnableLocalReferenceForeignKeys() */ @@ -3050,11 +3053,15 @@ ErrorUnsupportedAlterTableAddColumn(Oid relationId, AlterTableCmd *command, else if (constraint->contype == CONSTR_FOREIGN) { RangeVar *referencedTable = constraint->pktable; - char *referencedColumn = strVal(lfirst(list_head(constraint->pk_attrs))); Oid referencedRelationId = RangeVarGetRelid(referencedTable, NoLock, false); - appendStringInfo(errHint, "FOREIGN KEY (%s) REFERENCES %s(%s)", colName, - get_rel_name(referencedRelationId), referencedColumn); + appendStringInfo(errHint, "FOREIGN KEY (%s) REFERENCES %s", colName, + get_rel_name(referencedRelationId)); + + if (list_length(constraint->pk_attrs) > 0) + { + AppendColumnNameList(errHint, constraint->pk_attrs); + } if (constraint->fk_del_action == FKCONSTR_ACTION_SETNULL) { diff --git a/src/backend/distributed/commands/text_search.c b/src/backend/distributed/commands/text_search.c index 54dfdae8546..cce246a7315 100644 --- a/src/backend/distributed/commands/text_search.c +++ b/src/backend/distributed/commands/text_search.c @@ -10,6 +10,8 @@ #include "postgres.h" +#include "fmgr.h" + #include "access/genam.h" #include "access/xact.h" #include "catalog/namespace.h" @@ -22,7 +24,6 @@ #include "commands/comment.h" #include "commands/defrem.h" #include "commands/extension.h" -#include "fmgr.h" #include "nodes/makefuncs.h" #include "tsearch/ts_cache.h" #include "tsearch/ts_public.h" @@ -789,45 +790,6 @@ AlterTextSearchDictionarySchemaStmtObjectAddress(Node *node, bool missing_ok, bo } -/* - * TextSearchConfigurationCommentObjectAddress resolves the ObjectAddress for the TEXT - * SEARCH CONFIGURATION on which the comment is placed. Optionally errors if the - * configuration does not exist based on the missing_ok flag passed in by the caller. - */ -List * -TextSearchConfigurationCommentObjectAddress(Node *node, bool missing_ok, bool - isPostprocess) -{ - CommentStmt *stmt = castNode(CommentStmt, node); - Assert(stmt->objtype == OBJECT_TSCONFIGURATION); - - Oid objid = get_ts_config_oid(castNode(List, stmt->object), missing_ok); - - ObjectAddress *address = palloc0(sizeof(ObjectAddress)); - ObjectAddressSet(*address, TSConfigRelationId, objid); - return list_make1(address); -} - - -/* - * TextSearchDictCommentObjectAddress resolves the ObjectAddress for the TEXT SEARCH - * DICTIONARY on which the comment is placed. Optionally errors if the dictionary does not - * exist based on the missing_ok flag passed in by the caller. - */ -List * -TextSearchDictCommentObjectAddress(Node *node, bool missing_ok, bool isPostprocess) -{ - CommentStmt *stmt = castNode(CommentStmt, node); - Assert(stmt->objtype == OBJECT_TSDICTIONARY); - - Oid objid = get_ts_dict_oid(castNode(List, stmt->object), missing_ok); - - ObjectAddress *address = palloc0(sizeof(ObjectAddress)); - ObjectAddressSet(*address, TSDictionaryRelationId, objid); - return list_make1(address); -} - - /* * AlterTextSearchConfigurationOwnerObjectAddress resolves the ObjectAddress for the TEXT * SEARCH CONFIGURATION for which the owner is changed. 
Optionally errors if the diff --git a/src/backend/distributed/commands/trigger.c b/src/backend/distributed/commands/trigger.c index 7577dfd312a..74cb6259ffb 100644 --- a/src/backend/distributed/commands/trigger.c +++ b/src/backend/distributed/commands/trigger.c @@ -9,7 +9,6 @@ *------------------------------------------------------------------------- */ #include "postgres.h" -#include "distributed/pg_version_constants.h" #include "access/genam.h" #include "access/table.h" @@ -18,6 +17,14 @@ #include "catalog/pg_trigger.h" #include "commands/extension.h" #include "commands/trigger.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/fmgrprotos.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" + +#include "pg_version_constants.h" + #include "distributed/commands.h" #include "distributed/commands/utility_hook.h" #include "distributed/coordinator_protocol.h" @@ -29,11 +36,6 @@ #include "distributed/namespace_utils.h" #include "distributed/shard_utils.h" #include "distributed/worker_protocol.h" -#include "utils/builtins.h" -#include "utils/fmgrprotos.h" -#include "utils/fmgroids.h" -#include "utils/lsyscache.h" -#include "utils/syscache.h" /* appropriate lock modes for the owner relation according to postgres */ diff --git a/src/backend/distributed/commands/truncate.c b/src/backend/distributed/commands/truncate.c index 4de518a062b..0eb43f529cd 100644 --- a/src/backend/distributed/commands/truncate.c +++ b/src/backend/distributed/commands/truncate.c @@ -9,12 +9,19 @@ */ #include "postgres.h" + #include "miscadmin.h" #include "catalog/namespace.h" #include "catalog/pg_class.h" #include "commands/tablecmds.h" #include "commands/trigger.h" +#include "storage/lmgr.h" +#include "utils/builtins.h" +#include "utils/lsyscache.h" +#include "utils/regproc.h" +#include "utils/rel.h" + #include "distributed/adaptive_executor.h" #include "distributed/citus_ruleutils.h" #include "distributed/commands.h" @@ -31,13 +38,8 @@ #include "distributed/reference_table_utils.h" #include "distributed/resource_lock.h" #include "distributed/transaction_management.h" -#include "distributed/worker_transaction.h" #include "distributed/worker_shard_visibility.h" -#include "storage/lmgr.h" -#include "utils/builtins.h" -#include "utils/lsyscache.h" -#include "utils/regproc.h" -#include "utils/rel.h" +#include "distributed/worker_transaction.h" /* Local functions forward declarations for unsupported command checks */ diff --git a/src/backend/distributed/commands/type.c b/src/backend/distributed/commands/type.c index 02e5f0dee5c..b1e57363828 100644 --- a/src/backend/distributed/commands/type.c +++ b/src/backend/distributed/commands/type.c @@ -43,7 +43,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "miscadmin.h" #include "access/genam.h" #include "access/htup_details.h" @@ -52,6 +52,18 @@ #include "catalog/pg_enum.h" #include "catalog/pg_type.h" #include "commands/extension.h" +#include "nodes/makefuncs.h" +#include "parser/parse_type.h" +#include "storage/lmgr.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/lsyscache.h" +#include "utils/regproc.h" +#include "utils/syscache.h" +#include "utils/typcache.h" + +#include "pg_version_constants.h" + #include "distributed/citus_safe_lib.h" #include "distributed/commands.h" #include "distributed/commands/utility_hook.h" @@ -64,20 +76,10 @@ #include "distributed/relation_access_tracking.h" #include "distributed/remote_commands.h" #include "distributed/transaction_management.h" -#include 
"distributed/worker_create_or_replace.h" #include "distributed/version_compat.h" +#include "distributed/worker_create_or_replace.h" #include "distributed/worker_manager.h" #include "distributed/worker_transaction.h" -#include "miscadmin.h" -#include "nodes/makefuncs.h" -#include "parser/parse_type.h" -#include "storage/lmgr.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/lsyscache.h" -#include "utils/regproc.h" -#include "utils/syscache.h" -#include "utils/typcache.h" #define AlterEnumIsRename(stmt) (stmt->oldVal != NULL) #define AlterEnumIsAddValue(stmt) (stmt->oldVal == NULL) diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index cf8e0644eec..9426e13c0ea 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -25,9 +25,8 @@ *------------------------------------------------------------------------- */ -#include "distributed/pg_version_constants.h" - #include "postgres.h" + #include "miscadmin.h" #include "access/attnum.h" @@ -35,11 +34,28 @@ #include "access/htup_details.h" #include "catalog/catalog.h" #include "catalog/dependency.h" -#include "citus_version.h" +#include "catalog/pg_authid.h" +#include "catalog/pg_database.h" #include "commands/dbcommands.h" #include "commands/defrem.h" #include "commands/extension.h" #include "commands/tablecmds.h" +#include "foreign/foreign.h" +#include "lib/stringinfo.h" +#include "nodes/makefuncs.h" +#include "nodes/parsenodes.h" +#include "nodes/pg_list.h" +#include "postmaster/postmaster.h" +#include "tcop/utility.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/inval.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" + +#include "citus_version.h" +#include "pg_version_constants.h" + #include "distributed/adaptive_executor.h" #include "distributed/backend_data.h" #include "distributed/citus_depended_object.h" @@ -48,39 +64,28 @@ #include "distributed/commands/multi_copy.h" #include "distributed/commands/utility_hook.h" /* IWYU pragma: keep */ #include "distributed/coordinator_protocol.h" -#include "distributed/deparser.h" #include "distributed/deparse_shard_query.h" +#include "distributed/deparser.h" #include "distributed/executor_util.h" #include "distributed/foreign_key_relationship.h" #include "distributed/listutils.h" #include "distributed/local_executor.h" #include "distributed/maintenanced.h" -#include "distributed/multi_logical_replication.h" -#include "distributed/multi_partitioning_utils.h" -#include "distributed/metadata_sync.h" #include "distributed/metadata/distobject.h" +#include "distributed/metadata_sync.h" #include "distributed/multi_executor.h" #include "distributed/multi_explain.h" +#include "distributed/multi_logical_replication.h" +#include "distributed/multi_partitioning_utils.h" #include "distributed/multi_physical_planner.h" #include "distributed/reference_table_utils.h" +#include "distributed/remote_commands.h" #include "distributed/resource_lock.h" #include "distributed/string_utils.h" #include "distributed/transaction_management.h" #include "distributed/version_compat.h" #include "distributed/worker_shard_visibility.h" #include "distributed/worker_transaction.h" -#include "foreign/foreign.h" -#include "lib/stringinfo.h" -#include "nodes/parsenodes.h" -#include "nodes/pg_list.h" -#include "nodes/makefuncs.h" -#include "tcop/utility.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/inval.h" -#include 
"utils/lsyscache.h" -#include "utils/syscache.h" - bool EnableDDLPropagation = true; /* ddl propagation is enabled */ int CreateObjectPropagationMode = CREATE_OBJECT_PROPAGATION_IMMEDIATE; @@ -95,13 +100,13 @@ int UtilityHookLevel = 0; /* Local functions forward declarations for helper functions */ -static void ProcessUtilityInternal(PlannedStmt *pstmt, - const char *queryString, - ProcessUtilityContext context, - ParamListInfo params, - struct QueryEnvironment *queryEnv, - DestReceiver *dest, - QueryCompletion *completionTag); +static void citus_ProcessUtilityInternal(PlannedStmt *pstmt, + const char *queryString, + ProcessUtilityContext context, + ParamListInfo params, + struct QueryEnvironment *queryEnv, + DestReceiver *dest, + QueryCompletion *completionTag); static void set_indexsafe_procflags(void); static char * CurrentSearchPath(void); static void IncrementUtilityHookCountersIfNecessary(Node *parsetree); @@ -110,6 +115,7 @@ static void DecrementUtilityHookCountersIfNecessary(Node *parsetree); static bool IsDropSchemaOrDB(Node *parsetree); static bool ShouldCheckUndistributeCitusLocalTables(void); + /* * ProcessUtilityParseTree is a convenience method to create a PlannedStmt out of * pieces of a utility statement before invoking ProcessUtility. @@ -130,7 +136,7 @@ ProcessUtilityParseTree(Node *node, const char *queryString, ProcessUtilityConte /* - * multi_ProcessUtility is the main entry hook for implementing Citus-specific + * citus_ProcessUtility is the main entry hook for implementing Citus-specific * utility behavior. Its primary responsibilities are intercepting COPY and DDL * commands and augmenting the coordinator's command with corresponding tasks * to be run on worker nodes, after suitably ensuring said commands' options @@ -139,7 +145,7 @@ ProcessUtilityParseTree(Node *node, const char *queryString, ProcessUtilityConte * TRUNCATE and VACUUM are also supported. */ void -multi_ProcessUtility(PlannedStmt *pstmt, +citus_ProcessUtility(PlannedStmt *pstmt, const char *queryString, bool readOnlyTree, ProcessUtilityContext context, @@ -241,11 +247,25 @@ multi_ProcessUtility(PlannedStmt *pstmt, if (!CitusHasBeenLoaded()) { /* - * Ensure that utility commands do not behave any differently until CREATE - * EXTENSION is invoked. + * Process the command via RunPreprocessNonMainDBCommand and + * RunPostprocessNonMainDBCommand hooks if we're in a non-main database + * and if the command is a node-wide object management command that we + * support from non-main databases. */ - PrevProcessUtility(pstmt, queryString, false, context, - params, queryEnv, dest, completionTag); + + bool shouldSkipPrevUtilityHook = RunPreprocessNonMainDBCommand(parsetree); + + if (!shouldSkipPrevUtilityHook) + { + /* + * Ensure that utility commands do not behave any differently until CREATE + * EXTENSION is invoked. 
+ */ + PrevProcessUtility(pstmt, queryString, false, context, + params, queryEnv, dest, completionTag); + } + + RunPostprocessNonMainDBCommand(parsetree); return; } @@ -329,8 +349,8 @@ multi_ProcessUtility(PlannedStmt *pstmt, PG_TRY(); { - ProcessUtilityInternal(pstmt, queryString, context, params, queryEnv, dest, - completionTag); + citus_ProcessUtilityInternal(pstmt, queryString, context, params, queryEnv, dest, + completionTag); if (UtilityHookLevel == 1) { @@ -404,7 +424,7 @@ multi_ProcessUtility(PlannedStmt *pstmt, /* - * ProcessUtilityInternal is a helper function for multi_ProcessUtility where majority + * citus_ProcessUtilityInternal is a helper function for citus_ProcessUtility where majority * of the Citus specific utility statements are handled here. The distinction between * both functions is that Citus_ProcessUtility does not handle CALL and DO statements. * The reason for the distinction is implemented to be able to find the "top-level" DDL @@ -412,13 +432,13 @@ multi_ProcessUtility(PlannedStmt *pstmt, * this goal. */ static void -ProcessUtilityInternal(PlannedStmt *pstmt, - const char *queryString, - ProcessUtilityContext context, - ParamListInfo params, - struct QueryEnvironment *queryEnv, - DestReceiver *dest, - QueryCompletion *completionTag) +citus_ProcessUtilityInternal(PlannedStmt *pstmt, + const char *queryString, + ProcessUtilityContext context, + ParamListInfo params, + struct QueryEnvironment *queryEnv, + DestReceiver *dest, + QueryCompletion *completionTag) { Node *parsetree = pstmt->utilityStmt; List *ddlJobs = NIL; @@ -694,25 +714,32 @@ ProcessUtilityInternal(PlannedStmt *pstmt, } /* inform the user about potential caveats */ - if (IsA(parsetree, CreatedbStmt)) + if (IsA(parsetree, CreatedbStmt) && !EnableCreateDatabasePropagation) { if (EnableUnsupportedFeatureMessages) { ereport(NOTICE, (errmsg("Citus partially supports CREATE DATABASE for " "distributed databases"), errdetail("Citus does not propagate CREATE DATABASE " - "command to workers"), + "command to other nodes"), errhint("You can manually create a database and its " - "extensions on workers."))); + "extensions on other nodes."))); } } else if (IsA(parsetree, CreateRoleStmt) && !EnableCreateRolePropagation) { - ereport(NOTICE, (errmsg("not propagating CREATE ROLE/USER commands to worker" + ereport(NOTICE, (errmsg("not propagating CREATE ROLE/USER commands to other" " nodes"), - errhint("Connect to worker nodes directly to manually create all" + errhint("Connect to other nodes directly to manually create all" " necessary users and roles."))); } + else if (IsA(parsetree, SecLabelStmt) && !EnableAlterRolePropagation) + { + ereport(NOTICE, (errmsg("not propagating SECURITY LABEL commands to other" + " nodes"), + errhint("Connect to other nodes directly to manually assign" + " necessary labels."))); + } /* * Make sure that on DROP EXTENSION we terminate the background daemon @@ -724,22 +751,13 @@ ProcessUtilityInternal(PlannedStmt *pstmt, } /* - * Make sure that dropping the role deletes the pg_dist_object entries. There is a - * separate logic for roles, since roles are not included as dropped objects in the - * drop event trigger. To handle it both on worker and coordinator nodes, it is not - * implemented as a part of process functions but here. + * Make sure that dropping node-wide objects deletes the pg_dist_object + * entries. There is a separate logic for node-wide objects (such as role + * and databases), since they are not included as dropped objects in the + * drop event trigger. 
To handle it both on worker and coordinator nodes, + * it is not implemented as a part of process functions but here. */ - if (IsA(parsetree, DropRoleStmt)) - { - DropRoleStmt *stmt = castNode(DropRoleStmt, parsetree); - List *allDropRoles = stmt->roles; - - List *distributedDropRoles = FilterDistributedRoles(allDropRoles); - if (list_length(distributedDropRoles) > 0) - { - UnmarkRolesDistributed(distributedDropRoles); - } - } + UnmarkNodeWideObjectsDistributed(parsetree); pstmt->utilityStmt = parsetree; @@ -1106,16 +1124,17 @@ IsDropSchemaOrDB(Node *parsetree) * each shard placement and COMMIT/ROLLBACK is handled by * CoordinatedTransactionCallback function. * - * The function errors out if the node is not the coordinator or if the DDL is on - * a partitioned table which has replication factor > 1. - * + * The function errors out if the DDL is on a partitioned table which has replication + * factor > 1, or if the the coordinator is not added into metadata and we're on a + * worker node because we want to make sure that distributed DDL jobs are executed + * on the coordinator node too. See EnsurePropagationToCoordinator() for more details. */ void ExecuteDistributedDDLJob(DDLJob *ddlJob) { bool shouldSyncMetadata = false; - EnsureCoordinator(); + EnsurePropagationToCoordinator(); ObjectAddress targetObjectAddress = ddlJob->targetObjectAddress; @@ -1139,23 +1158,24 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob) { if (shouldSyncMetadata) { - SendCommandToWorkersWithMetadata(DISABLE_DDL_PROPAGATION); + SendCommandToRemoteNodesWithMetadata(DISABLE_DDL_PROPAGATION); char *currentSearchPath = CurrentSearchPath(); /* - * Given that we're relaying the query to the worker nodes directly, + * Given that we're relaying the query to the remote nodes directly, * we should set the search path exactly the same when necessary. */ if (currentSearchPath != NULL) { - SendCommandToWorkersWithMetadata( + SendCommandToRemoteNodesWithMetadata( psprintf("SET LOCAL search_path TO %s;", currentSearchPath)); } if (ddlJob->metadataSyncCommand != NULL) { - SendCommandToWorkersWithMetadata((char *) ddlJob->metadataSyncCommand); + SendCommandToRemoteNodesWithMetadata( + (char *) ddlJob->metadataSyncCommand); } } @@ -1234,7 +1254,7 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob) char *currentSearchPath = CurrentSearchPath(); /* - * Given that we're relaying the query to the worker nodes directly, + * Given that we're relaying the query to the remote nodes directly, * we should set the search path exactly the same when necessary. 
*/ if (currentSearchPath != NULL) @@ -1246,7 +1266,7 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob) commandList = lappend(commandList, (char *) ddlJob->metadataSyncCommand); - SendBareCommandListToMetadataWorkers(commandList); + SendBareCommandListToRemoteMetadataNodes(commandList); } } PG_CATCH(); @@ -1269,15 +1289,18 @@ ExecuteDistributedDDLJob(DDLJob *ddlJob) errhint("Use DROP INDEX CONCURRENTLY IF EXISTS to remove the " "invalid index, then retry the original command."))); } - else + else if (ddlJob->warnForPartialFailure) { ereport(WARNING, (errmsg( - "CONCURRENTLY-enabled index commands can fail partially, " - "leaving behind an INVALID index.\n Use DROP INDEX " - "CONCURRENTLY IF EXISTS to remove the invalid index."))); - PG_RE_THROW(); + "Commands that are not transaction-safe may result in " + "partial failure, potentially leading to an inconsistent " + "state.\nIf the problematic command is a CREATE operation, " + "consider using the 'IF EXISTS' syntax to drop the object," + "\nif applicable, and then re-attempt the original command."))); } + + PG_RE_THROW(); } PG_END_TRY(); } @@ -1386,7 +1409,7 @@ PostStandardProcessUtility(Node *parsetree) * on the local table first. However, in order to decide whether the * command leads to an invalidation, we need to check before the command * is being executed since we read pg_constraint table. Thus, we maintain a - * local flag and do the invalidation after multi_ProcessUtility, + * local flag and do the invalidation after citus_ProcessUtility, * before ExecuteDistributedDDLJob(). */ InvalidateForeignKeyGraphForDDL(); @@ -1489,6 +1512,33 @@ DDLTaskList(Oid relationId, const char *commandString) } +/* + * NontransactionalNodeDDLTaskList builds a list of tasks to execute a DDL command on a + * given target set of nodes with cannotBeExecutedInTransaction is set to make sure + * that task list is executed outside a transaction block. + * + * Also sets warnForPartialFailure for the returned DDLJobs. + */ +List * +NontransactionalNodeDDLTaskList(TargetWorkerSet targets, List *commands, + bool warnForPartialFailure) +{ + List *ddlJobs = NodeDDLTaskList(targets, commands); + DDLJob *ddlJob = NULL; + foreach_ptr(ddlJob, ddlJobs) + { + Task *task = NULL; + foreach_ptr(task, ddlJob->taskList) + { + task->cannotBeExecutedInTransaction = true; + } + + ddlJob->warnForPartialFailure = warnForPartialFailure; + } + return ddlJobs; +} + + /* * NodeDDLTaskList builds a list of tasks to execute a DDL command on a * given target set of nodes. 
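As a rough usage sketch (not part of this patch; the command string and the choice of REMOTE_NODES are assumptions for illustration), a caller that must run a DDL command outside of a transaction block could build its job list with the helper above:

/*
 * Illustrative sketch: build a non-transactional DDL job list for a command
 * that cannot run inside a transaction block, requesting the partial-failure
 * warning in case only some nodes manage to apply it.
 */
static List *
ExampleNontransactionalDDLJobs(const char *ddlCommand)
{
	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
								(void *) ddlCommand,
								ENABLE_DDL_PROPAGATION);

	bool warnForPartialFailure = true;

	return NontransactionalNodeDDLTaskList(REMOTE_NODES, commands,
										   warnForPartialFailure);
}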
diff --git a/src/backend/distributed/commands/vacuum.c b/src/backend/distributed/commands/vacuum.c index ee03aeae1a9..5988a447ddf 100644 --- a/src/backend/distributed/commands/vacuum.c +++ b/src/backend/distributed/commands/vacuum.c @@ -10,10 +10,16 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" - +#include "access/xact.h" #include "commands/defrem.h" #include "commands/vacuum.h" +#include "postmaster/bgworker_internals.h" +#include "storage/lmgr.h" +#include "utils/builtins.h" +#include "utils/lsyscache.h" + +#include "pg_version_constants.h" + #include "distributed/adaptive_executor.h" #include "distributed/commands.h" #include "distributed/commands/utility_hook.h" @@ -24,11 +30,6 @@ #include "distributed/resource_lock.h" #include "distributed/transaction_management.h" #include "distributed/version_compat.h" -#include "storage/lmgr.h" -#include "utils/builtins.h" -#include "utils/lsyscache.h" -#include "postmaster/bgworker_internals.h" -#include "access/xact.h" #define VACUUM_PARALLEL_NOTSET -2 @@ -184,7 +185,6 @@ ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationIdList, CitusVacuumParams vacuumParams) { int relationIndex = 0; - int executedVacuumCount = 0; Oid relationId = InvalidOid; foreach_oid(relationId, relationIdList) @@ -197,7 +197,6 @@ ExecuteVacuumOnDistributedTables(VacuumStmt *vacuumStmt, List *relationIdList, /* local execution is not implemented for VACUUM commands */ bool localExecutionSupported = false; ExecuteUtilityTaskList(taskList, localExecutionSupported); - executedVacuumCount++; } relationIndex++; } @@ -279,7 +278,7 @@ VacuumTaskList(Oid relationId, CitusVacuumParams vacuumParams, List *vacuumColum task->replicationModel = REPLICATION_MODEL_INVALID; task->anchorShardId = shardId; task->taskPlacementList = ActiveShardPlacementList(shardId); - task->cannotBeExecutedInTransction = ((vacuumParams.options) & VACOPT_VACUUM); + task->cannotBeExecutedInTransaction = ((vacuumParams.options) & VACOPT_VACUUM); taskList = lappend(taskList, task); } @@ -719,7 +718,7 @@ ExecuteUnqualifiedVacuumTasks(VacuumStmt *vacuumStmt, CitusVacuumParams vacuumPa SetTaskQueryStringList(task, unqualifiedVacuumCommands); task->dependentTaskList = NULL; task->replicationModel = REPLICATION_MODEL_INVALID; - task->cannotBeExecutedInTransction = ((vacuumParams.options) & VACOPT_VACUUM); + task->cannotBeExecutedInTransaction = ((vacuumParams.options) & VACOPT_VACUUM); bool hasPeerWorker = false; diff --git a/src/backend/distributed/commands/variableset.c b/src/backend/distributed/commands/variableset.c index 277f5b63f98..2a3bc2f67ca 100644 --- a/src/backend/distributed/commands/variableset.c +++ b/src/backend/distributed/commands/variableset.c @@ -9,21 +9,23 @@ */ #include "postgres.h" + #include "c.h" #include "common/string.h" +#include "lib/ilist.h" +#include "storage/lmgr.h" +#include "utils/builtins.h" +#include "utils/lsyscache.h" +#include "utils/varlena.h" + #include "distributed/commands.h" #include "distributed/commands/utility_hook.h" #include "distributed/metadata_cache.h" +#include "distributed/remote_commands.h" #include "distributed/resource_lock.h" #include "distributed/transaction_management.h" #include "distributed/version_compat.h" -#include "storage/lmgr.h" -#include "utils/builtins.h" -#include "utils/lsyscache.h" -#include "lib/ilist.h" -#include "utils/varlena.h" -#include "distributed/remote_commands.h" /* * ShouldPropagateSetCommand determines whether a SET or RESET command should be diff --git 
a/src/backend/distributed/commands/view.c b/src/backend/distributed/commands/view.c index 7c4816144ac..9689b92679f 100644 --- a/src/backend/distributed/commands/view.c +++ b/src/backend/distributed/commands/view.c @@ -9,31 +9,33 @@ */ #include "postgres.h" + #include "fmgr.h" #include "access/genam.h" #include "catalog/objectaddress.h" #include "commands/extension.h" -#include "distributed/commands.h" +#include "executor/spi.h" +#include "nodes/nodes.h" +#include "nodes/pg_list.h" +#include "tcop/utility.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" + #include "distributed/citus_ruleutils.h" +#include "distributed/commands.h" #include "distributed/commands/utility_hook.h" #include "distributed/deparser.h" #include "distributed/errormessage.h" #include "distributed/listutils.h" -#include "distributed/metadata_sync.h" #include "distributed/metadata/dependency.h" #include "distributed/metadata/distobject.h" +#include "distributed/metadata_sync.h" #include "distributed/multi_executor.h" #include "distributed/namespace_utils.h" #include "distributed/worker_transaction.h" -#include "executor/spi.h" -#include "nodes/nodes.h" -#include "nodes/pg_list.h" -#include "tcop/utility.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/lsyscache.h" -#include "utils/syscache.h" /* * GUC controls some restrictions for local objects. For example, @@ -390,9 +392,7 @@ CreateViewDDLCommand(Oid viewOid) static void AppendQualifiedViewNameToCreateViewCommand(StringInfo buf, Oid viewOid) { - char *viewName = get_rel_name(viewOid); - char *schemaName = get_namespace_name(get_rel_namespace(viewOid)); - char *qualifiedViewName = quote_qualified_identifier(schemaName, viewName); + char *qualifiedViewName = generate_qualified_relation_name(viewOid); appendStringInfo(buf, "%s ", qualifiedViewName); } diff --git a/src/backend/distributed/connection/connection_configuration.c b/src/backend/distributed/connection/connection_configuration.c index bf61f7fac37..3913173e285 100644 --- a/src/backend/distributed/connection/connection_configuration.c +++ b/src/backend/distributed/connection/connection_configuration.c @@ -12,6 +12,10 @@ #include "access/transam.h" #include "access/xact.h" +#include "mb/pg_wchar.h" +#include "postmaster/postmaster.h" +#include "utils/builtins.h" + #include "distributed/backend_data.h" #include "distributed/citus_safe_lib.h" #include "distributed/connection_management.h" @@ -19,10 +23,6 @@ #include "distributed/metadata_cache.h" #include "distributed/worker_manager.h" -#include "postmaster/postmaster.h" -#include "mb/pg_wchar.h" -#include "utils/builtins.h" - /* stores the string representation of our node connection GUC */ #ifdef USE_SSL char *NodeConninfo = "sslmode=require"; @@ -123,6 +123,10 @@ AddConnParam(const char *keyword, const char *value) errmsg("ConnParams arrays bound check failed"))); } + /* + * Don't use pstrdup here to avoid being tied to a memory context, we free + * these later using ResetConnParams + */ ConnParams.keywords[ConnParams.size] = strdup(keyword); ConnParams.values[ConnParams.size] = strdup(value); ConnParams.size++; @@ -267,9 +271,24 @@ GetConnParams(ConnectionHashKey *key, char ***keywords, char ***values, * We allocate everything in the provided context so as to facilitate using * pfree on all runtime parameters when connections using these entries are * invalidated during config reloads. 
+ * + * Also, when "host" is already provided in global parameters, we use hostname + * from the key as "hostaddr" instead of "host" to avoid host name lookup. In + * that case, the value for "host" becomes useful only if the authentication + * method requires it. */ + bool gotHostParamFromGlobalParams = false; + for (Size paramIndex = 0; paramIndex < ConnParams.size; paramIndex++) + { + if (strcmp(ConnParams.keywords[paramIndex], "host") == 0) + { + gotHostParamFromGlobalParams = true; + break; + } + } + const char *runtimeKeywords[] = { - "host", + gotHostParamFromGlobalParams ? "hostaddr" : "host", "port", "dbname", "user", @@ -425,11 +444,13 @@ GetConnParam(const char *keyword) /* * GetEffectiveConnKey checks whether there is any pooler configuration for the - * provided key (host/port combination). The one case where this logic is not - * applied is for loopback connections originating within the task tracker. If - * a corresponding row is found in the poolinfo table, a modified (effective) - * key is returned with the node, port, and dbname overridden, as applicable, - * otherwise, the original key is returned unmodified. + * provided key (host/port combination). If a corresponding row is found in the + * poolinfo table, a modified (effective) key is returned with the node, port, + * and dbname overridden, as applicable, otherwise, the original key is returned + * unmodified. + * + * In the case of Citus non-main databases we just return the key, since we + * would not have access to tables with worker information. */ ConnectionHashKey * GetEffectiveConnKey(ConnectionHashKey *key) @@ -439,12 +460,22 @@ GetEffectiveConnKey(ConnectionHashKey *key) if (!IsTransactionState()) { /* we're in the task tracker, so should only see loopback */ - Assert(strncmp(LOCAL_HOST_NAME, key->hostname, MAX_NODE_LENGTH) == 0 && + Assert(strncmp(LocalHostName, key->hostname, MAX_NODE_LENGTH) == 0 && PostPortNumber == key->port); return key; } + if (!CitusHasBeenLoaded()) + { + /* + * This happens when we connect to main database over localhost + * from some non Citus database. + */ + return key; + } + WorkerNode *worker = FindWorkerNode(key->hostname, key->port); + if (worker == NULL) { /* this can be hit when the key references an unknown node */ @@ -505,9 +536,23 @@ char * GetAuthinfo(char *hostname, int32 port, char *user) { char *authinfo = NULL; - bool isLoopback = (strncmp(LOCAL_HOST_NAME, hostname, MAX_NODE_LENGTH) == 0 && + bool isLoopback = (strncmp(LocalHostName, hostname, MAX_NODE_LENGTH) == 0 && PostPortNumber == port); + /* + * Citus will not be loaded when we run a global DDL command from a + * Citus non-main database. + */ + if (!CitusHasBeenLoaded()) + { + /* + * We don't expect non-main databases to connect to a node other than + * the local one. 
+ */ + Assert(isLoopback); + return ""; + } + if (IsTransactionState()) { int64 nodeId = WILDCARD_NODE_ID; diff --git a/src/backend/distributed/connection/connection_management.c b/src/backend/distributed/connection/connection_management.c index 9439b38c5fb..f8e4816ed7d 100644 --- a/src/backend/distributed/connection/connection_management.c +++ b/src/backend/distributed/connection/connection_management.c @@ -9,39 +9,39 @@ */ #include "postgres.h" -#include "pgstat.h" #include "libpq-fe.h" - #include "miscadmin.h" - +#include "pg_config.h" +#include "pgstat.h" #include "safe_lib.h" -#include "postmaster/postmaster.h" + #include "access/hash.h" #include "commands/dbcommands.h" +#include "mb/pg_wchar.h" +#include "portability/instr_time.h" +#include "postmaster/postmaster.h" +#include "storage/ipc.h" +#include "utils/hsearch.h" +#include "utils/memutils.h" + #include "distributed/backend_data.h" +#include "distributed/cancel_utils.h" #include "distributed/connection_management.h" -#include "distributed/errormessage.h" #include "distributed/error_codes.h" +#include "distributed/errormessage.h" +#include "distributed/hash_helpers.h" #include "distributed/listutils.h" #include "distributed/log_utils.h" #include "distributed/memutils.h" #include "distributed/metadata_cache.h" -#include "distributed/hash_helpers.h" #include "distributed/placement_connection.h" +#include "distributed/remote_commands.h" #include "distributed/run_from_same_connection.h" #include "distributed/shared_connection_stats.h" -#include "distributed/cancel_utils.h" -#include "distributed/remote_commands.h" #include "distributed/time_constants.h" #include "distributed/version_compat.h" #include "distributed/worker_log_messages.h" -#include "mb/pg_wchar.h" -#include "pg_config.h" -#include "portability/instr_time.h" -#include "storage/ipc.h" -#include "utils/hsearch.h" -#include "utils/memutils.h" int NodeConnectionTimeout = 30000; @@ -1046,8 +1046,15 @@ FinishConnectionListEstablishment(List *multiConnectionList) continue; } - + bool beforePollSocket = PQsocket(connectionState->connection->pgConn); bool connectionStateChanged = MultiConnectionStatePoll(connectionState); + + if (beforePollSocket != PQsocket(connectionState->connection->pgConn)) + { + /* rebuild the wait events if MultiConnectionStatePoll() changed the socket */ + waitEventSetRebuild = true; + } + if (connectionStateChanged) { if (connectionState->phase != MULTI_CONNECTION_PHASE_CONNECTING) diff --git a/src/backend/distributed/connection/locally_reserved_shared_connections.c b/src/backend/distributed/connection/locally_reserved_shared_connections.c index e3f7cb628eb..a64930b3296 100644 --- a/src/backend/distributed/connection/locally_reserved_shared_connections.c +++ b/src/backend/distributed/connection/locally_reserved_shared_connections.c @@ -33,12 +33,15 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" - #include "miscadmin.h" #include "access/hash.h" #include "commands/dbcommands.h" +#include "common/hashfn.h" +#include "utils/builtins.h" + +#include "pg_version_constants.h" + #include "distributed/listutils.h" #include "distributed/locally_reserved_shared_connections.h" #include "distributed/metadata_cache.h" @@ -47,8 +50,6 @@ #include "distributed/shared_connection_stats.h" #include "distributed/tuplestore.h" #include "distributed/worker_manager.h" -#include "utils/builtins.h" -#include "common/hashfn.h" #define RESERVED_CONNECTION_COLUMNS 4 @@ -302,8 +303,8 @@ EnsureConnectionPossibilityForRemotePrimaryNodes(void) * seem to cause 
any problems as none of the placements that we are * going to access would be on the new node. */ - List *primaryNodeList = ActivePrimaryRemoteNodeList(NoLock); - EnsureConnectionPossibilityForNodeList(primaryNodeList); + List *remoteNodeList = ActivePrimaryRemoteNodeList(NoLock); + EnsureConnectionPossibilityForNodeList(remoteNodeList); } diff --git a/src/backend/distributed/connection/placement_connection.c b/src/backend/distributed/connection/placement_connection.c index cc7962e37b9..10c99bd8074 100644 --- a/src/backend/distributed/connection/placement_connection.c +++ b/src/backend/distributed/connection/placement_connection.c @@ -11,23 +11,24 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" - #include "access/hash.h" +#include "common/hashfn.h" +#include "utils/hsearch.h" +#include "utils/memutils.h" + +#include "pg_version_constants.h" + #include "distributed/colocation_utils.h" #include "distributed/connection_management.h" +#include "distributed/coordinator_protocol.h" +#include "distributed/distributed_planner.h" #include "distributed/hash_helpers.h" #include "distributed/listutils.h" -#include "distributed/coordinator_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/multi_executor.h" -#include "distributed/distributed_planner.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/placement_connection.h" #include "distributed/relation_access_tracking.h" -#include "utils/hsearch.h" -#include "common/hashfn.h" -#include "utils/memutils.h" /* diff --git a/src/backend/distributed/connection/remote_commands.c b/src/backend/distributed/connection/remote_commands.c index 15dd985ec3f..cbd74ff51b1 100644 --- a/src/backend/distributed/connection/remote_commands.c +++ b/src/backend/distributed/connection/remote_commands.c @@ -9,24 +9,24 @@ */ #include "postgres.h" -#include "pgstat.h" #include "libpq-fe.h" +#include "miscadmin.h" +#include "pgstat.h" -#include "distributed/connection_management.h" -#include "distributed/errormessage.h" -#include "distributed/listutils.h" -#include "distributed/log_utils.h" -#include "distributed/remote_commands.h" -#include "distributed/errormessage.h" -#include "distributed/cancel_utils.h" #include "lib/stringinfo.h" -#include "miscadmin.h" #include "storage/latch.h" #include "utils/builtins.h" #include "utils/fmgrprotos.h" #include "utils/palloc.h" +#include "distributed/cancel_utils.h" +#include "distributed/connection_management.h" +#include "distributed/errormessage.h" +#include "distributed/listutils.h" +#include "distributed/log_utils.h" +#include "distributed/remote_commands.h" + /* * Setting that controls how many bytes of COPY data libpq is allowed to buffer @@ -246,6 +246,7 @@ ClearResultsIfReady(MultiConnection *connection) void ReportConnectionError(MultiConnection *connection, int elevel) { + char *userName = connection->user; char *nodeName = connection->hostname; int nodePort = connection->port; PGconn *pgConn = connection->pgConn; @@ -264,15 +265,15 @@ ReportConnectionError(MultiConnection *connection, int elevel) if (messageDetail) { ereport(elevel, (errcode(ERRCODE_CONNECTION_FAILURE), - errmsg("connection to the remote node %s:%d failed with the " - "following error: %s", nodeName, nodePort, + errmsg("connection to the remote node %s@%s:%d failed with the " + "following error: %s", userName, nodeName, nodePort, messageDetail))); } else { ereport(elevel, (errcode(ERRCODE_CONNECTION_FAILURE), - errmsg("connection to the remote node %s:%d failed", - nodeName, nodePort))); + 
errmsg("connection to the remote node %s@%s:%d failed", + userName, nodeName, nodePort))); } } @@ -882,7 +883,7 @@ WaitForAllConnections(List *connectionList, bool raiseInterrupts) palloc(totalConnectionCount * sizeof(MultiConnection *)); WaitEvent *events = palloc(totalConnectionCount * sizeof(WaitEvent)); bool *connectionReady = palloc(totalConnectionCount * sizeof(bool)); - WaitEventSet *waitEventSet = NULL; + WaitEventSet *volatile waitEventSet = NULL; /* convert connection list to an array such that we can move items around */ MultiConnection *connectionItem = NULL; diff --git a/src/backend/distributed/connection/shared_connection_stats.c b/src/backend/distributed/connection/shared_connection_stats.c index fcd396fe4d4..26598b465f2 100644 --- a/src/backend/distributed/connection/shared_connection_stats.c +++ b/src/backend/distributed/connection/shared_connection_stats.c @@ -11,18 +11,21 @@ */ #include "postgres.h" -#include "pgstat.h" - -#include "distributed/pg_version_constants.h" #include "libpq-fe.h" - #include "miscadmin.h" +#include "pgstat.h" #include "access/hash.h" #include "access/htup_details.h" #include "catalog/pg_authid.h" #include "commands/dbcommands.h" +#include "common/hashfn.h" +#include "storage/ipc.h" +#include "utils/builtins.h" + +#include "pg_version_constants.h" + #include "distributed/backend_data.h" #include "distributed/cancel_utils.h" #include "distributed/connection_management.h" @@ -32,12 +35,9 @@ #include "distributed/multi_executor.h" #include "distributed/placement_connection.h" #include "distributed/shared_connection_stats.h" -#include "distributed/worker_manager.h" #include "distributed/time_constants.h" #include "distributed/tuplestore.h" -#include "utils/builtins.h" -#include "common/hashfn.h" -#include "storage/ipc.h" +#include "distributed/worker_manager.h" #define REMOTE_CONNECTION_STATS_COLUMNS 4 diff --git a/src/backend/distributed/connection/worker_log_messages.c b/src/backend/distributed/connection/worker_log_messages.c index 9c240620e8e..9b64b81a62d 100644 --- a/src/backend/distributed/connection/worker_log_messages.c +++ b/src/backend/distributed/connection/worker_log_messages.c @@ -10,12 +10,13 @@ #include "postgres.h" +#include "utils/elog.h" + #include "distributed/connection_management.h" #include "distributed/error_codes.h" #include "distributed/errormessage.h" #include "distributed/log_utils.h" #include "distributed/worker_log_messages.h" -#include "utils/elog.h" /* diff --git a/src/backend/distributed/deparser/citus_deparseutils.c b/src/backend/distributed/deparser/citus_deparseutils.c new file mode 100644 index 00000000000..061263f6e16 --- /dev/null +++ b/src/backend/distributed/deparser/citus_deparseutils.c @@ -0,0 +1,90 @@ +/*------------------------------------------------------------------------- + * + * citus_deparseutils.c + * + * This file contains common functions used for deparsing PostgreSQL + * statements to their equivalent SQL representation. + * + * Copyright (c) Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "commands/defrem.h" +#include "utils/builtins.h" +#include "utils/elog.h" +#include "utils/rel.h" +#include "utils/relcache.h" +#include "utils/syscache.h" +#include "utils/typcache.h" + +#include "pg_version_constants.h" + +#include "distributed/deparser.h" + + +/** + * DefElemOptionToStatement converts a DefElem option to a SQL statement and + * appends it to the given StringInfo buffer. 
+ * + * @param buf The StringInfo buffer to append the SQL statement to. + * @param option The DefElem option to convert to a SQL statement. + * @param optionFormats The option format specification to use for the conversion. + * @param optionFormatsLen The number of option formats in the opt_formats array. + */ +void +DefElemOptionToStatement(StringInfo buf, DefElem *option, + const DefElemOptionFormat *optionFormats, + int optionFormatsLen) +{ + const char *name = option->defname; + int i; + + for (i = 0; i < optionFormatsLen; i++) + { + if (strcmp(name, optionFormats[i].name) == 0) + { + switch (optionFormats[i].type) + { + case OPTION_FORMAT_STRING: + { + char *value = defGetString(option); + appendStringInfo(buf, optionFormats[i].format, quote_identifier( + value)); + break; + } + + case OPTION_FORMAT_INTEGER: + { + int32 value = defGetInt32(option); + appendStringInfo(buf, optionFormats[i].format, value); + break; + } + + case OPTION_FORMAT_BOOLEAN: + { + bool value = defGetBoolean(option); + appendStringInfo(buf, optionFormats[i].format, value ? "true" : + "false"); + break; + } + + case OPTION_FORMAT_LITERAL_CSTR: + { + char *value = defGetString(option); + appendStringInfo(buf, optionFormats[i].format, quote_literal_cstr( + value)); + break; + } + + default: + { + elog(ERROR, "unrecognized option type: %d", optionFormats[i].type); + break; + } + } + } + } +} diff --git a/src/backend/distributed/deparser/citus_grantutils.c b/src/backend/distributed/deparser/citus_grantutils.c index 8e0dadff2f2..8354e047944 100644 --- a/src/backend/distributed/deparser/citus_grantutils.c +++ b/src/backend/distributed/deparser/citus_grantutils.c @@ -1,8 +1,10 @@ #include "postgres.h" + #include "lib/stringinfo.h" #include "nodes/parsenodes.h" -#include "distributed/deparser.h" + #include "distributed/citus_ruleutils.h" +#include "distributed/deparser.h" /* * Append the 'WITH GRANT OPTION' clause to the given buffer if the given @@ -72,7 +74,7 @@ AppendGrantRestrictAndCascade(StringInfo buf, GrantStmt *stmt) void AppendGrantedByInGrantForRoleSpec(StringInfo buf, RoleSpec *grantor, bool isGrant) { - if (isGrant && grantor) + if (grantor) { appendStringInfo(buf, " GRANTED BY %s", RoleSpecString(grantor, true)); } diff --git a/src/backend/distributed/deparser/citus_ruleutils.c b/src/backend/distributed/deparser/citus_ruleutils.c index 220ea3ec79b..f99462058d9 100644 --- a/src/backend/distributed/deparser/citus_ruleutils.c +++ b/src/backend/distributed/deparser/citus_ruleutils.c @@ -7,12 +7,11 @@ *------------------------------------------------------------------------- */ -#include "postgres.h" -#include "miscadmin.h" +#include -#include "distributed/pg_version_constants.h" +#include "postgres.h" -#include +#include "miscadmin.h" #include "access/attnum.h" #include "access/genam.h" @@ -39,21 +38,11 @@ #include "catalog/pg_type.h" #include "commands/defrem.h" #include "commands/extension.h" -#include "distributed/citus_ruleutils.h" -#include "distributed/commands.h" -#include "distributed/listutils.h" -#include "distributed/multi_partitioning_utils.h" -#include "distributed/metadata_cache.h" -#include "distributed/metadata_sync.h" -#include "distributed/metadata_utility.h" -#include "distributed/namespace_utils.h" -#include "distributed/relay_utility.h" -#include "distributed/version_compat.h" -#include "distributed/worker_protocol.h" +#include "commands/sequence.h" #include "foreign/foreign.h" #include "lib/stringinfo.h" -#include "nodes/nodes.h" #include "nodes/nodeFuncs.h" +#include "nodes/nodes.h" 
#include "nodes/parsenodes.h" #include "nodes/pg_list.h" #include "parser/parse_utilcmd.h" @@ -71,7 +60,20 @@ #include "utils/relcache.h" #include "utils/ruleutils.h" #include "utils/syscache.h" -#include "commands/sequence.h" + +#include "pg_version_constants.h" + +#include "distributed/citus_ruleutils.h" +#include "distributed/commands.h" +#include "distributed/listutils.h" +#include "distributed/metadata_cache.h" +#include "distributed/metadata_sync.h" +#include "distributed/metadata_utility.h" +#include "distributed/multi_partitioning_utils.h" +#include "distributed/namespace_utils.h" +#include "distributed/relay_utility.h" +#include "distributed/version_compat.h" +#include "distributed/worker_protocol.h" static void deparse_index_columns(StringInfo buffer, List *indexParameterList, diff --git a/src/backend/distributed/deparser/citus_setutils.c b/src/backend/distributed/deparser/citus_setutils.c index 481a2860b33..c113a47d504 100644 --- a/src/backend/distributed/deparser/citus_setutils.c +++ b/src/backend/distributed/deparser/citus_setutils.c @@ -1,18 +1,18 @@ #include "postgres.h" -#include "pg_version_compat.h" - #include "catalog/namespace.h" +#include "commands/defrem.h" #include "lib/stringinfo.h" #include "nodes/parsenodes.h" +#include "nodes/print.h" +#include "parser/parse_type.h" #include "utils/builtins.h" -#include "distributed/deparser.h" +#include "pg_version_compat.h" + #include "distributed/citus_ruleutils.h" -#include "commands/defrem.h" +#include "distributed/deparser.h" #include "distributed/log_utils.h" -#include "parser/parse_type.h" -#include "nodes/print.h" void AppendVarSetValue(StringInfo buf, VariableSetStmt *setStmt); diff --git a/src/backend/distributed/deparser/deparse_collation_stmts.c b/src/backend/distributed/deparser/deparse_collation_stmts.c index 44f7f9098ee..3a568d2ad42 100644 --- a/src/backend/distributed/deparser/deparse_collation_stmts.c +++ b/src/backend/distributed/deparser/deparse_collation_stmts.c @@ -17,8 +17,8 @@ #include "nodes/value.h" #include "utils/builtins.h" -#include "distributed/deparser.h" #include "distributed/citus_ruleutils.h" +#include "distributed/deparser.h" static void AppendDropCollationStmt(StringInfo buf, DropStmt *stmt); static void AppendRenameCollationStmt(StringInfo buf, RenameStmt *stmt); diff --git a/src/backend/distributed/deparser/deparse_comment_stmts.c b/src/backend/distributed/deparser/deparse_comment_stmts.c new file mode 100644 index 00000000000..36a63c97b11 --- /dev/null +++ b/src/backend/distributed/deparser/deparse_comment_stmts.c @@ -0,0 +1,77 @@ +/*------------------------------------------------------------------------- + * + * deparse_coment_stmts.c + * + * All routines to deparse comment statements. + * + * Copyright (c), Citus Data, Inc. 
+ * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "catalog/namespace.h" +#include "commands/defrem.h" +#include "lib/stringinfo.h" +#include "nodes/parsenodes.h" +#include "parser/parse_type.h" +#include "utils/builtins.h" +#include "utils/elog.h" + +#include "pg_version_compat.h" + +#include "distributed/citus_ruleutils.h" +#include "distributed/commands.h" +#include "distributed/comment.h" +#include "distributed/deparser.h" +#include "distributed/listutils.h" +#include "distributed/log_utils.h" + + +const char *ObjectTypeNames[] = +{ + [OBJECT_DATABASE] = "DATABASE", + [OBJECT_ROLE] = "ROLE", + [OBJECT_TSCONFIGURATION] = "TEXT SEARCH CONFIGURATION", + [OBJECT_TSDICTIONARY] = "TEXT SEARCH DICTIONARY", + + /* When support for propagating comments to new objects is introduced, an entry for + * each supported statement type should be added to this list. Each entry maps the + * ObjectType of stmt->object (the node carrying the name of the commented object) to + * the object-type keyword emitted in the deparsed 'COMMENT ON ..' statement + * (e.g. DATABASE). + */ +}; + +char * +DeparseCommentStmt(Node *node) +{ + CommentStmt *stmt = castNode(CommentStmt, node); + StringInfoData str = { 0 }; + initStringInfo(&str); + + const char *objectName = NULL; + if (IsA(stmt->object, String)) + { + objectName = quote_identifier(strVal(stmt->object)); + } + else if (IsA(stmt->object, List)) + { + objectName = NameListToQuotedString(castNode(List, stmt->object)); + } + else + { + ereport(ERROR, + (errcode(ERRCODE_INTERNAL_ERROR), + errmsg("unknown object type"))); + } + + const char *objectType = ObjectTypeNames[stmt->objtype]; + + char *comment = stmt->comment != NULL ?
quote_literal_cstr(stmt->comment) : "NULL"; + + + appendStringInfo(&str, "COMMENT ON %s %s IS %s;", objectType, objectName, comment); + + return str.data; +} diff --git a/src/backend/distributed/deparser/deparse_database_stmts.c b/src/backend/distributed/deparser/deparse_database_stmts.c index d3d3ce633ad..66df5361e50 100644 --- a/src/backend/distributed/deparser/deparse_database_stmts.c +++ b/src/backend/distributed/deparser/deparse_database_stmts.c @@ -11,24 +11,57 @@ #include "postgres.h" -#include "pg_version_compat.h" - #include "catalog/namespace.h" +#include "commands/defrem.h" #include "lib/stringinfo.h" #include "nodes/parsenodes.h" +#include "parser/parse_type.h" #include "utils/builtins.h" -#include "distributed/deparser.h" +#include "pg_version_compat.h" + #include "distributed/citus_ruleutils.h" -#include "commands/defrem.h" +#include "distributed/commands.h" #include "distributed/deparser.h" +#include "distributed/listutils.h" #include "distributed/log_utils.h" -#include "parser/parse_type.h" static void AppendAlterDatabaseOwnerStmt(StringInfo buf, AlterOwnerStmt *stmt); +static void AppendAlterDatabaseSetStmt(StringInfo buf, AlterDatabaseSetStmt *stmt); static void AppendAlterDatabaseStmt(StringInfo buf, AlterDatabaseStmt *stmt); -static void AppendDefElemConnLimit(StringInfo buf, DefElem *def); +static void AppendCreateDatabaseStmt(StringInfo buf, CreatedbStmt *stmt); +static void AppendDropDatabaseStmt(StringInfo buf, DropdbStmt *stmt); +static void AppendGrantOnDatabaseStmt(StringInfo buf, GrantStmt *stmt); +static void AppendBasicAlterDatabaseOptions(StringInfo buf, AlterDatabaseStmt *stmt); +static void AppendGrantDatabases(StringInfo buf, GrantStmt *stmt); +static void AppendAlterDatabaseSetTablespace(StringInfo buf, DefElem *def, char *dbname); + +const DefElemOptionFormat createDatabaseOptionFormats[] = { + { "owner", " OWNER %s", OPTION_FORMAT_STRING }, + { "template", " TEMPLATE %s", OPTION_FORMAT_STRING }, + { "encoding", " ENCODING %s", OPTION_FORMAT_LITERAL_CSTR }, + { "strategy", " STRATEGY %s", OPTION_FORMAT_LITERAL_CSTR }, + { "locale", " LOCALE %s", OPTION_FORMAT_LITERAL_CSTR }, + { "lc_collate", " LC_COLLATE %s", OPTION_FORMAT_LITERAL_CSTR }, + { "lc_ctype", " LC_CTYPE %s", OPTION_FORMAT_LITERAL_CSTR }, + { "icu_locale", " ICU_LOCALE %s", OPTION_FORMAT_LITERAL_CSTR }, + { "icu_rules", " ICU_RULES %s", OPTION_FORMAT_LITERAL_CSTR }, + { "locale_provider", " LOCALE_PROVIDER %s", OPTION_FORMAT_LITERAL_CSTR }, + { "collation_version", " COLLATION_VERSION %s", OPTION_FORMAT_LITERAL_CSTR }, + { "tablespace", " TABLESPACE %s", OPTION_FORMAT_STRING }, + { "allow_connections", " ALLOW_CONNECTIONS %s", OPTION_FORMAT_BOOLEAN }, + { "connection_limit", " CONNECTION LIMIT %d", OPTION_FORMAT_INTEGER }, + { "is_template", " IS_TEMPLATE %s", OPTION_FORMAT_BOOLEAN } +}; + + +const DefElemOptionFormat alterDatabaseOptionFormats[] = { + { "is_template", " IS_TEMPLATE %s", OPTION_FORMAT_BOOLEAN }, + { "allow_connections", " ALLOW_CONNECTIONS %s", OPTION_FORMAT_BOOLEAN }, + { "connection_limit", " CONNECTION LIMIT %d", OPTION_FORMAT_INTEGER }, +}; + char * DeparseAlterDatabaseOwnerStmt(Node *node) @@ -88,52 +121,67 @@ AppendGrantOnDatabaseStmt(StringInfo buf, GrantStmt *stmt) } -static void -AppendDefElemConnLimit(StringInfo buf, DefElem *def) -{ - appendStringInfo(buf, " CONNECTION LIMIT %ld", (long int) defGetNumeric(def)); -} - - static void AppendAlterDatabaseStmt(StringInfo buf, AlterDatabaseStmt *stmt) { - appendStringInfo(buf, "ALTER DATABASE %s ", 
quote_identifier(stmt->dbname)); + if (list_length(stmt->options) == 0) + { + elog(ERROR, "got unexpected number of options for ALTER DATABASE"); + } if (stmt->options) { - ListCell *cell = NULL; - appendStringInfo(buf, "WITH "); - foreach(cell, stmt->options) + DefElem *firstOption = linitial(stmt->options); + if (strcmp(firstOption->defname, "tablespace") == 0) { - DefElem *def = castNode(DefElem, lfirst(cell)); - if (strcmp(def->defname, "is_template") == 0) - { - appendStringInfo(buf, "IS_TEMPLATE %s", - quote_literal_cstr(strVal(def->arg))); - } - else if (strcmp(def->defname, "connection_limit") == 0) - { - AppendDefElemConnLimit(buf, def); - } - else if (strcmp(def->defname, "allow_connections") == 0) - { - ereport(ERROR, - errmsg("ALLOW_CONNECTIONS is not supported")); - } - else - { - ereport(ERROR, - errmsg("unrecognized ALTER DATABASE option: %s", - def->defname)); - } + AppendAlterDatabaseSetTablespace(buf, firstOption, stmt->dbname); + + /* SET tablespace cannot be combined with other options */ + return; } + + + appendStringInfo(buf, "ALTER DATABASE %s WITH", + quote_identifier(stmt->dbname)); + + AppendBasicAlterDatabaseOptions(buf, stmt); } appendStringInfo(buf, ";"); } +static void +AppendAlterDatabaseSetTablespace(StringInfo buf, DefElem *def, char *dbname) +{ + appendStringInfo(buf, + "ALTER DATABASE %s SET TABLESPACE %s", + quote_identifier(dbname), quote_identifier(defGetString(def))); +} + + +/* + * AppendBasicAlterDatabaseOptions appends basic ALTER DATABASE options to a string buffer. + * Basic options are those that can be appended to the ALTER DATABASE statement + * after the "WITH" keyword.(i.e. ALLOW_CONNECTIONS, CONNECTION LIMIT, IS_TEMPLATE) + * For example, the tablespace option is not a basic option since it is defined via SET keyword. + * + * This function takes a string buffer and an AlterDatabaseStmt as input. + * It appends the basic options to the string buffer. + * + */ +static void +AppendBasicAlterDatabaseOptions(StringInfo buf, AlterDatabaseStmt *stmt) +{ + DefElem *def = NULL; + foreach_ptr(def, stmt->options) + { + DefElemOptionToStatement(buf, def, alterDatabaseOptionFormats, lengthof( + alterDatabaseOptionFormats)); + } +} + + char * DeparseGrantOnDatabaseStmt(Node *node) { @@ -193,6 +241,22 @@ AppendAlterDatabaseSetStmt(StringInfo buf, AlterDatabaseSetStmt *stmt) } +char * +DeparseAlterDatabaseRenameStmt(Node *node) +{ + RenameStmt *stmt = (RenameStmt *) node; + + StringInfoData str; + initStringInfo(&str); + + appendStringInfo(&str, "ALTER DATABASE %s RENAME TO %s", + quote_identifier(stmt->subname), + quote_identifier(stmt->newname)); + + return str.data; +} + + char * DeparseAlterDatabaseSetStmt(Node *node) { @@ -205,3 +269,92 @@ DeparseAlterDatabaseSetStmt(Node *node) return str.data; } + + +static void +AppendCreateDatabaseStmt(StringInfo buf, CreatedbStmt *stmt) +{ + /* + * Make sure that we don't try to deparse something that this + * function doesn't expect. + * + * This is also useful to throw an error for unsupported CREATE + * DATABASE options when the command is issued from non-main dbs + * because we use the same function to deparse CREATE DATABASE + * commands there too. 
+ */ + EnsureSupportedCreateDatabaseCommand(stmt); + + appendStringInfo(buf, + "CREATE DATABASE %s", + quote_identifier(stmt->dbname)); + + DefElem *option = NULL; + foreach_ptr(option, stmt->options) + { + DefElemOptionToStatement(buf, option, createDatabaseOptionFormats, + lengthof(createDatabaseOptionFormats)); + } +} + + +char * +DeparseCreateDatabaseStmt(Node *node) +{ + CreatedbStmt *stmt = castNode(CreatedbStmt, node); + StringInfoData str = { 0 }; + initStringInfo(&str); + + AppendCreateDatabaseStmt(&str, stmt); + + return str.data; +} + + +static void +AppendDropDatabaseStmt(StringInfo buf, DropdbStmt *stmt) +{ + char *ifExistsStatement = stmt->missing_ok ? "IF EXISTS" : ""; + appendStringInfo(buf, + "DROP DATABASE %s %s", + ifExistsStatement, + quote_identifier(stmt->dbname)); + + if (list_length(stmt->options) > 1) + { + /* FORCE is the only option that can be provided for this command */ + elog(ERROR, "got unexpected number of options for DROP DATABASE"); + } + else if (list_length(stmt->options) == 1) + { + DefElem *option = linitial(stmt->options); + appendStringInfo(buf, " WITH ( "); + + if (strcmp(option->defname, "force") == 0) + { + appendStringInfo(buf, "FORCE"); + } + else + { + /* FORCE is the only option that can be provided for this command */ + ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("unrecognized DROP DATABASE option \"%s\"", + option->defname))); + } + + appendStringInfo(buf, " )"); + } +} + + +char * +DeparseDropDatabaseStmt(Node *node) +{ + DropdbStmt *stmt = castNode(DropdbStmt, node); + StringInfoData str = { 0 }; + initStringInfo(&str); + + AppendDropDatabaseStmt(&str, stmt); + + return str.data; +} diff --git a/src/backend/distributed/deparser/deparse_extension_stmts.c b/src/backend/distributed/deparser/deparse_extension_stmts.c index bb6b15dbd86..92d54602f06 100644 --- a/src/backend/distributed/deparser/deparse_extension_stmts.c +++ b/src/backend/distributed/deparser/deparse_extension_stmts.c @@ -14,13 +14,14 @@ #include "catalog/namespace.h" #include "commands/defrem.h" -#include "distributed/deparser.h" -#include "distributed/listutils.h" #include "lib/stringinfo.h" #include "nodes/parsenodes.h" #include "nodes/pg_list.h" #include "utils/builtins.h" +#include "distributed/deparser.h" +#include "distributed/listutils.h" + /* Local functions forward declarations for helper functions */ static void AppendCreateExtensionStmt(StringInfo buf, CreateExtensionStmt *stmt); static void AppendCreateExtensionStmtOptions(StringInfo buf, List *options); diff --git a/src/backend/distributed/deparser/deparse_foreign_data_wrapper_stmts.c b/src/backend/distributed/deparser/deparse_foreign_data_wrapper_stmts.c index 3f755c90586..fab1cc7ab05 100644 --- a/src/backend/distributed/deparser/deparse_foreign_data_wrapper_stmts.c +++ b/src/backend/distributed/deparser/deparse_foreign_data_wrapper_stmts.c @@ -10,13 +10,14 @@ #include "postgres.h" #include "commands/defrem.h" +#include "lib/stringinfo.h" +#include "nodes/nodes.h" +#include "utils/builtins.h" + #include "distributed/citus_ruleutils.h" #include "distributed/deparser.h" #include "distributed/listutils.h" #include "distributed/relay_utility.h" -#include "lib/stringinfo.h" -#include "nodes/nodes.h" -#include "utils/builtins.h" static void AppendGrantOnFDWStmt(StringInfo buf, GrantStmt *stmt); static void AppendGrantOnFDWNames(StringInfo buf, GrantStmt *stmt); diff --git a/src/backend/distributed/deparser/deparse_foreign_server_stmts.c b/src/backend/distributed/deparser/deparse_foreign_server_stmts.c index 
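As a sanity check on the option tables above, here is a hedged sketch of what the new CREATE DATABASE deparser is expected to produce for a parsed statement. ExampleDeparseCreateDatabase() and the sample command are hypothetical; the sketch assumes PG 14+ (for RAW_PARSE_DEFAULT) and that the chosen options are accepted by EnsureSupportedCreateDatabaseCommand().

/* illustrative sketch only; not part of the patch */
#include "postgres.h"

#include "nodes/parsenodes.h"
#include "nodes/pg_list.h"
#include "parser/parser.h"

#include "distributed/deparser.h"

static char *
ExampleDeparseCreateDatabase(void)
{
	/* parse a sample command; RAW_PARSE_DEFAULT is available on PG 14+ */
	List *parseTreeList =
		raw_parser("CREATE DATABASE db1 OWNER = admin_role TEMPLATE = template0 "
				   "CONNECTION LIMIT = 5;", RAW_PARSE_DEFAULT);
	RawStmt *rawStmt = linitial_node(RawStmt, parseTreeList);

	/*
	 * Expected output, assuming the options pass EnsureSupportedCreateDatabaseCommand():
	 *   CREATE DATABASE db1 OWNER admin_role TEMPLATE template0 CONNECTION LIMIT 5
	 */
	return DeparseCreateDatabaseStmt(rawStmt->stmt);
}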
403569b948e..9c708a771a7 100644 --- a/src/backend/distributed/deparser/deparse_foreign_server_stmts.c +++ b/src/backend/distributed/deparser/deparse_foreign_server_stmts.c @@ -10,13 +10,14 @@ #include "postgres.h" #include "commands/defrem.h" +#include "lib/stringinfo.h" +#include "nodes/nodes.h" +#include "utils/builtins.h" + #include "distributed/citus_ruleutils.h" #include "distributed/deparser.h" #include "distributed/listutils.h" #include "distributed/relay_utility.h" -#include "lib/stringinfo.h" -#include "nodes/nodes.h" -#include "utils/builtins.h" static void AppendCreateForeignServerStmt(StringInfo buf, CreateForeignServerStmt *stmt); static void AppendAlterForeignServerStmt(StringInfo buf, AlterForeignServerStmt *stmt); diff --git a/src/backend/distributed/deparser/deparse_function_stmts.c b/src/backend/distributed/deparser/deparse_function_stmts.c index a5bc52e5a5a..1e3e4a651a8 100644 --- a/src/backend/distributed/deparser/deparse_function_stmts.c +++ b/src/backend/distributed/deparser/deparse_function_stmts.c @@ -22,10 +22,6 @@ #include "catalog/pg_proc.h" #include "catalog/pg_type.h" #include "commands/defrem.h" -#include "distributed/citus_ruleutils.h" -#include "distributed/commands.h" -#include "distributed/deparser.h" -#include "distributed/version_compat.h" #include "lib/stringinfo.h" #include "nodes/makefuncs.h" #include "nodes/nodes.h" @@ -38,8 +34,13 @@ #include "utils/guc.h" #include "utils/lsyscache.h" #include "utils/memutils.h" -#include "utils/syscache.h" #include "utils/regproc.h" +#include "utils/syscache.h" + +#include "distributed/citus_ruleutils.h" +#include "distributed/commands.h" +#include "distributed/deparser.h" +#include "distributed/version_compat.h" /* forward declaration for deparse functions */ diff --git a/src/backend/distributed/deparser/deparse_owned_stmts.c b/src/backend/distributed/deparser/deparse_owned_stmts.c index 888071165f8..93572a4eee6 100644 --- a/src/backend/distributed/deparser/deparse_owned_stmts.c +++ b/src/backend/distributed/deparser/deparse_owned_stmts.c @@ -11,13 +11,14 @@ #include "postgres.h" +#include "lib/stringinfo.h" +#include "nodes/parsenodes.h" +#include "utils/builtins.h" + #include "pg_version_compat.h" #include "distributed/citus_ruleutils.h" #include "distributed/deparser.h" -#include "lib/stringinfo.h" -#include "nodes/parsenodes.h" -#include "utils/builtins.h" static void AppendDropOwnedStmt(StringInfo buf, DropOwnedStmt *stmt); static void AppendRoleList(StringInfo buf, List *roleList); @@ -70,7 +71,7 @@ AppendRoleList(StringInfo buf, List *roleList) { Node *roleNode = (Node *) lfirst(cell); Assert(IsA(roleNode, RoleSpec) || IsA(roleNode, AccessPriv)); - char const *rolename = NULL; + const char *rolename = NULL; if (IsA(roleNode, RoleSpec)) { rolename = RoleSpecString((RoleSpec *) roleNode, true); @@ -82,3 +83,27 @@ AppendRoleList(StringInfo buf, List *roleList) } } } + + +static void +AppendReassignOwnedStmt(StringInfo buf, ReassignOwnedStmt *stmt) +{ + appendStringInfo(buf, "REASSIGN OWNED BY "); + + AppendRoleList(buf, stmt->roles); + const char *newRoleName = RoleSpecString(stmt->newrole, true); + appendStringInfo(buf, " TO %s", newRoleName); +} + + +char * +DeparseReassignOwnedStmt(Node *node) +{ + ReassignOwnedStmt *stmt = castNode(ReassignOwnedStmt, node); + StringInfoData buf = { 0 }; + initStringInfo(&buf); + + AppendReassignOwnedStmt(&buf, stmt); + + return buf.data; +} diff --git a/src/backend/distributed/deparser/deparse_publication_stmts.c 
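A small illustrative sketch of the comment deparser defined in deparse_comment_stmts.c above; ExampleDeparseDatabaseComment() and its sample values are hypothetical and only show the expected shape of the output.

/* illustrative sketch only; not part of the patch */
#include "postgres.h"

#include "nodes/parsenodes.h"
#include "nodes/value.h"

#include "distributed/deparser.h"

static char *
ExampleDeparseDatabaseComment(void)
{
	CommentStmt *stmt = makeNode(CommentStmt);
	stmt->objtype = OBJECT_DATABASE;
	stmt->object = (Node *) makeString(pstrdup("db1"));
	stmt->comment = pstrdup("analytics database");

	/* expected output: COMMENT ON DATABASE db1 IS 'analytics database'; */
	return DeparseCommentStmt((Node *) stmt);
}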
b/src/backend/distributed/deparser/deparse_publication_stmts.c index e2233314634..8e311817199 100644 --- a/src/backend/distributed/deparser/deparse_publication_stmts.c +++ b/src/backend/distributed/deparser/deparse_publication_stmts.c @@ -13,20 +13,21 @@ #include "access/relation.h" #include "catalog/namespace.h" #include "commands/defrem.h" -#include "distributed/citus_ruleutils.h" -#include "distributed/deparser.h" -#include "distributed/listutils.h" -#include "distributed/namespace_utils.h" #include "lib/stringinfo.h" +#include "nodes/value.h" #include "parser/parse_clause.h" #include "parser/parse_collate.h" #include "parser/parse_node.h" #include "parser/parse_relation.h" -#include "nodes/value.h" #include "utils/builtins.h" #include "utils/lsyscache.h" #include "utils/ruleutils.h" +#include "distributed/citus_ruleutils.h" +#include "distributed/deparser.h" +#include "distributed/listutils.h" +#include "distributed/namespace_utils.h" + static void AppendCreatePublicationStmt(StringInfo buf, CreatePublicationStmt *stmt, bool whereClauseNeedsTransform, diff --git a/src/backend/distributed/deparser/deparse_role_stmts.c b/src/backend/distributed/deparser/deparse_role_stmts.c index ee216809e7c..a4a085026c9 100644 --- a/src/backend/distributed/deparser/deparse_role_stmts.c +++ b/src/backend/distributed/deparser/deparse_role_stmts.c @@ -13,15 +13,16 @@ #include "postgres.h" +#include "commands/defrem.h" +#include "lib/stringinfo.h" +#include "nodes/parsenodes.h" +#include "utils/builtins.h" + #include "pg_version_compat.h" -#include "commands/defrem.h" #include "distributed/citus_ruleutils.h" #include "distributed/deparser.h" #include "distributed/listutils.h" -#include "lib/stringinfo.h" -#include "nodes/parsenodes.h" -#include "utils/builtins.h" static void AppendAlterRoleStmt(StringInfo buf, AlterRoleStmt *stmt); static void AppendAlterRoleSetStmt(StringInfo buf, AlterRoleSetStmt *stmt); @@ -410,6 +411,16 @@ AppendRevokeAdminOptionFor(StringInfo buf, GrantRoleStmt *stmt) appendStringInfo(buf, "ADMIN OPTION FOR "); break; } + else if (strcmp(opt->defname, "inherit") == 0) + { + appendStringInfo(buf, "INHERIT OPTION FOR "); + break; + } + else if (strcmp(opt->defname, "set") == 0) + { + appendStringInfo(buf, "SET OPTION FOR "); + break; + } } } #else @@ -427,16 +438,29 @@ AppendGrantWithAdminOption(StringInfo buf, GrantRoleStmt *stmt) if (stmt->is_grant) { #if PG_VERSION_NUM >= PG_VERSION_16 + int opt_count = 0; DefElem *opt = NULL; foreach_ptr(opt, stmt->opt) { - bool admin_option = false; char *optval = defGetString(opt); - if (strcmp(opt->defname, "admin") == 0 && - parse_bool(optval, &admin_option) && admin_option) + bool option_value = false; + if (parse_bool(optval, &option_value)) { - appendStringInfo(buf, " WITH ADMIN OPTION"); - break; + opt_count++; + char *prefix = opt_count > 1 ? "," : " WITH"; + if (strcmp(opt->defname, "inherit") == 0) + { + appendStringInfo(buf, "%s INHERIT %s", prefix, option_value ? 
"TRUE" : + "FALSE"); + } + else if (strcmp(opt->defname, "admin") == 0 && option_value) + { + appendStringInfo(buf, "%s ADMIN OPTION", prefix); + } + else if (strcmp(opt->defname, "set") == 0 && !option_value) + { + appendStringInfo(buf, "%s SET FALSE", prefix); + } } } #else @@ -464,7 +488,6 @@ AppendGrantRoleStmt(StringInfo buf, GrantRoleStmt *stmt) AppendGrantWithAdminOption(buf, stmt); AppendGrantedByInGrantForRoleSpec(buf, stmt->grantor, stmt->is_grant); AppendGrantRestrictAndCascadeForRoleSpec(buf, stmt->behavior, stmt->is_grant); - AppendGrantedByInGrantForRoleSpec(buf, stmt->grantor, stmt->is_grant); appendStringInfo(buf, ";"); } diff --git a/src/backend/distributed/deparser/deparse_schema_stmts.c b/src/backend/distributed/deparser/deparse_schema_stmts.c index 10317b89971..0a9c49801aa 100644 --- a/src/backend/distributed/deparser/deparse_schema_stmts.c +++ b/src/backend/distributed/deparser/deparse_schema_stmts.c @@ -12,13 +12,14 @@ */ #include "postgres.h" -#include "distributed/citus_ruleutils.h" -#include "distributed/deparser.h" -#include "distributed/listutils.h" #include "lib/stringinfo.h" #include "nodes/nodes.h" #include "utils/builtins.h" +#include "distributed/citus_ruleutils.h" +#include "distributed/deparser.h" +#include "distributed/listutils.h" + static void AppendCreateSchemaStmt(StringInfo buf, CreateSchemaStmt *stmt); static void AppendDropSchemaStmt(StringInfo buf, DropStmt *stmt); static void AppendGrantOnSchemaStmt(StringInfo buf, GrantStmt *stmt); diff --git a/src/backend/distributed/deparser/deparse_seclabel_stmts.c b/src/backend/distributed/deparser/deparse_seclabel_stmts.c new file mode 100644 index 00000000000..ffe775b76ee --- /dev/null +++ b/src/backend/distributed/deparser/deparse_seclabel_stmts.c @@ -0,0 +1,79 @@ +/*------------------------------------------------------------------------- + * + * deparse_seclabel_stmts.c + * All routines to deparse SECURITY LABEL statements. + * + * Copyright (c), Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "nodes/parsenodes.h" +#include "utils/builtins.h" + +#include "distributed/deparser.h" + +static void AppendSecLabelStmt(StringInfo buf, SecLabelStmt *stmt); + +/* + * DeparseSecLabelStmt builds and returns a string representing of the + * SecLabelStmt for application on a remote server. + */ +char * +DeparseSecLabelStmt(Node *node) +{ + SecLabelStmt *secLabelStmt = castNode(SecLabelStmt, node); + StringInfoData buf = { 0 }; + initStringInfo(&buf); + + AppendSecLabelStmt(&buf, secLabelStmt); + + return buf.data; +} + + +/* + * AppendSecLabelStmt generates the string representation of the + * SecLabelStmt and appends it to the buffer. 
+ */ +static void +AppendSecLabelStmt(StringInfo buf, SecLabelStmt *stmt) +{ + appendStringInfoString(buf, "SECURITY LABEL "); + + if (stmt->provider != NULL) + { + appendStringInfo(buf, "FOR %s ", quote_identifier(stmt->provider)); + } + + appendStringInfoString(buf, "ON "); + + switch (stmt->objtype) + { + case OBJECT_ROLE: + { + appendStringInfo(buf, "ROLE %s ", quote_identifier(strVal(stmt->object))); + break; + } + + /* normally, we shouldn't reach this */ + default: + { + ereport(ERROR, (errmsg("unsupported security label statement for" + " deparsing"))); + } + } + + appendStringInfoString(buf, "IS "); + + if (stmt->label != NULL) + { + appendStringInfo(buf, "%s", quote_literal_cstr(stmt->label)); + } + else + { + appendStringInfoString(buf, "NULL"); + } +} diff --git a/src/backend/distributed/deparser/deparse_sequence_stmts.c b/src/backend/distributed/deparser/deparse_sequence_stmts.c index 98488c16013..9e5fab2c815 100644 --- a/src/backend/distributed/deparser/deparse_sequence_stmts.c +++ b/src/backend/distributed/deparser/deparse_sequence_stmts.c @@ -14,12 +14,13 @@ #include "postgres.h" #include "catalog/namespace.h" -#include "distributed/deparser.h" -#include "distributed/version_compat.h" #include "utils/acl.h" #include "utils/builtins.h" #include "utils/lsyscache.h" +#include "distributed/deparser.h" +#include "distributed/version_compat.h" + /* forward declaration for deparse functions */ static void AppendDropSequenceStmt(StringInfo buf, DropStmt *stmt); diff --git a/src/backend/distributed/deparser/deparse_statistics_stmts.c b/src/backend/distributed/deparser/deparse_statistics_stmts.c index 4a165ec7220..99b9d1c2ddf 100644 --- a/src/backend/distributed/deparser/deparse_statistics_stmts.c +++ b/src/backend/distributed/deparser/deparse_statistics_stmts.c @@ -12,16 +12,17 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" - #include "catalog/namespace.h" +#include "lib/stringinfo.h" +#include "nodes/nodes.h" +#include "utils/builtins.h" + +#include "pg_version_constants.h" + #include "distributed/citus_ruleutils.h" #include "distributed/deparser.h" #include "distributed/listutils.h" #include "distributed/relay_utility.h" -#include "lib/stringinfo.h" -#include "nodes/nodes.h" -#include "utils/builtins.h" static void AppendCreateStatisticsStmt(StringInfo buf, CreateStatsStmt *stmt); static void AppendDropStatisticsStmt(StringInfo buf, List *nameList, bool ifExists); diff --git a/src/backend/distributed/deparser/deparse_table_stmts.c b/src/backend/distributed/deparser/deparse_table_stmts.c index ff96d7fc336..5d184fa665b 100644 --- a/src/backend/distributed/deparser/deparse_table_stmts.c +++ b/src/backend/distributed/deparser/deparse_table_stmts.c @@ -13,20 +13,20 @@ #include "catalog/heap.h" #include "commands/defrem.h" -#include "distributed/commands.h" -#include "distributed/deparser.h" -#include "distributed/version_compat.h" +#include "commands/tablecmds.h" #include "nodes/nodes.h" #include "nodes/parsenodes.h" #include "parser/parse_expr.h" -#include "parser/parse_type.h" #include "parser/parse_relation.h" +#include "parser/parse_type.h" #include "utils/builtins.h" #include "utils/lsyscache.h" #include "utils/ruleutils.h" +#include "distributed/commands.h" +#include "distributed/deparser.h" #include "distributed/namespace_utils.h" -#include "commands/tablecmds.h" +#include "distributed/version_compat.h" static void AppendAlterTableSchemaStmt(StringInfo buf, AlterObjectSchemaStmt *stmt); static void AppendAlterTableStmt(StringInfo buf, AlterTableStmt 
*stmt); @@ -121,7 +121,7 @@ AppendAlterTableStmt(StringInfo buf, AlterTableStmt *stmt) * AppendColumnNameList converts a list of columns into comma separated string format * (colname_1, colname_2, .., colname_n). */ -static void +void AppendColumnNameList(StringInfo buf, List *columns) { appendStringInfoString(buf, " ("); diff --git a/src/backend/distributed/deparser/deparse_text_search.c b/src/backend/distributed/deparser/deparse_text_search.c index e0c750d0d54..ab5498ad81a 100644 --- a/src/backend/distributed/deparser/deparse_text_search.c +++ b/src/backend/distributed/deparser/deparse_text_search.c @@ -395,68 +395,6 @@ DeparseAlterTextSearchDictionarySchemaStmt(Node *node) } -/* - * DeparseTextSearchConfigurationCommentStmt returns the sql statement representing - * COMMENT ON TEXT SEARCH CONFIGURATION ... IS ... - */ -char * -DeparseTextSearchConfigurationCommentStmt(Node *node) -{ - CommentStmt *stmt = castNode(CommentStmt, node); - Assert(stmt->objtype == OBJECT_TSCONFIGURATION); - - StringInfoData buf = { 0 }; - initStringInfo(&buf); - - appendStringInfo(&buf, "COMMENT ON TEXT SEARCH CONFIGURATION %s IS ", - NameListToQuotedString(castNode(List, stmt->object))); - - if (stmt->comment == NULL) - { - appendStringInfoString(&buf, "NULL"); - } - else - { - appendStringInfoString(&buf, quote_literal_cstr(stmt->comment)); - } - - appendStringInfoString(&buf, ";"); - - return buf.data; -} - - -/* - * DeparseTextSearchDictionaryCommentStmt returns the sql statement representing - * COMMENT ON TEXT SEARCH DICTIONARY ... IS ... - */ -char * -DeparseTextSearchDictionaryCommentStmt(Node *node) -{ - CommentStmt *stmt = castNode(CommentStmt, node); - Assert(stmt->objtype == OBJECT_TSDICTIONARY); - - StringInfoData buf = { 0 }; - initStringInfo(&buf); - - appendStringInfo(&buf, "COMMENT ON TEXT SEARCH DICTIONARY %s IS ", - NameListToQuotedString(castNode(List, stmt->object))); - - if (stmt->comment == NULL) - { - appendStringInfoString(&buf, "NULL"); - } - else - { - appendStringInfoString(&buf, quote_literal_cstr(stmt->comment)); - } - - appendStringInfoString(&buf, ";"); - - return buf.data; -} - - /* * AppendStringInfoTokentypeList specializes in adding a comma separated list of * token_tyoe's to TEXT SEARCH CONFIGURATION commands diff --git a/src/backend/distributed/deparser/deparse_view_stmts.c b/src/backend/distributed/deparser/deparse_view_stmts.c index 39c4ccb6369..5592aec9d53 100644 --- a/src/backend/distributed/deparser/deparse_view_stmts.c +++ b/src/backend/distributed/deparser/deparse_view_stmts.c @@ -13,15 +13,16 @@ #include "catalog/namespace.h" #include "commands/defrem.h" -#include "distributed/citus_ruleutils.h" -#include "distributed/commands.h" -#include "distributed/deparser.h" -#include "distributed/listutils.h" #include "lib/stringinfo.h" #include "nodes/parsenodes.h" #include "utils/builtins.h" #include "utils/lsyscache.h" +#include "distributed/citus_ruleutils.h" +#include "distributed/commands.h" +#include "distributed/deparser.h" +#include "distributed/listutils.h" + static void AppendDropViewStmt(StringInfo buf, DropStmt *stmt); static void AppendViewNameList(StringInfo buf, List *objects); static void AppendAlterViewStmt(StringInfo buf, AlterTableStmt *stmt); diff --git a/src/backend/distributed/deparser/objectaddress.c b/src/backend/distributed/deparser/objectaddress.c index d835a3b1abf..6718c22cf7f 100644 --- a/src/backend/distributed/deparser/objectaddress.c +++ b/src/backend/distributed/deparser/objectaddress.c @@ -12,11 +12,12 @@ #include "postgres.h" +#include 
"catalog/objectaddress.h" +#include "catalog/pg_extension_d.h" #include "commands/extension.h" + #include "distributed/commands.h" #include "distributed/deparser.h" -#include "catalog/objectaddress.h" -#include "catalog/pg_extension_d.h" /* diff --git a/src/backend/distributed/deparser/qualify_aggregate_stmts.c b/src/backend/distributed/deparser/qualify_aggregate_stmts.c index 9debc244a22..e5d7210f316 100644 --- a/src/backend/distributed/deparser/qualify_aggregate_stmts.c +++ b/src/backend/distributed/deparser/qualify_aggregate_stmts.c @@ -15,10 +15,11 @@ #include "postgres.h" #include "catalog/namespace.h" -#include "distributed/deparser.h" #include "nodes/makefuncs.h" #include "utils/lsyscache.h" +#include "distributed/deparser.h" + void QualifyDefineAggregateStmt(Node *node) { diff --git a/src/backend/distributed/deparser/qualify_function_stmt.c b/src/backend/distributed/deparser/qualify_function_stmt.c index fbd6c17a0b7..184ff92bf70 100644 --- a/src/backend/distributed/deparser/qualify_function_stmt.c +++ b/src/backend/distributed/deparser/qualify_function_stmt.c @@ -21,12 +21,13 @@ #include "access/htup_details.h" #include "catalog/namespace.h" #include "catalog/pg_proc.h" -#include "distributed/deparser.h" -#include "distributed/version_compat.h" #include "parser/parse_func.h" #include "utils/lsyscache.h" #include "utils/syscache.h" +#include "distributed/deparser.h" +#include "distributed/version_compat.h" + /* forward declaration for qualify functions */ static void QualifyFunction(ObjectWithArgs *func, ObjectType type); static void QualifyFunctionSchemaName(ObjectWithArgs *func, ObjectType type); diff --git a/src/backend/distributed/deparser/qualify_publication_stmt.c b/src/backend/distributed/deparser/qualify_publication_stmt.c index 3231fe363b2..73ffe3a3533 100644 --- a/src/backend/distributed/deparser/qualify_publication_stmt.c +++ b/src/backend/distributed/deparser/qualify_publication_stmt.c @@ -12,12 +12,13 @@ #include "postgres.h" #include "catalog/namespace.h" -#include "distributed/deparser.h" -#include "distributed/listutils.h" #include "nodes/nodes.h" #include "utils/guc.h" #include "utils/lsyscache.h" +#include "distributed/deparser.h" +#include "distributed/listutils.h" + #if (PG_VERSION_NUM >= PG_VERSION_15) static void QualifyPublicationObjects(List *publicationObjects); #else diff --git a/src/backend/distributed/deparser/qualify_role_stmt.c b/src/backend/distributed/deparser/qualify_role_stmt.c index 93a958ea9e6..cffb7ac4c1a 100644 --- a/src/backend/distributed/deparser/qualify_role_stmt.c +++ b/src/backend/distributed/deparser/qualify_role_stmt.c @@ -17,10 +17,11 @@ #include "postgres.h" -#include "distributed/deparser.h" #include "nodes/nodes.h" #include "utils/guc.h" +#include "distributed/deparser.h" + static void QualifyVarSetCurrent(VariableSetStmt *setStmt); diff --git a/src/backend/distributed/deparser/qualify_sequence_stmt.c b/src/backend/distributed/deparser/qualify_sequence_stmt.c index 384e0c95383..1a0ecc8319b 100644 --- a/src/backend/distributed/deparser/qualify_sequence_stmt.c +++ b/src/backend/distributed/deparser/qualify_sequence_stmt.c @@ -17,12 +17,13 @@ #include "postgres.h" +#include "parser/parse_func.h" +#include "utils/lsyscache.h" + #include "distributed/commands.h" #include "distributed/deparser.h" #include "distributed/listutils.h" #include "distributed/version_compat.h" -#include "parser/parse_func.h" -#include "utils/lsyscache.h" /* diff --git a/src/backend/distributed/deparser/qualify_statistics_stmt.c 
b/src/backend/distributed/deparser/qualify_statistics_stmt.c index ce944393002..ba8e8a76496 100644 --- a/src/backend/distributed/deparser/qualify_statistics_stmt.c +++ b/src/backend/distributed/deparser/qualify_statistics_stmt.c @@ -16,15 +16,16 @@ #include "catalog/namespace.h" #include "catalog/pg_statistic_ext.h" -#include "distributed/commands.h" -#include "distributed/deparser.h" -#include "distributed/listutils.h" #include "nodes/parsenodes.h" #include "nodes/value.h" -#include "utils/syscache.h" #include "utils/lsyscache.h" #include "utils/rel.h" #include "utils/relcache.h" +#include "utils/syscache.h" + +#include "distributed/commands.h" +#include "distributed/deparser.h" +#include "distributed/listutils.h" static Oid GetStatsNamespaceOid(Oid statsOid); diff --git a/src/backend/distributed/deparser/qualify_table_stmt.c b/src/backend/distributed/deparser/qualify_table_stmt.c index 9667c4c798d..e760ff3885a 100644 --- a/src/backend/distributed/deparser/qualify_table_stmt.c +++ b/src/backend/distributed/deparser/qualify_table_stmt.c @@ -23,6 +23,7 @@ #include "utils/lsyscache.h" #include "utils/rel.h" #include "utils/relcache.h" + #include "distributed/deparser.h" void diff --git a/src/backend/distributed/deparser/qualify_type_stmt.c b/src/backend/distributed/deparser/qualify_type_stmt.c index 487e6fc976b..91052b57659 100644 --- a/src/backend/distributed/deparser/qualify_type_stmt.c +++ b/src/backend/distributed/deparser/qualify_type_stmt.c @@ -23,13 +23,14 @@ #include "catalog/namespace.h" #include "catalog/objectaddress.h" #include "catalog/pg_type.h" -#include "distributed/commands.h" -#include "distributed/deparser.h" -#include "distributed/version_compat.h" #include "nodes/makefuncs.h" #include "parser/parse_type.h" -#include "utils/syscache.h" #include "utils/lsyscache.h" +#include "utils/syscache.h" + +#include "distributed/commands.h" +#include "distributed/deparser.h" +#include "distributed/version_compat.h" /* * GetTypeNamespaceNameByNameList resolved the schema name of a type by its namelist. diff --git a/src/backend/distributed/deparser/qualify_view_stmt.c b/src/backend/distributed/deparser/qualify_view_stmt.c index 1f450d50aee..af3fb280a8c 100644 --- a/src/backend/distributed/deparser/qualify_view_stmt.c +++ b/src/backend/distributed/deparser/qualify_view_stmt.c @@ -12,12 +12,13 @@ #include "postgres.h" #include "catalog/namespace.h" -#include "distributed/deparser.h" -#include "distributed/listutils.h" #include "nodes/nodes.h" #include "utils/guc.h" #include "utils/lsyscache.h" +#include "distributed/deparser.h" +#include "distributed/listutils.h" + static void QualifyViewRangeVar(RangeVar *view); /* diff --git a/src/backend/distributed/deparser/ruleutils_14.c b/src/backend/distributed/deparser/ruleutils_14.c index 6ab12453708..88948cff542 100644 --- a/src/backend/distributed/deparser/ruleutils_14.c +++ b/src/backend/distributed/deparser/ruleutils_14.c @@ -14,7 +14,7 @@ * This needs to be closely in sync with the core code. 
*------------------------------------------------------------------------- */ -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "pg_config.h" @@ -1526,8 +1526,15 @@ set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte, /* Assert we processed the right number of columns */ #ifdef USE_ASSERT_CHECKING - while (i < colinfo->num_cols && colinfo->colnames[i] == NULL) - i++; + for (int col_index = 0; col_index < colinfo->num_cols; col_index++) + { + /* + * In the above processing-loops, "i" advances only if + * the column is not new, check if this is a new column. + */ + if (colinfo->is_new_col[col_index]) + i++; + } Assert(i == colinfo->num_cols); Assert(j == nnewcolumns); #endif diff --git a/src/backend/distributed/deparser/ruleutils_15.c b/src/backend/distributed/deparser/ruleutils_15.c index 755e0f4cd3f..018468d0bfb 100644 --- a/src/backend/distributed/deparser/ruleutils_15.c +++ b/src/backend/distributed/deparser/ruleutils_15.c @@ -14,7 +14,7 @@ * This needs to be closely in sync with the core code. *------------------------------------------------------------------------- */ -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "pg_config.h" @@ -1563,8 +1563,15 @@ set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte, /* Assert we processed the right number of columns */ #ifdef USE_ASSERT_CHECKING - while (i < colinfo->num_cols && colinfo->colnames[i] == NULL) - i++; + for (int col_index = 0; col_index < colinfo->num_cols; col_index++) + { + /* + * In the above processing-loops, "i" advances only if + * the column is not new, check if this is a new column. + */ + if (colinfo->is_new_col[col_index]) + i++; + } Assert(i == colinfo->num_cols); Assert(j == nnewcolumns); #endif diff --git a/src/backend/distributed/deparser/ruleutils_16.c b/src/backend/distributed/deparser/ruleutils_16.c index 31e8823b125..7f2a41d75c3 100644 --- a/src/backend/distributed/deparser/ruleutils_16.c +++ b/src/backend/distributed/deparser/ruleutils_16.c @@ -14,7 +14,7 @@ * This needs to be closely in sync with the core code. *------------------------------------------------------------------------- */ -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #include "pg_config.h" @@ -1580,8 +1580,15 @@ set_join_column_names(deparse_namespace *dpns, RangeTblEntry *rte, /* Assert we processed the right number of columns */ #ifdef USE_ASSERT_CHECKING - while (i < colinfo->num_cols && colinfo->colnames[i] == NULL) - i++; + for (int col_index = 0; col_index < colinfo->num_cols; col_index++) + { + /* + * In the above processing-loops, "i" advances only if + * the column is not new, check if this is a new column. 
+ */ + if (colinfo->is_new_col[col_index]) + i++; + } Assert(i == colinfo->num_cols); Assert(j == nnewcolumns); #endif diff --git a/src/backend/distributed/executor/adaptive_executor.c b/src/backend/distributed/executor/adaptive_executor.c index 61a52e7c482..e912f418d6f 100644 --- a/src/backend/distributed/executor/adaptive_executor.c +++ b/src/backend/distributed/executor/adaptive_executor.c @@ -118,32 +118,43 @@ *------------------------------------------------------------------------- */ +#include +#include +#include + #include "postgres.h" + #include "funcapi.h" #include "libpq-fe.h" #include "miscadmin.h" #include "pgstat.h" -#include -#include -#include - +#include "access/htup_details.h" #include "access/transam.h" #include "access/xact.h" -#include "access/htup_details.h" #include "catalog/pg_type.h" #include "commands/dbcommands.h" #include "commands/schemacmds.h" +#include "lib/ilist.h" +#include "portability/instr_time.h" +#include "storage/fd.h" +#include "storage/latch.h" +#include "utils/builtins.h" +#include "utils/lsyscache.h" +#include "utils/memutils.h" +#include "utils/syscache.h" +#include "utils/timestamp.h" + #include "distributed/adaptive_executor.h" +#include "distributed/backend_data.h" #include "distributed/cancel_utils.h" #include "distributed/citus_custom_scan.h" #include "distributed/citus_safe_lib.h" -#include "distributed/connection_management.h" #include "distributed/commands/multi_copy.h" +#include "distributed/connection_management.h" #include "distributed/deparse_shard_query.h" -#include "distributed/executor_util.h" -#include "distributed/shared_connection_stats.h" #include "distributed/distributed_execution_locks.h" +#include "distributed/executor_util.h" #include "distributed/intermediate_result_pruning.h" #include "distributed/listutils.h" #include "distributed/local_executor.h" @@ -161,21 +172,11 @@ #include "distributed/resource_lock.h" #include "distributed/shared_connection_stats.h" #include "distributed/subplan_execution.h" -#include "distributed/transaction_management.h" #include "distributed/transaction_identifier.h" +#include "distributed/transaction_management.h" #include "distributed/tuple_destination.h" #include "distributed/version_compat.h" #include "distributed/worker_protocol.h" -#include "distributed/backend_data.h" -#include "lib/ilist.h" -#include "portability/instr_time.h" -#include "storage/fd.h" -#include "storage/latch.h" -#include "utils/builtins.h" -#include "utils/lsyscache.h" -#include "utils/memutils.h" -#include "utils/syscache.h" -#include "utils/timestamp.h" #define SLOW_START_DISABLED 0 @@ -400,7 +401,7 @@ typedef struct WorkerPool /* * Placement executions destined for worker node, but not assigned to any - * connection and not ready to start. + * connection and ready to start. */ dlist_head readyTaskQueue; int readyTaskCount; @@ -491,8 +492,6 @@ typedef struct WorkerSession } WorkerSession; -struct TaskPlacementExecution; - /* GUC, determining whether Citus opens 1 connection per task */ bool ForceMaxQueryParallelization = false; int MaxAdaptiveExecutorPoolSize = 16; @@ -584,7 +583,7 @@ typedef enum TaskPlacementExecutionState } TaskPlacementExecutionState; /* - * TaskPlacementExecution represents the an execution of a command + * TaskPlacementExecution represents the execution of a command * on a shard placement. 
*/ typedef struct TaskPlacementExecution @@ -728,6 +727,11 @@ static uint64 MicrosecondsBetweenTimestamps(instr_time startTime, instr_time end static int WorkerPoolCompare(const void *lhsKey, const void *rhsKey); static void SetAttributeInputMetadata(DistributedExecution *execution, ShardCommandExecution *shardCommandExecution); +static ExecutionParams * CreateDefaultExecutionParams(RowModifyLevel modLevel, + List *taskList, + TupleDestination *tupleDest, + bool expectResults, + ParamListInfo paramListInfo); /* @@ -1014,14 +1018,14 @@ ExecuteTaskListOutsideTransaction(RowModifyLevel modLevel, List *taskList, /* - * ExecuteTaskListIntoTupleDestWithParam is a proxy to ExecuteTaskListExtended() which uses - * bind params from executor state, and with defaults for some of the arguments. + * CreateDefaultExecutionParams returns execution params based on given (possibly null) + * bind params (presumably from executor state) with defaults for some of the arguments. */ -uint64 -ExecuteTaskListIntoTupleDestWithParam(RowModifyLevel modLevel, List *taskList, - TupleDestination *tupleDest, - bool expectResults, - ParamListInfo paramListInfo) +static ExecutionParams * +CreateDefaultExecutionParams(RowModifyLevel modLevel, List *taskList, + TupleDestination *tupleDest, + bool expectResults, + ParamListInfo paramListInfo) { int targetPoolSize = MaxAdaptiveExecutorPoolSize; bool localExecutionSupported = true; @@ -1035,6 +1039,24 @@ ExecuteTaskListIntoTupleDestWithParam(RowModifyLevel modLevel, List *taskList, executionParams->tupleDestination = tupleDest; executionParams->paramListInfo = paramListInfo; + return executionParams; +} + + +/* + * ExecuteTaskListIntoTupleDestWithParam is a proxy to ExecuteTaskListExtended() which uses + * bind params from executor state, and with defaults for some of the arguments. + */ +uint64 +ExecuteTaskListIntoTupleDestWithParam(RowModifyLevel modLevel, List *taskList, + TupleDestination *tupleDest, + bool expectResults, + ParamListInfo paramListInfo) +{ + ExecutionParams *executionParams = CreateDefaultExecutionParams(modLevel, taskList, + tupleDest, + expectResults, + paramListInfo); return ExecuteTaskListExtended(executionParams); } @@ -1048,17 +1070,11 @@ ExecuteTaskListIntoTupleDest(RowModifyLevel modLevel, List *taskList, TupleDestination *tupleDest, bool expectResults) { - int targetPoolSize = MaxAdaptiveExecutorPoolSize; - bool localExecutionSupported = true; - ExecutionParams *executionParams = CreateBasicExecutionParams( - modLevel, taskList, targetPoolSize, localExecutionSupported - ); - - executionParams->xactProperties = DecideTransactionPropertiesForTaskList( - modLevel, taskList, false); - executionParams->expectResults = expectResults; - executionParams->tupleDestination = tupleDest; - + ParamListInfo paramListInfo = NULL; + ExecutionParams *executionParams = CreateDefaultExecutionParams(modLevel, taskList, + tupleDest, + expectResults, + paramListInfo); return ExecuteTaskListExtended(executionParams); } @@ -1907,7 +1923,7 @@ RunDistributedExecution(DistributedExecution *execution) /* * Iterate until all the tasks are finished. Once all the tasks - * are finished, ensure that that all the connection initializations + * are finished, ensure that all the connection initializations * are also finished. Otherwise, those connections are terminated * abruptly before they are established (or failed). Instead, we let * the ConnectionStateMachine() to properly handle them. 
@@ -3117,7 +3133,7 @@ ConnectionStateMachine(WorkerSession *session) * * We can only retry connection when the remote transaction has * not started over the connection. Otherwise, we'd have to deal - * with restoring the transaction state, which iis beyond our + * with restoring the transaction state, which is beyond our * purpose at this time. */ RemoteTransaction *transaction = &connection->remoteTransaction; diff --git a/src/backend/distributed/executor/citus_custom_scan.c b/src/backend/distributed/executor/citus_custom_scan.c index a2a2ff6cb32..34a2f3d90bd 100644 --- a/src/backend/distributed/executor/citus_custom_scan.c +++ b/src/backend/distributed/executor/citus_custom_scan.c @@ -9,19 +9,30 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" - #include "miscadmin.h" #include "commands/copy.h" +#include "executor/executor.h" +#include "nodes/makefuncs.h" +#include "optimizer/clauses.h" +#include "optimizer/optimizer.h" +#include "utils/datum.h" +#include "utils/lsyscache.h" +#include "utils/memutils.h" +#include "utils/rel.h" + +#include "pg_version_constants.h" + #include "distributed/backend_data.h" #include "distributed/citus_clauses.h" #include "distributed/citus_custom_scan.h" #include "distributed/citus_nodefuncs.h" #include "distributed/citus_ruleutils.h" +#include "distributed/colocation_utils.h" #include "distributed/connection_management.h" #include "distributed/deparse_shard_query.h" #include "distributed/distributed_execution_locks.h" +#include "distributed/function_call_delegation.h" #include "distributed/insert_select_executor.h" #include "distributed/insert_select_planner.h" #include "distributed/listutils.h" @@ -30,23 +41,13 @@ #include "distributed/merge_executor.h" #include "distributed/merge_planner.h" #include "distributed/multi_executor.h" -#include "distributed/multi_server_executor.h" #include "distributed/multi_router_planner.h" +#include "distributed/multi_server_executor.h" #include "distributed/query_stats.h" #include "distributed/shard_utils.h" #include "distributed/subplan_execution.h" #include "distributed/worker_log_messages.h" #include "distributed/worker_protocol.h" -#include "distributed/colocation_utils.h" -#include "distributed/function_call_delegation.h" -#include "executor/executor.h" -#include "nodes/makefuncs.h" -#include "optimizer/optimizer.h" -#include "optimizer/clauses.h" -#include "utils/lsyscache.h" -#include "utils/memutils.h" -#include "utils/rel.h" -#include "utils/datum.h" extern AllowedDistributionColumn AllowedDistributionColumnValue; diff --git a/src/backend/distributed/executor/directed_acyclic_graph_execution.c b/src/backend/distributed/executor/directed_acyclic_graph_execution.c index e0d4c9b8125..15b0272ddaf 100644 --- a/src/backend/distributed/executor/directed_acyclic_graph_execution.c +++ b/src/backend/distributed/executor/directed_acyclic_graph_execution.c @@ -8,11 +8,12 @@ */ #include "postgres.h" + #include "access/hash.h" -#include "distributed/hash_helpers.h" #include "distributed/adaptive_executor.h" #include "distributed/directed_acyclic_graph_execution.h" +#include "distributed/hash_helpers.h" #include "distributed/listutils.h" #include "distributed/metadata_cache.h" #include "distributed/multi_physical_planner.h" diff --git a/src/backend/distributed/executor/distributed_execution_locks.c b/src/backend/distributed/executor/distributed_execution_locks.c index f7d2fd49d6a..4424accb712 100644 --- a/src/backend/distributed/executor/distributed_execution_locks.c +++ 
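A brief caller-side sketch of the executor refactoring above: ExecuteTaskListIntoTupleDest() now goes through the new CreateDefaultExecutionParams() helper with a NULL ParamListInfo, so it behaves like ExecuteTaskListIntoTupleDestWithParam() without bind parameters. The ExampleExecuteReadOnlyTasks() wrapper, the exact include paths and the ROW_MODIFY_READONLY level are assumptions for illustration.

/* illustrative sketch only; not part of the patch */
#include "postgres.h"

#include "nodes/pg_list.h"

#include "distributed/multi_executor.h"
#include "distributed/multi_physical_planner.h"
#include "distributed/tuple_destination.h"

static uint64
ExampleExecuteReadOnlyTasks(List *taskList, TupleDestination *tupleDest)
{
	/*
	 * Equivalent to ExecuteTaskListIntoTupleDestWithParam() with a NULL
	 * ParamListInfo: both wrappers now derive pool size, transaction
	 * properties and local-execution defaults via CreateDefaultExecutionParams().
	 */
	bool expectResults = true;

	return ExecuteTaskListIntoTupleDest(ROW_MODIFY_READONLY, taskList, tupleDest,
										expectResults);
}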
b/src/backend/distributed/executor/distributed_execution_locks.c @@ -8,10 +8,10 @@ * Copyright (c) Citus Data, Inc. *------------------------------------------------------------------------- */ +#include "distributed/coordinator_protocol.h" #include "distributed/distributed_execution_locks.h" #include "distributed/executor_util.h" #include "distributed/listutils.h" -#include "distributed/coordinator_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/multi_executor.h" #include "distributed/multi_partitioning_utils.h" diff --git a/src/backend/distributed/executor/distributed_intermediate_results.c b/src/backend/distributed/executor/distributed_intermediate_results.c index c10303e18d6..c5ac27fb624 100644 --- a/src/backend/distributed/executor/distributed_intermediate_results.c +++ b/src/backend/distributed/executor/distributed_intermediate_results.c @@ -8,12 +8,11 @@ *------------------------------------------------------------------------- */ -#include "distributed/pg_version_constants.h" - #include #include #include "postgres.h" + #include "funcapi.h" #include "miscadmin.h" #include "port.h" @@ -21,21 +20,24 @@ #include "access/htup_details.h" #include "access/tupdesc.h" #include "catalog/pg_type.h" +#include "tcop/pquery.h" +#include "tcop/tcopprot.h" +#include "utils/builtins.h" +#include "utils/lsyscache.h" + +#include "pg_version_constants.h" + #include "distributed/deparse_shard_query.h" #include "distributed/intermediate_results.h" #include "distributed/listutils.h" -#include "distributed/metadata_utility.h" #include "distributed/metadata_cache.h" +#include "distributed/metadata_utility.h" #include "distributed/multi_executor.h" #include "distributed/multi_physical_planner.h" #include "distributed/transaction_management.h" #include "distributed/tuple_destination.h" #include "distributed/tuplestore.h" #include "distributed/worker_protocol.h" -#include "tcop/pquery.h" -#include "tcop/tcopprot.h" -#include "utils/builtins.h" -#include "utils/lsyscache.h" /* diff --git a/src/backend/distributed/executor/executor_util_params.c b/src/backend/distributed/executor/executor_util_params.c index 6b5139bff2b..975654f22d7 100644 --- a/src/backend/distributed/executor/executor_util_params.c +++ b/src/backend/distributed/executor/executor_util_params.c @@ -8,12 +8,14 @@ */ #include "postgres.h" + #include "funcapi.h" #include "miscadmin.h" -#include "distributed/executor_util.h" #include "utils/lsyscache.h" +#include "distributed/executor_util.h" + /* * ExtractParametersForRemoteExecution extracts parameter types and values from diff --git a/src/backend/distributed/executor/executor_util_tasks.c b/src/backend/distributed/executor/executor_util_tasks.c index abf72119600..6a3eec8fc52 100644 --- a/src/backend/distributed/executor/executor_util_tasks.c +++ b/src/backend/distributed/executor/executor_util_tasks.c @@ -8,6 +8,7 @@ */ #include "postgres.h" + #include "funcapi.h" #include "miscadmin.h" @@ -61,7 +62,7 @@ TaskListRequiresRollback(List *taskList) } Task *task = (Task *) linitial(taskList); - if (task->cannotBeExecutedInTransction) + if (task->cannotBeExecutedInTransaction) { /* vacuum, create index concurrently etc. 
*/ return false; @@ -164,7 +165,7 @@ TaskListCannotBeExecutedInTransaction(List *taskList) Task *task = NULL; foreach_ptr(task, taskList) { - if (task->cannotBeExecutedInTransction) + if (task->cannotBeExecutedInTransaction) { return true; } diff --git a/src/backend/distributed/executor/executor_util_tuples.c b/src/backend/distributed/executor/executor_util_tuples.c index c5fde9f9097..68f6999566f 100644 --- a/src/backend/distributed/executor/executor_util_tuples.c +++ b/src/backend/distributed/executor/executor_util_tuples.c @@ -8,12 +8,14 @@ */ #include "postgres.h" + #include "funcapi.h" #include "miscadmin.h" -#include "distributed/executor_util.h" #include "utils/lsyscache.h" +#include "distributed/executor_util.h" + /* * TupleDescGetAttBinaryInMetadata - Build an AttInMetadata structure based on diff --git a/src/backend/distributed/executor/insert_select_executor.c b/src/backend/distributed/executor/insert_select_executor.c index 4a15289e6dc..a8dc1fa5a44 100644 --- a/src/backend/distributed/executor/insert_select_executor.c +++ b/src/backend/distributed/executor/insert_select_executor.c @@ -9,26 +9,42 @@ */ #include "postgres.h" + #include "miscadmin.h" +#include "executor/executor.h" +#include "nodes/execnodes.h" +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" +#include "nodes/parsenodes.h" +#include "nodes/plannodes.h" +#include "parser/parse_coerce.h" +#include "parser/parse_relation.h" +#include "parser/parsetree.h" +#include "tcop/pquery.h" +#include "tcop/tcopprot.h" +#include "utils/lsyscache.h" +#include "utils/portal.h" +#include "utils/rel.h" +#include "utils/snapmgr.h" + +#include "distributed/adaptive_executor.h" #include "distributed/citus_ruleutils.h" #include "distributed/commands/multi_copy.h" -#include "distributed/adaptive_executor.h" #include "distributed/deparse_shard_query.h" #include "distributed/distributed_execution_locks.h" +#include "distributed/distributed_planner.h" #include "distributed/insert_select_executor.h" #include "distributed/insert_select_planner.h" #include "distributed/intermediate_results.h" +#include "distributed/listutils.h" #include "distributed/local_executor.h" #include "distributed/merge_planner.h" +#include "distributed/metadata_cache.h" #include "distributed/multi_executor.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/multi_physical_planner.h" -#include "distributed/listutils.h" -#include "distributed/metadata_cache.h" #include "distributed/multi_router_planner.h" -#include "distributed/local_executor.h" -#include "distributed/distributed_planner.h" #include "distributed/recursive_planning.h" #include "distributed/relation_access_tracking.h" #include "distributed/repartition_executor.h" @@ -37,21 +53,6 @@ #include "distributed/subplan_execution.h" #include "distributed/transaction_management.h" #include "distributed/version_compat.h" -#include "executor/executor.h" -#include "nodes/execnodes.h" -#include "nodes/makefuncs.h" -#include "nodes/nodeFuncs.h" -#include "nodes/parsenodes.h" -#include "nodes/plannodes.h" -#include "parser/parse_coerce.h" -#include "parser/parse_relation.h" -#include "parser/parsetree.h" -#include "tcop/pquery.h" -#include "tcop/tcopprot.h" -#include "utils/lsyscache.h" -#include "utils/portal.h" -#include "utils/rel.h" -#include "utils/snapmgr.h" /* Config variables managed via guc.c */ bool EnableRepartitionedInsertSelect = true; @@ -142,15 +143,10 @@ NonPushableInsertSelectExecScan(CustomScanState *node) targetRelation->partitionColumn); if (distributionColumnIndex == 
-1) { - char *relationName = get_rel_name(targetRelationId); - Oid schemaOid = get_rel_namespace(targetRelationId); - char *schemaName = get_namespace_name(schemaOid); - ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg( "the partition column of table %s should have a value", - quote_qualified_identifier(schemaName, - relationName)))); + generate_qualified_relation_name(targetRelationId)))); } TargetEntry *selectPartitionTE = list_nth(selectQuery->targetList, diff --git a/src/backend/distributed/executor/intermediate_results.c b/src/backend/distributed/executor/intermediate_results.c index d17e6521724..daf707b2462 100644 --- a/src/backend/distributed/executor/intermediate_results.c +++ b/src/backend/distributed/executor/intermediate_results.c @@ -11,6 +11,7 @@ #include #include "postgres.h" + #include "funcapi.h" #include "libpq-fe.h" #include "miscadmin.h" @@ -19,31 +20,32 @@ #include "catalog/pg_enum.h" #include "catalog/pg_type.h" #include "commands/copy.h" +#include "nodes/makefuncs.h" +#include "nodes/parsenodes.h" +#include "nodes/primnodes.h" +#include "storage/fd.h" +#include "tcop/tcopprot.h" +#include "utils/builtins.h" +#include "utils/lsyscache.h" +#include "utils/memutils.h" +#include "utils/syscache.h" + #include "distributed/commands/multi_copy.h" #include "distributed/connection_management.h" #include "distributed/error_codes.h" #include "distributed/intermediate_results.h" #include "distributed/listutils.h" -#include "distributed/metadata_utility.h" #include "distributed/metadata_cache.h" +#include "distributed/metadata_utility.h" #include "distributed/multi_executor.h" #include "distributed/remote_commands.h" -#include "distributed/transmit.h" #include "distributed/transaction_identifier.h" +#include "distributed/transmit.h" #include "distributed/tuplestore.h" #include "distributed/utils/array_type.h" #include "distributed/utils/directory.h" #include "distributed/version_compat.h" #include "distributed/worker_protocol.h" -#include "nodes/makefuncs.h" -#include "nodes/parsenodes.h" -#include "nodes/primnodes.h" -#include "storage/fd.h" -#include "tcop/tcopprot.h" -#include "utils/builtins.h" -#include "utils/lsyscache.h" -#include "utils/memutils.h" -#include "utils/syscache.h" static List *CreatedResultsDirectories = NIL; @@ -293,7 +295,6 @@ PrepareIntermediateResultBroadcast(RemoteFileDestReceiver *resultDest) if (resultDest->writeLocalFile) { const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY); - const int fileMode = (S_IRUSR | S_IWUSR); /* make sure the directory exists */ CreateIntermediateResultsDirectory(); @@ -301,8 +302,7 @@ PrepareIntermediateResultBroadcast(RemoteFileDestReceiver *resultDest) const char *fileName = QueryResultFileName(resultId); resultDest->fileCompat = FileCompatFromFileStart(FileOpenForTransmit(fileName, - fileFlags, - fileMode)); + fileFlags)); } WorkerNode *workerNode = NULL; @@ -604,7 +604,7 @@ CreateIntermediateResultsDirectory(void) { char *resultDirectory = IntermediateResultsDirectory(); - int makeOK = mkdir(resultDirectory, S_IRWXU); + int makeOK = MakePGDirectory(resultDirectory); if (makeOK != 0) { if (errno == EEXIST) @@ -974,7 +974,6 @@ FetchRemoteIntermediateResult(MultiConnection *connection, char *resultId) StringInfo copyCommand = makeStringInfo(); const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY); - const int fileMode = (S_IRUSR | S_IWUSR); PGconn *pgConn = connection->pgConn; int socket = PQsocket(pgConn); @@ -996,7 +995,7 @@ 
FetchRemoteIntermediateResult(MultiConnection *connection, char *resultId) PQclear(result); - File fileDesc = FileOpenForTransmit(localPath, fileFlags, fileMode); + File fileDesc = FileOpenForTransmit(localPath, fileFlags); FileCompat fileCompat = FileCompatFromFileStart(fileDesc); while (true) diff --git a/src/backend/distributed/executor/local_executor.c b/src/backend/distributed/executor/local_executor.c index 5661403b930..bedaa643e29 100644 --- a/src/backend/distributed/executor/local_executor.c +++ b/src/backend/distributed/executor/local_executor.c @@ -76,36 +76,38 @@ * via coordinator cannot happen via the local execution. */ #include "postgres.h" + #include "miscadmin.h" -#include "distributed/pg_version_constants.h" +#include "executor/tstoreReceiver.h" +#include "executor/tuptable.h" +#include "nodes/params.h" +#include "optimizer/optimizer.h" +#include "utils/snapmgr.h" + +#include "pg_version_constants.h" #include "distributed/adaptive_executor.h" -#include "distributed/commands/utility_hook.h" #include "distributed/citus_custom_scan.h" #include "distributed/citus_ruleutils.h" #include "distributed/colocation_utils.h" -#include "distributed/query_utils.h" +#include "distributed/commands/utility_hook.h" +#include "distributed/coordinator_protocol.h" #include "distributed/deparse_shard_query.h" +#include "distributed/executor_util.h" #include "distributed/listutils.h" #include "distributed/local_executor.h" #include "distributed/local_plan_cache.h" -#include "distributed/coordinator_protocol.h" -#include "distributed/executor_util.h" #include "distributed/metadata_cache.h" #include "distributed/multi_executor.h" #include "distributed/multi_server_executor.h" +#include "distributed/query_utils.h" #include "distributed/relation_access_tracking.h" #include "distributed/remote_commands.h" /* to access LogRemoteCommands */ #include "distributed/transaction_management.h" #include "distributed/utils/citus_stat_tenants.h" #include "distributed/version_compat.h" #include "distributed/worker_protocol.h" -#include "executor/tstoreReceiver.h" -#include "executor/tuptable.h" -#include "optimizer/optimizer.h" -#include "nodes/params.h" -#include "utils/snapmgr.h" /* controlled via a GUC */ bool EnableLocalExecution = true; @@ -567,7 +569,7 @@ LogLocalCommand(Task *task) * * One slightly different case is modifications to replicated tables * (e.g., reference tables) where a single task ends in two separate tasks - * and the local task is added to localTaskList and the remaning ones to + * and the local task is added to localTaskList and the remaining ones to * the remoteTaskList. 
*/ void diff --git a/src/backend/distributed/executor/merge_executor.c b/src/backend/distributed/executor/merge_executor.c index bcacbcd1e5d..969b03faf93 100644 --- a/src/backend/distributed/executor/merge_executor.c +++ b/src/backend/distributed/executor/merge_executor.c @@ -9,8 +9,13 @@ */ #include "postgres.h" + #include "miscadmin.h" +#include "nodes/execnodes.h" +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" + #include "distributed/distributed_execution_locks.h" #include "distributed/insert_select_executor.h" #include "distributed/intermediate_results.h" @@ -23,10 +28,6 @@ #include "distributed/repartition_executor.h" #include "distributed/subplan_execution.h" -#include "nodes/execnodes.h" -#include "nodes/makefuncs.h" -#include "nodes/nodeFuncs.h" - static void ExecuteSourceAtWorkerAndRepartition(CitusScanState *scanState); static void ExecuteSourceAtCoordAndRedistribution(CitusScanState *scanState); static HTAB * ExecuteMergeSourcePlanIntoColocatedIntermediateResults(Oid targetRelationId, diff --git a/src/backend/distributed/executor/multi_executor.c b/src/backend/distributed/executor/multi_executor.c index 662eaaf972b..386a278b4c7 100644 --- a/src/backend/distributed/executor/multi_executor.c +++ b/src/backend/distributed/executor/multi_executor.c @@ -10,50 +10,50 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" - #include "miscadmin.h" #include "access/xact.h" #include "catalog/dependency.h" -#include "catalog/pg_class.h" #include "catalog/namespace.h" +#include "catalog/pg_class.h" +#include "commands/copy.h" +#include "executor/execdebug.h" +#include "nodes/execnodes.h" +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" +#include "parser/parse_oper.h" +#include "parser/parsetree.h" +#include "storage/lmgr.h" +#include "tcop/dest.h" +#include "tcop/pquery.h" +#include "tcop/utility.h" +#include "utils/fmgrprotos.h" +#include "utils/memutils.h" +#include "utils/snapmgr.h" + +#include "pg_version_constants.h" + #include "distributed/backend_data.h" #include "distributed/citus_custom_scan.h" +#include "distributed/combine_query_planner.h" #include "distributed/commands/multi_copy.h" #include "distributed/commands/utility_hook.h" +#include "distributed/coordinator_protocol.h" +#include "distributed/distributed_planner.h" #include "distributed/function_call_delegation.h" #include "distributed/insert_select_executor.h" #include "distributed/insert_select_planner.h" #include "distributed/listutils.h" #include "distributed/local_executor.h" -#include "distributed/coordinator_protocol.h" #include "distributed/multi_executor.h" -#include "distributed/combine_query_planner.h" -#include "distributed/distributed_planner.h" #include "distributed/multi_router_planner.h" #include "distributed/multi_server_executor.h" #include "distributed/relation_access_tracking.h" #include "distributed/resource_lock.h" #include "distributed/transaction_management.h" #include "distributed/version_compat.h" -#include "distributed/worker_shard_visibility.h" #include "distributed/worker_protocol.h" -#include "distributed/function_call_delegation.h" -#include "executor/execdebug.h" -#include "commands/copy.h" -#include "nodes/execnodes.h" -#include "nodes/makefuncs.h" -#include "nodes/nodeFuncs.h" -#include "parser/parsetree.h" -#include "parser/parse_oper.h" -#include "storage/lmgr.h" -#include "tcop/dest.h" -#include "tcop/pquery.h" -#include "tcop/utility.h" -#include "utils/fmgrprotos.h" -#include "utils/snapmgr.h" -#include "utils/memutils.h" +#include 
"distributed/worker_shard_visibility.h" /* @@ -168,7 +168,7 @@ CitusExecutorRun(QueryDesc *queryDesc, executorBoundParams = queryDesc->params; /* - * We do some potentially time consuming operations our self now before we hand of + * We do some potentially time consuming operations ourself now before we hand off * control to postgres' executor. To make sure that time spent is accurately measured * we remove the totaltime instrumentation from the queryDesc. Instead we will start * and stop the instrumentation of the total time and put it back on the queryDesc diff --git a/src/backend/distributed/executor/multi_server_executor.c b/src/backend/distributed/executor/multi_server_executor.c index ac144c350e8..20901983337 100644 --- a/src/backend/distributed/executor/multi_server_executor.c +++ b/src/backend/distributed/executor/multi_server_executor.c @@ -14,22 +14,24 @@ *------------------------------------------------------------------------- */ +#include + #include "postgres.h" + #include "miscadmin.h" -#include +#include "utils/lsyscache.h" +#include "distributed/coordinator_protocol.h" #include "distributed/listutils.h" #include "distributed/log_utils.h" #include "distributed/multi_executor.h" #include "distributed/multi_physical_planner.h" -#include "distributed/multi_server_executor.h" #include "distributed/multi_router_planner.h" -#include "distributed/coordinator_protocol.h" +#include "distributed/multi_server_executor.h" #include "distributed/subplan_execution.h" #include "distributed/tuple_destination.h" #include "distributed/worker_protocol.h" -#include "utils/lsyscache.h" int RemoteTaskCheckInterval = 10; /* per cycle sleep interval in millisecs */ int TaskExecutorType = MULTI_EXECUTOR_ADAPTIVE; /* distributed executor type */ diff --git a/src/backend/distributed/executor/partitioned_intermediate_results.c b/src/backend/distributed/executor/partitioned_intermediate_results.c index 75255234360..3ec73a4567b 100644 --- a/src/backend/distributed/executor/partitioned_intermediate_results.c +++ b/src/backend/distributed/executor/partitioned_intermediate_results.c @@ -11,6 +11,7 @@ #include #include "postgres.h" + #include "funcapi.h" #include "libpq-fe.h" #include "miscadmin.h" @@ -20,9 +21,15 @@ #include "access/nbtree.h" #include "catalog/pg_am.h" #include "catalog/pg_type.h" +#include "nodes/makefuncs.h" +#include "nodes/primnodes.h" +#include "tcop/pquery.h" +#include "tcop/tcopprot.h" +#include "utils/typcache.h" + #include "distributed/intermediate_results.h" -#include "distributed/metadata_utility.h" #include "distributed/metadata_cache.h" +#include "distributed/metadata_utility.h" #include "distributed/multi_executor.h" #include "distributed/pg_dist_shard.h" #include "distributed/remote_commands.h" @@ -31,11 +38,6 @@ #include "distributed/utils/function.h" #include "distributed/version_compat.h" #include "distributed/worker_protocol.h" -#include "nodes/makefuncs.h" -#include "nodes/primnodes.h" -#include "tcop/pquery.h" -#include "tcop/tcopprot.h" -#include "utils/typcache.h" /* diff --git a/src/backend/distributed/executor/placement_access.c b/src/backend/distributed/executor/placement_access.c index df5143a5423..a8573de7c08 100644 --- a/src/backend/distributed/executor/placement_access.c +++ b/src/backend/distributed/executor/placement_access.c @@ -8,9 +8,9 @@ * Copyright (c) Citus Data, Inc. 
*------------------------------------------------------------------------- */ -#include "distributed/placement_access.h" #include "distributed/listutils.h" #include "distributed/metadata_cache.h" +#include "distributed/placement_access.h" static List * BuildPlacementSelectList(int32 groupId, List *relationShardList); static List * BuildPlacementDDLList(int32 groupId, List *relationShardList); diff --git a/src/backend/distributed/executor/query_stats.c b/src/backend/distributed/executor/query_stats.c index 1ac70489c27..f37a99bbf72 100644 --- a/src/backend/distributed/executor/query_stats.c +++ b/src/backend/distributed/executor/query_stats.c @@ -9,32 +9,32 @@ *------------------------------------------------------------------------- */ -#include "postgres.h" +#include -#include "safe_lib.h" +#include "postgres.h" +#include "funcapi.h" #include "miscadmin.h" - -#include "distributed/pg_version_constants.h" +#include "safe_lib.h" #include "access/hash.h" #include "catalog/pg_authid.h" +#include "storage/fd.h" +#include "storage/ipc.h" +#include "storage/spin.h" +#include "tcop/utility.h" +#include "utils/builtins.h" + +#include "pg_version_constants.h" + #include "distributed/citus_safe_lib.h" #include "distributed/function_utils.h" #include "distributed/hash_helpers.h" #include "distributed/multi_executor.h" #include "distributed/multi_server_executor.h" -#include "distributed/version_compat.h" #include "distributed/query_stats.h" #include "distributed/tuplestore.h" -#include "funcapi.h" -#include "storage/ipc.h" -#include "storage/fd.h" -#include "storage/spin.h" -#include "tcop/utility.h" -#include "utils/builtins.h" - -#include +#include "distributed/version_compat.h" #define CITUS_STATS_DUMP_FILE "pg_stat/citus_query_stats.stat" #define CITUS_STAT_STATEMENTS_COLS 6 diff --git a/src/backend/distributed/executor/repartition_executor.c b/src/backend/distributed/executor/repartition_executor.c index af4f0ac7e70..6e4dd3df441 100644 --- a/src/backend/distributed/executor/repartition_executor.c +++ b/src/backend/distributed/executor/repartition_executor.c @@ -10,6 +10,7 @@ */ #include "postgres.h" + #include "miscadmin.h" #include "nodes/makefuncs.h" diff --git a/src/backend/distributed/executor/repartition_join_execution.c b/src/backend/distributed/executor/repartition_join_execution.c index 29d994e59d1..8dce1239034 100644 --- a/src/backend/distributed/executor/repartition_join_execution.c +++ b/src/backend/distributed/executor/repartition_join_execution.c @@ -24,20 +24,22 @@ */ #include "postgres.h" -#include "access/hash.h" + #include "miscadmin.h" + +#include "access/hash.h" #include "utils/builtins.h" -#include "distributed/hash_helpers.h" #include "distributed/adaptive_executor.h" #include "distributed/directed_acyclic_graph_execution.h" +#include "distributed/hash_helpers.h" #include "distributed/listutils.h" #include "distributed/local_executor.h" #include "distributed/metadata_cache.h" #include "distributed/multi_physical_planner.h" #include "distributed/multi_server_executor.h" -#include "distributed/task_execution_utils.h" #include "distributed/repartition_join_execution.h" +#include "distributed/task_execution_utils.h" #include "distributed/transaction_management.h" #include "distributed/transmit.h" #include "distributed/worker_manager.h" diff --git a/src/backend/distributed/executor/subplan_execution.c b/src/backend/distributed/executor/subplan_execution.c index 3651d7f5284..4e81bb48680 100644 --- a/src/backend/distributed/executor/subplan_execution.c +++ 
b/src/backend/distributed/executor/subplan_execution.c @@ -10,6 +10,9 @@ #include "postgres.h" +#include "executor/executor.h" +#include "utils/datetime.h" + #include "distributed/intermediate_result_pruning.h" #include "distributed/intermediate_results.h" #include "distributed/listutils.h" @@ -19,8 +22,6 @@ #include "distributed/subplan_execution.h" #include "distributed/transaction_management.h" #include "distributed/worker_manager.h" -#include "executor/executor.h" -#include "utils/datetime.h" #define SECOND_TO_MILLI_SECOND 1000 #define MICRO_TO_MILLI_SECOND 0.001 diff --git a/src/backend/distributed/executor/transmit.c b/src/backend/distributed/executor/transmit.c index 24cbbb55049..224d8e5892d 100644 --- a/src/backend/distributed/executor/transmit.c +++ b/src/backend/distributed/executor/transmit.c @@ -7,24 +7,27 @@ *------------------------------------------------------------------------- */ -#include "postgres.h" -#include "miscadmin.h" -#include "pgstat.h" - #include #include #include +#include "postgres.h" + +#include "miscadmin.h" +#include "pgstat.h" + #include "commands/defrem.h" +#include "common/file_perm.h" +#include "libpq/libpq.h" +#include "libpq/pqformat.h" +#include "storage/fd.h" + #include "distributed/listutils.h" #include "distributed/relay_utility.h" #include "distributed/transmit.h" #include "distributed/utils/directory.h" -#include "distributed/worker_protocol.h" #include "distributed/version_compat.h" -#include "libpq/libpq.h" -#include "libpq/pqformat.h" -#include "storage/fd.h" +#include "distributed/worker_protocol.h" /* Local functions forward declarations */ @@ -46,8 +49,7 @@ RedirectCopyDataToRegularFile(const char *filename) { StringInfo copyData = makeStringInfo(); const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY); - const int fileMode = (S_IRUSR | S_IWUSR); - File fileDesc = FileOpenForTransmit(filename, fileFlags, fileMode); + File fileDesc = FileOpenForTransmit(filename, fileFlags); FileCompat fileCompat = FileCompatFromFileStart(fileDesc); SendCopyInStart(); @@ -90,7 +92,7 @@ SendRegularFile(const char *filename) const int fileMode = 0; /* we currently do not check if the caller has permissions for this file */ - File fileDesc = FileOpenForTransmit(filename, fileFlags, fileMode); + File fileDesc = FileOpenForTransmitPerm(filename, fileFlags, fileMode); FileCompat fileCompat = FileCompatFromFileStart(fileDesc); /* @@ -134,12 +136,23 @@ FreeStringInfo(StringInfo stringInfo) /* - * FileOpenForTransmit opens file with the given filename and flags. On success, - * the function returns the internal file handle for the opened file. On failure - * the function errors out. + * Open a file with FileOpenForTransmitPerm() and pass default file mode for + * the fileMode parameter. + */ +File +FileOpenForTransmit(const char *filename, int fileFlags) +{ + return FileOpenForTransmitPerm(filename, fileFlags, pg_file_create_mode); +} + + +/* + * FileOpenForTransmitPerm opens file with the given filename and flags. On + * success, the function returns the internal file handle for the opened file. + * On failure the function errors out. 
*/ File -FileOpenForTransmit(const char *filename, int fileFlags, int fileMode) +FileOpenForTransmitPerm(const char *filename, int fileFlags, int fileMode) { struct stat fileStat; diff --git a/src/backend/distributed/executor/tuple_destination.c b/src/backend/distributed/executor/tuple_destination.c index 42dbf001ecc..b3c4b509c2f 100644 --- a/src/backend/distributed/executor/tuple_destination.c +++ b/src/backend/distributed/executor/tuple_destination.c @@ -1,13 +1,15 @@ +#include +#include + #include "postgres.h" + #include "funcapi.h" #include "libpq-fe.h" #include "miscadmin.h" #include "pgstat.h" -#include -#include - #include "access/htup_details.h" + #include "distributed/multi_server_executor.h" #include "distributed/subplan_execution.h" #include "distributed/tuple_destination.h" @@ -107,7 +109,7 @@ TupleStoreTupleDestPutTuple(TupleDestination *self, Task *task, uint64 tupleSize = tupleLibpqSize; if (tupleSize == 0) { - tupleSize = HeapTupleHeaderGetDatumLength(heapTuple); + tupleSize = heapTuple->t_len; } /* diff --git a/src/backend/distributed/metadata/dependency.c b/src/backend/distributed/metadata/dependency.c index f970cecd1f4..01653721ec3 100644 --- a/src/backend/distributed/metadata/dependency.c +++ b/src/backend/distributed/metadata/dependency.c @@ -10,8 +10,7 @@ #include "postgres.h" -#include "distributed/commands.h" -#include "distributed/pg_version_constants.h" +#include "miscadmin.h" #include "access/genam.h" #include "access/heapam.h" @@ -36,6 +35,13 @@ #include "catalog/pg_type.h" #include "commands/extension.h" #include "common/hashfn.h" +#include "utils/fmgroids.h" +#include "utils/hsearch.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" + +#include "pg_version_constants.h" + #include "distributed/citus_depended_object.h" #include "distributed/commands.h" #include "distributed/commands/utility_hook.h" @@ -46,11 +52,6 @@ #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" #include "distributed/version_compat.h" -#include "miscadmin.h" -#include "utils/fmgroids.h" -#include "utils/hsearch.h" -#include "utils/lsyscache.h" -#include "utils/syscache.h" /* * ObjectAddressCollector keeps track of collected ObjectAddresses. 
This can be used @@ -698,7 +699,6 @@ SupportedDependencyByCitus(const ObjectAddress *address) case OCLASS_DATABASE: { - /* only to propagate its owner */ return true; } diff --git a/src/backend/distributed/metadata/distobject.c b/src/backend/distributed/metadata/distobject.c index c6a8b0a2298..ff5b2c7a954 100644 --- a/src/backend/distributed/metadata/distobject.c +++ b/src/backend/distributed/metadata/distobject.c @@ -10,8 +10,6 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" - #include "miscadmin.h" #include "access/genam.h" @@ -22,48 +20,88 @@ #include "catalog/dependency.h" #include "catalog/namespace.h" #include "catalog/objectaddress.h" +#include "catalog/pg_database.h" #include "catalog/pg_extension_d.h" #include "catalog/pg_namespace.h" #include "catalog/pg_proc.h" #include "catalog/pg_type.h" -#include "citus_version.h" +#include "commands/dbcommands.h" #include "commands/extension.h" -#include "distributed/listutils.h" -#include "distributed/colocation_utils.h" -#include "distributed/commands.h" -#include "distributed/commands/utility_hook.h" -#include "distributed/metadata/dependency.h" -#include "distributed/metadata/distobject.h" -#include "distributed/metadata/pg_dist_object.h" -#include "distributed/metadata_cache.h" -#include "distributed/metadata_sync.h" -#include "distributed/version_compat.h" -#include "distributed/worker_transaction.h" #include "executor/spi.h" #include "nodes/makefuncs.h" #include "nodes/pg_list.h" #include "parser/parse_type.h" +#include "postmaster/postmaster.h" #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/lsyscache.h" #include "utils/regproc.h" #include "utils/rel.h" +#include "citus_version.h" +#include "pg_version_constants.h" + +#include "distributed/colocation_utils.h" +#include "distributed/commands.h" +#include "distributed/commands/utility_hook.h" +#include "distributed/listutils.h" +#include "distributed/metadata/dependency.h" +#include "distributed/metadata/distobject.h" +#include "distributed/metadata/pg_dist_object.h" +#include "distributed/metadata_cache.h" +#include "distributed/metadata_sync.h" +#include "distributed/remote_commands.h" +#include "distributed/version_compat.h" +#include "distributed/worker_transaction.h" -static char * CreatePgDistObjectEntryCommand(const ObjectAddress *objectAddress); +static char * CreatePgDistObjectEntryCommand(const ObjectAddress *objectAddress, + char *objectName); static int ExecuteCommandAsSuperuser(char *query, int paramCount, Oid *paramTypes, Datum *paramValues); static bool IsObjectDistributed(const ObjectAddress *address); +PG_FUNCTION_INFO_V1(mark_object_distributed); PG_FUNCTION_INFO_V1(citus_unmark_object_distributed); PG_FUNCTION_INFO_V1(master_unmark_object_distributed); /* - * citus_unmark_object_distributed(classid oid, objid oid, objsubid int) + * mark_object_distributed adds an object to pg_dist_object + * in all of the nodes, for the connections to the other nodes this function + * uses the user passed. 
+ */ +Datum +mark_object_distributed(PG_FUNCTION_ARGS) +{ + CheckCitusVersion(ERROR); + EnsureSuperUser(); + + Oid classId = PG_GETARG_OID(0); + text *objectNameText = PG_GETARG_TEXT_P(1); + char *objectName = text_to_cstring(objectNameText); + Oid objectId = PG_GETARG_OID(2); + ObjectAddress *objectAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*objectAddress, classId, objectId); + text *connectionUserText = PG_GETARG_TEXT_P(3); + char *connectionUser = text_to_cstring(connectionUserText); + + /* + * This function is called when a query is run from a Citus non-main database. + * We need to insert into local pg_dist_object over a connection to make sure + * 2PC still works. + */ + bool useConnectionForLocalQuery = true; + MarkObjectDistributedWithName(objectAddress, objectName, useConnectionForLocalQuery, + connectionUser); + PG_RETURN_VOID(); +} + + +/* + * citus_unmark_object_distributed(classid oid, objid oid, objsubid int, checkobjectexistence bool) * - * removes the entry for an object address from pg_dist_object. Only removes the entry if - * the object does not exist anymore. + * Removes the entry for an object address from pg_dist_object. If checkobjectexistence is true, + * throws an error if the object still exists. */ Datum citus_unmark_object_distributed(PG_FUNCTION_ARGS) @@ -71,6 +109,12 @@ citus_unmark_object_distributed(PG_FUNCTION_ARGS) Oid classid = PG_GETARG_OID(0); Oid objid = PG_GETARG_OID(1); int32 objsubid = PG_GETARG_INT32(2); + bool checkObjectExistence = true; + if (!PG_ARGISNULL(3)) + { + checkObjectExistence = PG_GETARG_BOOL(3); + } + ObjectAddress address = { 0 }; ObjectAddressSubSet(address, classid, objid, objsubid); @@ -81,7 +125,7 @@ citus_unmark_object_distributed(PG_FUNCTION_ARGS) PG_RETURN_VOID(); } - if (ObjectExists(&address)) + if (checkObjectExistence && ObjectExists(&address)) { ereport(ERROR, (errmsg("object still exists"), errdetail("the %s \"%s\" still exists", @@ -149,7 +193,7 @@ ObjectExists(const ObjectAddress *address) /* * MarkObjectDistributed marks an object as a distributed object. Marking is done * by adding appropriate entries to citus.pg_dist_object and also marking the object - * as distributed by opening a connection using current user to all of the workers + * as distributed by opening a connection using current user to all remote nodes * with metadata if object propagation is on. * * This function should be used if the user creating the given object. If you want @@ -158,13 +202,51 @@ ObjectExists(const ObjectAddress *address) void MarkObjectDistributed(const ObjectAddress *distAddress) { - MarkObjectDistributedLocally(distAddress); + bool useConnectionForLocalQuery = false; + MarkObjectDistributedWithName(distAddress, "", useConnectionForLocalQuery, + CurrentUserName()); +} + + +/* + * MarkObjectDistributedWithName marks an object as a distributed object. + * Same as MarkObjectDistributed but this function also allows passing an objectName + * that is used in case the object does not exist for the current transaction. + */ +void +MarkObjectDistributedWithName(const ObjectAddress *distAddress, char *objectName, + bool useConnectionForLocalQuery, char *connectionUser) +{ + if (!CitusHasBeenLoaded()) + { + elog(ERROR, "Cannot mark object distributed because Citus has not been loaded."); + } + + /* + * When a query is run from a Citus non-main database we need to insert into pg_dist_object + * over a connection to make sure 2PC still works.
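A minimal SQL sketch of calling the two UDFs above; the object OIDs, the database name db1, and the role postgres are hypothetical values, and the schema under which mark_object_distributed is exposed depends on the accompanying SQL migration:

    -- classid 1262 is 'pg_database'::regclass; requires superuser, connects back as the given user
    SELECT mark_object_distributed(1262, 'db1', 16384, 'postgres');

    -- the new optional fourth argument skips the "object still exists" check
    SELECT citus_unmark_object_distributed(1262, 16384, 0, false);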
+ */ + if (useConnectionForLocalQuery) + { + StringInfo insertQuery = makeStringInfo(); + appendStringInfo(insertQuery, + "INSERT INTO pg_catalog.pg_dist_object (classid, objid, objsubid)" + "VALUES (%d, %d, %d) ON CONFLICT DO NOTHING", + distAddress->classId, distAddress->objectId, + distAddress->objectSubId); + SendCommandToWorker(LocalHostName, PostPortNumber, insertQuery->data); + } + else + { + MarkObjectDistributedLocally(distAddress); + } if (EnableMetadataSync) { char *workerPgDistObjectUpdateCommand = - CreatePgDistObjectEntryCommand(distAddress); - SendCommandToWorkersWithMetadata(workerPgDistObjectUpdateCommand); + CreatePgDistObjectEntryCommand(distAddress, objectName); + SendCommandToRemoteMetadataNodesParams(workerPgDistObjectUpdateCommand, + connectionUser, 0, NULL, NULL); } } @@ -172,7 +254,7 @@ MarkObjectDistributed(const ObjectAddress *distAddress) /* * MarkObjectDistributedViaSuperUser marks an object as a distributed object. Marking * is done by adding appropriate entries to citus.pg_dist_object and also marking the - * object as distributed by opening a connection using super user to all of the workers + * object as distributed by opening a connection using super user to all remote nodes * with metadata if object propagation is on. * * This function should be used to mark dependent object as distributed. If you want @@ -186,8 +268,8 @@ MarkObjectDistributedViaSuperUser(const ObjectAddress *distAddress) if (EnableMetadataSync) { char *workerPgDistObjectUpdateCommand = - CreatePgDistObjectEntryCommand(distAddress); - SendCommandToWorkersWithMetadataViaSuperUser(workerPgDistObjectUpdateCommand); + CreatePgDistObjectEntryCommand(distAddress, ""); + SendCommandToRemoteNodesWithMetadataViaSuperUser(workerPgDistObjectUpdateCommand); } } @@ -277,17 +359,21 @@ ShouldMarkRelationDistributed(Oid relationId) * for the given object address. */ static char * -CreatePgDistObjectEntryCommand(const ObjectAddress *objectAddress) +CreatePgDistObjectEntryCommand(const ObjectAddress *objectAddress, char *objectName) { /* create a list by adding the address of value to not to have warning */ List *objectAddressList = list_make1((ObjectAddress *) objectAddress); + + /* names also require a list so we create a nested list here */ + List *objectNameList = list_make1(list_make1((char *) objectName)); List *distArgumetIndexList = list_make1_int(INVALID_DISTRIBUTION_ARGUMENT_INDEX); List *colocationIdList = list_make1_int(INVALID_COLOCATION_ID); List *forceDelegationList = list_make1_int(NO_FORCE_PUSHDOWN); char *workerPgDistObjectUpdateCommand = MarkObjectsDistributedCreateCommand(objectAddressList, + objectNameList, distArgumetIndexList, colocationIdList, forceDelegationList); @@ -357,6 +443,42 @@ ExecuteCommandAsSuperuser(char *query, int paramCount, Oid *paramTypes, } +/* + * UnmarkNodeWideObjectsDistributed deletes pg_dist_object records + * for all distributed objects in given Drop stmt node. + * + * Today we only expect DropRoleStmt and DropdbStmt to get here. 
+ */ +void +UnmarkNodeWideObjectsDistributed(Node *node) +{ + if (IsA(node, DropRoleStmt)) + { + DropRoleStmt *stmt = castNode(DropRoleStmt, node); + List *allDropRoles = stmt->roles; + + List *distributedDropRoles = FilterDistributedRoles(allDropRoles); + if (list_length(distributedDropRoles) > 0) + { + UnmarkRolesDistributed(distributedDropRoles); + } + } + else if (IsA(node, DropdbStmt)) + { + DropdbStmt *stmt = castNode(DropdbStmt, node); + char *dbName = stmt->dbname; + + Oid dbOid = get_database_oid(dbName, stmt->missing_ok); + ObjectAddress *dbObjectAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*dbObjectAddress, DatabaseRelationId, dbOid); + if (IsAnyObjectDistributed(list_make1(dbObjectAddress))) + { + UnmarkObjectDistributed(dbObjectAddress); + } + } +} + + /* * UnmarkObjectDistributed removes the entry from pg_dist_object that marks this object as * distributed. This will prevent updates to that object to be propagated to the worker. diff --git a/src/backend/distributed/metadata/metadata_cache.c b/src/backend/distributed/metadata/metadata_cache.c index 55d0f11c57f..402dedb8a91 100644 --- a/src/backend/distributed/metadata/metadata_cache.c +++ b/src/backend/distributed/metadata/metadata_cache.c @@ -8,20 +8,17 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" -#include "pg_version_compat.h" -#include "stdint.h" -#include "postgres.h" #include "libpq-fe.h" #include "miscadmin.h" +#include "stdint.h" #include "access/genam.h" #include "access/heapam.h" #include "access/htup_details.h" #include "access/nbtree.h" -#include "access/xact.h" #include "access/sysattr.h" +#include "access/xact.h" #include "catalog/index.h" #include "catalog/indexing.h" #include "catalog/pg_am.h" @@ -30,66 +27,70 @@ #include "catalog/pg_extension.h" #include "catalog/pg_namespace.h" #include "catalog/pg_type.h" -#include "citus_version.h" #include "commands/dbcommands.h" #include "commands/extension.h" #include "commands/trigger.h" +#include "common/hashfn.h" +#include "executor/executor.h" +#include "nodes/makefuncs.h" +#include "nodes/memnodes.h" +#include "nodes/pg_list.h" +#include "parser/parse_func.h" +#include "parser/parse_type.h" +#include "storage/lmgr.h" +#include "utils/array.h" +#include "utils/builtins.h" +#include "utils/catcache.h" +#include "utils/datum.h" +#include "utils/elog.h" +#include "utils/fmgroids.h" +#include "utils/hsearch.h" +#include "utils/inval.h" +#include "utils/jsonb.h" +#include "utils/lsyscache.h" +#include "utils/memutils.h" +#include "utils/palloc.h" +#include "utils/rel.h" +#include "utils/relmapper.h" +#include "utils/resowner.h" +#include "utils/syscache.h" +#include "utils/typcache.h" + +#include "citus_version.h" +#include "pg_version_compat.h" +#include "pg_version_constants.h" + #include "distributed/backend_data.h" #include "distributed/citus_depended_object.h" +#include "distributed/citus_ruleutils.h" #include "distributed/colocation_utils.h" #include "distributed/connection_management.h" -#include "distributed/citus_ruleutils.h" -#include "distributed/multi_executor.h" -#include "distributed/function_utils.h" -#include "distributed/listutils.h" #include "distributed/foreign_key_relationship.h" +#include "distributed/function_utils.h" #include "distributed/listutils.h" -#include "distributed/metadata_utility.h" #include "distributed/metadata/pg_dist_object.h" #include "distributed/metadata_cache.h" +#include "distributed/metadata_utility.h" #include "distributed/multi_executor.h" #include 
"distributed/multi_physical_planner.h" #include "distributed/pg_dist_local_group.h" -#include "distributed/pg_dist_node_metadata.h" #include "distributed/pg_dist_node.h" +#include "distributed/pg_dist_node_metadata.h" #include "distributed/pg_dist_partition.h" -#include "distributed/pg_dist_shard.h" #include "distributed/pg_dist_placement.h" -#include "distributed/shared_library_init.h" +#include "distributed/pg_dist_shard.h" +#include "distributed/remote_commands.h" #include "distributed/shardinterval_utils.h" +#include "distributed/shared_library_init.h" #include "distributed/utils/array_type.h" #include "distributed/utils/function.h" #include "distributed/version_compat.h" #include "distributed/worker_manager.h" #include "distributed/worker_protocol.h" -#include "executor/executor.h" -#include "nodes/makefuncs.h" -#include "nodes/memnodes.h" -#include "nodes/pg_list.h" -#include "parser/parse_func.h" -#include "parser/parse_type.h" -#include "storage/lmgr.h" -#include "utils/array.h" -#include "utils/builtins.h" -#include "utils/catcache.h" -#include "utils/datum.h" -#include "utils/elog.h" -#include "utils/hsearch.h" -#include "utils/jsonb.h" -#include "common/hashfn.h" -#include "utils/inval.h" -#include "utils/fmgroids.h" -#include "utils/lsyscache.h" -#include "utils/memutils.h" -#include "utils/palloc.h" -#include "utils/rel.h" + #if PG_VERSION_NUM < PG_VERSION_16 #include "utils/relfilenodemap.h" #endif -#include "utils/relmapper.h" -#include "utils/resowner.h" -#include "utils/syscache.h" -#include "utils/typcache.h" /* user configuration */ @@ -380,7 +381,7 @@ EnsureModificationsCanRun(void) /* - * EnsureModificationsCanRunOnRelation firsts calls into EnsureModificationsCanRun() and + * EnsureModificationsCanRunOnRelation first calls into EnsureModificationsCanRun() and * then does one more additional check. The additional check is to give a proper error * message if any relation that is modified is replicated, as replicated tables use * 2PC and 2PC cannot happen when recovery is in progress. @@ -521,8 +522,7 @@ IsCitusTableTypeCacheEntry(CitusTableCacheEntry *tableEntry, CitusTableType tabl /* - * HasDistributionKey returs true if given Citus table doesn't have a - * distribution key. + * HasDistributionKey returns true if given Citus table has a distribution key. */ bool HasDistributionKey(Oid relationId) @@ -538,8 +538,8 @@ HasDistributionKey(Oid relationId) /* - * HasDistributionKey returs true if given cache entry identifies a Citus - * table that doesn't have a distribution key. + * HasDistributionKeyCacheEntry returns true if given cache entry identifies a + * Citus table that has a distribution key. 
*/ bool HasDistributionKeyCacheEntry(CitusTableCacheEntry *tableEntry) diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c index 40bdae0eaf2..ef7c56dc752 100644 --- a/src/backend/distributed/metadata/metadata_sync.c +++ b/src/backend/distributed/metadata/metadata_sync.c @@ -11,13 +11,15 @@ *------------------------------------------------------------------------- */ -#include "postgres.h" -#include "miscadmin.h" - #include #include #include +#include "postgres.h" + +#include "miscadmin.h" +#include "pgstat.h" + #include "access/genam.h" #include "access/heapam.h" #include "access/htup_details.h" @@ -30,64 +32,66 @@ #include "catalog/pg_attrdef.h" #include "catalog/pg_collation.h" #include "catalog/pg_constraint.h" +#include "catalog/pg_database.h" +#include "catalog/pg_database_d.h" #include "catalog/pg_depend.h" #include "catalog/pg_foreign_server.h" #include "catalog/pg_namespace.h" #include "catalog/pg_proc.h" #include "catalog/pg_type.h" #include "commands/async.h" +#include "commands/dbcommands.h" +#include "executor/spi.h" +#include "foreign/foreign.h" +#include "nodes/makefuncs.h" +#include "nodes/pg_list.h" +#include "parser/parse_type.h" +#include "postmaster/bgworker.h" +#include "postmaster/postmaster.h" +#include "storage/lmgr.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/lsyscache.h" +#include "utils/memutils.h" +#include "utils/snapmgr.h" +#include "utils/syscache.h" + #include "distributed/argutils.h" #include "distributed/backend_data.h" #include "distributed/citus_ruleutils.h" #include "distributed/colocation_utils.h" -#include "distributed/tenant_schema_metadata.h" #include "distributed/commands.h" +#include "distributed/commands/utility_hook.h" +#include "distributed/coordinator_protocol.h" #include "distributed/deparser.h" #include "distributed/distribution_column.h" #include "distributed/listutils.h" -#include "distributed/metadata_utility.h" -#include "distributed/coordinator_protocol.h" #include "distributed/maintenanced.h" -#include "distributed/metadata_cache.h" -#include "distributed/metadata_sync.h" -#include "distributed/metadata_utility.h" #include "distributed/metadata/dependency.h" #include "distributed/metadata/distobject.h" #include "distributed/metadata/pg_dist_object.h" +#include "distributed/metadata_cache.h" +#include "distributed/metadata_sync.h" +#include "distributed/metadata_utility.h" #include "distributed/multi_executor.h" #include "distributed/multi_join_order.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/multi_physical_planner.h" #include "distributed/pg_dist_colocation.h" #include "distributed/pg_dist_node.h" -#include "distributed/pg_dist_shard.h" #include "distributed/pg_dist_schema.h" +#include "distributed/pg_dist_shard.h" #include "distributed/relation_access_tracking.h" #include "distributed/remote_commands.h" +#include "distributed/remote_transaction.h" #include "distributed/resource_lock.h" +#include "distributed/tenant_schema_metadata.h" #include "distributed/utils/array_type.h" #include "distributed/utils/function.h" +#include "distributed/version_compat.h" #include "distributed/worker_manager.h" #include "distributed/worker_protocol.h" #include "distributed/worker_transaction.h" -#include "distributed/version_compat.h" -#include "distributed/commands/utility_hook.h" -#include "executor/spi.h" -#include "foreign/foreign.h" -#include "miscadmin.h" -#include "nodes/makefuncs.h" -#include "nodes/pg_list.h" -#include 
"pgstat.h" -#include "postmaster/bgworker.h" -#include "postmaster/postmaster.h" -#include "parser/parse_type.h" -#include "storage/lmgr.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/lsyscache.h" -#include "utils/memutils.h" -#include "utils/snapmgr.h" -#include "utils/syscache.h" /* managed via a GUC */ @@ -120,6 +124,7 @@ static List * GetObjectsForGrantStmt(ObjectType objectType, Oid objectId); static AccessPriv * GetAccessPrivObjectForGrantStmt(char *permission); static List * GenerateGrantOnSchemaQueriesFromAclItem(Oid schemaOid, AclItem *aclItem); +static List * GenerateGrantOnDatabaseFromAclItem(Oid databaseOid, AclItem *aclItem); static List * GenerateGrantOnFunctionQueriesFromAclItem(Oid schemaOid, AclItem *aclItem); static List * GrantOnSequenceDDLCommands(Oid sequenceOid); @@ -134,7 +139,7 @@ static bool ShouldSkipMetadataChecks(void); static void EnsurePartitionMetadataIsSane(Oid relationId, char distributionMethod, int colocationId, char replicationModel, Var *distributionKey); -static void EnsureCoordinatorInitiatedOperation(void); +static void EnsureCitusInitiatedOperation(void); static void EnsureShardMetadataIsSane(Oid relationId, int64 shardId, char storageType, text *shardMinValue, text *shardMaxValue); @@ -179,6 +184,7 @@ PG_FUNCTION_INFO_V1(citus_internal_delete_colocation_metadata); PG_FUNCTION_INFO_V1(citus_internal_add_tenant_schema); PG_FUNCTION_INFO_V1(citus_internal_delete_tenant_schema); PG_FUNCTION_INFO_V1(citus_internal_update_none_dist_table_metadata); +PG_FUNCTION_INFO_V1(citus_internal_database_command); static bool got_SIGTERM = false; @@ -486,19 +492,7 @@ stop_metadata_sync_to_node(PG_FUNCTION_ARGS) bool ClusterHasKnownMetadataWorkers() { - bool workerWithMetadata = false; - - if (!IsCoordinator()) - { - workerWithMetadata = true; - } - - if (workerWithMetadata || HasMetadataWorkers()) - { - return true; - } - - return false; + return !IsCoordinator() || HasMetadataWorkers(); } @@ -895,6 +889,7 @@ NodeListIdempotentInsertCommand(List *workerNodeList) */ char * MarkObjectsDistributedCreateCommand(List *addresses, + List *namesArg, List *distributionArgumentIndexes, List *colocationIds, List *forceDelegations) @@ -919,9 +914,25 @@ MarkObjectsDistributedCreateCommand(List *addresses, int forceDelegation = list_nth_int(forceDelegations, currentObjectCounter); List *names = NIL; List *args = NIL; + char *objectType = NULL; - char *objectType = getObjectTypeDescription(address, false); - getObjectIdentityParts(address, &names, &args, false); + if (IsMainDBCommand) + { + /* + * When we try to distribute an object that's being created in a non Citus + * main database, we cannot find the name, since the object is not visible + * in Citus main database. + * Because of that we need to pass the name to this function. 
+ */ + names = list_nth(namesArg, currentObjectCounter); + bool missingOk = false; + objectType = getObjectTypeDescription(address, missingOk); + } + else + { + objectType = getObjectTypeDescription(address, false); + getObjectIdentityParts(address, &names, &args, IsMainDBCommand); + } if (!isFirstObject) { @@ -976,7 +987,7 @@ MarkObjectsDistributedCreateCommand(List *addresses, appendStringInfo(insertDistributedObjectsCommand, ") "); appendStringInfo(insertDistributedObjectsCommand, - "SELECT citus_internal_add_object_metadata(" + "SELECT citus_internal.add_object_metadata(" "typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) " "FROM distributed_object_data;"); @@ -1001,7 +1012,7 @@ citus_internal_add_object_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); /* * Ensure given distributionArgumentIndex and colocationId values are @@ -1111,7 +1122,7 @@ DistributionCreateCommand(CitusTableCacheEntry *cacheEntry) } appendStringInfo(insertDistributionCommand, - "SELECT citus_internal_add_partition_metadata " + "SELECT citus_internal.add_partition_metadata " "(%s::regclass, '%c', %s, %d, '%c')", quote_literal_cstr(qualifiedRelationName), distributionMethod, @@ -1153,7 +1164,7 @@ DistributionDeleteMetadataCommand(Oid relationId) char *qualifiedRelationName = generate_qualified_relation_name(relationId); appendStringInfo(deleteCommand, - "SELECT pg_catalog.citus_internal_delete_partition_metadata(%s)", + "SELECT citus_internal.delete_partition_metadata(%s)", quote_literal_cstr(qualifiedRelationName)); return deleteCommand->data; @@ -1236,7 +1247,7 @@ ShardListInsertCommand(List *shardIntervalList) appendStringInfo(insertPlacementCommand, ") "); appendStringInfo(insertPlacementCommand, - "SELECT citus_internal_add_placement_metadata(" + "SELECT citus_internal.add_placement_metadata(" "shardid, shardlength, groupid, placementid) " "FROM placement_data;"); @@ -1292,7 +1303,7 @@ ShardListInsertCommand(List *shardIntervalList) appendStringInfo(insertShardCommand, ") "); appendStringInfo(insertShardCommand, - "SELECT citus_internal_add_shard_metadata(relationname, shardid, " + "SELECT citus_internal.add_shard_metadata(relationname, shardid, " "storagetype, shardminvalue, shardmaxvalue) " "FROM shard_data;"); @@ -1331,7 +1342,7 @@ ShardDeleteCommandList(ShardInterval *shardInterval) StringInfo deleteShardCommand = makeStringInfo(); appendStringInfo(deleteShardCommand, - "SELECT citus_internal_delete_shard_metadata(%ld);", shardId); + "SELECT citus_internal.delete_shard_metadata(%ld);", shardId); return list_make1(deleteShardCommand->data); } @@ -1401,7 +1412,7 @@ ColocationIdUpdateCommand(Oid relationId, uint32 colocationId) StringInfo command = makeStringInfo(); char *qualifiedRelationName = generate_qualified_relation_name(relationId); appendStringInfo(command, - "SELECT citus_internal_update_relation_colocation(%s::regclass, %d)", + "SELECT citus_internal.update_relation_colocation(%s::regclass, %d)", quote_literal_cstr(qualifiedRelationName), colocationId); return command->data; @@ -1626,6 +1637,74 @@ GetDependentSequencesWithRelation(Oid relationId, List **seqInfoList, } +/* + * GetDependentRelationsWithSequence returns a list of oids of + * relations that have a dependency on the given sequence. + * There are three types of dependencies: + * 1.
direct auto (owned sequences), created using SERIAL or BIGSERIAL + * 2. indirect auto (through an AttrDef), created using DEFAULT nextval('..') + * 3. internal, created using GENERATED ALWAYS AS IDENTITY + * + * Depending on the passed deptype, we return the relations that have the + * given type(s): + * - DEPENDENCY_AUTO returns both 1 and 2 + * - DEPENDENCY_INTERNAL returns 3 + * + * The returned list can contain duplicates, as the same relation can have + * multiple dependencies on the sequence. + */ +List * +GetDependentRelationsWithSequence(Oid sequenceOid, char depType) +{ + List *relations = NIL; + ScanKeyData key[2]; + HeapTuple tup; + + Relation depRel = table_open(DependRelationId, AccessShareLock); + + ScanKeyInit(&key[0], + Anum_pg_depend_classid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(RelationRelationId)); + ScanKeyInit(&key[1], + Anum_pg_depend_objid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(sequenceOid)); + SysScanDesc scan = systable_beginscan(depRel, DependDependerIndexId, true, + NULL, lengthof(key), key); + while (HeapTupleIsValid(tup = systable_getnext(scan))) + { + Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup); + + if ( + deprec->refclassid == RelationRelationId && + deprec->refobjsubid != 0 && + deprec->deptype == depType) + { + relations = lappend_oid(relations, deprec->refobjid); + } + } + + systable_endscan(scan); + + table_close(depRel, AccessShareLock); + + if (depType == DEPENDENCY_AUTO) + { + Oid attrDefOid; + List *attrDefOids = GetAttrDefsFromSequence(sequenceOid); + + foreach_oid(attrDefOid, attrDefOids) + { + ObjectAddress columnAddress = GetAttrDefaultColumnAddress(attrDefOid); + relations = lappend_oid(relations, columnAddress.objectId); + } + } + + return relations; +} + + /* * GetSequencesFromAttrDef returns a list of sequence OIDs that have * dependency with the given attrdefOid in pg_depend @@ -1671,6 +1750,90 @@ GetSequencesFromAttrDef(Oid attrdefOid) } +#if PG_VERSION_NUM < PG_VERSION_15 + +/* + * Given a pg_attrdef OID, return the relation OID and column number of + * the owning column (represented as an ObjectAddress for convenience). + * + * Returns InvalidObjectAddress if there is no such pg_attrdef entry. 
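The pg_depend scan in GetDependentRelationsWithSequence above boils down to roughly this query; the sequence OID 16390 is a hypothetical value, 'a' stands for DEPENDENCY_AUTO and 'i' for DEPENDENCY_INTERNAL:

    SELECT refobjid
    FROM pg_catalog.pg_depend
    WHERE classid = 'pg_class'::regclass
      AND objid = 16390                       -- the sequence
      AND refclassid = 'pg_class'::regclass   -- referencing relations only
      AND refobjsubid <> 0                    -- tied to a specific column
      AND deptype = 'a';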
+ */ +ObjectAddress +GetAttrDefaultColumnAddress(Oid attrdefoid) +{ + ObjectAddress result = InvalidObjectAddress; + ScanKeyData skey[1]; + HeapTuple tup; + + Relation attrdef = table_open(AttrDefaultRelationId, AccessShareLock); + ScanKeyInit(&skey[0], + Anum_pg_attrdef_oid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(attrdefoid)); + SysScanDesc scan = systable_beginscan(attrdef, AttrDefaultOidIndexId, true, + NULL, 1, skey); + + if (HeapTupleIsValid(tup = systable_getnext(scan))) + { + Form_pg_attrdef atdform = (Form_pg_attrdef) GETSTRUCT(tup); + + result.classId = RelationRelationId; + result.objectId = atdform->adrelid; + result.objectSubId = atdform->adnum; + } + + systable_endscan(scan); + table_close(attrdef, AccessShareLock); + + return result; +} + + +#endif + + +/* + * GetAttrDefsFromSequence returns a list of attrdef OIDs that have + * a dependency on the given sequence + */ +List * +GetAttrDefsFromSequence(Oid seqOid) +{ + List *attrDefsResult = NIL; + ScanKeyData key[2]; + HeapTuple tup; + + Relation depRel = table_open(DependRelationId, AccessShareLock); + + ScanKeyInit(&key[0], + Anum_pg_depend_refclassid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(RelationRelationId)); + ScanKeyInit(&key[1], + Anum_pg_depend_refobjid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(seqOid)); + SysScanDesc scan = systable_beginscan(depRel, DependReferenceIndexId, true, + NULL, lengthof(key), key); + while (HeapTupleIsValid(tup = systable_getnext(scan))) + { + Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup); + + if (deprec->classid == AttrDefaultRelationId && + deprec->deptype == DEPENDENCY_NORMAL) + { + attrDefsResult = lappend_oid(attrDefsResult, deprec->objid); + } + } + + systable_endscan(scan); + + table_close(depRel, AccessShareLock); + + return attrDefsResult; +} + + /* * GetDependentFunctionsWithRelation returns the dependent functions for the * given relation id. @@ -2043,6 +2206,92 @@ GenerateGrantOnSchemaQueriesFromAclItem(Oid schemaOid, AclItem *aclItem) } + +/* + * GrantOnDatabaseDDLCommands creates a list of ddl commands for replicating the permissions + * of roles on databases. + */ +List * +GrantOnDatabaseDDLCommands(Oid databaseOid) +{ + HeapTuple databaseTuple = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(databaseOid)); + bool isNull = true; + Datum aclDatum = SysCacheGetAttr(DATABASEOID, databaseTuple, Anum_pg_database_datacl, + &isNull); + if (isNull) + { + ReleaseSysCache(databaseTuple); + return NIL; + } + Acl *acl = DatumGetAclPCopy(aclDatum); + AclItem *aclDat = ACL_DAT(acl); + int aclNum = ACL_NUM(acl); + List *commands = NIL; + + ReleaseSysCache(databaseTuple); + + for (int i = 0; i < aclNum; i++) + { + commands = list_concat(commands, + GenerateGrantOnDatabaseFromAclItem( + databaseOid, &aclDat[i])); + } + + return commands; +} + + +/* + * GenerateGrantOnDatabaseFromAclItem generates a query string for replicating a user's permissions + * on a database.
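For a single aclitem, GenerateGrantOnDatabaseFromAclItem (implemented below) produces statements along these lines; the role and database names are hypothetical, and WITH GRANT OPTION is appended when the corresponding grant-option bit is set:

    SET ROLE grantor_role;
    GRANT CONNECT ON DATABASE db1 TO grantee_role;
    GRANT CREATE ON DATABASE db1 TO grantee_role;
    GRANT TEMPORARY ON DATABASE db1 TO grantee_role;
    RESET ROLE;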
+ */ +List * +GenerateGrantOnDatabaseFromAclItem(Oid databaseOid, AclItem *aclItem) +{ + AclMode permissions = ACLITEM_GET_PRIVS(*aclItem) & ACL_ALL_RIGHTS_DATABASE; + AclMode grants = ACLITEM_GET_GOPTIONS(*aclItem) & ACL_ALL_RIGHTS_DATABASE; + + /* + * seems unlikely but we check if there is a grant option in the list without the actual permission + */ + Assert(!(grants & ACL_CONNECT) || (permissions & ACL_CONNECT)); + Assert(!(grants & ACL_CREATE) || (permissions & ACL_CREATE)); + Assert(!(grants & ACL_CREATE_TEMP) || (permissions & ACL_CREATE_TEMP)); + Oid granteeOid = aclItem->ai_grantee; + List *queries = NIL; + + queries = lappend(queries, GenerateSetRoleQuery(aclItem->ai_grantor)); + + if (permissions & ACL_CONNECT) + { + char *query = DeparseTreeNode((Node *) GenerateGrantStmtForRights( + OBJECT_DATABASE, granteeOid, databaseOid, + "CONNECT", + grants & ACL_CONNECT)); + queries = lappend(queries, query); + } + if (permissions & ACL_CREATE) + { + char *query = DeparseTreeNode((Node *) GenerateGrantStmtForRights( + OBJECT_DATABASE, granteeOid, databaseOid, + "CREATE", + grants & ACL_CREATE)); + queries = lappend(queries, query); + } + if (permissions & ACL_CREATE_TEMP) + { + char *query = DeparseTreeNode((Node *) GenerateGrantStmtForRights( + OBJECT_DATABASE, granteeOid, databaseOid, + "TEMPORARY", + grants & ACL_CREATE_TEMP)); + queries = lappend(queries, query); + } + + queries = lappend(queries, "RESET ROLE"); + + return queries; +} + + /* * GenerateGrantStmtForRights is the function for creating GrantStmt's for all * types of objects that are supported. It takes parameters to fill a GrantStmt's @@ -2116,6 +2365,11 @@ GetObjectsForGrantStmt(ObjectType objectType, Oid objectId) return list_make1(sequence); } + case OBJECT_DATABASE: + { + return list_make1(makeString(get_database_name(objectId))); + } + default: { elog(ERROR, "unsupported object type for GRANT"); @@ -3090,7 +3344,7 @@ citus_internal_add_partition_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); if (distributionMethod == DISTRIBUTE_BY_NONE && distributionColumnVar != NULL) { @@ -3206,7 +3460,7 @@ citus_internal_delete_partition_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); } DeletePartitionRow(relationId); @@ -3254,7 +3508,7 @@ citus_internal_add_shard_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); /* * Even if the table owner is a malicious user and the shard metadata is @@ -3272,19 +3526,13 @@ citus_internal_add_shard_metadata(PG_FUNCTION_ARGS) /* - * EnsureCoordinatorInitiatedOperation is a helper function which ensures that - * the execution is initiated by the coordinator on a worker node. + * EnsureCitusInitiatedOperation is a helper function which ensures that + * the execution is initiated by Citus. */ static void -EnsureCoordinatorInitiatedOperation(void) +EnsureCitusInitiatedOperation(void) { - /* - * We are restricting the operation to only MX workers with the local group id - * check. The other two checks are to ensure that the operation is initiated - * by the coordinator. 
- */ - if (!(IsCitusInternalBackend() || IsRebalancerInternalBackend()) || - GetLocalGroupId() == COORDINATOR_GROUP_ID) + if (!(IsCitusInternalBackend() || IsRebalancerInternalBackend())) { ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("This is an internal Citus function can only be " @@ -3465,7 +3713,7 @@ citus_internal_delete_placement_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); } DeleteShardPlacementRow(placementId); @@ -3513,7 +3761,7 @@ citus_internal_add_placement_metadata_internal(int64 shardId, int64 shardLength, if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); /* * Even if the table owner is a malicious user, as long as the shard placements @@ -3608,7 +3856,7 @@ citus_internal_update_placement_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); if (!ShardExists(shardId)) { @@ -3672,7 +3920,7 @@ citus_internal_delete_shard_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); if (!ShardExists(shardId)) { @@ -3715,7 +3963,7 @@ citus_internal_update_relation_colocation(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); /* ensure that the table is in pg_dist_partition */ char partitionMethod = PartitionMethodViaCatalog(relationId); @@ -3781,7 +4029,7 @@ citus_internal_add_colocation_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); } InsertColocationGroupLocally(colocationId, shardCount, replicationFactor, @@ -3806,7 +4054,7 @@ citus_internal_delete_colocation_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { /* this UDF is not allowed allowed for executing as a separate command */ - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); } DeleteColocationGroupLocally(colocationId); @@ -3885,7 +4133,7 @@ citus_internal_update_none_dist_table_metadata(PG_FUNCTION_ARGS) if (!ShouldSkipMetadataChecks()) { - EnsureCoordinatorInitiatedOperation(); + EnsureCitusInitiatedOperation(); } UpdateNoneDistTableMetadata(relationId, replicationModel, @@ -3895,6 +4143,70 @@ citus_internal_update_none_dist_table_metadata(PG_FUNCTION_ARGS) } +/* + * citus_internal_database_command is an internal UDF to + * create a database in an idempotent manner without + * transaction block restrictions.
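A minimal usage sketch for the UDF described above, assuming it is exposed as citus_internal.database_command as its error message suggests; the database name is hypothetical, only CREATE DATABASE commands are accepted, and the call is a no-op if the database already exists:

    SELECT citus_internal.database_command('CREATE DATABASE db1');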
+ */ +Datum +citus_internal_database_command(PG_FUNCTION_ARGS) +{ + CheckCitusVersion(ERROR); + + if (!ShouldSkipMetadataChecks()) + { + EnsureCitusInitiatedOperation(); + } + + PG_ENSURE_ARGNOTNULL(0, "command"); + + text *commandText = PG_GETARG_TEXT_P(0); + char *command = text_to_cstring(commandText); + Node *parseTree = ParseTreeNode(command); + + int saveNestLevel = NewGUCNestLevel(); + + set_config_option("citus.enable_ddl_propagation", "off", + (superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION, + GUC_ACTION_LOCAL, true, 0, false); + + set_config_option("citus.enable_create_database_propagation", "off", + (superuser() ? PGC_SUSET : PGC_USERSET), PGC_S_SESSION, + GUC_ACTION_LOCAL, true, 0, false); + + /* + * createdb() uses ParseState to report the error position for the + * input command and the position is reported to be 0 when it's provided as NULL. + * We're okay with that because we don't expect this UDF to be called with an incorrect + * DDL command. + */ + ParseState *pstate = NULL; + + if (IsA(parseTree, CreatedbStmt)) + { + CreatedbStmt *stmt = castNode(CreatedbStmt, parseTree); + + bool missingOk = true; + Oid databaseOid = get_database_oid(stmt->dbname, missingOk); + + if (!OidIsValid(databaseOid)) + { + createdb(pstate, (CreatedbStmt *) parseTree); + } + } + else + { + ereport(ERROR, (errmsg("citus_internal.database_command() can only be used " + "for CREATE DATABASE command by Citus."))); + } + + /* rollback GUCs to the state before this session */ + AtEOXact_GUC(true, saveNestLevel); + + PG_RETURN_VOID(); +} + + /* * SyncNewColocationGroup synchronizes a new pg_dist_colocation entry to a worker. */ @@ -3925,7 +4237,7 @@ ColocationGroupCreateCommand(uint32 colocationId, int shardCount, int replicatio StringInfo insertColocationCommand = makeStringInfo(); appendStringInfo(insertColocationCommand, - "SELECT pg_catalog.citus_internal_add_colocation_metadata(" + "SELECT citus_internal.add_colocation_metadata(" "%d, %d, %d, %s, %s)", colocationId, shardCount, @@ -4037,7 +4349,7 @@ ColocationGroupDeleteCommand(uint32 colocationId) StringInfo deleteColocationCommand = makeStringInfo(); appendStringInfo(deleteColocationCommand, - "SELECT pg_catalog.citus_internal_delete_colocation_metadata(%d)", + "SELECT citus_internal.delete_colocation_metadata(%d)", colocationId); return deleteColocationCommand->data; @@ -4053,7 +4365,7 @@ TenantSchemaInsertCommand(Oid schemaId, uint32 colocationId) { StringInfo command = makeStringInfo(); appendStringInfo(command, - "SELECT pg_catalog.citus_internal_add_tenant_schema(%s, %u)", + "SELECT citus_internal.add_tenant_schema(%s, %u)", RemoteSchemaIdExpressionById(schemaId), colocationId); return command->data; @@ -4069,7 +4381,7 @@ TenantSchemaDeleteCommand(char *schemaName) { StringInfo command = makeStringInfo(); appendStringInfo(command, - "SELECT pg_catalog.citus_internal_delete_tenant_schema(%s)", + "SELECT citus_internal.delete_tenant_schema(%s)", RemoteSchemaIdExpressionByName(schemaName)); return command->data; @@ -4086,7 +4398,7 @@ UpdateNoneDistTableMetadataCommand(Oid relationId, char replicationModel, { StringInfo command = makeStringInfo(); appendStringInfo(command, - "SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(%s, '%c', %u, %s)", + "SELECT citus_internal.update_none_dist_table_metadata(%s, '%c', %u, %s)", RemoteTableIdExpression(relationId), replicationModel, colocationId, autoConverted ? 
"true" : "false"); @@ -4104,7 +4416,7 @@ AddPlacementMetadataCommand(uint64 shardId, uint64 placementId, { StringInfo command = makeStringInfo(); appendStringInfo(command, - "SELECT citus_internal_add_placement_metadata(%ld, %ld, %d, %ld)", + "SELECT citus_internal.add_placement_metadata(%ld, %ld, %d, %ld)", shardId, shardLength, groupId, placementId); return command->data; } @@ -4119,7 +4431,7 @@ DeletePlacementMetadataCommand(uint64 placementId) { StringInfo command = makeStringInfo(); appendStringInfo(command, - "SELECT pg_catalog.citus_internal_delete_placement_metadata(%ld)", + "SELECT citus_internal.delete_placement_metadata(%ld)", placementId); return command->data; } @@ -4734,7 +5046,7 @@ SendColocationMetadataCommands(MetadataSyncContext *context) } appendStringInfo(colocationGroupCreateCommand, - ") SELECT pg_catalog.citus_internal_add_colocation_metadata(" + ") SELECT citus_internal.add_colocation_metadata(" "colocationid, shardcount, replicationfactor, " "distributioncolumntype, coalesce(c.oid, 0)) " "FROM colocation_group_data d LEFT JOIN pg_collation c " @@ -4785,7 +5097,7 @@ SendTenantSchemaMetadataCommands(MetadataSyncContext *context) StringInfo insertTenantSchemaCommand = makeStringInfo(); appendStringInfo(insertTenantSchemaCommand, - "SELECT pg_catalog.citus_internal_add_tenant_schema(%s, %u)", + "SELECT citus_internal.add_tenant_schema(%s, %u)", RemoteSchemaIdExpressionById(tenantSchemaForm->schemaid), tenantSchemaForm->colocationid); @@ -4994,6 +5306,7 @@ SendDistObjectCommands(MetadataSyncContext *context) char *workerMetadataUpdateCommand = MarkObjectsDistributedCreateCommand(list_make1(address), + NIL, list_make1_int(distributionArgumentIndex), list_make1_int(colocationId), list_make1_int(forceDelegation)); diff --git a/src/backend/distributed/metadata/metadata_utility.c b/src/backend/distributed/metadata/metadata_utility.c index ae0f6589a2b..15e167008dd 100644 --- a/src/backend/distributed/metadata/metadata_utility.c +++ b/src/backend/distributed/metadata/metadata_utility.c @@ -13,39 +13,52 @@ #include #include "postgres.h" + #include "funcapi.h" #include "libpq-fe.h" #include "miscadmin.h" -#include "distributed/pg_version_constants.h" - #include "access/genam.h" #include "access/htup_details.h" #include "access/sysattr.h" #include "access/xact.h" #include "catalog/dependency.h" +#include "catalog/index.h" #include "catalog/indexing.h" #include "catalog/pg_authid.h" #include "catalog/pg_constraint.h" #include "catalog/pg_extension.h" #include "catalog/pg_namespace.h" -#if PG_VERSION_NUM >= PG_VERSION_16 -#include "catalog/pg_proc_d.h" -#endif #include "catalog/pg_type.h" #include "commands/extension.h" #include "commands/sequence.h" +#include "nodes/makefuncs.h" +#include "parser/scansup.h" +#include "storage/lmgr.h" +#include "storage/procarray.h" +#include "utils/acl.h" +#include "utils/builtins.h" +#include "utils/datum.h" +#include "utils/fmgroids.h" +#include "utils/fmgrprotos.h" +#include "utils/inval.h" +#include "utils/lsyscache.h" +#include "utils/rel.h" +#include "utils/syscache.h" + +#include "pg_version_constants.h" + #include "distributed/background_jobs.h" -#include "distributed/colocation_utils.h" -#include "distributed/connection_management.h" #include "distributed/citus_nodes.h" #include "distributed/citus_safe_lib.h" +#include "distributed/colocation_utils.h" +#include "distributed/connection_management.h" +#include "distributed/coordinator_protocol.h" #include "distributed/listutils.h" #include "distributed/lock_graph.h" -#include 
"distributed/metadata_utility.h" -#include "distributed/coordinator_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" +#include "distributed/metadata_utility.h" #include "distributed/multi_join_order.h" #include "distributed/multi_logical_optimizer.h" #include "distributed/multi_partitioning_utils.h" @@ -55,31 +68,22 @@ #include "distributed/pg_dist_backrgound_task_depend.h" #include "distributed/pg_dist_colocation.h" #include "distributed/pg_dist_partition.h" -#include "distributed/pg_dist_shard.h" #include "distributed/pg_dist_placement.h" +#include "distributed/pg_dist_shard.h" #include "distributed/reference_table_utils.h" #include "distributed/relay_utility.h" -#include "distributed/resource_lock.h" #include "distributed/remote_commands.h" +#include "distributed/resource_lock.h" #include "distributed/shard_rebalancer.h" #include "distributed/tuplestore.h" #include "distributed/utils/array_type.h" +#include "distributed/version_compat.h" #include "distributed/worker_manager.h" #include "distributed/worker_protocol.h" -#include "distributed/version_compat.h" -#include "nodes/makefuncs.h" -#include "parser/scansup.h" -#include "storage/lmgr.h" -#include "storage/procarray.h" -#include "utils/acl.h" -#include "utils/builtins.h" -#include "utils/datum.h" -#include "utils/fmgroids.h" -#include "utils/fmgrprotos.h" -#include "utils/inval.h" -#include "utils/lsyscache.h" -#include "utils/rel.h" -#include "utils/syscache.h" + +#if PG_VERSION_NUM >= PG_VERSION_16 +#include "catalog/pg_proc_d.h" +#endif #define DISK_SPACE_FIELDS 2 @@ -88,11 +92,11 @@ static uint64 * AllocateUint64(uint64 value); static void RecordDistributedRelationDependencies(Oid distributedRelationId); static GroupShardPlacement * TupleToGroupShardPlacement(TupleDesc tupleDesc, HeapTuple heapTuple); -static bool DistributedTableSize(Oid relationId, SizeQueryType sizeQueryType, - bool failOnError, uint64 *tableSize); -static bool DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId, - SizeQueryType sizeQueryType, bool failOnError, - uint64 *tableSize); +static bool DistributedRelationSize(Oid relationId, SizeQueryType sizeQueryType, + bool failOnError, uint64 *relationSize); +static bool DistributedRelationSizeOnWorker(WorkerNode *workerNode, Oid relationId, + SizeQueryType sizeQueryType, bool failOnError, + uint64 *relationSize); static List * ShardIntervalsOnWorkerGroup(WorkerNode *workerNode, Oid relationId); static char * GenerateShardIdNameValuesForShardList(List *shardIntervalList, bool firstValue); @@ -282,7 +286,7 @@ citus_shard_sizes(PG_FUNCTION_ARGS) /* - * citus_total_relation_size accepts a table name and returns a distributed table + * citus_total_relation_size accepts a distributed table name and returns a distributed table * and its indexes' total relation size. */ Datum @@ -294,20 +298,20 @@ citus_total_relation_size(PG_FUNCTION_ARGS) bool failOnError = PG_GETARG_BOOL(1); SizeQueryType sizeQueryType = TOTAL_RELATION_SIZE; - uint64 tableSize = 0; + uint64 relationSize = 0; - if (!DistributedTableSize(relationId, sizeQueryType, failOnError, &tableSize)) + if (!DistributedRelationSize(relationId, sizeQueryType, failOnError, &relationSize)) { Assert(!failOnError); PG_RETURN_NULL(); } - PG_RETURN_INT64(tableSize); + PG_RETURN_INT64(relationSize); } /* - * citus_table_size accepts a table name and returns a distributed table's total + * citus_table_size accepts a distributed table name and returns a distributed table's total * relation size. 
*/ Datum @@ -318,21 +322,24 @@ citus_table_size(PG_FUNCTION_ARGS) Oid relationId = PG_GETARG_OID(0); bool failOnError = true; SizeQueryType sizeQueryType = TABLE_SIZE; - uint64 tableSize = 0; + uint64 relationSize = 0; - if (!DistributedTableSize(relationId, sizeQueryType, failOnError, &tableSize)) + /* We do not check if relation is really a table, like PostgreSQL is doing. */ + if (!DistributedRelationSize(relationId, sizeQueryType, failOnError, &relationSize)) { Assert(!failOnError); PG_RETURN_NULL(); } - PG_RETURN_INT64(tableSize); + PG_RETURN_INT64(relationSize); } /* - * citus_relation_size accept a table name and returns a relation's 'main' + * citus_relation_size accept a distributed relation name and returns a relation's 'main' * fork's size. + * + * Input relation is allowed to be an index on a distributed table too. */ Datum citus_relation_size(PG_FUNCTION_ARGS) @@ -344,7 +351,7 @@ citus_relation_size(PG_FUNCTION_ARGS) SizeQueryType sizeQueryType = RELATION_SIZE; uint64 relationSize = 0; - if (!DistributedTableSize(relationId, sizeQueryType, failOnError, &relationSize)) + if (!DistributedRelationSize(relationId, sizeQueryType, failOnError, &relationSize)) { Assert(!failOnError); PG_RETURN_NULL(); @@ -506,13 +513,16 @@ ReceiveShardIdAndSizeResults(List *connectionList, Tuplestorestate *tupleStore, /* - * DistributedTableSize is helper function for each kind of citus size functions. - * It first checks whether the table is distributed and size query can be run on - * it. Connection to each node has to be established to get the size of the table. + * DistributedRelationSize is helper function for each kind of citus size + * functions. It first checks whether the relation is a distributed table or an + * index belonging to a distributed table and size query can be run on it. + * Connection to each node has to be established to get the size of the + * relation. + * Input relation is allowed to be an index on a distributed table too. */ static bool -DistributedTableSize(Oid relationId, SizeQueryType sizeQueryType, bool failOnError, - uint64 *tableSize) +DistributedRelationSize(Oid relationId, SizeQueryType sizeQueryType, + bool failOnError, uint64 *relationSize) { int logLevel = WARNING; @@ -538,7 +548,7 @@ DistributedTableSize(Oid relationId, SizeQueryType sizeQueryType, bool failOnErr if (relation == NULL) { ereport(logLevel, - (errmsg("could not compute table size: relation does not exist"))); + (errmsg("could not compute relation size: relation does not exist"))); return false; } @@ -553,8 +563,9 @@ DistributedTableSize(Oid relationId, SizeQueryType sizeQueryType, bool failOnErr { uint64 relationSizeOnNode = 0; - bool gotSize = DistributedTableSizeOnWorker(workerNode, relationId, sizeQueryType, - failOnError, &relationSizeOnNode); + bool gotSize = DistributedRelationSizeOnWorker(workerNode, relationId, + sizeQueryType, + failOnError, &relationSizeOnNode); if (!gotSize) { return false; @@ -563,21 +574,22 @@ DistributedTableSize(Oid relationId, SizeQueryType sizeQueryType, bool failOnErr sumOfSizes += relationSizeOnNode; } - *tableSize = sumOfSizes; + *relationSize = sumOfSizes; return true; } /* - * DistributedTableSizeOnWorker gets the workerNode and relationId to calculate + * DistributedRelationSizeOnWorker gets the workerNode and relationId to calculate * size of that relation on the given workerNode by summing up the size of each * shard placement. + * Input relation is allowed to be an index on a distributed table too. 
*/ static bool -DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId, - SizeQueryType sizeQueryType, - bool failOnError, uint64 *tableSize) +DistributedRelationSizeOnWorker(WorkerNode *workerNode, Oid relationId, + SizeQueryType sizeQueryType, + bool failOnError, uint64 *relationSize) { int logLevel = WARNING; @@ -591,6 +603,17 @@ DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId, uint32 connectionFlag = 0; PGresult *result = NULL; + /* if the relation is an index, update relationId and define indexId */ + Oid indexId = InvalidOid; + Oid relKind = get_rel_relkind(relationId); + if (relKind == RELKIND_INDEX || relKind == RELKIND_PARTITIONED_INDEX) + { + indexId = relationId; + + bool missingOk = false; + relationId = IndexGetRelation(indexId, missingOk); + } + List *shardIntervalsOnNode = ShardIntervalsOnWorkerGroup(workerNode, relationId); /* @@ -598,21 +621,22 @@ DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId, * But citus size functions shouldn't include them, like PG. */ bool optimizePartitionCalculations = false; - StringInfo tableSizeQuery = GenerateSizeQueryOnMultiplePlacements( + StringInfo relationSizeQuery = GenerateSizeQueryOnMultiplePlacements( shardIntervalsOnNode, + indexId, sizeQueryType, optimizePartitionCalculations); MultiConnection *connection = GetNodeConnection(connectionFlag, workerNodeName, workerNodePort); - int queryResult = ExecuteOptionalRemoteCommand(connection, tableSizeQuery->data, + int queryResult = ExecuteOptionalRemoteCommand(connection, relationSizeQuery->data, &result); if (queryResult != 0) { ereport(logLevel, (errcode(ERRCODE_CONNECTION_FAILURE), errmsg("could not connect to %s:%d to get size of " - "table \"%s\"", + "relation \"%s\"", workerNodeName, workerNodePort, get_rel_name(relationId)))); @@ -626,19 +650,19 @@ DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId, ClearResults(connection, failOnError); ereport(logLevel, (errcode(ERRCODE_CONNECTION_FAILURE), - errmsg("cannot parse size of table \"%s\" from %s:%d", + errmsg("cannot parse size of relation \"%s\" from %s:%d", get_rel_name(relationId), workerNodeName, workerNodePort))); return false; } - StringInfo tableSizeStringInfo = (StringInfo) linitial(sizeList); - char *tableSizeString = tableSizeStringInfo->data; + StringInfo relationSizeStringInfo = (StringInfo) linitial(sizeList); + char *relationSizeString = relationSizeStringInfo->data; - if (strlen(tableSizeString) > 0) + if (strlen(relationSizeString) > 0) { - *tableSize = SafeStringToUint64(tableSizeString); + *relationSize = SafeStringToUint64(relationSizeString); } else { @@ -647,7 +671,7 @@ DistributedTableSizeOnWorker(WorkerNode *workerNode, Oid relationId, * being executed. For this case we get an empty string as table size. * We can take that as zero to prevent any unnecessary errors. */ - *tableSize = 0; + *relationSize = 0; } PQclear(result); @@ -732,7 +756,7 @@ ShardIntervalsOnWorkerGroup(WorkerNode *workerNode, Oid relationId) /* * GenerateSizeQueryOnMultiplePlacements generates a select size query to get - * size of multiple tables. Note that, different size functions supported by PG + * size of multiple relations. Note that, different size functions supported by PG * are also supported by this function changing the size query type given as the * last parameter to function. 
Depending on the sizeQueryType enum parameter, the * generated query will call one of the functions: pg_relation_size, @@ -740,9 +764,13 @@ ShardIntervalsOnWorkerGroup(WorkerNode *workerNode, Oid relationId) * This function uses UDFs named worker_partitioned_*_size for partitioned tables, * if the parameter optimizePartitionCalculations is true. The UDF to be called is * determined by the parameter sizeQueryType. + * + * indexId is provided if we're interested in the size of an index, not the whole + * table. */ StringInfo GenerateSizeQueryOnMultiplePlacements(List *shardIntervalList, + Oid indexId, SizeQueryType sizeQueryType, bool optimizePartitionCalculations) { @@ -766,16 +794,20 @@ GenerateSizeQueryOnMultiplePlacements(List *shardIntervalList, */ continue; } + + /* we need to build the shard relation name, being an index or table */ + Oid objectId = OidIsValid(indexId) ? indexId : shardInterval->relationId; + uint64 shardId = shardInterval->shardId; - Oid schemaId = get_rel_namespace(shardInterval->relationId); + Oid schemaId = get_rel_namespace(objectId); char *schemaName = get_namespace_name(schemaId); - char *shardName = get_rel_name(shardInterval->relationId); + char *shardName = get_rel_name(objectId); AppendShardIdToName(&shardName, shardId); char *shardQualifiedName = quote_qualified_identifier(schemaName, shardName); char *quotedShardName = quote_literal_cstr(shardQualifiedName); - /* for partitoned tables, we will call worker_partitioned_... size functions */ + /* for partitioned tables, we will call worker_partitioned_... size functions */ if (optimizePartitionCalculations && PartitionedTable(shardInterval->relationId)) { partitionedShardNames = lappend(partitionedShardNames, quotedShardName); @@ -1010,7 +1042,7 @@ AppendShardIdNameValues(StringInfo selectQuery, ShardInterval *shardInterval) /* - * ErrorIfNotSuitableToGetSize determines whether the table is suitable to find + * ErrorIfNotSuitableToGetSize determines whether the relation is suitable to find * its' size with internal functions. 
*/ static void @@ -1018,11 +1050,32 @@ ErrorIfNotSuitableToGetSize(Oid relationId) { if (!IsCitusTable(relationId)) { - char *relationName = get_rel_name(relationId); - char *escapedQueryString = quote_literal_cstr(relationName); - ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("cannot calculate the size because relation %s is not " - "distributed", escapedQueryString))); + Oid relKind = get_rel_relkind(relationId); + if (relKind != RELKIND_INDEX && relKind != RELKIND_PARTITIONED_INDEX) + { + char *relationName = get_rel_name(relationId); + char *escapedRelationName = quote_literal_cstr(relationName); + ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), + errmsg( + "cannot calculate the size because relation %s " + "is not distributed", + escapedRelationName))); + } + bool missingOk = false; + Oid indexId = relationId; + relationId = IndexGetRelation(relationId, missingOk); + if (!IsCitusTable(relationId)) + { + char *tableName = get_rel_name(relationId); + char *escapedTableName = quote_literal_cstr(tableName); + char *indexName = get_rel_name(indexId); + char *escapedIndexName = quote_literal_cstr(indexName); + ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), + errmsg( + "cannot calculate the size because table %s for " + "index %s is not distributed", + escapedTableName, escapedIndexName))); + } } } diff --git a/src/backend/distributed/metadata/node_metadata.c b/src/backend/distributed/metadata/node_metadata.c index a73f2e9d2cb..d93b133eaed 100644 --- a/src/backend/distributed/metadata/node_metadata.c +++ b/src/backend/distributed/metadata/node_metadata.c @@ -5,34 +5,49 @@ * Copyright (c) Citus Data, Inc. */ #include "postgres.h" -#include "miscadmin.h" + #include "funcapi.h" -#include "utils/plancache.h" +#include "miscadmin.h" #include "access/genam.h" #include "access/heapam.h" #include "access/htup.h" #include "access/htup_details.h" #include "access/skey.h" -#include "access/skey.h" #include "access/tupmacs.h" #include "access/xact.h" #include "catalog/indexing.h" #include "catalog/namespace.h" #include "commands/sequence.h" +#include "executor/spi.h" +#include "lib/stringinfo.h" +#include "postmaster/postmaster.h" +#include "storage/bufmgr.h" +#include "storage/fd.h" +#include "storage/lmgr.h" +#include "storage/lock.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/lsyscache.h" +#include "utils/plancache.h" +#include "utils/rel.h" +#include "utils/relcache.h" + #include "distributed/citus_acquire_lock.h" #include "distributed/citus_safe_lib.h" #include "distributed/colocation_utils.h" #include "distributed/commands.h" #include "distributed/commands/utility_hook.h" #include "distributed/connection_management.h" -#include "distributed/maintenanced.h" #include "distributed/coordinator_protocol.h" -#include "distributed/metadata_utility.h" +#include "distributed/maintenanced.h" #include "distributed/metadata/distobject.h" +#include "distributed/metadata/pg_dist_object.h" #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" +#include "distributed/metadata_utility.h" #include "distributed/multi_join_order.h" +#include "distributed/multi_partitioning_utils.h" #include "distributed/multi_router_planner.h" #include "distributed/pg_dist_node.h" #include "distributed/pg_dist_node_metadata.h" @@ -40,26 +55,12 @@ #include "distributed/remote_commands.h" #include "distributed/resource_lock.h" #include "distributed/shardinterval_utils.h" -#include "distributed/multi_partitioning_utils.h" #include 
"distributed/shared_connection_stats.h" #include "distributed/string_utils.h" -#include "distributed/metadata/pg_dist_object.h" #include "distributed/transaction_recovery.h" #include "distributed/version_compat.h" #include "distributed/worker_manager.h" #include "distributed/worker_transaction.h" -#include "executor/spi.h" -#include "lib/stringinfo.h" -#include "postmaster/postmaster.h" -#include "storage/bufmgr.h" -#include "storage/lmgr.h" -#include "storage/lock.h" -#include "storage/fd.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/lsyscache.h" -#include "utils/rel.h" -#include "utils/relcache.h" #define INVALID_GROUP_ID -1 @@ -506,7 +507,13 @@ citus_disable_node(PG_FUNCTION_ARGS) { text *nodeNameText = PG_GETARG_TEXT_P(0); int32 nodePort = PG_GETARG_INT32(1); - bool synchronousDisableNode = PG_GETARG_BOOL(2); + + bool synchronousDisableNode = 1; + Assert(PG_NARGS() == 2 || PG_NARGS() == 3); + if (PG_NARGS() == 3) + { + synchronousDisableNode = PG_GETARG_BOOL(2); + } char *nodeName = text_to_cstring(nodeNameText); WorkerNode *workerNode = ModifiableWorkerNode(nodeName, nodePort); @@ -1691,7 +1698,7 @@ EnsureParentSessionHasExclusiveLockOnPgDistNode(pid_t parentSessionPid) if (!parentHasExclusiveLock) { ereport(ERROR, (errmsg("lock is not held by the caller. Unexpected caller " - "for citus_internal_mark_node_not_synced"))); + "for citus_internal.mark_node_not_synced"))); } } @@ -1750,6 +1757,10 @@ citus_internal_mark_node_not_synced(PG_FUNCTION_ARGS) /* * FindWorkerNode searches over the worker nodes and returns the workerNode * if it already exists. Else, the function returns NULL. + * + * NOTE: A special case that this handles is when nodeName and nodePort are set + * to LocalHostName and PostPortNumber. In that case we return the primary node + * for the local group. */ WorkerNode * FindWorkerNode(const char *nodeName, int32 nodePort) @@ -1772,6 +1783,11 @@ FindWorkerNode(const char *nodeName, int32 nodePort) return workerNode; } + if (strcmp(LocalHostName, nodeName) == 0 && nodePort == PostPortNumber) + { + return PrimaryNodeForGroup(GetLocalGroupId(), NULL); + } + return NULL; } @@ -2742,6 +2758,25 @@ EnsureCoordinator(void) } +/* + * EnsurePropagationToCoordinator checks whether the coordinator is added to the + * metadata if we're not on the coordinator. + * + * Given that metadata syncing skips syncing metadata to the coordinator, we need + * too make sure that the coordinator is added to the metadata before propagating + * a command from a worker. For this reason, today we use this only for the commands + * that we support propagating from workers. + */ +void +EnsurePropagationToCoordinator(void) +{ + if (!IsCoordinator()) + { + EnsureCoordinatorIsInMetadata(); + } +} + + /* * EnsureCoordinatorIsInMetadata checks whether the coordinator is added to the * metadata, which is required for many operations. 
@@ -2751,12 +2786,24 @@ EnsureCoordinatorIsInMetadata(void) { bool isCoordinatorInMetadata = false; PrimaryNodeForGroup(COORDINATOR_GROUP_ID, &isCoordinatorInMetadata); - if (!isCoordinatorInMetadata) + if (isCoordinatorInMetadata) + { + return; + } + + /* be more descriptive when we're not on coordinator */ + if (IsCoordinator()) { ereport(ERROR, (errmsg("coordinator is not added to the metadata"), errhint("Use SELECT citus_set_coordinator_host('') " "to configure the coordinator hostname"))); } + else + { + ereport(ERROR, (errmsg("coordinator is not added to the metadata"), + errhint("Use SELECT citus_set_coordinator_host('') " + "on coordinator to configure the coordinator hostname"))); + } } diff --git a/src/backend/distributed/metadata/pg_get_object_address_13_14_15.c b/src/backend/distributed/metadata/pg_get_object_address_13_14_15.c index 54f764fc1bf..abe378cdbf0 100644 --- a/src/backend/distributed/metadata/pg_get_object_address_13_14_15.c +++ b/src/backend/distributed/metadata/pg_get_object_address_13_14_15.c @@ -16,23 +16,26 @@ */ #include "postgres.h" + #include "miscadmin.h" #include "catalog/objectaddress.h" #include "catalog/pg_type.h" -#include "distributed/citus_ruleutils.h" -#include "distributed/citus_safe_lib.h" -#include "distributed/metadata/dependency.h" -#include "distributed/metadata/distobject.h" -#include "distributed/pg_version_constants.h" -#include "distributed/version_compat.h" +#include "mb/pg_wchar.h" #include "nodes/value.h" +#include "parser/parse_type.h" #include "utils/array.h" #include "utils/builtins.h" #include "utils/fmgroids.h" #include "utils/varlena.h" -#include "mb/pg_wchar.h" -#include "parser/parse_type.h" + +#include "pg_version_constants.h" + +#include "distributed/citus_ruleutils.h" +#include "distributed/citus_safe_lib.h" +#include "distributed/metadata/dependency.h" +#include "distributed/metadata/distobject.h" +#include "distributed/version_compat.h" static void ErrorIfCurrentUserCanNotDistributeObject(char *textType, ObjectType type, diff --git a/src/backend/distributed/operations/citus_create_restore_point.c b/src/backend/distributed/operations/citus_create_restore_point.c index 42fc5311f98..8a5e738e44b 100644 --- a/src/backend/distributed/operations/citus_create_restore_point.c +++ b/src/backend/distributed/operations/citus_create_restore_point.c @@ -10,22 +10,24 @@ */ #include "postgres.h" + #include "libpq-fe.h" #include "access/xlog.h" #include "access/xlog_internal.h" #include "catalog/pg_type.h" -#include "distributed/connection_management.h" -#include "distributed/listutils.h" -#include "distributed/metadata_utility.h" -#include "distributed/metadata_cache.h" -#include "distributed/remote_commands.h" #include "nodes/pg_list.h" #include "storage/lmgr.h" #include "storage/lock.h" #include "utils/builtins.h" #include "utils/pg_lsn.h" +#include "distributed/connection_management.h" +#include "distributed/listutils.h" +#include "distributed/metadata_cache.h" +#include "distributed/metadata_utility.h" +#include "distributed/remote_commands.h" + #define CREATE_RESTORE_POINT_COMMAND "SELECT pg_catalog.pg_create_restore_point($1::text)" diff --git a/src/backend/distributed/operations/citus_split_shard_by_split_points.c b/src/backend/distributed/operations/citus_split_shard_by_split_points.c index 5bdbaf576f9..076e58d4c02 100644 --- a/src/backend/distributed/operations/citus_split_shard_by_split_points.c +++ b/src/backend/distributed/operations/citus_split_shard_by_split_points.c @@ -10,19 +10,21 @@ */ #include "postgres.h" + #include 
"catalog/pg_type.h" -#include "nodes/pg_list.h" #include "lib/stringinfo.h" +#include "nodes/pg_list.h" #include "utils/builtins.h" #include "utils/lsyscache.h" -#include "distributed/utils/array_type.h" + #include "distributed/colocation_utils.h" -#include "distributed/metadata_cache.h" -#include "distributed/shardinterval_utils.h" -#include "distributed/coordinator_protocol.h" #include "distributed/connection_management.h" +#include "distributed/coordinator_protocol.h" +#include "distributed/metadata_cache.h" #include "distributed/remote_commands.h" #include "distributed/shard_split.h" +#include "distributed/shardinterval_utils.h" +#include "distributed/utils/array_type.h" #include "distributed/utils/distribution_column_map.h" /* declarations for dynamic loading */ diff --git a/src/backend/distributed/operations/citus_tools.c b/src/backend/distributed/operations/citus_tools.c index 8f6f80c2b37..bc4aa55604c 100644 --- a/src/backend/distributed/operations/citus_tools.c +++ b/src/backend/distributed/operations/citus_tools.c @@ -12,8 +12,15 @@ #include "postgres.h" +#include "funcapi.h" +#include "libpq-fe.h" +#include "miscadmin.h" + #include "access/htup_details.h" #include "catalog/pg_type.h" +#include "lib/stringinfo.h" +#include "utils/builtins.h" + #include "distributed/backend_data.h" #include "distributed/connection_management.h" #include "distributed/metadata_cache.h" @@ -23,11 +30,6 @@ #include "distributed/utils/function.h" #include "distributed/version_compat.h" #include "distributed/worker_protocol.h" -#include "funcapi.h" -#include "lib/stringinfo.h" -#include "libpq-fe.h" -#include "miscadmin.h" -#include "utils/builtins.h" PG_FUNCTION_INFO_V1(master_run_on_worker); diff --git a/src/backend/distributed/operations/create_shards.c b/src/backend/distributed/operations/create_shards.c index d0fcc961256..96254705122 100644 --- a/src/backend/distributed/operations/create_shards.c +++ b/src/backend/distributed/operations/create_shards.c @@ -10,35 +10,22 @@ *------------------------------------------------------------------------- */ +#include +#include +#include +#include +#include + #include "postgres.h" + #include "c.h" #include "fmgr.h" #include "libpq-fe.h" #include "miscadmin.h" #include "port.h" -#include -#include -#include -#include -#include - #include "catalog/namespace.h" #include "catalog/pg_class.h" -#include "distributed/listutils.h" -#include "distributed/metadata_utility.h" -#include "distributed/coordinator_protocol.h" -#include "distributed/metadata_cache.h" -#include "distributed/multi_join_order.h" -#include "distributed/multi_executor.h" -#include "distributed/multi_partitioning_utils.h" -#include "distributed/pg_dist_partition.h" -#include "distributed/pg_dist_shard.h" -#include "distributed/reference_table_utils.h" -#include "distributed/resource_lock.h" -#include "distributed/shardinterval_utils.h" -#include "distributed/transaction_management.h" -#include "distributed/worker_manager.h" #include "lib/stringinfo.h" #include "nodes/pg_list.h" #include "nodes/primnodes.h" @@ -52,6 +39,21 @@ #include "utils/lsyscache.h" #include "utils/palloc.h" +#include "distributed/coordinator_protocol.h" +#include "distributed/listutils.h" +#include "distributed/metadata_cache.h" +#include "distributed/metadata_utility.h" +#include "distributed/multi_executor.h" +#include "distributed/multi_join_order.h" +#include "distributed/multi_partitioning_utils.h" +#include "distributed/pg_dist_partition.h" +#include "distributed/pg_dist_shard.h" +#include 
"distributed/reference_table_utils.h" +#include "distributed/resource_lock.h" +#include "distributed/shardinterval_utils.h" +#include "distributed/transaction_management.h" +#include "distributed/worker_manager.h" + /* declarations for dynamic loading */ PG_FUNCTION_INFO_V1(master_create_worker_shards); @@ -158,13 +160,6 @@ CreateShardsWithRoundRobinPolicy(Oid distributedTableId, int32 shardCount, "replication factor."))); } - /* if we have enough nodes, add an extra placement attempt for backup */ - uint32 placementAttemptCount = (uint32) replicationFactor; - if (workerNodeCount > replicationFactor) - { - placementAttemptCount++; - } - /* set shard storage type according to relation type */ char shardStorageType = ShardStorageType(distributedTableId); diff --git a/src/backend/distributed/operations/delete_protocol.c b/src/backend/distributed/operations/delete_protocol.c index abed39272a1..39651715853 100644 --- a/src/backend/distributed/operations/delete_protocol.c +++ b/src/backend/distributed/operations/delete_protocol.c @@ -13,9 +13,9 @@ *------------------------------------------------------------------------- */ -#include "postgres.h" +#include -#include "distributed/pg_version_constants.h" +#include "postgres.h" #include "c.h" #include "fmgr.h" @@ -23,17 +23,37 @@ #include "miscadmin.h" #include "port.h" -#include - #include "access/xact.h" #include "catalog/namespace.h" #include "commands/dbcommands.h" +#include "lib/stringinfo.h" +#include "nodes/nodeFuncs.h" +#include "nodes/nodes.h" +#include "nodes/parsenodes.h" +#include "nodes/pathnodes.h" +#include "nodes/pg_list.h" +#include "nodes/primnodes.h" +#include "optimizer/clauses.h" +#include "optimizer/optimizer.h" +#include "optimizer/restrictinfo.h" +#include "storage/lmgr.h" +#include "storage/lock.h" +#include "tcop/tcopprot.h" +#include "utils/array.h" +#include "utils/builtins.h" +#include "utils/elog.h" +#include "utils/errcodes.h" +#include "utils/lsyscache.h" +#include "utils/varlena.h" + +#include "pg_version_constants.h" + #include "distributed/commands/utility_hook.h" #include "distributed/connection_management.h" +#include "distributed/coordinator_protocol.h" #include "distributed/deparse_shard_query.h" #include "distributed/listutils.h" #include "distributed/local_executor.h" -#include "distributed/coordinator_protocol.h" #include "distributed/metadata_sync.h" #include "distributed/multi_join_order.h" #include "distributed/multi_logical_planner.h" @@ -47,25 +67,6 @@ #include "distributed/shard_cleaner.h" #include "distributed/worker_protocol.h" #include "distributed/worker_transaction.h" -#include "lib/stringinfo.h" -#include "nodes/nodeFuncs.h" -#include "nodes/nodes.h" -#include "nodes/parsenodes.h" -#include "nodes/pg_list.h" -#include "nodes/primnodes.h" -#include "optimizer/clauses.h" -#include "nodes/pathnodes.h" -#include "optimizer/optimizer.h" -#include "optimizer/restrictinfo.h" -#include "storage/lock.h" -#include "storage/lmgr.h" -#include "tcop/tcopprot.h" -#include "utils/array.h" -#include "utils/builtins.h" -#include "utils/elog.h" -#include "utils/errcodes.h" -#include "utils/lsyscache.h" -#include "utils/varlena.h" /* Local functions forward declarations */ @@ -425,10 +426,9 @@ ExecuteDropShardPlacementCommandRemotely(ShardPlacement *shardPlacement, errdetail("Marking this shard placement for " "deletion"))); - InsertCleanupRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - shardRelationName, - shardPlacement->groupId, - CLEANUP_DEFERRED_ON_SUCCESS); + 
InsertCleanupOnSuccessRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, + shardRelationName, + shardPlacement->groupId); return; } diff --git a/src/backend/distributed/operations/health_check.c b/src/backend/distributed/operations/health_check.c index b3246f88883..c908606c156 100644 --- a/src/backend/distributed/operations/health_check.c +++ b/src/backend/distributed/operations/health_check.c @@ -13,6 +13,8 @@ #include "postgres.h" +#include "utils/builtins.h" + #include "distributed/argutils.h" #include "distributed/listutils.h" #include "distributed/lock_graph.h" @@ -20,7 +22,6 @@ #include "distributed/remote_commands.h" #include "distributed/tuplestore.h" #include "distributed/worker_manager.h" -#include "utils/builtins.h" /* simple query to run on workers to check connectivity */ #define CONNECTIVITY_CHECK_QUERY "SELECT 1" diff --git a/src/backend/distributed/operations/isolate_shards.c b/src/backend/distributed/operations/isolate_shards.c index ec89ae40293..502b00f5bb9 100644 --- a/src/backend/distributed/operations/isolate_shards.c +++ b/src/backend/distributed/operations/isolate_shards.c @@ -11,11 +11,20 @@ */ #include "postgres.h" + #include "c.h" #include "fmgr.h" #include "libpq-fe.h" #include "catalog/pg_class.h" +#include "nodes/pg_list.h" +#include "storage/lock.h" +#include "utils/builtins.h" +#include "utils/elog.h" +#include "utils/errcodes.h" +#include "utils/lsyscache.h" +#include "utils/typcache.h" + #include "distributed/colocation_utils.h" #include "distributed/coordinator_protocol.h" #include "distributed/metadata_cache.h" @@ -25,22 +34,15 @@ #include "distributed/multi_router_planner.h" #include "distributed/pg_dist_partition.h" #include "distributed/pg_dist_shard.h" -#include "distributed/remote_commands.h" #include "distributed/reference_table_utils.h" +#include "distributed/remote_commands.h" #include "distributed/resource_lock.h" +#include "distributed/shard_split.h" +#include "distributed/utils/distribution_column_map.h" +#include "distributed/version_compat.h" #include "distributed/worker_manager.h" #include "distributed/worker_protocol.h" #include "distributed/worker_transaction.h" -#include "distributed/version_compat.h" -#include "distributed/shard_split.h" -#include "distributed/utils/distribution_column_map.h" -#include "nodes/pg_list.h" -#include "storage/lock.h" -#include "utils/builtins.h" -#include "utils/elog.h" -#include "utils/errcodes.h" -#include "utils/lsyscache.h" -#include "utils/typcache.h" /* declarations for dynamic loading */ PG_FUNCTION_INFO_V1(isolate_tenant_to_new_shard); diff --git a/src/backend/distributed/operations/modify_multiple_shards.c b/src/backend/distributed/operations/modify_multiple_shards.c index 8def1b26e8f..9e287972850 100644 --- a/src/backend/distributed/operations/modify_multiple_shards.c +++ b/src/backend/distributed/operations/modify_multiple_shards.c @@ -14,46 +14,46 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" - #include "funcapi.h" #include "libpq-fe.h" #include "miscadmin.h" - #include "catalog/pg_class.h" #include "commands/dbcommands.h" #include "commands/event_trigger.h" +#include "nodes/makefuncs.h" +#include "optimizer/clauses.h" +#include "optimizer/optimizer.h" +#include "optimizer/restrictinfo.h" +#include "tcop/tcopprot.h" +#include "utils/builtins.h" +#include "utils/datum.h" +#include "utils/inval.h" +#include "utils/lsyscache.h" +#include "utils/memutils.h" + +#include "pg_version_constants.h" + #include "distributed/citus_clauses.h" #include 
"distributed/citus_ruleutils.h" #include "distributed/commands.h" -#include "distributed/listutils.h" -#include "distributed/metadata_utility.h" #include "distributed/coordinator_protocol.h" +#include "distributed/distributed_planner.h" +#include "distributed/listutils.h" #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" +#include "distributed/metadata_utility.h" #include "distributed/multi_executor.h" #include "distributed/multi_physical_planner.h" #include "distributed/multi_server_executor.h" -#include "distributed/distributed_planner.h" -#include "distributed/pg_dist_shard.h" #include "distributed/pg_dist_partition.h" +#include "distributed/pg_dist_shard.h" #include "distributed/resource_lock.h" -#include "distributed/shardinterval_utils.h" #include "distributed/shard_pruning.h" +#include "distributed/shardinterval_utils.h" #include "distributed/version_compat.h" #include "distributed/worker_protocol.h" #include "distributed/worker_transaction.h" -#include "optimizer/clauses.h" -#include "optimizer/optimizer.h" -#include "optimizer/restrictinfo.h" -#include "nodes/makefuncs.h" -#include "tcop/tcopprot.h" -#include "utils/builtins.h" -#include "utils/datum.h" -#include "utils/inval.h" -#include "utils/lsyscache.h" -#include "utils/memutils.h" PG_FUNCTION_INFO_V1(master_modify_multiple_shards); diff --git a/src/backend/distributed/operations/node_protocol.c b/src/backend/distributed/operations/node_protocol.c index a3f7092d1bd..52e44bea01c 100644 --- a/src/backend/distributed/operations/node_protocol.c +++ b/src/backend/distributed/operations/node_protocol.c @@ -11,17 +11,15 @@ *------------------------------------------------------------------------- */ -#include "postgres.h" +#include -#include "distributed/pg_version_constants.h" +#include "postgres.h" #include "c.h" #include "fmgr.h" #include "funcapi.h" #include "miscadmin.h" -#include - #include "access/attnum.h" #include "access/genam.h" #include "access/heapam.h" @@ -37,20 +35,9 @@ #include "catalog/pg_class.h" #include "catalog/pg_constraint.h" #include "catalog/pg_index.h" -#include "catalog/pg_type.h" #include "catalog/pg_namespace.h" +#include "catalog/pg_type.h" #include "commands/sequence.h" -#include "distributed/citus_ruleutils.h" -#include "distributed/commands.h" -#include "distributed/listutils.h" -#include "distributed/coordinator_protocol.h" -#include "distributed/metadata_cache.h" -#include "distributed/metadata_sync.h" -#include "distributed/namespace_utils.h" -#include "distributed/pg_dist_shard.h" -#include "distributed/shared_library_init.h" -#include "distributed/version_compat.h" -#include "distributed/worker_manager.h" #include "foreign/foreign.h" #include "lib/stringinfo.h" #include "nodes/pg_list.h" @@ -65,6 +52,20 @@ #include "utils/ruleutils.h" #include "utils/varlena.h" +#include "pg_version_constants.h" + +#include "distributed/citus_ruleutils.h" +#include "distributed/commands.h" +#include "distributed/coordinator_protocol.h" +#include "distributed/listutils.h" +#include "distributed/metadata_cache.h" +#include "distributed/metadata_sync.h" +#include "distributed/namespace_utils.h" +#include "distributed/pg_dist_shard.h" +#include "distributed/shared_library_init.h" +#include "distributed/version_compat.h" +#include "distributed/worker_manager.h" + /* Shard related configuration */ int ShardCount = 32; int ShardReplicationFactor = 1; /* desired replication factor for shards */ diff --git a/src/backend/distributed/operations/partitioning.c 
b/src/backend/distributed/operations/partitioning.c index 9e205792759..afcaa8ac15c 100644 --- a/src/backend/distributed/operations/partitioning.c +++ b/src/backend/distributed/operations/partitioning.c @@ -9,13 +9,12 @@ */ #include "postgres.h" + #include "fmgr.h" #include "funcapi.h" #include "access/htup.h" #include "access/htup_details.h" -#include "distributed/metadata_cache.h" -#include "distributed/metadata_utility.h" #include "nodes/parsenodes.h" #include "nodes/pg_list.h" #include "utils/builtins.h" @@ -23,6 +22,9 @@ #include "utils/lsyscache.h" #include "utils/syscache.h" +#include "distributed/metadata_cache.h" +#include "distributed/metadata_utility.h" + /* exports for SQL callable functions */ PG_FUNCTION_INFO_V1(time_partition_range); diff --git a/src/backend/distributed/operations/replicate_none_dist_table_shard.c b/src/backend/distributed/operations/replicate_none_dist_table_shard.c index c28490367db..33a98ee4226 100644 --- a/src/backend/distributed/operations/replicate_none_dist_table_shard.c +++ b/src/backend/distributed/operations/replicate_none_dist_table_shard.c @@ -10,7 +10,9 @@ */ #include "postgres.h" + #include "miscadmin.h" + #include "nodes/pg_list.h" #include "distributed/adaptive_executor.h" diff --git a/src/backend/distributed/operations/shard_cleaner.c b/src/backend/distributed/operations/shard_cleaner.c index 42877bf1023..2efce9a7b09 100644 --- a/src/backend/distributed/operations/shard_cleaner.c +++ b/src/backend/distributed/operations/shard_cleaner.c @@ -10,27 +10,29 @@ */ #include "postgres.h" + #include "miscadmin.h" + #include "access/genam.h" #include "access/xact.h" #include "catalog/namespace.h" #include "commands/dbcommands.h" #include "commands/sequence.h" -#include "postmaster/postmaster.h" #include "nodes/makefuncs.h" +#include "postmaster/postmaster.h" #include "utils/builtins.h" #include "utils/fmgroids.h" #include "distributed/citus_safe_lib.h" -#include "distributed/listutils.h" #include "distributed/coordinator_protocol.h" +#include "distributed/listutils.h" #include "distributed/metadata_cache.h" -#include "distributed/shard_cleaner.h" -#include "distributed/shard_rebalancer.h" +#include "distributed/pg_dist_cleanup.h" #include "distributed/remote_commands.h" #include "distributed/resource_lock.h" +#include "distributed/shard_cleaner.h" +#include "distributed/shard_rebalancer.h" #include "distributed/worker_transaction.h" -#include "distributed/pg_dist_cleanup.h" #define REPLICATION_SLOT_CATALOG_TABLE_NAME "pg_replication_slots" #define STR_ERRCODE_OBJECT_IN_USE "55006" @@ -90,6 +92,8 @@ static bool TryDropReplicationSlotOutsideTransaction(char *replicationSlotName, char *nodeName, int nodePort); static bool TryDropUserOutsideTransaction(char *username, char *nodeName, int nodePort); +static bool TryDropDatabaseOutsideTransaction(char *databaseName, char *nodeName, + int nodePort); static CleanupRecord * GetCleanupRecordByNameAndType(char *objectName, CleanupObject type); @@ -139,7 +143,6 @@ Datum citus_cleanup_orphaned_resources(PG_FUNCTION_ARGS) { CheckCitusVersion(ERROR); - EnsureCoordinator(); PreventInTransactionBlock(true, "citus_cleanup_orphaned_resources"); int droppedCount = DropOrphanedResourcesForCleanup(); @@ -243,12 +246,6 @@ TryDropOrphanedResources() static int DropOrphanedResourcesForCleanup() { - /* Only runs on Coordinator */ - if (!IsCoordinator()) - { - return 0; - } - List *cleanupRecordList = ListCleanupRecords(); /* @@ -450,15 +447,15 @@ CompareCleanupRecordsByObjectType(const void *leftElement, const void *rightElem 
/* - * InsertCleanupRecordInCurrentTransaction inserts a new pg_dist_cleanup entry + * InsertCleanupOnSuccessRecordInCurrentTransaction inserts a new pg_dist_cleanup entry * as part of the current transaction. This is primarily useful for deferred drop scenarios, - * since these records would roll back in case of operation failure. + * since these records would roll back in case of operation failure. And for the same reason, + * always sets the policy type to CLEANUP_DEFERRED_ON_SUCCESS. */ void -InsertCleanupRecordInCurrentTransaction(CleanupObject objectType, - char *objectName, - int nodeGroupId, - CleanupPolicy policy) +InsertCleanupOnSuccessRecordInCurrentTransaction(CleanupObject objectType, + char *objectName, + int nodeGroupId) { /* We must have a valid OperationId. Any operation requring cleanup * will call RegisterOperationNeedingCleanup. @@ -480,7 +477,8 @@ InsertCleanupRecordInCurrentTransaction(CleanupObject objectType, values[Anum_pg_dist_cleanup_object_type - 1] = Int32GetDatum(objectType); values[Anum_pg_dist_cleanup_object_name - 1] = CStringGetTextDatum(objectName); values[Anum_pg_dist_cleanup_node_group_id - 1] = Int32GetDatum(nodeGroupId); - values[Anum_pg_dist_cleanup_policy_type - 1] = Int32GetDatum(policy); + values[Anum_pg_dist_cleanup_policy_type - 1] = + Int32GetDatum(CLEANUP_DEFERRED_ON_SUCCESS); /* open cleanup relation and insert new tuple */ Oid relationId = DistCleanupRelationId(); @@ -497,23 +495,27 @@ InsertCleanupRecordInCurrentTransaction(CleanupObject objectType, /* - * InsertCleanupRecordInSubtransaction inserts a new pg_dist_cleanup entry in a + * InsertCleanupRecordOutsideTransaction inserts a new pg_dist_cleanup entry in a * separate transaction to ensure the record persists after rollback. We should * delete these records if the operation completes successfully. * - * For failure scenarios, use a subtransaction (direct insert via localhost). + * This is used in scenarios where we need to cleanup resources on operation + * completion (CLEANUP_ALWAYS) or on failure (CLEANUP_ON_FAILURE). */ void -InsertCleanupRecordInSubtransaction(CleanupObject objectType, - char *objectName, - int nodeGroupId, - CleanupPolicy policy) +InsertCleanupRecordOutsideTransaction(CleanupObject objectType, + char *objectName, + int nodeGroupId, + CleanupPolicy policy) { /* We must have a valid OperationId. Any operation requring cleanup * will call RegisterOperationNeedingCleanup. */ Assert(CurrentOperationId != INVALID_OPERATION_ID); + /* assert the circumstance noted in function comment */ + Assert(policy == CLEANUP_ALWAYS || policy == CLEANUP_ON_FAILURE); + StringInfo sequenceName = makeStringInfo(); appendStringInfo(sequenceName, "%s.%s", PG_CATALOG, @@ -601,6 +603,12 @@ TryDropResourceByCleanupRecordOutsideTransaction(CleanupRecord *record, return TryDropUserOutsideTransaction(record->objectName, nodeName, nodePort); } + case CLEANUP_OBJECT_DATABASE: + { + return TryDropDatabaseOutsideTransaction(record->objectName, nodeName, + nodePort); + } + default: { ereport(WARNING, (errmsg( @@ -881,6 +889,69 @@ TryDropUserOutsideTransaction(char *username, } +/* + * TryDropDatabaseOutsideTransaction drops the database with the given name + * if it exists. 
+ */ +static bool +TryDropDatabaseOutsideTransaction(char *databaseName, char *nodeName, int nodePort) +{ + int connectionFlags = (OUTSIDE_TRANSACTION | FORCE_NEW_CONNECTION); + MultiConnection *connection = GetNodeUserDatabaseConnection(connectionFlags, + nodeName, nodePort, + CitusExtensionOwnerName(), + NULL); + + if (PQstatus(connection->pgConn) != CONNECTION_OK) + { + return false; + } + + /* + * We want to disable DDL propagation and set lock_timeout before issuing + * the DROP DATABASE command but we cannot do so in a way that's scoped + * to the DROP DATABASE command. This is because, we cannot use a + * transaction block for the DROP DATABASE command. + * + * For this reason, to avoid leaking the lock_timeout and DDL propagation + * settings to future commands, we force the connection to close at the end + * of the transaction. + */ + ForceConnectionCloseAtTransactionEnd(connection); + + /* + * The DROP DATABASE command should not propagate, so we disable DDL + * propagation. + */ + List *commandList = list_make3( + "SET lock_timeout TO '1s'", + "SET citus.enable_ddl_propagation TO OFF;", + psprintf("DROP DATABASE IF EXISTS %s;", quote_identifier(databaseName)) + ); + + bool executeCommand = true; + + const char *commandString = NULL; + foreach_ptr(commandString, commandList) + { + /* + * Cannot use SendOptionalCommandListToWorkerOutsideTransactionWithConnection() + * because we don't want to open a transaction block on remote nodes as DROP + * DATABASE commands cannot be run inside a transaction block. + */ + if (ExecuteOptionalRemoteCommand(connection, commandString, NULL) != + RESPONSE_OKAY) + { + executeCommand = false; + break; + } + } + + CloseConnection(connection); + return executeCommand; +} + + /* * ErrorIfCleanupRecordForShardExists errors out if a cleanup record for the given * shard name exists. 
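/*
 * Editorial sketch, assuming the cleanup API spelled out in the hunks above
 * (RegisterOperationNeedingCleanup, InsertCleanupRecordOutsideTransaction,
 * CLEANUP_OBJECT_DATABASE, CLEANUP_ON_FAILURE) keeps those names and that the
 * declarations are reachable via distributed/shard_cleaner.h; the helper
 * itself is hypothetical and not part of this patch. It shows the intended
 * pairing: a CLEANUP_ON_FAILURE record is written outside the current
 * transaction before the database is created on the remote node, so that if
 * the operation later fails, TryDropDatabaseOutsideTransaction above can drop
 * the leftover database during cleanup.
 */
#include "postgres.h"

#include "distributed/shard_cleaner.h"

static void
RegisterRemoteDatabaseForCleanupOnFailure(char *databaseName, int nodeGroupId)
{
	/* any operation that needs cleanup registers an operation id first */
	RegisterOperationNeedingCleanup();

	/* the record survives an abort of our transaction, which is the point */
	InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_DATABASE,
										  databaseName,
										  nodeGroupId,
										  CLEANUP_ON_FAILURE);
}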
diff --git a/src/backend/distributed/operations/shard_rebalancer.c b/src/backend/distributed/operations/shard_rebalancer.c index e3ee4aa4d0b..03dc4c1b84e 100644 --- a/src/backend/distributed/operations/shard_rebalancer.c +++ b/src/backend/distributed/operations/shard_rebalancer.c @@ -12,30 +12,47 @@ */ -#include "postgres.h" -#include "libpq-fe.h" - #include -#include "distributed/pg_version_constants.h" +#include "postgres.h" + +#include "funcapi.h" +#include "libpq-fe.h" +#include "miscadmin.h" -#include "access/htup_details.h" #include "access/genam.h" -#include "catalog/pg_type.h" +#include "access/htup_details.h" #include "catalog/pg_proc.h" +#include "catalog/pg_type.h" #include "commands/dbcommands.h" #include "commands/sequence.h" +#include "common/hashfn.h" +#include "postmaster/postmaster.h" +#include "storage/lmgr.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/guc_tables.h" +#include "utils/json.h" +#include "utils/lsyscache.h" +#include "utils/memutils.h" +#include "utils/pg_lsn.h" +#include "utils/syscache.h" +#include "utils/varlena.h" + +#include "pg_version_constants.h" + #include "distributed/argutils.h" #include "distributed/background_jobs.h" -#include "distributed/citus_safe_lib.h" #include "distributed/citus_ruleutils.h" +#include "distributed/citus_safe_lib.h" #include "distributed/colocation_utils.h" +#include "distributed/commands/utility_hook.h" #include "distributed/connection_management.h" +#include "distributed/coordinator_protocol.h" #include "distributed/enterprise.h" #include "distributed/hash_helpers.h" #include "distributed/listutils.h" #include "distributed/lock_graph.h" -#include "distributed/coordinator_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/metadata_utility.h" #include "distributed/multi_logical_replication.h" @@ -45,27 +62,12 @@ #include "distributed/reference_table_utils.h" #include "distributed/remote_commands.h" #include "distributed/resource_lock.h" -#include "distributed/shard_rebalancer.h" #include "distributed/shard_cleaner.h" +#include "distributed/shard_rebalancer.h" #include "distributed/shard_transfer.h" #include "distributed/tuplestore.h" #include "distributed/utils/array_type.h" #include "distributed/worker_protocol.h" -#include "funcapi.h" -#include "miscadmin.h" -#include "postmaster/postmaster.h" -#include "storage/lmgr.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/json.h" -#include "utils/lsyscache.h" -#include "utils/memutils.h" -#include "utils/pg_lsn.h" -#include "utils/syscache.h" -#include "common/hashfn.h" -#include "utils/varlena.h" -#include "utils/guc_tables.h" -#include "distributed/commands/utility_hook.h" /* RebalanceOptions are the options used to control the rebalance algorithm */ typedef struct RebalanceOptions @@ -317,7 +319,7 @@ PG_FUNCTION_INFO_V1(citus_rebalance_start); PG_FUNCTION_INFO_V1(citus_rebalance_stop); PG_FUNCTION_INFO_V1(citus_rebalance_wait); -bool RunningUnderIsolationTest = false; +bool RunningUnderCitusTestSuite = false; int MaxRebalancerLoggedIgnoredMoves = 5; int RebalancerByDiskSizeBaseCost = 100 * 1024 * 1024; bool PropagateSessionSettingsForLoopbackConnection = false; @@ -382,6 +384,7 @@ CheckRebalanceStateInvariants(const RebalanceState *state) Assert(shardCost->cost <= prevShardCost->cost); } totalCost += shardCost->cost; + prevShardCost = shardCost; } /* Check that utilization field is up to date. 
*/ diff --git a/src/backend/distributed/operations/shard_split.c b/src/backend/distributed/operations/shard_split.c index 0772b03b488..4baf0fb241b 100644 --- a/src/backend/distributed/operations/shard_split.c +++ b/src/backend/distributed/operations/shard_split.c @@ -10,41 +10,43 @@ */ #include "postgres.h" + #include "miscadmin.h" + +#include "commands/dbcommands.h" #include "common/hashfn.h" +#include "lib/stringinfo.h" #include "nodes/pg_list.h" +#include "postmaster/postmaster.h" #include "utils/array.h" -#include "distributed/utils/array_type.h" -#include "lib/stringinfo.h" #include "utils/builtins.h" #include "utils/lsyscache.h" -#include "distributed/shared_library_init.h" + #include "distributed/adaptive_executor.h" #include "distributed/colocation_utils.h" +#include "distributed/connection_management.h" +#include "distributed/coordinator_protocol.h" +#include "distributed/deparse_shard_query.h" #include "distributed/hash_helpers.h" #include "distributed/metadata_cache.h" -#include "distributed/shardinterval_utils.h" -#include "distributed/coordinator_protocol.h" -#include "distributed/connection_management.h" -#include "distributed/remote_commands.h" -#include "distributed/shard_split.h" +#include "distributed/metadata_sync.h" +#include "distributed/multi_partitioning_utils.h" +#include "distributed/multi_physical_planner.h" +#include "distributed/pg_dist_shard.h" #include "distributed/reference_table_utils.h" -#include "distributed/shard_transfer.h" +#include "distributed/remote_commands.h" #include "distributed/resource_lock.h" -#include "distributed/multi_partitioning_utils.h" -#include "distributed/worker_manager.h" -#include "distributed/worker_transaction.h" #include "distributed/shard_cleaner.h" +#include "distributed/shard_rebalancer.h" +#include "distributed/shard_split.h" +#include "distributed/shard_transfer.h" +#include "distributed/shardinterval_utils.h" +#include "distributed/shardsplit_logical_replication.h" #include "distributed/shared_library_init.h" -#include "distributed/pg_dist_shard.h" -#include "distributed/metadata_sync.h" -#include "distributed/multi_physical_planner.h" +#include "distributed/utils/array_type.h" #include "distributed/utils/distribution_column_map.h" -#include "commands/dbcommands.h" -#include "distributed/shardsplit_logical_replication.h" -#include "distributed/deparse_shard_query.h" -#include "distributed/shard_rebalancer.h" -#include "postmaster/postmaster.h" +#include "distributed/worker_manager.h" +#include "distributed/worker_transaction.h" /* * Entry for map that tracks ShardInterval -> Placement Node @@ -731,11 +733,11 @@ CreateSplitShardsForShardGroup(List *shardGroupSplitIntervalListList, workerPlacementNode->workerPort))); } - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - ConstructQualifiedShardName( - shardInterval), - workerPlacementNode->groupId, - CLEANUP_ON_FAILURE); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, + ConstructQualifiedShardName( + shardInterval), + workerPlacementNode->groupId, + CLEANUP_ON_FAILURE); /* Create new split child shard on the specified placement list */ CreateObjectOnPlacement(splitShardCreationCommandList, @@ -1312,7 +1314,7 @@ DropShardListMetadata(List *shardIntervalList) { ListCell *commandCell = NULL; - /* send the commands one by one (calls citus_internal_delete_shard_metadata internally) */ + /* send the commands one by one (calls citus_internal.delete_shard_metadata internally) */ List *shardMetadataDeleteCommandList = 
ShardDeleteCommandList(shardInterval); foreach(commandCell, shardMetadataDeleteCommandList) { @@ -1715,11 +1717,11 @@ CreateDummyShardsForShardGroup(HTAB *mapOfPlacementToDummyShardList, /* Log shard in pg_dist_cleanup. Given dummy shards are transient resources, * we want to cleanup irrespective of operation success or failure. */ - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - ConstructQualifiedShardName( - shardInterval), - workerPlacementNode->groupId, - CLEANUP_ALWAYS); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, + ConstructQualifiedShardName( + shardInterval), + workerPlacementNode->groupId, + CLEANUP_ALWAYS); /* Create dummy source shard on the specified placement list */ CreateObjectOnPlacement(splitShardCreationCommandList, @@ -1778,11 +1780,11 @@ CreateDummyShardsForShardGroup(HTAB *mapOfPlacementToDummyShardList, /* Log shard in pg_dist_cleanup. Given dummy shards are transient resources, * we want to cleanup irrespective of operation success or failure. */ - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - ConstructQualifiedShardName( - shardInterval), - sourceWorkerNode->groupId, - CLEANUP_ALWAYS); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, + ConstructQualifiedShardName( + shardInterval), + sourceWorkerNode->groupId, + CLEANUP_ALWAYS); /* Create dummy split child shard on source worker node */ CreateObjectOnPlacement(splitShardCreationCommandList, sourceWorkerNode); diff --git a/src/backend/distributed/operations/shard_transfer.c b/src/backend/distributed/operations/shard_transfer.c index 23925a3153a..737086752d6 100644 --- a/src/backend/distributed/operations/shard_transfer.c +++ b/src/backend/distributed/operations/shard_transfer.c @@ -9,27 +9,39 @@ *------------------------------------------------------------------------- */ +#include +#include + #include "postgres.h" + #include "fmgr.h" #include "miscadmin.h" -#include -#include - #include "access/htup_details.h" #include "catalog/pg_class.h" #include "catalog/pg_enum.h" +#include "lib/stringinfo.h" +#include "nodes/pg_list.h" +#include "storage/lmgr.h" +#include "storage/lock.h" +#include "utils/builtins.h" +#include "utils/elog.h" +#include "utils/errcodes.h" +#include "utils/lsyscache.h" +#include "utils/palloc.h" +#include "utils/rel.h" +#include "utils/syscache.h" + #include "distributed/adaptive_executor.h" #include "distributed/backend_data.h" #include "distributed/citus_ruleutils.h" #include "distributed/colocation_utils.h" #include "distributed/commands.h" #include "distributed/connection_management.h" +#include "distributed/coordinator_protocol.h" #include "distributed/deparse_shard_query.h" #include "distributed/distributed_planner.h" #include "distributed/listutils.h" -#include "distributed/shard_cleaner.h" -#include "distributed/coordinator_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" #include "distributed/multi_join_order.h" @@ -39,24 +51,13 @@ #include "distributed/reference_table_utils.h" #include "distributed/remote_commands.h" #include "distributed/resource_lock.h" +#include "distributed/shard_cleaner.h" #include "distributed/shard_rebalancer.h" #include "distributed/shard_split.h" #include "distributed/shard_transfer.h" #include "distributed/worker_manager.h" #include "distributed/worker_protocol.h" #include "distributed/worker_transaction.h" -#include "lib/stringinfo.h" -#include "nodes/pg_list.h" -#include "storage/lmgr.h" -#include "storage/lock.h" 
-#include "storage/lmgr.h" -#include "utils/builtins.h" -#include "utils/elog.h" -#include "utils/errcodes.h" -#include "utils/lsyscache.h" -#include "utils/palloc.h" -#include "utils/rel.h" -#include "utils/syscache.h" /* local type declarations */ @@ -293,6 +294,17 @@ citus_move_shard_placement(PG_FUNCTION_ARGS) CheckCitusVersion(ERROR); EnsureCoordinator(); + List *referenceTableIdList = NIL; + + if (HasNodesWithMissingReferenceTables(&referenceTableIdList)) + { + ereport(ERROR, (errmsg("there are missing reference tables on some nodes"), + errhint("Copy reference tables first with " + "replicate_reference_tables() or use " + "citus_rebalance_start() that will do it automatically." + ))); + } + int64 shardId = PG_GETARG_INT64(0); char *sourceNodeName = text_to_cstring(PG_GETARG_TEXT_P(1)); int32 sourceNodePort = PG_GETARG_INT32(2); @@ -592,10 +604,10 @@ InsertDeferredDropCleanupRecordsForShards(List *shardIntervalList) * We also log cleanup record in the current transaction. If the current transaction rolls back, * we do not generate a record at all. */ - InsertCleanupRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - qualifiedShardName, - placement->groupId, - CLEANUP_DEFERRED_ON_SUCCESS); + InsertCleanupOnSuccessRecordInCurrentTransaction( + CLEANUP_OBJECT_SHARD_PLACEMENT, + qualifiedShardName, + placement->groupId); } } } @@ -622,10 +634,9 @@ InsertCleanupRecordsForShardPlacementsOnNode(List *shardIntervalList, * We also log cleanup record in the current transaction. If the current transaction rolls back, * we do not generate a record at all. */ - InsertCleanupRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - qualifiedShardName, - groupId, - CLEANUP_DEFERRED_ON_SUCCESS); + InsertCleanupOnSuccessRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, + qualifiedShardName, + groupId); } } @@ -792,7 +803,12 @@ ShardListSizeInBytes(List *shardList, char *workerNodeName, uint32 /* we skip child tables of a partitioned table if this boolean variable is true */ bool optimizePartitionCalculations = true; + + /* we're interested in whole table, not a particular index */ + Oid indexId = InvalidOid; + StringInfo tableSizeQuery = GenerateSizeQueryOnMultiplePlacements(shardList, + indexId, TOTAL_RELATION_SIZE, optimizePartitionCalculations); @@ -1376,10 +1392,11 @@ CopyShardTablesViaLogicalReplication(List *shardIntervalList, char *sourceNodeNa char *tableOwner = TableOwner(shardInterval->relationId); /* drop the shard we created on the target, in case of failure */ - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - ConstructQualifiedShardName(shardInterval), - GroupForNode(targetNodeName, targetNodePort), - CLEANUP_ON_FAILURE); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, + ConstructQualifiedShardName(shardInterval), + GroupForNode(targetNodeName, + targetNodePort), + CLEANUP_ON_FAILURE); SendCommandListToWorkerOutsideTransaction(targetNodeName, targetNodePort, tableOwner, @@ -1449,10 +1466,11 @@ CopyShardTablesViaBlockWrites(List *shardIntervalList, char *sourceNodeName, char *tableOwner = TableOwner(shardInterval->relationId); /* drop the shard we created on the target, in case of failure */ - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - ConstructQualifiedShardName(shardInterval), - GroupForNode(targetNodeName, targetNodePort), - CLEANUP_ON_FAILURE); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, + ConstructQualifiedShardName(shardInterval), + GroupForNode(targetNodeName, + 
targetNodePort), + CLEANUP_ON_FAILURE); SendCommandListToWorkerOutsideTransaction(targetNodeName, targetNodePort, tableOwner, ddlCommandList); @@ -1939,11 +1957,7 @@ ConstructQualifiedShardName(ShardInterval *shardInterval) static List * RecreateTableDDLCommandList(Oid relationId) { - const char *relationName = get_rel_name(relationId); - Oid relationSchemaId = get_rel_namespace(relationId); - const char *relationSchemaName = get_namespace_name(relationSchemaId); - const char *qualifiedRelationName = quote_qualified_identifier(relationSchemaName, - relationName); + const char *qualifiedRelationName = generate_qualified_relation_name(relationId); StringInfo dropCommand = makeStringInfo(); @@ -2033,7 +2047,7 @@ UpdateColocatedShardPlacementMetadataOnWorkers(int64 shardId, StringInfo updateCommand = makeStringInfo(); appendStringInfo(updateCommand, - "SELECT citus_internal_update_placement_metadata(%ld, %d, %d)", + "SELECT citus_internal.update_placement_metadata(%ld, %d, %d)", colocatedShard->shardId, sourceGroupId, targetGroupId); SendCommandToWorkersWithMetadata(updateCommand->data); diff --git a/src/backend/distributed/operations/stage_protocol.c b/src/backend/distributed/operations/stage_protocol.c index 421593c662c..5770d648e21 100644 --- a/src/backend/distributed/operations/stage_protocol.c +++ b/src/backend/distributed/operations/stage_protocol.c @@ -15,31 +15,40 @@ */ #include "postgres.h" + #include "funcapi.h" -#include "miscadmin.h" #include "libpq-fe.h" +#include "miscadmin.h" #include "access/htup_details.h" #include "access/xact.h" -#include "commands/tablecmds.h" #include "catalog/indexing.h" #include "catalog/namespace.h" #include "catalog/partition.h" +#include "commands/tablecmds.h" +#include "storage/lmgr.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/inval.h" +#include "utils/lsyscache.h" +#include "utils/rel.h" +#include "utils/syscache.h" + +#include "distributed/adaptive_executor.h" #include "distributed/citus_ruleutils.h" #include "distributed/colocation_utils.h" #include "distributed/commands.h" -#include "distributed/adaptive_executor.h" #include "distributed/connection_management.h" +#include "distributed/coordinator_protocol.h" #include "distributed/deparse_shard_query.h" #include "distributed/distributed_planner.h" #include "distributed/foreign_key_relationship.h" #include "distributed/hash_helpers.h" #include "distributed/listutils.h" #include "distributed/lock_graph.h" -#include "distributed/multi_executor.h" -#include "distributed/metadata_utility.h" -#include "distributed/coordinator_protocol.h" #include "distributed/metadata_cache.h" +#include "distributed/metadata_utility.h" +#include "distributed/multi_executor.h" #include "distributed/multi_join_order.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/pg_dist_partition.h" @@ -50,16 +59,9 @@ #include "distributed/remote_commands.h" #include "distributed/resource_lock.h" #include "distributed/transaction_management.h" +#include "distributed/version_compat.h" #include "distributed/worker_manager.h" #include "distributed/worker_protocol.h" -#include "distributed/version_compat.h" -#include "storage/lmgr.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/inval.h" -#include "utils/lsyscache.h" -#include "utils/syscache.h" -#include "utils/rel.h" /* Local functions forward declarations */ diff --git a/src/backend/distributed/operations/worker_copy_table_to_node_udf.c 
b/src/backend/distributed/operations/worker_copy_table_to_node_udf.c index f0f83744de5..c603de72af0 100644 --- a/src/backend/distributed/operations/worker_copy_table_to_node_udf.c +++ b/src/backend/distributed/operations/worker_copy_table_to_node_udf.c @@ -15,6 +15,7 @@ #include "utils/builtins.h" #include "utils/lsyscache.h" + #include "distributed/citus_ruleutils.h" #include "distributed/metadata_cache.h" #include "distributed/multi_executor.h" diff --git a/src/backend/distributed/operations/worker_node_manager.c b/src/backend/distributed/operations/worker_node_manager.c index 76f2732bae2..ba622e4d7ab 100644 --- a/src/backend/distributed/operations/worker_node_manager.c +++ b/src/backend/distributed/operations/worker_node_manager.c @@ -12,16 +12,13 @@ */ #include "postgres.h" + #include "miscadmin.h" #include "commands/dbcommands.h" -#include "distributed/coordinator_protocol.h" -#include "distributed/hash_helpers.h" -#include "distributed/listutils.h" -#include "distributed/metadata_cache.h" -#include "distributed/worker_manager.h" -#include "libpq/hba.h" +#include "common/hashfn.h" #include "common/ip.h" +#include "libpq/hba.h" #include "libpq/libpq-be.h" #include "postmaster/postmaster.h" #include "storage/fd.h" @@ -31,7 +28,12 @@ #include "utils/guc.h" #include "utils/hsearch.h" #include "utils/memutils.h" -#include "common/hashfn.h" + +#include "distributed/coordinator_protocol.h" +#include "distributed/hash_helpers.h" +#include "distributed/listutils.h" +#include "distributed/metadata_cache.h" +#include "distributed/worker_manager.h" /* Config variables managed via guc.c */ @@ -180,7 +182,7 @@ ActivePrimaryNodeList(LOCKMODE lockMode) /* * ActivePrimaryRemoteNodeList returns a list of all active primary nodes in - * workerNodeHash. + * workerNodeHash except the local one. */ List * ActivePrimaryRemoteNodeList(LOCKMODE lockMode) diff --git a/src/backend/distributed/operations/worker_shard_copy.c b/src/backend/distributed/operations/worker_shard_copy.c index ba65635a7fb..f99c9b537f7 100644 --- a/src/backend/distributed/operations/worker_shard_copy.c +++ b/src/backend/distributed/operations/worker_shard_copy.c @@ -8,23 +8,26 @@ *------------------------------------------------------------------------- */ -#include "libpq-fe.h" #include "postgres.h" + +#include "libpq-fe.h" + #include "commands/copy.h" #include "nodes/makefuncs.h" #include "parser/parse_relation.h" -#include "utils/lsyscache.h" #include "utils/builtins.h" -#include "distributed/remote_commands.h" -#include "distributed/worker_shard_copy.h" +#include "utils/lsyscache.h" + #include "distributed/commands/multi_copy.h" -#include "distributed/local_multi_copy.h" -#include "distributed/worker_manager.h" #include "distributed/connection_management.h" -#include "distributed/relation_utils.h" -#include "distributed/version_compat.h" #include "distributed/local_executor.h" +#include "distributed/local_multi_copy.h" +#include "distributed/relation_utils.h" +#include "distributed/remote_commands.h" #include "distributed/replication_origin_session_utils.h" +#include "distributed/version_compat.h" +#include "distributed/worker_manager.h" +#include "distributed/worker_shard_copy.h" /* * LocalCopyBuffer is used in copy callback to return the copied rows. 
diff --git a/src/backend/distributed/operations/worker_split_copy_udf.c b/src/backend/distributed/operations/worker_split_copy_udf.c index 18fdbfc4a12..03354ea047b 100644 --- a/src/backend/distributed/operations/worker_split_copy_udf.c +++ b/src/backend/distributed/operations/worker_split_copy_udf.c @@ -8,7 +8,13 @@ */ #include "postgres.h" + +#include "utils/array.h" +#include "utils/builtins.h" +#include "utils/lsyscache.h" + #include "pg_version_compat.h" + #include "distributed/citus_ruleutils.h" #include "distributed/distribution_column.h" #include "distributed/intermediate_results.h" @@ -16,9 +22,6 @@ #include "distributed/multi_executor.h" #include "distributed/utils/array_type.h" #include "distributed/worker_shard_copy.h" -#include "utils/lsyscache.h" -#include "utils/array.h" -#include "utils/builtins.h" PG_FUNCTION_INFO_V1(worker_split_copy); diff --git a/src/backend/distributed/operations/worker_split_shard_release_dsm_udf.c b/src/backend/distributed/operations/worker_split_shard_release_dsm_udf.c index 94ce40cdb75..7f3f3ff7a3b 100644 --- a/src/backend/distributed/operations/worker_split_shard_release_dsm_udf.c +++ b/src/backend/distributed/operations/worker_split_shard_release_dsm_udf.c @@ -9,6 +9,7 @@ *------------------------------------------------------------------------- */ #include "postgres.h" + #include "distributed/shardinterval_utils.h" #include "distributed/shardsplit_shared_memory.h" diff --git a/src/backend/distributed/operations/worker_split_shard_replication_setup_udf.c b/src/backend/distributed/operations/worker_split_shard_replication_setup_udf.c index 4d116dfa13a..d4775995c17 100644 --- a/src/backend/distributed/operations/worker_split_shard_replication_setup_udf.c +++ b/src/backend/distributed/operations/worker_split_shard_replication_setup_udf.c @@ -9,24 +9,27 @@ *------------------------------------------------------------------------- */ #include "postgres.h" + #include "miscadmin.h" -#include "postmaster/postmaster.h" + +#include "commands/dbcommands.h" #include "common/hashfn.h" +#include "postmaster/postmaster.h" +#include "utils/builtins.h" +#include "utils/lsyscache.h" + +#include "distributed/citus_safe_lib.h" +#include "distributed/connection_management.h" #include "distributed/distribution_column.h" #include "distributed/hash_helpers.h" -#include "distributed/shardinterval_utils.h" +#include "distributed/listutils.h" +#include "distributed/remote_commands.h" #include "distributed/shard_cleaner.h" #include "distributed/shard_utils.h" +#include "distributed/shardinterval_utils.h" +#include "distributed/shardsplit_logical_replication.h" #include "distributed/shardsplit_shared_memory.h" -#include "distributed/connection_management.h" -#include "distributed/citus_safe_lib.h" -#include "distributed/listutils.h" -#include "distributed/remote_commands.h" #include "distributed/tuplestore.h" -#include "distributed/shardsplit_logical_replication.h" -#include "utils/builtins.h" -#include "utils/lsyscache.h" -#include "commands/dbcommands.h" /* declarations for dynamic loading */ diff --git a/src/backend/distributed/planner/combine_query_planner.c b/src/backend/distributed/planner/combine_query_planner.c index e61ff8daf08..e3aa7b3e637 100644 --- a/src/backend/distributed/planner/combine_query_planner.c +++ b/src/backend/distributed/planner/combine_query_planner.c @@ -11,21 +11,22 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" - #include "catalog/pg_type.h" -#include "distributed/citus_ruleutils.h" -#include 
"distributed/insert_select_planner.h" -#include "distributed/listutils.h" -#include "distributed/metadata_cache.h" -#include "distributed/combine_query_planner.h" -#include "distributed/multi_physical_planner.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" #include "optimizer/clauses.h" #include "optimizer/planner.h" #include "rewrite/rewriteManip.h" +#include "pg_version_constants.h" + +#include "distributed/citus_ruleutils.h" +#include "distributed/combine_query_planner.h" +#include "distributed/insert_select_planner.h" +#include "distributed/listutils.h" +#include "distributed/metadata_cache.h" +#include "distributed/multi_physical_planner.h" + static List * RemoteScanTargetList(List *workerTargetList); static PlannedStmt * BuildSelectStatementViaStdPlanner(Query *combineQuery, List *remoteScanTargetList, diff --git a/src/backend/distributed/planner/cte_inline.c b/src/backend/distributed/planner/cte_inline.c index ce258916d2b..d6f88525cf4 100644 --- a/src/backend/distributed/planner/cte_inline.c +++ b/src/backend/distributed/planner/cte_inline.c @@ -12,14 +12,16 @@ *------------------------------------------------------------------------- */ #include "postgres.h" -#include "pg_version_compat.h" -#include "distributed/pg_version_constants.h" -#include "distributed/cte_inline.h" #include "nodes/nodeFuncs.h" #include "optimizer/optimizer.h" #include "rewrite/rewriteManip.h" +#include "pg_version_compat.h" +#include "pg_version_constants.h" + +#include "distributed/cte_inline.h" + typedef struct inline_cte_walker_context { const char *ctename; /* name and relative level of target CTE */ diff --git a/src/backend/distributed/planner/deparse_shard_query.c b/src/backend/distributed/planner/deparse_shard_query.c index ac37b139975..43b5f14933a 100644 --- a/src/backend/distributed/planner/deparse_shard_query.c +++ b/src/backend/distributed/planner/deparse_shard_query.c @@ -10,11 +10,24 @@ */ #include "postgres.h" + #include "c.h" #include "access/heapam.h" #include "access/htup_details.h" #include "catalog/pg_constraint.h" +#include "lib/stringinfo.h" +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" +#include "nodes/nodes.h" +#include "nodes/parsenodes.h" +#include "nodes/pg_list.h" +#include "parser/parsetree.h" +#include "storage/lock.h" +#include "utils/lsyscache.h" +#include "utils/rel.h" +#include "utils/syscache.h" + #include "distributed/citus_nodefuncs.h" #include "distributed/citus_ruleutils.h" #include "distributed/combine_query_planner.h" @@ -28,17 +41,6 @@ #include "distributed/shard_utils.h" #include "distributed/utils/citus_stat_tenants.h" #include "distributed/version_compat.h" -#include "lib/stringinfo.h" -#include "nodes/makefuncs.h" -#include "nodes/nodeFuncs.h" -#include "nodes/nodes.h" -#include "nodes/parsenodes.h" -#include "nodes/pg_list.h" -#include "parser/parsetree.h" -#include "storage/lock.h" -#include "utils/lsyscache.h" -#include "utils/rel.h" -#include "utils/syscache.h" static void UpdateTaskQueryString(Query *query, Task *task); diff --git a/src/backend/distributed/planner/distributed_planner.c b/src/backend/distributed/planner/distributed_planner.c index 65278d1ea4d..1d6550afdb5 100644 --- a/src/backend/distributed/planner/distributed_planner.c +++ b/src/backend/distributed/planner/distributed_planner.c @@ -7,70 +7,72 @@ *------------------------------------------------------------------------- */ -#include "postgres.h" +#include +#include -#include "distributed/pg_version_constants.h" +#include "postgres.h" #include "funcapi.h" -#include 
-#include - #include "access/htup_details.h" #include "access/xact.h" #include "catalog/pg_class.h" #include "catalog/pg_proc.h" #include "catalog/pg_type.h" +#include "executor/executor.h" +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" +#include "nodes/pg_list.h" +#include "optimizer/optimizer.h" +#include "optimizer/pathnode.h" +#include "optimizer/plancat.h" +#include "optimizer/planmain.h" +#include "optimizer/planner.h" +#include "parser/parse_type.h" +#include "parser/parsetree.h" +#include "utils/builtins.h" +#include "utils/datum.h" +#include "utils/lsyscache.h" +#include "utils/memutils.h" +#include "utils/syscache.h" + +#include "pg_version_constants.h" + #include "distributed/citus_depended_object.h" #include "distributed/citus_nodefuncs.h" #include "distributed/citus_nodes.h" #include "distributed/citus_ruleutils.h" #include "distributed/colocation_utils.h" +#include "distributed/combine_query_planner.h" #include "distributed/commands.h" +#include "distributed/coordinator_protocol.h" #include "distributed/cte_inline.h" +#include "distributed/distributed_planner.h" #include "distributed/function_call_delegation.h" #include "distributed/insert_select_planner.h" #include "distributed/intermediate_result_pruning.h" #include "distributed/intermediate_results.h" #include "distributed/listutils.h" -#include "distributed/coordinator_protocol.h" #include "distributed/merge_planner.h" #include "distributed/metadata_cache.h" #include "distributed/multi_executor.h" -#include "distributed/distributed_planner.h" -#include "distributed/query_pushdown_planning.h" #include "distributed/multi_logical_optimizer.h" #include "distributed/multi_logical_planner.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/multi_physical_planner.h" -#include "distributed/combine_query_planner.h" #include "distributed/multi_router_planner.h" +#include "distributed/query_pushdown_planning.h" #include "distributed/query_utils.h" #include "distributed/recursive_planning.h" -#include "distributed/shardinterval_utils.h" #include "distributed/shard_utils.h" +#include "distributed/shardinterval_utils.h" #include "distributed/utils/citus_stat_tenants.h" #include "distributed/version_compat.h" #include "distributed/worker_shard_visibility.h" -#include "executor/executor.h" -#include "nodes/makefuncs.h" -#include "nodes/nodeFuncs.h" -#include "nodes/pg_list.h" + #if PG_VERSION_NUM >= PG_VERSION_16 #include "parser/parse_relation.h" #endif -#include "parser/parsetree.h" -#include "parser/parse_type.h" -#include "optimizer/optimizer.h" -#include "optimizer/plancat.h" -#include "optimizer/pathnode.h" -#include "optimizer/planner.h" -#include "optimizer/planmain.h" -#include "utils/builtins.h" -#include "utils/datum.h" -#include "utils/lsyscache.h" -#include "utils/memutils.h" -#include "utils/syscache.h" /* RouterPlanType is used to determine the router plan to invoke */ @@ -702,6 +704,7 @@ DissuadePlannerFromUsingPlan(PlannedStmt *plan) * Arbitrarily high cost, but low enough that it can be added up * without overflowing by choose_custom_plan(). 
*/ + Assert(plan != NULL); plan->planTree->total_cost = FLT_MAX / 100000000; } diff --git a/src/backend/distributed/planner/extended_op_node_utils.c b/src/backend/distributed/planner/extended_op_node_utils.c index 0a2a8b8348b..7912de1d96c 100644 --- a/src/backend/distributed/planner/extended_op_node_utils.c +++ b/src/backend/distributed/planner/extended_op_node_utils.c @@ -9,17 +9,19 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" + +#include "nodes/nodeFuncs.h" +#include "nodes/pg_list.h" +#include "optimizer/optimizer.h" +#include "optimizer/restrictinfo.h" + +#include "pg_version_constants.h" #include "distributed/extended_op_node_utils.h" #include "distributed/listutils.h" #include "distributed/metadata_cache.h" #include "distributed/multi_logical_optimizer.h" #include "distributed/pg_dist_partition.h" -#include "optimizer/optimizer.h" -#include "optimizer/restrictinfo.h" -#include "nodes/nodeFuncs.h" -#include "nodes/pg_list.h" static bool GroupedByPartitionColumn(MultiNode *node, MultiExtendedOp *opNode); diff --git a/src/backend/distributed/planner/fast_path_router_planner.c b/src/backend/distributed/planner/fast_path_router_planner.c index 933ee742520..59f80bb403e 100644 --- a/src/backend/distributed/planner/fast_path_router_planner.c +++ b/src/backend/distributed/planner/fast_path_router_planner.c @@ -34,22 +34,23 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" +#include "nodes/parsenodes.h" +#include "nodes/pg_list.h" +#include "optimizer/optimizer.h" +#include "tcop/pquery.h" + +#include "pg_version_constants.h" #include "distributed/distributed_planner.h" #include "distributed/insert_select_planner.h" -#include "distributed/multi_physical_planner.h" /* only to use some utility functions */ #include "distributed/metadata_cache.h" +#include "distributed/multi_physical_planner.h" /* only to use some utility functions */ #include "distributed/multi_router_planner.h" #include "distributed/pg_dist_partition.h" -#include "distributed/shardinterval_utils.h" #include "distributed/shard_pruning.h" -#include "nodes/makefuncs.h" -#include "nodes/nodeFuncs.h" -#include "nodes/parsenodes.h" -#include "nodes/pg_list.h" -#include "optimizer/optimizer.h" -#include "tcop/pquery.h" +#include "distributed/shardinterval_utils.h" bool EnableFastPathRouterPlanner = true; @@ -154,7 +155,7 @@ GeneratePlaceHolderPlannedStmt(Query *parse) * being a fast path router query. * The requirements for the fast path query can be listed below: * - * - SELECT query without CTES, sublinks-subqueries, set operations + * - SELECT/UPDATE/DELETE query without CTES, sublinks-subqueries, set operations * - The query should touch only a single hash distributed or reference table * - The distribution with equality operator should be in the WHERE clause * and it should be ANDed with any other filters. Also, the distribution @@ -251,7 +252,7 @@ FastPathRouterQuery(Query *query, Node **distributionKeyValue) /* * Distribution column must be used in a simple equality match check and it must be - * place at top level conjustion operator. In simple words, we should have + * place at top level conjunction operator. In simple words, we should have * WHERE dist_key = VALUE [AND ....]; * * We're also not allowing any other appearances of the distribution key in the quals. 
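For reference, a query shape that meets the fast-path requirements listed in the comment above would look roughly like the sketch below (an illustration only, not part of this patch; the table "orders", its distribution column "customer_id", and the literal values are hypothetical):

    -- assuming a hash-distributed table, e.g. created with
    -- SELECT create_distributed_table('orders', 'customer_id');
    SELECT * FROM orders
    WHERE customer_id = 42        -- equality filter on the distribution column ...
      AND status = 'shipped';     -- ... ANDed with any other filters

    UPDATE orders SET status = 'delivered'
    WHERE customer_id = 42;       -- single-table UPDATE/DELETE queries qualify as well

Queries containing CTEs, sublinks/subqueries, set operations, or touching more than one distributed table do not take the fast path and fall through to the regular router planning logic instead.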
diff --git a/src/backend/distributed/planner/function_call_delegation.c b/src/backend/distributed/planner/function_call_delegation.c index 2f8da29c028..4a79dc25aac 100644 --- a/src/backend/distributed/planner/function_call_delegation.c +++ b/src/backend/distributed/planner/function_call_delegation.c @@ -12,44 +12,46 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "miscadmin.h" #include "catalog/pg_proc.h" #include "catalog/pg_type.h" #include "commands/defrem.h" +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" +#include "nodes/parsenodes.h" +#include "nodes/primnodes.h" +#include "nodes/print.h" +#include "optimizer/clauses.h" +#include "parser/parse_coerce.h" +#include "parser/parsetree.h" +#include "tcop/dest.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" + +#include "pg_version_constants.h" + #include "distributed/backend_data.h" -#include "distributed/metadata_utility.h" +#include "distributed/citus_custom_scan.h" #include "distributed/citus_ruleutils.h" #include "distributed/colocation_utils.h" #include "distributed/commands.h" #include "distributed/commands/multi_copy.h" #include "distributed/connection_management.h" +#include "distributed/coordinator_protocol.h" #include "distributed/deparse_shard_query.h" #include "distributed/function_call_delegation.h" #include "distributed/insert_select_planner.h" -#include "distributed/citus_custom_scan.h" -#include "distributed/coordinator_protocol.h" #include "distributed/listutils.h" #include "distributed/metadata_cache.h" +#include "distributed/metadata_utility.h" #include "distributed/multi_executor.h" #include "distributed/multi_physical_planner.h" +#include "distributed/recursive_planning.h" #include "distributed/remote_commands.h" #include "distributed/shard_pruning.h" -#include "distributed/recursive_planning.h" #include "distributed/version_compat.h" #include "distributed/worker_manager.h" -#include "nodes/makefuncs.h" -#include "nodes/nodeFuncs.h" -#include "nodes/parsenodes.h" -#include "nodes/primnodes.h" -#include "nodes/print.h" -#include "optimizer/clauses.h" -#include "parser/parse_coerce.h" -#include "parser/parsetree.h" -#include "miscadmin.h" -#include "tcop/dest.h" -#include "utils/lsyscache.h" -#include "utils/syscache.h" struct ParamWalkerContext { @@ -89,6 +91,10 @@ bool InDelegatedFunctionCall = false; static bool contain_param_walker(Node *node, void *context) { + if (node == NULL) + { + return false; + } if (IsA(node, Param)) { Param *paramNode = (Param *) node; @@ -525,8 +531,16 @@ ShardPlacementForFunctionColocatedWithDistTable(DistObjectCacheEntry *procedure, if (partitionParam->paramkind == PARAM_EXTERN) { - /* Don't log a message, we should end up here again without a parameter */ - DissuadePlannerFromUsingPlan(plan); + /* + * Don't log a message, we should end up here again without a + * parameter. + * Note that "plan" can be null, for example when a CALL statement + * is prepared. 
+ */ + if (plan) + { + DissuadePlannerFromUsingPlan(plan); + } return NULL; } } diff --git a/src/backend/distributed/planner/insert_select_planner.c b/src/backend/distributed/planner/insert_select_planner.c index 1b7f468f896..60d6ce466ca 100644 --- a/src/backend/distributed/planner/insert_select_planner.c +++ b/src/backend/distributed/planner/insert_select_planner.c @@ -10,22 +10,39 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" - #include "catalog/pg_class.h" #include "catalog/pg_type.h" +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" +#include "nodes/parsenodes.h" +#include "nodes/print.h" +#include "optimizer/clauses.h" +#include "optimizer/optimizer.h" +#include "optimizer/planner.h" +#include "optimizer/restrictinfo.h" +#include "optimizer/tlist.h" +#include "parser/parse_coerce.h" +#include "parser/parse_relation.h" +#include "parser/parsetree.h" +#include "tcop/tcopprot.h" +#include "utils/builtins.h" +#include "utils/lsyscache.h" +#include "utils/rel.h" + +#include "pg_version_constants.h" + #include "distributed/citus_clauses.h" #include "distributed/citus_ruleutils.h" #include "distributed/colocation_utils.h" #include "distributed/errormessage.h" -#include "distributed/listutils.h" -#include "distributed/log_utils.h" #include "distributed/insert_select_executor.h" #include "distributed/insert_select_planner.h" +#include "distributed/listutils.h" +#include "distributed/log_utils.h" #include "distributed/metadata_cache.h" #include "distributed/multi_executor.h" -#include "distributed/multi_logical_planner.h" #include "distributed/multi_logical_optimizer.h" +#include "distributed/multi_logical_planner.h" #include "distributed/multi_physical_planner.h" #include "distributed/multi_router_planner.h" #include "distributed/pg_dist_partition.h" @@ -34,22 +51,6 @@ #include "distributed/repartition_executor.h" #include "distributed/resource_lock.h" #include "distributed/version_compat.h" -#include "nodes/makefuncs.h" -#include "nodes/nodeFuncs.h" -#include "nodes/parsenodes.h" -#include "optimizer/clauses.h" -#include "optimizer/planner.h" -#include "optimizer/restrictinfo.h" -#include "optimizer/tlist.h" -#include "optimizer/optimizer.h" -#include "parser/parsetree.h" -#include "parser/parse_coerce.h" -#include "parser/parse_relation.h" -#include "tcop/tcopprot.h" -#include "utils/builtins.h" -#include "utils/lsyscache.h" -#include "utils/rel.h" -#include static void PrepareInsertSelectForCitusPlanner(Query *insertSelectQuery); diff --git a/src/backend/distributed/planner/intermediate_result_pruning.c b/src/backend/distributed/planner/intermediate_result_pruning.c index cefbfb833aa..5c9ee6c4331 100644 --- a/src/backend/distributed/planner/intermediate_result_pruning.c +++ b/src/backend/distributed/planner/intermediate_result_pruning.c @@ -11,6 +11,10 @@ * *------------------------------------------------------------------------- */ +#include "postgres.h" + +#include "common/hashfn.h" +#include "utils/builtins.h" #include "distributed/citus_custom_scan.h" #include "distributed/citus_ruleutils.h" @@ -20,8 +24,6 @@ #include "distributed/metadata_cache.h" #include "distributed/query_utils.h" #include "distributed/worker_manager.h" -#include "utils/builtins.h" -#include "common/hashfn.h" /* controlled via GUC, used mostly for testing */ bool LogIntermediateResults = false; diff --git a/src/backend/distributed/planner/local_distributed_join_planner.c b/src/backend/distributed/planner/local_distributed_join_planner.c index d93921966c5..a6502bf43c4 100644 --- 
a/src/backend/distributed/planner/local_distributed_join_planner.c +++ b/src/backend/distributed/planner/local_distributed_join_planner.c @@ -71,54 +71,52 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" - #include "funcapi.h" -#include "catalog/pg_type.h" #include "catalog/pg_class.h" #include "catalog/pg_index.h" +#include "catalog/pg_type.h" +#include "lib/stringinfo.h" +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" +#include "nodes/nodes.h" +#include "nodes/pathnodes.h" +#include "nodes/pg_list.h" +#include "nodes/primnodes.h" +#include "optimizer/clauses.h" +#include "optimizer/optimizer.h" +#include "optimizer/planner.h" +#include "optimizer/prep.h" +#include "parser/parse_relation.h" +#include "parser/parsetree.h" +#include "utils/builtins.h" +#include "utils/guc.h" +#include "utils/lsyscache.h" + +#include "pg_version_constants.h" + #include "distributed/citus_nodes.h" #include "distributed/citus_ruleutils.h" #include "distributed/commands.h" #include "distributed/commands/multi_copy.h" +#include "distributed/coordinator_protocol.h" #include "distributed/distributed_planner.h" #include "distributed/errormessage.h" -#include "distributed/local_distributed_join_planner.h" #include "distributed/listutils.h" +#include "distributed/local_distributed_join_planner.h" #include "distributed/log_utils.h" #include "distributed/metadata_cache.h" -#include "distributed/multi_logical_planner.h" #include "distributed/multi_logical_optimizer.h" -#include "distributed/multi_router_planner.h" +#include "distributed/multi_logical_planner.h" #include "distributed/multi_physical_planner.h" -#include "distributed/multi_server_executor.h" #include "distributed/multi_router_planner.h" -#include "distributed/coordinator_protocol.h" +#include "distributed/multi_server_executor.h" #include "distributed/query_colocation_checker.h" #include "distributed/query_pushdown_planning.h" #include "distributed/recursive_planning.h" #include "distributed/relation_restriction_equivalence.h" -#include "distributed/log_utils.h" #include "distributed/shard_pruning.h" #include "distributed/version_compat.h" -#include "lib/stringinfo.h" -#include "optimizer/clauses.h" -#include "optimizer/optimizer.h" -#include "optimizer/planner.h" -#include "optimizer/prep.h" -#include "parser/parse_relation.h" -#include "parser/parsetree.h" -#include "nodes/makefuncs.h" -#include "nodes/nodeFuncs.h" -#include "nodes/nodes.h" -#include "nodes/nodeFuncs.h" -#include "nodes/pg_list.h" -#include "nodes/primnodes.h" -#include "nodes/pathnodes.h" -#include "utils/builtins.h" -#include "utils/guc.h" -#include "utils/lsyscache.h" #define INVALID_RTE_IDENTITY -1 diff --git a/src/backend/distributed/planner/local_plan_cache.c b/src/backend/distributed/planner/local_plan_cache.c index 946d9fc469f..2e5ca4e550a 100644 --- a/src/backend/distributed/planner/local_plan_cache.c +++ b/src/backend/distributed/planner/local_plan_cache.c @@ -9,19 +9,20 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "optimizer/clauses.h" +#include "optimizer/optimizer.h" + +#include "pg_version_constants.h" +#include "distributed/citus_ruleutils.h" +#include "distributed/deparse_shard_query.h" +#include "distributed/insert_select_planner.h" #include "distributed/listutils.h" #include "distributed/local_executor.h" #include "distributed/local_plan_cache.h" -#include "distributed/deparse_shard_query.h" -#include "distributed/citus_ruleutils.h" -#include "distributed/insert_select_planner.h" #include 
"distributed/metadata_cache.h" #include "distributed/multi_executor.h" #include "distributed/version_compat.h" -#include "optimizer/optimizer.h" -#include "optimizer/clauses.h" static Query * GetLocalShardQueryForCache(Query *jobQuery, Task *task, diff --git a/src/backend/distributed/planner/merge_planner.c b/src/backend/distributed/planner/merge_planner.c index 3cadea23a78..09d2d90acec 100644 --- a/src/backend/distributed/planner/merge_planner.c +++ b/src/backend/distributed/planner/merge_planner.c @@ -12,6 +12,7 @@ #include #include "postgres.h" + #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" #include "optimizer/optimizer.h" @@ -20,6 +21,8 @@ #include "tcop/tcopprot.h" #include "utils/lsyscache.h" +#include "pg_version_constants.h" + #include "distributed/citus_clauses.h" #include "distributed/citus_custom_scan.h" #include "distributed/insert_select_planner.h" @@ -29,12 +32,11 @@ #include "distributed/multi_logical_optimizer.h" #include "distributed/multi_router_planner.h" #include "distributed/pg_dist_node_metadata.h" -#include "distributed/pg_version_constants.h" -#include "distributed/query_pushdown_planning.h" #include "distributed/query_colocation_checker.h" +#include "distributed/query_pushdown_planning.h" #include "distributed/repartition_executor.h" -#include "distributed/shared_library_init.h" #include "distributed/shard_pruning.h" +#include "distributed/shared_library_init.h" #if PG_VERSION_NUM >= PG_VERSION_15 @@ -180,14 +182,6 @@ CreateRouterMergePlan(Oid targetRelationId, Query *originalQuery, Query *query, return distributedPlan; } - Var *insertVar = - FetchAndValidateInsertVarIfExists(targetRelationId, originalQuery); - if (insertVar && - !IsDistributionColumnInMergeSource((Expr *) insertVar, originalQuery, true)) - { - ereport(ERROR, (errmsg("MERGE INSERT must use the source table " - "distribution column value"))); - } Job *job = RouterJob(originalQuery, plannerRestrictionContext, &distributedPlan->planningError); @@ -1122,6 +1116,27 @@ DeferErrorIfRoutableMergeNotSupported(Query *query, List *rangeTableList, "repartitioning"))); return deferredError; } + + + /* + * If execution has reached this point, it indicates that the query can be delegated to the worker. + * However, before proceeding with this delegation, we need to confirm that the user is utilizing + * the distribution column of the source table in the Insert variable. + * If this is not the case, we should refrain from pushing down the query. + * This is just a deffered error which will be handle by caller. + */ + + Var *insertVar = + FetchAndValidateInsertVarIfExists(targetRelationId, query); + if (insertVar && + !IsDistributionColumnInMergeSource((Expr *) insertVar, query, true)) + { + ereport(DEBUG1, (errmsg( + "MERGE INSERT must use the source table distribution column value for push down to workers. Otherwise, repartitioning will be applied"))); + return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, + "MERGE INSERT must use the source table distribution column value for push down to workers. 
Otherwise, repartitioning will be applied", + NULL, NULL); + } return NULL; } diff --git a/src/backend/distributed/planner/multi_explain.c b/src/backend/distributed/planner/multi_explain.c index 94d125f41af..4584e774024 100644 --- a/src/backend/distributed/planner/multi_explain.c +++ b/src/backend/distributed/planner/multi_explain.c @@ -8,11 +8,11 @@ */ #include "postgres.h" + +#include "fmgr.h" #include "libpq-fe.h" #include "miscadmin.h" -#include "distributed/pg_version_constants.h" - #include "access/htup_details.h" #include "access/xact.h" #include "catalog/namespace.h" @@ -24,52 +24,54 @@ #include "commands/dbcommands.h" #include "commands/explain.h" #include "commands/tablecmds.h" +#include "executor/tstoreReceiver.h" +#include "lib/stringinfo.h" +#include "nodes/plannodes.h" +#include "nodes/primnodes.h" +#include "nodes/print.h" +#include "optimizer/clauses.h" #include "optimizer/cost.h" +#include "optimizer/planner.h" +#include "parser/analyze.h" +#include "portability/instr_time.h" +#include "rewrite/rewriteHandler.h" +#include "tcop/dest.h" +#include "tcop/tcopprot.h" +#include "tcop/utility.h" +#include "utils/builtins.h" +#include "utils/json.h" +#include "utils/lsyscache.h" +#include "utils/snapmgr.h" + +#include "pg_version_constants.h" + #include "distributed/citus_depended_object.h" #include "distributed/citus_nodefuncs.h" +#include "distributed/combine_query_planner.h" +#include "distributed/commands/utility_hook.h" #include "distributed/connection_management.h" #include "distributed/deparse_shard_query.h" +#include "distributed/distributed_planner.h" #include "distributed/executor_util.h" -#include "distributed/insert_select_planner.h" #include "distributed/insert_select_executor.h" +#include "distributed/insert_select_planner.h" +#include "distributed/jsonbutils.h" #include "distributed/listutils.h" #include "distributed/merge_planner.h" #include "distributed/multi_executor.h" #include "distributed/multi_explain.h" #include "distributed/multi_logical_optimizer.h" #include "distributed/multi_logical_planner.h" -#include "distributed/combine_query_planner.h" #include "distributed/multi_physical_planner.h" #include "distributed/multi_router_planner.h" -#include "distributed/distributed_planner.h" #include "distributed/multi_server_executor.h" -#include "distributed/remote_commands.h" -#include "distributed/recursive_planning.h" #include "distributed/placement_connection.h" +#include "distributed/recursive_planning.h" +#include "distributed/remote_commands.h" #include "distributed/tuple_destination.h" #include "distributed/tuplestore.h" -#include "distributed/worker_protocol.h" #include "distributed/version_compat.h" -#include "distributed/jsonbutils.h" -#include "distributed/commands/utility_hook.h" -#include "executor/tstoreReceiver.h" -#include "fmgr.h" -#include "lib/stringinfo.h" -#include "nodes/plannodes.h" -#include "nodes/primnodes.h" -#include "nodes/print.h" -#include "optimizer/clauses.h" -#include "optimizer/planner.h" -#include "parser/analyze.h" -#include "portability/instr_time.h" -#include "rewrite/rewriteHandler.h" -#include "tcop/dest.h" -#include "tcop/tcopprot.h" -#include "tcop/utility.h" -#include "utils/builtins.h" -#include "utils/json.h" -#include "utils/lsyscache.h" -#include "utils/snapmgr.h" +#include "distributed/worker_protocol.h" /* Config variables that enable printing distributed query plans */ @@ -195,9 +197,7 @@ CitusExplainScan(CustomScanState *node, List *ancestors, struct ExplainState *es if (!ExplainDistributedQueries) { - 
appendStringInfoSpaces(es->str, es->indent * 2); - appendStringInfo(es->str, "explain statements for distributed queries "); - appendStringInfo(es->str, "are not enabled\n"); + ExplainPropertyBool("citus.explain_distributed_queries", false, es); return; } diff --git a/src/backend/distributed/planner/multi_join_order.c b/src/backend/distributed/planner/multi_join_order.c index 7714a1e0870..908ed206ea6 100644 --- a/src/backend/distributed/planner/multi_join_order.c +++ b/src/backend/distributed/planner/multi_join_order.c @@ -11,32 +11,32 @@ *------------------------------------------------------------------------- */ -#include "postgres.h" - -#include "distributed/pg_version_constants.h" - #include -#include "access/nbtree.h" +#include "postgres.h" + #include "access/heapam.h" #include "access/htup_details.h" +#include "access/nbtree.h" #include "catalog/pg_am.h" -#include "distributed/listutils.h" -#include "distributed/metadata_cache.h" -#include "distributed/multi_join_order.h" -#include "distributed/multi_physical_planner.h" -#include "distributed/pg_dist_partition.h" -#include "distributed/worker_protocol.h" #include "lib/stringinfo.h" -#include "optimizer/optimizer.h" -#include "utils/builtins.h" #include "nodes/nodeFuncs.h" +#include "optimizer/optimizer.h" #include "utils/builtins.h" #include "utils/datum.h" #include "utils/lsyscache.h" #include "utils/rel.h" #include "utils/syscache.h" +#include "pg_version_constants.h" + +#include "distributed/listutils.h" +#include "distributed/metadata_cache.h" +#include "distributed/multi_join_order.h" +#include "distributed/multi_physical_planner.h" +#include "distributed/pg_dist_partition.h" +#include "distributed/worker_protocol.h" + /* Config variables managed via guc.c */ bool LogMultiJoinOrder = false; /* print join order as a debugging aid */ diff --git a/src/backend/distributed/planner/multi_logical_optimizer.c b/src/backend/distributed/planner/multi_logical_optimizer.c index 455f050a011..76e38237ad6 100644 --- a/src/backend/distributed/planner/multi_logical_optimizer.c +++ b/src/backend/distributed/planner/multi_logical_optimizer.c @@ -11,12 +11,10 @@ *------------------------------------------------------------------------- */ -#include "postgres.h" - -#include "distributed/pg_version_constants.h" - #include +#include "postgres.h" + #include "access/genam.h" #include "access/heapam.h" #include "access/htup_details.h" @@ -27,6 +25,23 @@ #include "catalog/pg_proc.h" #include "catalog/pg_type.h" #include "commands/extension.h" +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" +#include "optimizer/clauses.h" +#include "optimizer/optimizer.h" +#include "optimizer/tlist.h" +#include "parser/parse_agg.h" +#include "parser/parse_coerce.h" +#include "parser/parse_oper.h" +#include "parser/parsetree.h" +#include "rewrite/rewriteManip.h" +#include "utils/fmgroids.h" +#include "utils/lsyscache.h" +#include "utils/rel.h" +#include "utils/syscache.h" + +#include "pg_version_constants.h" + #include "distributed/citus_nodes.h" #include "distributed/citus_ruleutils.h" #include "distributed/colocation_utils.h" @@ -42,22 +57,8 @@ #include "distributed/query_pushdown_planning.h" #include "distributed/string_utils.h" #include "distributed/tdigest_extension.h" -#include "distributed/worker_protocol.h" #include "distributed/version_compat.h" -#include "nodes/makefuncs.h" -#include "nodes/nodeFuncs.h" -#include "optimizer/clauses.h" -#include "optimizer/tlist.h" -#include "optimizer/optimizer.h" -#include "parser/parse_agg.h" -#include 
"parser/parse_coerce.h" -#include "parser/parse_oper.h" -#include "parser/parsetree.h" -#include "rewrite/rewriteManip.h" -#include "utils/fmgroids.h" -#include "utils/lsyscache.h" -#include "utils/rel.h" -#include "utils/syscache.h" +#include "distributed/worker_protocol.h" /* Config variable managed via guc.c */ int LimitClauseRowFetchCount = -1; /* number of rows to fetch from each task */ diff --git a/src/backend/distributed/planner/multi_logical_planner.c b/src/backend/distributed/planner/multi_logical_planner.c index 0969e0c7c2e..5201195c715 100644 --- a/src/backend/distributed/planner/multi_logical_planner.c +++ b/src/backend/distributed/planner/multi_logical_planner.c @@ -14,42 +14,43 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" - #include "access/heapam.h" #include "access/nbtree.h" #include "catalog/pg_am.h" #include "catalog/pg_class.h" #include "commands/defrem.h" -#include "distributed/citus_clauses.h" -#include "distributed/colocation_utils.h" -#include "distributed/metadata_cache.h" -#include "distributed/insert_select_planner.h" -#include "distributed/listutils.h" -#include "distributed/multi_logical_optimizer.h" -#include "distributed/multi_logical_planner.h" -#include "distributed/multi_physical_planner.h" -#include "distributed/reference_table_utils.h" -#include "distributed/relation_restriction_equivalence.h" -#include "distributed/query_pushdown_planning.h" -#include "distributed/query_utils.h" -#include "distributed/multi_router_planner.h" -#include "distributed/worker_protocol.h" -#include "distributed/version_compat.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" #include "nodes/pathnodes.h" -#include "optimizer/optimizer.h" #include "optimizer/clauses.h" +#include "optimizer/optimizer.h" #include "optimizer/prep.h" #include "optimizer/tlist.h" #include "parser/parsetree.h" #include "utils/builtins.h" #include "utils/datum.h" #include "utils/lsyscache.h" -#include "utils/syscache.h" #include "utils/rel.h" #include "utils/relcache.h" +#include "utils/syscache.h" + +#include "pg_version_constants.h" + +#include "distributed/citus_clauses.h" +#include "distributed/colocation_utils.h" +#include "distributed/insert_select_planner.h" +#include "distributed/listutils.h" +#include "distributed/metadata_cache.h" +#include "distributed/multi_logical_optimizer.h" +#include "distributed/multi_logical_planner.h" +#include "distributed/multi_physical_planner.h" +#include "distributed/multi_router_planner.h" +#include "distributed/query_pushdown_planning.h" +#include "distributed/query_utils.h" +#include "distributed/reference_table_utils.h" +#include "distributed/relation_restriction_equivalence.h" +#include "distributed/version_compat.h" +#include "distributed/worker_protocol.h" /* Struct to differentiate different qualifier types in an expression tree walker */ @@ -714,8 +715,8 @@ MultiNodeTree(Query *queryTree) /* - * ContainsReadIntermediateResultFunction determines whether an expresion tree contains - * a call to the read_intermediate_result function. + * ContainsReadIntermediateResultFunction determines whether an expression tree + * contains a call to the read_intermediate_result function. 
*/ bool ContainsReadIntermediateResultFunction(Node *node) @@ -725,7 +726,7 @@ ContainsReadIntermediateResultFunction(Node *node) /* - * ContainsReadIntermediateResultArrayFunction determines whether an expresion + * ContainsReadIntermediateResultArrayFunction determines whether an expression * tree contains a call to the read_intermediate_results(result_ids, format) * function. */ diff --git a/src/backend/distributed/planner/multi_physical_planner.c b/src/backend/distributed/planner/multi_physical_planner.c index 21befa6f202..fb7f844c7b4 100644 --- a/src/backend/distributed/planner/multi_physical_planner.c +++ b/src/backend/distributed/planner/multi_physical_planner.c @@ -11,13 +11,11 @@ *------------------------------------------------------------------------- */ -#include "postgres.h" - -#include "distributed/pg_version_constants.h" - #include #include +#include "postgres.h" + #include "miscadmin.h" #include "access/genam.h" @@ -33,56 +31,59 @@ #include "catalog/pg_type.h" #include "commands/defrem.h" #include "commands/sequence.h" +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" +#include "nodes/pathnodes.h" +#include "nodes/print.h" +#include "optimizer/clauses.h" +#include "optimizer/optimizer.h" +#include "optimizer/restrictinfo.h" +#include "optimizer/tlist.h" +#include "parser/parse_relation.h" +#include "parser/parse_type.h" +#include "parser/parsetree.h" +#include "rewrite/rewriteManip.h" +#include "utils/builtins.h" +#include "utils/catcache.h" +#include "utils/datum.h" +#include "utils/fmgroids.h" +#include "utils/guc.h" +#include "utils/lsyscache.h" +#include "utils/memutils.h" +#include "utils/rel.h" +#include "utils/syscache.h" +#include "utils/typcache.h" + +#include "pg_version_constants.h" + #include "distributed/backend_data.h" -#include "distributed/listutils.h" #include "distributed/citus_nodefuncs.h" #include "distributed/citus_nodes.h" #include "distributed/citus_ruleutils.h" #include "distributed/colocation_utils.h" -#include "distributed/deparse_shard_query.h" #include "distributed/coordinator_protocol.h" +#include "distributed/deparse_shard_query.h" #include "distributed/intermediate_results.h" +#include "distributed/listutils.h" +#include "distributed/log_utils.h" #include "distributed/metadata_cache.h" -#include "distributed/multi_router_planner.h" #include "distributed/multi_join_order.h" #include "distributed/multi_logical_optimizer.h" #include "distributed/multi_logical_planner.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/multi_physical_planner.h" -#include "distributed/log_utils.h" +#include "distributed/multi_router_planner.h" #include "distributed/pg_dist_partition.h" #include "distributed/pg_dist_shard.h" #include "distributed/query_pushdown_planning.h" #include "distributed/query_utils.h" #include "distributed/recursive_planning.h" -#include "distributed/shardinterval_utils.h" #include "distributed/shard_pruning.h" +#include "distributed/shardinterval_utils.h" #include "distributed/string_utils.h" +#include "distributed/version_compat.h" #include "distributed/worker_manager.h" #include "distributed/worker_protocol.h" -#include "distributed/version_compat.h" -#include "nodes/makefuncs.h" -#include "nodes/nodeFuncs.h" -#include "nodes/print.h" -#include "optimizer/clauses.h" -#include "nodes/pathnodes.h" -#include "optimizer/optimizer.h" -#include "optimizer/restrictinfo.h" -#include "optimizer/tlist.h" -#include "parser/parse_relation.h" -#include "parser/parse_type.h" -#include "parser/parsetree.h" -#include 
"rewrite/rewriteManip.h" -#include "utils/builtins.h" -#include "utils/catcache.h" -#include "utils/datum.h" -#include "utils/fmgroids.h" -#include "utils/guc.h" -#include "utils/lsyscache.h" -#include "utils/memutils.h" -#include "utils/rel.h" -#include "utils/syscache.h" -#include "utils/typcache.h" /* RepartitionJoinBucketCountPerNode determines bucket amount during repartitions */ int RepartitionJoinBucketCountPerNode = 4; diff --git a/src/backend/distributed/planner/multi_router_planner.c b/src/backend/distributed/planner/multi_router_planner.c index 0d7a0de78d2..44f955a3227 100644 --- a/src/backend/distributed/planner/multi_router_planner.c +++ b/src/backend/distributed/planner/multi_router_planner.c @@ -11,77 +11,77 @@ *------------------------------------------------------------------------- */ -#include "postgres.h" - -#include "distributed/pg_version_constants.h" - #include +#include "postgres.h" + #include "access/stratnum.h" #include "access/xact.h" #include "catalog/pg_opfamily.h" +#include "catalog/pg_proc.h" #include "catalog/pg_type.h" -#include "distributed/colocation_utils.h" +#include "executor/execdesc.h" +#include "lib/stringinfo.h" +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" +#include "nodes/nodes.h" +#include "nodes/parsenodes.h" +#include "nodes/pg_list.h" +#include "nodes/primnodes.h" +#include "optimizer/clauses.h" +#include "optimizer/joininfo.h" +#include "optimizer/optimizer.h" +#include "optimizer/pathnode.h" +#include "optimizer/paths.h" +#include "optimizer/planmain.h" +#include "optimizer/restrictinfo.h" +#include "parser/parse_oper.h" +#include "parser/parsetree.h" +#include "postmaster/postmaster.h" +#include "storage/lock.h" +#include "utils/builtins.h" +#include "utils/elog.h" +#include "utils/errcodes.h" +#include "utils/lsyscache.h" +#include "utils/rel.h" +#include "utils/typcache.h" + +#include "pg_version_constants.h" + #include "distributed/citus_clauses.h" -#include "distributed/citus_nodes.h" #include "distributed/citus_nodefuncs.h" +#include "distributed/citus_nodes.h" +#include "distributed/citus_ruleutils.h" +#include "distributed/colocation_utils.h" +#include "distributed/coordinator_protocol.h" #include "distributed/deparse_shard_query.h" #include "distributed/distribution_column.h" #include "distributed/errormessage.h" #include "distributed/executor_util.h" -#include "distributed/log_utils.h" #include "distributed/insert_select_planner.h" #include "distributed/intermediate_result_pruning.h" -#include "distributed/metadata_utility.h" -#include "distributed/coordinator_protocol.h" +#include "distributed/listutils.h" +#include "distributed/log_utils.h" #include "distributed/merge_planner.h" #include "distributed/metadata_cache.h" +#include "distributed/metadata_utility.h" #include "distributed/multi_executor.h" #include "distributed/multi_join_order.h" -#include "distributed/multi_logical_planner.h" #include "distributed/multi_logical_optimizer.h" +#include "distributed/multi_logical_planner.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/multi_physical_planner.h" #include "distributed/multi_router_planner.h" #include "distributed/multi_server_executor.h" -#include "distributed/listutils.h" -#include "distributed/citus_ruleutils.h" #include "distributed/query_pushdown_planning.h" #include "distributed/query_utils.h" +#include "distributed/recursive_planning.h" #include "distributed/reference_table_utils.h" #include "distributed/relation_restriction_equivalence.h" #include 
"distributed/relay_utility.h" -#include "distributed/recursive_planning.h" #include "distributed/resource_lock.h" -#include "distributed/shardinterval_utils.h" #include "distributed/shard_pruning.h" -#include "executor/execdesc.h" -#include "lib/stringinfo.h" -#include "nodes/makefuncs.h" -#include "nodes/nodeFuncs.h" -#include "nodes/nodes.h" -#include "nodes/parsenodes.h" -#include "nodes/pg_list.h" -#include "nodes/primnodes.h" -#include "optimizer/clauses.h" -#include "optimizer/joininfo.h" -#include "optimizer/pathnode.h" -#include "optimizer/paths.h" -#include "optimizer/optimizer.h" -#include "optimizer/restrictinfo.h" -#include "parser/parsetree.h" -#include "parser/parse_oper.h" -#include "postmaster/postmaster.h" -#include "storage/lock.h" -#include "utils/builtins.h" -#include "utils/elog.h" -#include "utils/errcodes.h" -#include "utils/lsyscache.h" -#include "utils/rel.h" -#include "utils/typcache.h" - -#include "catalog/pg_proc.h" -#include "optimizer/planmain.h" +#include "distributed/shardinterval_utils.h" /* intermediate value for INSERT processing */ typedef struct InsertValues @@ -434,7 +434,7 @@ ExtractSelectRangeTableEntry(Query *query) * for the given modification query. * * The function errors out if the input query is not a - * modify query (e.g., INSERT, UPDATE or DELETE). So, this + * modify query (e.g., INSERT, UPDATE, DELETE or MERGE). So, this * function is not expected to be called on SELECT queries. */ Oid @@ -2271,13 +2271,13 @@ SelectsFromDistributedTable(List *rangeTableList, Query *query) /* - * RouterQuery runs router pruning logic for SELECT, UPDATE, DELETE, and MERGE queries. - * If there are shards present and query is routable, all RTEs have been updated - * to point to the relevant shards in the originalQuery. Also, placementList is - * filled with the list of worker nodes that has all the required shard placements - * for the query execution. anchorShardId is set to the first pruned shardId of - * the given query. Finally, relationShardList is filled with the list of - * relation-to-shard mappings for the query. + * PlanRouterQuery runs router pruning logic for SELECT, UPDATE, DELETE, and + * MERGE queries. If there are shards present and query is routable, all RTEs + * have been updated to point to the relevant shards in the originalQuery. Also, + * placementList is filled with the list of worker nodes that has all the + * required shard placements for the query execution. anchorShardId is set to + * the first pruned shardId of the given query. Finally, relationShardList is + * filled with the list of relation-to-shard mappings for the query. * * If the given query is not routable, it fills planningError with the related * DeferredErrorMessage. The caller can check this error message to see if query @@ -2324,27 +2324,11 @@ PlanRouterQuery(Query *originalQuery, TargetShardIntervalForFastPathQuery(originalQuery, &isMultiShardQuery, distributionKeyValue, partitionValueConst); - - /* - * This could only happen when there is a parameter on the distribution key. - * We defer error here, later the planner is forced to use a generic plan - * by assigning arbitrarily high cost to the plan. 
- */ - if (UpdateOrDeleteOrMergeQuery(originalQuery) && isMultiShardQuery) - { - planningError = DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, - "Router planner cannot handle multi-shard " - "modify queries", NULL, NULL); - return planningError; - } + Assert(!isMultiShardQuery); *prunedShardIntervalListList = shardIntervalList; - - if (!isMultiShardQuery) - { - ereport(DEBUG2, (errmsg("Distributed planning for a fast-path router " - "query"))); - } + ereport(DEBUG2, (errmsg("Distributed planning for a fast-path router " + "query"))); } else { @@ -2526,7 +2510,7 @@ AllShardsColocated(List *relationShardList) if (currentTableType == RANGE_DISTRIBUTED || currentTableType == APPEND_DISTRIBUTED) { - /* we do not have further strict colocation chceks */ + /* we do not have further strict colocation checks */ continue; } } @@ -2948,7 +2932,7 @@ TargetShardIntervalsForRestrictInfo(RelationRestrictionContext *restrictionConte } /* - * Different resrictions might have different partition columns. + * Different restrictions might have different partition columns. * We report partition column value if there is only one. */ if (multiplePartitionValuesExist) diff --git a/src/backend/distributed/planner/query_colocation_checker.c b/src/backend/distributed/planner/query_colocation_checker.c index 77baab1972c..bef91618e42 100644 --- a/src/backend/distributed/planner/query_colocation_checker.c +++ b/src/backend/distributed/planner/query_colocation_checker.c @@ -21,26 +21,26 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" - #include "access/relation.h" -#include "distributed/multi_logical_planner.h" -#include "distributed/query_colocation_checker.h" -#include "distributed/pg_dist_partition.h" -#include "distributed/relation_restriction_equivalence.h" -#include "distributed/metadata_cache.h" -#include "distributed/multi_logical_planner.h" /* only to access utility functions */ - #include "catalog/pg_type.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" -#include "parser/parsetree.h" -#include "distributed/listutils.h" -#include "parser/parse_relation.h" #include "optimizer/planner.h" #include "optimizer/prep.h" +#include "parser/parse_relation.h" +#include "parser/parsetree.h" #include "utils/rel.h" +#include "pg_version_constants.h" + +#include "distributed/listutils.h" +#include "distributed/metadata_cache.h" +#include "distributed/multi_logical_planner.h" +#include "distributed/multi_logical_planner.h" /* only to access utility functions */ +#include "distributed/pg_dist_partition.h" +#include "distributed/query_colocation_checker.h" +#include "distributed/relation_restriction_equivalence.h" + static RangeTblEntry * AnchorRte(Query *subquery); static List * UnionRelationRestrictionLists(List *firstRelationList, @@ -433,7 +433,7 @@ CreateTargetEntryForColumn(Form_pg_attribute attributeTuple, Index rteIndex, attributeTuple->atttypmod, attributeTuple->attcollation, 0); TargetEntry *targetEntry = makeTargetEntry((Expr *) targetColumn, resno, - strdup(attributeTuple->attname.data), false); + pstrdup(attributeTuple->attname.data), false); return targetEntry; } @@ -449,7 +449,7 @@ CreateTargetEntryForNullCol(Form_pg_attribute attributeTuple, int resno) attributeTuple->attcollation); char *resName = attributeTuple->attname.data; TargetEntry *targetEntry = - makeTargetEntry(nullExpr, resno, strdup(resName), false); + makeTargetEntry(nullExpr, resno, pstrdup(resName), false); return targetEntry; } diff --git a/src/backend/distributed/planner/query_pushdown_planning.c 
b/src/backend/distributed/planner/query_pushdown_planning.c index 3bad7345900..2eda4e42a76 100644 --- a/src/backend/distributed/planner/query_pushdown_planning.c +++ b/src/backend/distributed/planner/query_pushdown_planning.c @@ -21,7 +21,14 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" +#include "nodes/pg_list.h" +#include "optimizer/clauses.h" +#include "optimizer/optimizer.h" +#include "parser/parsetree.h" + +#include "pg_version_constants.h" #include "distributed/citus_clauses.h" #include "distributed/citus_ruleutils.h" @@ -32,17 +39,11 @@ #include "distributed/multi_logical_planner.h" #include "distributed/multi_router_planner.h" #include "distributed/pg_dist_partition.h" -#include "distributed/query_utils.h" #include "distributed/query_pushdown_planning.h" +#include "distributed/query_utils.h" #include "distributed/recursive_planning.h" #include "distributed/relation_restriction_equivalence.h" #include "distributed/version_compat.h" -#include "nodes/nodeFuncs.h" -#include "nodes/makefuncs.h" -#include "optimizer/optimizer.h" -#include "nodes/pg_list.h" -#include "optimizer/clauses.h" -#include "parser/parsetree.h" #define INVALID_RELID -1 diff --git a/src/backend/distributed/planner/recursive_planning.c b/src/backend/distributed/planner/recursive_planning.c index c2426cf5f73..9f520fa5f51 100644 --- a/src/backend/distributed/planner/recursive_planning.c +++ b/src/backend/distributed/planner/recursive_planning.c @@ -48,50 +48,49 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" - #include "funcapi.h" -#include "catalog/pg_type.h" #include "catalog/pg_class.h" +#include "catalog/pg_type.h" +#include "lib/stringinfo.h" +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" +#include "nodes/nodes.h" +#include "nodes/pathnodes.h" +#include "nodes/pg_list.h" +#include "nodes/primnodes.h" +#include "optimizer/clauses.h" +#include "optimizer/optimizer.h" +#include "optimizer/planner.h" +#include "optimizer/prep.h" +#include "parser/parse_relation.h" +#include "parser/parsetree.h" +#include "utils/builtins.h" +#include "utils/guc.h" +#include "utils/lsyscache.h" + +#include "pg_version_constants.h" + #include "distributed/citus_nodes.h" #include "distributed/citus_ruleutils.h" #include "distributed/commands/multi_copy.h" #include "distributed/distributed_planner.h" #include "distributed/errormessage.h" -#include "distributed/local_distributed_join_planner.h" #include "distributed/listutils.h" +#include "distributed/local_distributed_join_planner.h" #include "distributed/log_utils.h" #include "distributed/metadata_cache.h" -#include "distributed/multi_logical_planner.h" #include "distributed/multi_logical_optimizer.h" -#include "distributed/multi_router_planner.h" +#include "distributed/multi_logical_planner.h" #include "distributed/multi_physical_planner.h" +#include "distributed/multi_router_planner.h" #include "distributed/multi_server_executor.h" #include "distributed/query_colocation_checker.h" #include "distributed/query_pushdown_planning.h" #include "distributed/recursive_planning.h" #include "distributed/relation_restriction_equivalence.h" -#include "distributed/log_utils.h" #include "distributed/shard_pruning.h" #include "distributed/version_compat.h" -#include "lib/stringinfo.h" -#include "optimizer/clauses.h" -#include "optimizer/optimizer.h" -#include "optimizer/planner.h" -#include "optimizer/prep.h" -#include "parser/parse_relation.h" -#include "parser/parsetree.h" 
-#include "nodes/makefuncs.h" -#include "nodes/nodeFuncs.h" -#include "nodes/nodes.h" -#include "nodes/nodeFuncs.h" -#include "nodes/pg_list.h" -#include "nodes/primnodes.h" -#include "nodes/pathnodes.h" -#include "utils/builtins.h" -#include "utils/guc.h" -#include "utils/lsyscache.h" /* * RecursivePlanningContext is used to recursively plan subqueries @@ -1098,8 +1097,8 @@ RecursivelyPlanCTEs(Query *query, RecursivePlanningContext *planningContext) if (query->hasRecursive) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, - "recursive CTEs are not supported in distributed " - "queries", + "recursive CTEs are only supported when they " + "contain a filter on the distribution column", NULL, NULL); } diff --git a/src/backend/distributed/planner/relation_restriction_equivalence.c b/src/backend/distributed/planner/relation_restriction_equivalence.c index 368ba2026c1..83d7cbcdb77 100644 --- a/src/backend/distributed/planner/relation_restriction_equivalence.c +++ b/src/backend/distributed/planner/relation_restriction_equivalence.c @@ -10,31 +10,31 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "catalog/pg_type.h" +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" +#include "nodes/pathnodes.h" +#include "nodes/pg_list.h" +#include "nodes/primnodes.h" +#include "optimizer/optimizer.h" +#include "optimizer/pathnode.h" +#include "optimizer/paths.h" +#include "parser/parsetree.h" + +#include "pg_version_constants.h" #include "distributed/colocation_utils.h" #include "distributed/distributed_planner.h" #include "distributed/listutils.h" #include "distributed/metadata_cache.h" -#include "distributed/multi_logical_planner.h" #include "distributed/multi_logical_optimizer.h" +#include "distributed/multi_logical_planner.h" #include "distributed/multi_router_planner.h" #include "distributed/pg_dist_partition.h" #include "distributed/query_utils.h" #include "distributed/relation_restriction_equivalence.h" #include "distributed/shard_pruning.h" -#include "catalog/pg_type.h" -#include "nodes/nodeFuncs.h" -#include "nodes/pg_list.h" -#include "nodes/primnodes.h" -#include "nodes/pathnodes.h" -#include "optimizer/optimizer.h" -#include "nodes/makefuncs.h" -#include "optimizer/paths.h" -#include "parser/parsetree.h" -#include "optimizer/pathnode.h" - static uint32 AttributeEquivalenceId = 1; diff --git a/src/backend/distributed/planner/shard_pruning.c b/src/backend/distributed/planner/shard_pruning.c index 5375a70fa33..e68ac72b01c 100644 --- a/src/backend/distributed/planner/shard_pruning.c +++ b/src/backend/distributed/planner/shard_pruning.c @@ -66,28 +66,14 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" - #include "fmgr.h" -#include "distributed/shard_pruning.h" - #include "access/nbtree.h" #include "catalog/pg_am.h" #include "catalog/pg_collation.h" #include "catalog/pg_type.h" -#include "distributed/distributed_planner.h" -#include "distributed/listutils.h" -#include "distributed/log_utils.h" -#include "distributed/metadata_cache.h" -#include "distributed/multi_join_order.h" -#include "distributed/multi_physical_planner.h" -#include "distributed/pg_dist_partition.h" -#include "distributed/shardinterval_utils.h" -#include "distributed/version_compat.h" -#include "distributed/worker_protocol.h" -#include "nodes/nodeFuncs.h" #include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" #include "optimizer/clauses.h" #include "optimizer/planner.h" #include "parser/parse_coerce.h" @@ -98,6 +84,20 @@ #include "utils/memutils.h" #include 
"utils/ruleutils.h" +#include "pg_version_constants.h" + +#include "distributed/distributed_planner.h" +#include "distributed/listutils.h" +#include "distributed/log_utils.h" +#include "distributed/metadata_cache.h" +#include "distributed/multi_join_order.h" +#include "distributed/multi_physical_planner.h" +#include "distributed/pg_dist_partition.h" +#include "distributed/shard_pruning.h" +#include "distributed/shardinterval_utils.h" +#include "distributed/version_compat.h" +#include "distributed/worker_protocol.h" + /* * Tree node for compact representation of the given query logical tree. diff --git a/src/backend/distributed/planner/tdigest_extension.c b/src/backend/distributed/planner/tdigest_extension.c index 123b170d4a4..3a3701940ba 100644 --- a/src/backend/distributed/planner/tdigest_extension.c +++ b/src/backend/distributed/planner/tdigest_extension.c @@ -12,13 +12,14 @@ #include "access/htup_details.h" #include "catalog/pg_extension.h" #include "catalog/pg_type.h" -#include "distributed/metadata_cache.h" -#include "distributed/tdigest_extension.h" -#include "distributed/version_compat.h" #include "parser/parse_func.h" #include "utils/fmgroids.h" #include "utils/lsyscache.h" +#include "distributed/metadata_cache.h" +#include "distributed/tdigest_extension.h" +#include "distributed/version_compat.h" + static Oid LookupTDigestFunction(const char *functionName, int argcount, Oid *argtypes); diff --git a/src/backend/distributed/progress/multi_progress.c b/src/backend/distributed/progress/multi_progress.c index 8a3adf4bc87..64e0a5b4762 100644 --- a/src/backend/distributed/progress/multi_progress.c +++ b/src/backend/distributed/progress/multi_progress.c @@ -8,15 +8,17 @@ */ #include "postgres.h" + #include "miscadmin.h" #include "pgstat.h" +#include "storage/dsm.h" +#include "utils/builtins.h" + #include "distributed/function_utils.h" #include "distributed/listutils.h" #include "distributed/multi_progress.h" #include "distributed/version_compat.h" -#include "storage/dsm.h" -#include "utils/builtins.h" /* dynamic shared memory handle of the current progress */ diff --git a/src/backend/distributed/relay/relay_event_utility.c b/src/backend/distributed/relay/relay_event_utility.c index 3284ead1172..d0267025bcc 100644 --- a/src/backend/distributed/relay/relay_event_utility.c +++ b/src/backend/distributed/relay/relay_event_utility.c @@ -12,34 +12,28 @@ *------------------------------------------------------------------------- */ -#include "postgres.h" -#include "c.h" - #include #include +#include "postgres.h" + +#include "c.h" + #include "access/genam.h" -#include "access/heapam.h" -#include "access/htup_details.h" #include "access/hash.h" +#include "access/heapam.h" #include "access/htup.h" +#include "access/htup_details.h" #include "access/skey.h" #include "access/stratnum.h" #include "catalog/indexing.h" #include "catalog/namespace.h" #include "catalog/pg_class.h" #include "catalog/pg_constraint.h" -#include "distributed/citus_safe_lib.h" -#include "distributed/commands.h" -#include "distributed/listutils.h" -#include "distributed/metadata_cache.h" -#include "distributed/multi_partitioning_utils.h" -#include "distributed/relay_utility.h" -#include "distributed/version_compat.h" #include "lib/stringinfo.h" #include "mb/pg_wchar.h" -#include "nodes/nodes.h" #include "nodes/nodeFuncs.h" +#include "nodes/nodes.h" #include "nodes/parsenodes.h" #include "nodes/pg_list.h" #include "nodes/primnodes.h" @@ -53,6 +47,14 @@ #include "utils/palloc.h" #include "utils/relcache.h" +#include 
"distributed/citus_safe_lib.h" +#include "distributed/commands.h" +#include "distributed/listutils.h" +#include "distributed/metadata_cache.h" +#include "distributed/multi_partitioning_utils.h" +#include "distributed/relay_utility.h" +#include "distributed/version_compat.h" + /* Local functions forward declarations */ static void RelayEventExtendConstraintAndIndexNames(AlterTableStmt *alterTableStmt, Constraint *constraint, diff --git a/src/backend/distributed/replication/multi_logical_replication.c b/src/backend/distributed/replication/multi_logical_replication.c index f66e309ab1f..08e6c557308 100644 --- a/src/backend/distributed/replication/multi_logical_replication.c +++ b/src/backend/distributed/replication/multi_logical_replication.c @@ -10,61 +10,61 @@ *------------------------------------------------------------------------- */ #include "postgres.h" -#include "miscadmin.h" + #include "fmgr.h" -#include "pgstat.h" #include "libpq-fe.h" - -#include "distributed/pg_version_constants.h" +#include "miscadmin.h" +#include "pgstat.h" #include "access/genam.h" - -#include "postmaster/interrupt.h" - #include "access/htup_details.h" #include "access/sysattr.h" #include "access/xact.h" -#include "commands/dbcommands.h" -#include "common/hashfn.h" -#include "catalog/pg_subscription_rel.h" #include "catalog/namespace.h" #include "catalog/pg_constraint.h" +#include "catalog/pg_subscription_rel.h" +#include "commands/dbcommands.h" +#include "common/hashfn.h" +#include "nodes/bitmapset.h" +#include "parser/scansup.h" +#include "postmaster/interrupt.h" +#include "storage/ipc.h" +#include "storage/latch.h" +#include "storage/lock.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/fmgrprotos.h" +#include "utils/formatting.h" +#include "utils/guc.h" +#include "utils/inval.h" +#include "utils/lsyscache.h" +#include "utils/pg_lsn.h" +#include "utils/rel.h" +#include "utils/ruleutils.h" +#include "utils/syscache.h" + +#include "pg_version_constants.h" + #include "distributed/adaptive_executor.h" #include "distributed/citus_safe_lib.h" #include "distributed/colocation_utils.h" #include "distributed/connection_management.h" +#include "distributed/coordinator_protocol.h" +#include "distributed/distributed_planner.h" #include "distributed/hash_helpers.h" #include "distributed/listutils.h" -#include "distributed/coordinator_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" #include "distributed/multi_join_order.h" #include "distributed/multi_logical_replication.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/priority.h" -#include "distributed/distributed_planner.h" #include "distributed/remote_commands.h" #include "distributed/resource_lock.h" #include "distributed/shard_cleaner.h" #include "distributed/shard_rebalancer.h" #include "distributed/shard_transfer.h" #include "distributed/version_compat.h" -#include "nodes/bitmapset.h" -#include "parser/scansup.h" -#include "storage/ipc.h" -#include "storage/latch.h" -#include "storage/lock.h" -#include "utils/guc.h" -#include "utils/builtins.h" -#include "utils/fmgrprotos.h" -#include "utils/fmgroids.h" -#include "utils/formatting.h" -#include "utils/inval.h" -#include "utils/lsyscache.h" -#include "utils/pg_lsn.h" -#include "utils/rel.h" -#include "utils/ruleutils.h" -#include "utils/syscache.h" #define CURRENT_LOG_POSITION_COMMAND "SELECT pg_current_wal_lsn()" @@ -1143,7 +1143,7 @@ ConflictWithIsolationTestingBeforeCopy(void) const bool sessionLock = false; const bool 
dontWait = false; - if (RunningUnderIsolationTest) + if (RunningUnderCitusTestSuite) { SET_LOCKTAG_ADVISORY(tag, MyDatabaseId, SHARD_MOVE_ADVISORY_LOCK_SECOND_KEY, @@ -1177,7 +1177,7 @@ ConflictWithIsolationTestingAfterCopy(void) const bool sessionLock = false; const bool dontWait = false; - if (RunningUnderIsolationTest) + if (RunningUnderCitusTestSuite) { SET_LOCKTAG_ADVISORY(tag, MyDatabaseId, SHARD_MOVE_ADVISORY_LOCK_FIRST_KEY, @@ -1335,10 +1335,10 @@ CreatePublications(MultiConnection *connection, WorkerNode *worker = FindWorkerNode(connection->hostname, connection->port); - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_PUBLICATION, - entry->name, - worker->groupId, - CLEANUP_ALWAYS); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_PUBLICATION, + entry->name, + worker->groupId, + CLEANUP_ALWAYS); ExecuteCriticalRemoteCommand(connection, DISABLE_DDL_PROPAGATION); ExecuteCriticalRemoteCommand(connection, createPublicationCommand->data); @@ -1435,10 +1435,10 @@ CreateReplicationSlots(MultiConnection *sourceConnection, WorkerNode *worker = FindWorkerNode(sourceConnection->hostname, sourceConnection->port); - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_REPLICATION_SLOT, - replicationSlot->name, - worker->groupId, - CLEANUP_ALWAYS); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_REPLICATION_SLOT, + replicationSlot->name, + worker->groupId, + CLEANUP_ALWAYS); if (!firstReplicationSlot) { @@ -1506,10 +1506,10 @@ CreateSubscriptions(MultiConnection *sourceConnection, quote_identifier(GetUserNameFromId(ownerId, false)) ))); - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_USER, - target->subscriptionOwnerName, - worker->groupId, - CLEANUP_ALWAYS); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_USER, + target->subscriptionOwnerName, + worker->groupId, + CLEANUP_ALWAYS); StringInfo conninfo = makeStringInfo(); appendStringInfo(conninfo, "host='%s' port=%d user='%s' dbname='%s' " @@ -1567,10 +1567,10 @@ CreateSubscriptions(MultiConnection *sourceConnection, pfree(createSubscriptionCommand->data); pfree(createSubscriptionCommand); - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SUBSCRIPTION, - target->subscriptionName, - worker->groupId, - CLEANUP_ALWAYS); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SUBSCRIPTION, + target->subscriptionName, + worker->groupId, + CLEANUP_ALWAYS); ExecuteCriticalRemoteCommand(target->superuserConnection, psprintf( "ALTER SUBSCRIPTION %s OWNER TO %s", diff --git a/src/backend/distributed/serialize_distributed_ddls.c b/src/backend/distributed/serialize_distributed_ddls.c new file mode 100644 index 00000000000..11d10905b9b --- /dev/null +++ b/src/backend/distributed/serialize_distributed_ddls.c @@ -0,0 +1,275 @@ +/*------------------------------------------------------------------------- + * + * serialize_distributed_ddls.c + * + * This file contains functions for serializing distributed DDLs. 
+ * + * If you're adding support for serializing a new DDL, you should + * extend the following functions to support the new object class: + * AcquireCitusAdvisoryObjectClassLockGetOid() + * AcquireCitusAdvisoryObjectClassLockCheckPrivileges() + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "miscadmin.h" + +#include "catalog/dependency.h" +#include "catalog/pg_database_d.h" +#include "commands/dbcommands.h" +#include "storage/lock.h" +#include "utils/builtins.h" + +#include "pg_version_compat.h" + +#include "distributed/adaptive_executor.h" +#include "distributed/argutils.h" +#include "distributed/deparse_shard_query.h" +#include "distributed/resource_lock.h" +#include "distributed/serialize_distributed_ddls.h" + + +PG_FUNCTION_INFO_V1(citus_internal_acquire_citus_advisory_object_class_lock); + + +static void SerializeDistributedDDLsOnObjectClassInternal(ObjectClass objectClass, + char *qualifiedObjectName); +static char * AcquireCitusAdvisoryObjectClassLockCommand(ObjectClass objectClass, + char *qualifiedObjectName); +static void AcquireCitusAdvisoryObjectClassLock(ObjectClass objectClass, + char *qualifiedObjectName); +static Oid AcquireCitusAdvisoryObjectClassLockGetOid(ObjectClass objectClass, + char *qualifiedObjectName); +static void AcquireCitusAdvisoryObjectClassLockCheckPrivileges(ObjectClass objectClass, + Oid oid); + + +/* + * citus_internal_acquire_citus_advisory_object_class_lock is an internal UDF + * to call AcquireCitusAdvisoryObjectClassLock(). + */ +Datum +citus_internal_acquire_citus_advisory_object_class_lock(PG_FUNCTION_ARGS) +{ + CheckCitusVersion(ERROR); + + PG_ENSURE_ARGNOTNULL(0, "object_class"); + ObjectClass objectClass = PG_GETARG_INT32(0); + + char *qualifiedObjectName = PG_ARGISNULL(1) ? NULL : PG_GETARG_CSTRING(1); + + AcquireCitusAdvisoryObjectClassLock(objectClass, qualifiedObjectName); + + PG_RETURN_VOID(); +} + + +/* + * SerializeDistributedDDLsOnObjectClass is a wrapper around + * SerializeDistributedDDLsOnObjectClassInternal to acquire the lock on given + * object class itself, see the comment in header file for more details about + * the difference between this function and + * SerializeDistributedDDLsOnObjectClassObject(). + */ +void +SerializeDistributedDDLsOnObjectClass(ObjectClass objectClass) +{ + SerializeDistributedDDLsOnObjectClassInternal(objectClass, NULL); +} + + +/* + * SerializeDistributedDDLsOnObjectClassObject is a wrapper around + * SerializeDistributedDDLsOnObjectClassInternal to acquire the lock on given + * object that belongs to given object class, see the comment in header file + * for more details about the difference between this function and + * SerializeDistributedDDLsOnObjectClass(). + */ +void +SerializeDistributedDDLsOnObjectClassObject(ObjectClass objectClass, + char *qualifiedObjectName) +{ + if (qualifiedObjectName == NULL) + { + elog(ERROR, "qualified object name cannot be NULL"); + } + + SerializeDistributedDDLsOnObjectClassInternal(objectClass, qualifiedObjectName); +} + + +/* + * SerializeDistributedDDLsOnObjectClassInternal serializes distributed DDLs + * that target given object class by acquiring a Citus specific advisory lock + * on the first primary worker node if there are any workers in the cluster. + * + * The lock is acquired via a coordinated transaction. For this reason, + * it automatically gets released when (maybe implicit) transaction on + * current server commits or rolls back. 
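Editorial aside, not part of the patch: the lock is taken by running a command on the first primary worker, the one built by AcquireCitusAdvisoryObjectClassLockCommand() further down in this file. A hedged SQL sketch of the two invocations; the first argument is the integer value of the ObjectClass enum member (OCLASS_DATABASE is shown as 21 purely for illustration) and the database name is hypothetical:

-- class-level lock (qualifiedObjectName = NULL), e.g. to serialize CREATE DATABASE
SELECT citus_internal.acquire_citus_advisory_object_class_lock(21 /* OCLASS_DATABASE, illustrative value */, NULL);

-- object-level lock on one database, e.g. to serialize ALTER DATABASE ... RENAME
SELECT citus_internal.acquire_citus_advisory_object_class_lock(21 /* OCLASS_DATABASE, illustrative value */, 'my_database');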
+ * + * If qualifiedObjectName is provided to be non-null, then the oid of the + * object is first resolved on the first primary worker node and then the + * lock is acquired on that oid. If qualifiedObjectName is null, then the + * lock is acquired on the object class itself. + * + * Note that those two lock types don't conflict with each other and are + * acquired for different purposes. The lock on the object class + * (qualifiedObjectName = NULL) is used to serialize DDLs that target the + * object class itself, e.g., when creating a new object of that class, and + * the latter is used to serialize DDLs that target a specific object of + * that class, e.g., when altering an object. + * + * In some cases, we may want to acquire both locks at the same time. For + * example, when renaming a database, we want to acquire both lock types + * because while the object class lock is used to ensure that another session + * doesn't create a new database with the same name, the object lock is used + * to ensure that another session doesn't alter the same database. + */ +static void +SerializeDistributedDDLsOnObjectClassInternal(ObjectClass objectClass, + char *qualifiedObjectName) +{ + WorkerNode *firstWorkerNode = GetFirstPrimaryWorkerNode(); + if (firstWorkerNode == NULL) + { + /* + * If there are no worker nodes in the cluster, then we don't need + * to acquire the lock at all; and we cannot indeed. + */ + return; + } + + /* + * Indeed we would already ensure permission checks in remote node + * --via AcquireCitusAdvisoryObjectClassLock()-- but we first do so on + * the local node to avoid from reporting confusing error messages. + */ + Oid oid = AcquireCitusAdvisoryObjectClassLockGetOid(objectClass, qualifiedObjectName); + AcquireCitusAdvisoryObjectClassLockCheckPrivileges(objectClass, oid); + + Task *task = CitusMakeNode(Task); + task->taskType = DDL_TASK; + + char *command = AcquireCitusAdvisoryObjectClassLockCommand(objectClass, + qualifiedObjectName); + SetTaskQueryString(task, command); + + ShardPlacement *targetPlacement = CitusMakeNode(ShardPlacement); + SetPlacementNodeMetadata(targetPlacement, firstWorkerNode); + task->taskPlacementList = list_make1(targetPlacement); + + /* need to be in a transaction to acquire a lock that's bound to transactions */ + UseCoordinatedTransaction(); + + bool localExecutionSupported = true; + ExecuteUtilityTaskList(list_make1(task), localExecutionSupported); +} + + +/* + * AcquireCitusAdvisoryObjectClassLockCommand returns a command to call + * citus_internal.acquire_citus_advisory_object_class_lock(). + */ +static char * +AcquireCitusAdvisoryObjectClassLockCommand(ObjectClass objectClass, + char *qualifiedObjectName) +{ + /* safe to cast to int as it's an enum */ + int objectClassInt = (int) objectClass; + + char *quotedObjectName = + !qualifiedObjectName ? "NULL" : + quote_literal_cstr(qualifiedObjectName); + + StringInfo command = makeStringInfo(); + appendStringInfo(command, + "SELECT citus_internal.acquire_citus_advisory_object_class_lock(%d, %s)", + objectClassInt, quotedObjectName); + + return command->data; +} + + +/* + * AcquireCitusAdvisoryObjectClassLock acquires a Citus specific advisory + * ExclusiveLock based on given object class. 
+ */ +static void +AcquireCitusAdvisoryObjectClassLock(ObjectClass objectClass, char *qualifiedObjectName) +{ + Oid oid = AcquireCitusAdvisoryObjectClassLockGetOid(objectClass, qualifiedObjectName); + + AcquireCitusAdvisoryObjectClassLockCheckPrivileges(objectClass, oid); + + LOCKTAG locktag; + SET_LOCKTAG_GLOBAL_DDL_SERIALIZATION(locktag, objectClass, oid); + + LOCKMODE lockmode = ExclusiveLock; + bool sessionLock = false; + bool dontWait = false; + LockAcquire(&locktag, lockmode, sessionLock, dontWait); +} + + +/* + * AcquireCitusAdvisoryObjectClassLockGetOid returns the oid of given object + * that belongs to given object class. If qualifiedObjectName is NULL, then + * it returns InvalidOid. + */ +static Oid +AcquireCitusAdvisoryObjectClassLockGetOid(ObjectClass objectClass, + char *qualifiedObjectName) +{ + if (qualifiedObjectName == NULL) + { + return InvalidOid; + } + + bool missingOk = false; + + switch (objectClass) + { + case OCLASS_DATABASE: + { + return get_database_oid(qualifiedObjectName, missingOk); + } + + default: + elog(ERROR, "unsupported object class: %d", objectClass); + } +} + + +/* + * AcquireCitusAdvisoryObjectClassLockCheckPrivileges is used to perform privilege checks + * before acquiring the Citus specific advisory lock on given object class and oid. + */ +static void +AcquireCitusAdvisoryObjectClassLockCheckPrivileges(ObjectClass objectClass, Oid oid) +{ + switch (objectClass) + { + case OCLASS_DATABASE: + { + if (OidIsValid(oid) && !object_ownercheck(DatabaseRelationId, oid, + GetUserId())) + { + aclcheck_error(ACLCHECK_NOT_OWNER, OBJECT_DATABASE, + get_database_name(oid)); + } + else if (!OidIsValid(oid) && !have_createdb_privilege()) + { + ereport(ERROR, + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("permission denied to create / rename database"))); + } + + break; + } + + default: + elog(ERROR, "unsupported object class: %d", objectClass); + } +} diff --git a/src/backend/distributed/shardsplit/shardsplit_decoder.c b/src/backend/distributed/shardsplit/shardsplit_decoder.c index 1386a21b0d3..f14f105576c 100644 --- a/src/backend/distributed/shardsplit/shardsplit_decoder.c +++ b/src/backend/distributed/shardsplit/shardsplit_decoder.c @@ -8,16 +8,18 @@ *------------------------------------------------------------------------- */ #include "postgres.h" + +#include "catalog/pg_namespace.h" +#include "replication/logical.h" +#include "utils/lsyscache.h" +#include "utils/typcache.h" + +#include "distributed/listutils.h" +#include "distributed/metadata/distobject.h" #include "distributed/shardinterval_utils.h" #include "distributed/shardsplit_shared_memory.h" -#include "distributed/worker_shard_visibility.h" #include "distributed/worker_protocol.h" -#include "distributed/listutils.h" -#include "distributed/metadata/distobject.h" -#include "replication/logical.h" -#include "utils/typcache.h" -#include "utils/lsyscache.h" -#include "catalog/pg_namespace.h" +#include "distributed/worker_shard_visibility.h" extern void _PG_output_plugin_init(OutputPluginCallbacks *cb); static LogicalDecodeChangeCB pgOutputPluginChangeCB; @@ -90,6 +92,46 @@ replication_origin_filter_cb(LogicalDecodingContext *ctx, RepOriginId origin_id) } +/* + * update_replication_progress is copied from Postgres 15. We use it to send keepalive + * messages when we are filtering out the wal changes resulting from the initial copy. + * If we do not send out messages long enough, wal reciever will time out. 
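Editorial aside, not part of the patch: the timeout the comment refers to is the subscriber-side wal_receiver_timeout, which drops the replication connection when neither data nor a keepalive arrives within the configured window. A small illustration using standard PostgreSQL settings:

-- 60s is the PostgreSQL default; the decoder below tries to send a keepalive
-- after roughly every CHANGES_THRESHOLD (100) filtered changes so that this
-- default keeps working during the initial copy
SHOW wal_receiver_timeout;

-- raising the timeout is only a workaround, shown here for completeness
ALTER SYSTEM SET wal_receiver_timeout = '120s';
SELECT pg_reload_conf();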
+ * Postgres 16 has refactored this code such that keepalive messages are sent during + * reordering phase which is above change_cb. So we do not need to send keepalive in + * change_cb. + */ +#if (PG_VERSION_NUM < PG_VERSION_16) +static void +update_replication_progress(LogicalDecodingContext *ctx, bool skipped_xact) +{ + static int changes_count = 0; + + /* + * We don't want to try sending a keepalive message after processing each + * change as that can have overhead. Tests revealed that there is no + * noticeable overhead in doing it after continuously processing 100 or so + * changes. + */ +#define CHANGES_THRESHOLD 100 + + /* + * After continuously processing CHANGES_THRESHOLD changes, we + * try to send a keepalive message if required. + */ + if (ctx->end_xact || ++changes_count >= CHANGES_THRESHOLD) + { +#if (PG_VERSION_NUM >= PG_VERSION_15) + OutputPluginUpdateProgress(ctx, skipped_xact); +#else + OutputPluginUpdateProgress(ctx); +#endif + changes_count = 0; + } +} + + +#endif + /* * shard_split_change_cb function emits the incoming tuple change * to the appropriate destination shard. @@ -108,6 +150,12 @@ shard_split_change_cb(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, return; } +#if (PG_VERSION_NUM < PG_VERSION_16) + + /* Send replication keepalive. */ + update_replication_progress(ctx, false); +#endif + /* check if the relation is publishable.*/ if (!is_publishable_relation(relation)) { diff --git a/src/backend/distributed/shardsplit/shardsplit_logical_replication.c b/src/backend/distributed/shardsplit/shardsplit_logical_replication.c index 8ffccb90c64..328dc9af946 100644 --- a/src/backend/distributed/shardsplit/shardsplit_logical_replication.c +++ b/src/backend/distributed/shardsplit/shardsplit_logical_replication.c @@ -10,23 +10,26 @@ */ #include "postgres.h" + #include "miscadmin.h" + +#include "commands/dbcommands.h" #include "nodes/pg_list.h" +#include "utils/builtins.h" + #include "distributed/colocation_utils.h" +#include "distributed/connection_management.h" #include "distributed/hash_helpers.h" +#include "distributed/listutils.h" #include "distributed/metadata_cache.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/priority.h" -#include "distributed/shardinterval_utils.h" -#include "distributed/connection_management.h" #include "distributed/remote_commands.h" +#include "distributed/resource_lock.h" #include "distributed/shard_split.h" -#include "distributed/shared_library_init.h" -#include "distributed/listutils.h" +#include "distributed/shardinterval_utils.h" #include "distributed/shardsplit_logical_replication.h" -#include "distributed/resource_lock.h" -#include "utils/builtins.h" -#include "commands/dbcommands.h" +#include "distributed/shared_library_init.h" static HTAB *ShardInfoHashMapForPublications = NULL; diff --git a/src/backend/distributed/shardsplit/shardsplit_shared_memory.c b/src/backend/distributed/shardsplit/shardsplit_shared_memory.c index 3e874575844..16ed79ad7be 100644 --- a/src/backend/distributed/shardsplit/shardsplit_shared_memory.c +++ b/src/backend/distributed/shardsplit/shardsplit_shared_memory.c @@ -12,13 +12,15 @@ */ #include "postgres.h" -#include "distributed/shardinterval_utils.h" -#include "distributed/shardsplit_shared_memory.h" -#include "distributed/citus_safe_lib.h" -#include "distributed/multi_logical_replication.h" + +#include "common/hashfn.h" #include "storage/ipc.h" #include "utils/memutils.h" -#include "common/hashfn.h" + +#include "distributed/citus_safe_lib.h" +#include 
"distributed/multi_logical_replication.h" +#include "distributed/shardinterval_utils.h" +#include "distributed/shardsplit_shared_memory.h" const char *SharedMemoryNameForHandleManagement = "Shared memory handle for shard split"; diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index e5d593295a3..bd65fa60c01 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -7,12 +7,12 @@ *------------------------------------------------------------------------- */ -#include "postgres.h" - #include #include #include +#include "postgres.h" + /* necessary to get alloca on illumos */ #ifdef __sun #include @@ -20,75 +20,94 @@ #include "fmgr.h" #include "miscadmin.h" - #include "safe_lib.h" -#include "catalog/pg_authid.h" #include "catalog/objectaccess.h" +#include "catalog/pg_authid.h" #include "catalog/pg_extension.h" -#include "citus_version.h" #include "commands/explain.h" #include "commands/extension.h" +#include "commands/seclabel.h" #include "common/string.h" #include "executor/executor.h" +#include "libpq/auth.h" +#include "optimizer/paths.h" +#include "optimizer/plancat.h" +#include "optimizer/planner.h" +#include "port/atomics.h" +#include "postmaster/postmaster.h" +#include "replication/walsender.h" +#include "storage/ipc.h" +#include "tcop/tcopprot.h" +#include "utils/guc.h" +#include "utils/guc_tables.h" +#include "utils/inval.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" +#include "utils/varlena.h" + +#include "citus_version.h" + +#include "columnar/columnar.h" + +#include "distributed/adaptive_executor.h" #include "distributed/backend_data.h" #include "distributed/background_jobs.h" #include "distributed/causal_clock.h" #include "distributed/citus_depended_object.h" #include "distributed/citus_nodefuncs.h" #include "distributed/citus_safe_lib.h" +#include "distributed/combine_query_planner.h" #include "distributed/commands.h" #include "distributed/commands/multi_copy.h" #include "distributed/commands/utility_hook.h" #include "distributed/connection_management.h" +#include "distributed/coordinator_protocol.h" #include "distributed/cte_inline.h" #include "distributed/distributed_deadlock_detection.h" +#include "distributed/distributed_planner.h" #include "distributed/errormessage.h" -#include "distributed/repartition_executor.h" #include "distributed/intermediate_result_pruning.h" -#include "distributed/local_multi_copy.h" -#include "distributed/local_executor.h" #include "distributed/local_distributed_join_planner.h" +#include "distributed/local_executor.h" +#include "distributed/local_multi_copy.h" #include "distributed/locally_reserved_shared_connections.h" #include "distributed/log_utils.h" #include "distributed/maintenanced.h" -#include "distributed/shard_cleaner.h" -#include "distributed/metadata_utility.h" -#include "distributed/coordinator_protocol.h" #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" -#include "distributed/multi_physical_planner.h" +#include "distributed/metadata_utility.h" #include "distributed/multi_executor.h" #include "distributed/multi_explain.h" #include "distributed/multi_join_order.h" -#include "distributed/multi_logical_replication.h" #include "distributed/multi_logical_optimizer.h" -#include "distributed/distributed_planner.h" -#include "distributed/combine_query_planner.h" +#include "distributed/multi_logical_replication.h" +#include "distributed/multi_physical_planner.h" #include 
"distributed/multi_router_planner.h" #include "distributed/multi_server_executor.h" #include "distributed/pg_dist_partition.h" #include "distributed/placement_connection.h" #include "distributed/priority.h" +#include "distributed/query_pushdown_planning.h" #include "distributed/query_stats.h" #include "distributed/recursive_planning.h" #include "distributed/reference_table_utils.h" #include "distributed/relation_access_tracking.h" +#include "distributed/remote_commands.h" +#include "distributed/remote_transaction.h" +#include "distributed/repartition_executor.h" #include "distributed/replication_origin_session_utils.h" +#include "distributed/resource_lock.h" #include "distributed/run_from_same_connection.h" #include "distributed/shard_cleaner.h" +#include "distributed/shard_rebalancer.h" #include "distributed/shard_transfer.h" -#include "distributed/shared_connection_stats.h" #include "distributed/shardsplit_shared_memory.h" -#include "distributed/query_pushdown_planning.h" -#include "distributed/time_constants.h" -#include "distributed/query_stats.h" -#include "distributed/remote_commands.h" -#include "distributed/shard_rebalancer.h" +#include "distributed/shared_connection_stats.h" #include "distributed/shared_library_init.h" #include "distributed/statistics_collection.h" #include "distributed/subplan_execution.h" -#include "distributed/resource_lock.h" +#include "distributed/time_constants.h" #include "distributed/transaction_management.h" #include "distributed/transaction_recovery.h" #include "distributed/utils/citus_stat_tenants.h" @@ -97,24 +116,6 @@ #include "distributed/worker_manager.h" #include "distributed/worker_protocol.h" #include "distributed/worker_shard_visibility.h" -#include "distributed/adaptive_executor.h" -#include "libpq/auth.h" -#include "port/atomics.h" -#include "postmaster/postmaster.h" -#include "replication/walsender.h" -#include "storage/ipc.h" -#include "optimizer/planner.h" -#include "optimizer/plancat.h" -#include "optimizer/paths.h" -#include "tcop/tcopprot.h" -#include "utils/guc.h" -#include "utils/guc_tables.h" -#include "utils/inval.h" -#include "utils/lsyscache.h" -#include "utils/syscache.h" -#include "utils/varlena.h" - -#include "columnar/columnar.h" /* marks shared object as one loadable by the postgres version compiled against */ PG_MODULE_MAGIC; @@ -481,6 +482,7 @@ _PG_init(void) #endif InitializeMaintenanceDaemon(); + InitializeMaintenanceDaemonForMainDb(); /* initialize coordinated transaction management */ InitializeTransactionManagement(); @@ -543,7 +545,7 @@ _PG_init(void) */ PrevProcessUtility = (ProcessUtility_hook != NULL) ? ProcessUtility_hook : standard_ProcessUtility; - ProcessUtility_hook = multi_ProcessUtility; + ProcessUtility_hook = citus_ProcessUtility; /* * Acquire symbols for columnar functions that citus calls. 
@@ -573,6 +575,16 @@ _PG_init(void) INIT_COLUMNAR_SYMBOL(PGFunction, columnar_storage_info); INIT_COLUMNAR_SYMBOL(PGFunction, columnar_store_memory_stats); INIT_COLUMNAR_SYMBOL(PGFunction, test_columnar_storage_write_new_page); + + /* + * This part is only for SECURITY LABEL tests + * mimicking what an actual security label provider would do + */ + if (RunningUnderCitusTestSuite) + { + register_label_provider("citus '!tests_label_provider", + citus_test_object_relabel); + } } @@ -883,22 +895,13 @@ DecrementExternalClientBackendCounterAtExit(int code, Datum arg) static void CreateRequiredDirectories(void) { - const char *subdirs[] = { - "pg_foreign_file", - "pg_foreign_file/cached", - ("base/" PG_JOB_CACHE_DIR) - }; + const char *subdir = ("base/" PG_JOB_CACHE_DIR); - for (int dirNo = 0; dirNo < lengthof(subdirs); dirNo++) + if (MakePGDirectory(subdir) != 0 && errno != EEXIST) { - int ret = mkdir(subdirs[dirNo], S_IRWXU); - - if (ret != 0 && errno != EEXIST) - { - ereport(ERROR, (errcode_for_file_access(), - errmsg("could not create directory \"%s\": %m", - subdirs[dirNo]))); - } + ereport(ERROR, (errcode_for_file_access(), + errmsg("could not create directory \"%s\": %m", + subdir))); } } @@ -1262,6 +1265,17 @@ RegisterCitusConfigVariables(void) GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE, NULL, NULL, NULL); + DefineCustomBoolVariable( + "citus.enable_create_database_propagation", + gettext_noop("Enables propagating CREATE DATABASE " + "and DROP DATABASE statements to workers."), + NULL, + &EnableCreateDatabasePropagation, + false, + PGC_USERSET, + GUC_STANDARD, + NULL, NULL, NULL); + DefineCustomBoolVariable( "citus.enable_create_role_propagation", gettext_noop("Enables propagating CREATE ROLE " @@ -1820,6 +1834,16 @@ RegisterCitusConfigVariables(void) GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE | GUC_UNIT_MS, NULL, NULL, NULL); + DefineCustomStringVariable( + "citus.main_db", + gettext_noop("Which database is designated as the main_db"), + NULL, + &MainDb, + "", + PGC_POSTMASTER, + GUC_STANDARD, + NULL, NULL, NULL); + DefineCustomIntVariable( "citus.max_adaptive_executor_pool_size", gettext_noop("Sets the maximum number of connections per worker node used by " @@ -2294,13 +2318,14 @@ RegisterCitusConfigVariables(void) WarnIfReplicationModelIsSet, NULL, NULL); DefineCustomBoolVariable( - "citus.running_under_isolation_test", + "citus.running_under_citus_test_suite", gettext_noop( "Only useful for testing purposes, when set to true, Citus does some " - "tricks to implement useful isolation tests with rebalancing. Should " + "tricks to implement useful isolation tests with rebalancing. It also " + "registers a dummy label provider for SECURITY LABEL tests. 
Should " "never be set to true on production systems "), gettext_noop("for details of the tricks implemented, refer to the source code"), - &RunningUnderIsolationTest, + &RunningUnderCitusTestSuite, false, PGC_SUSET, GUC_SUPERUSER_ONLY | GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE, @@ -2537,6 +2562,17 @@ RegisterCitusConfigVariables(void) GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE, NoticeIfSubqueryPushdownEnabled, NULL, NULL); + DefineCustomStringVariable( + "citus.superuser", + gettext_noop("Name of a superuser role to be used in Citus main database " + "connections"), + NULL, + &SuperuserRole, + "", + PGC_SUSET, + GUC_STANDARD, + NULL, NULL, NULL); + DefineCustomEnumVariable( "citus.task_assignment_policy", gettext_noop("Sets the policy to use when assigning tasks to worker nodes."), @@ -2893,6 +2929,7 @@ NodeConninfoGucCheckHook(char **newval, void **extra, GucSource source) #if defined(ENABLE_GSS) && defined(ENABLE_SSPI) "gsslib", #endif + "host", "keepalives", "keepalives_count", "keepalives_idle", @@ -3116,6 +3153,8 @@ CitusAuthHook(Port *port, int status) */ InitializeBackendData(port->application_name); + IsMainDB = (strncmp(MainDb, "", NAMEDATALEN) == 0 || + strncmp(MainDb, port->database_name, NAMEDATALEN) == 0); /* let other authentication hooks to kick in first */ if (original_client_auth_hook) diff --git a/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql b/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql index bb9d2296999..1bec0f42973 100644 --- a/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql +++ b/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql @@ -1,3 +1,57 @@ -- citus--12.1-1--12.2-1 - -- bump version to 12.2-1 + +#include "udfs/citus_internal_database_command/12.2-1.sql" +#include "udfs/citus_add_rebalance_strategy/12.2-1.sql" + +#include "udfs/start_management_transaction/12.2-1.sql" +#include "udfs/execute_command_on_remote_nodes_as_user/12.2-1.sql" +#include "udfs/mark_object_distributed/12.2-1.sql" +DROP FUNCTION pg_catalog.citus_unmark_object_distributed(oid, oid, int); +#include "udfs/citus_unmark_object_distributed/12.2-1.sql" +#include "udfs/commit_management_command_2pc/12.2-1.sql" + +ALTER TABLE pg_catalog.pg_dist_transaction ADD COLUMN outer_xid xid8; + +#include "udfs/citus_internal_acquire_citus_advisory_object_class_lock/12.2-1.sql" + +GRANT USAGE ON SCHEMA citus_internal TO PUBLIC; +REVOKE ALL ON FUNCTION citus_internal.commit_management_command_2pc FROM PUBLIC; +REVOKE ALL ON FUNCTION citus_internal.execute_command_on_remote_nodes_as_user FROM PUBLIC; +REVOKE ALL ON FUNCTION citus_internal.find_groupid_for_node FROM PUBLIC; +REVOKE ALL ON FUNCTION citus_internal.mark_object_distributed FROM PUBLIC; +REVOKE ALL ON FUNCTION citus_internal.pg_dist_node_trigger_func FROM PUBLIC; +REVOKE ALL ON FUNCTION citus_internal.pg_dist_rebalance_strategy_trigger_func FROM PUBLIC; +REVOKE ALL ON FUNCTION citus_internal.pg_dist_shard_placement_trigger_func FROM PUBLIC; +REVOKE ALL ON FUNCTION citus_internal.refresh_isolation_tester_prepared_statement FROM PUBLIC; +REVOKE ALL ON FUNCTION citus_internal.replace_isolation_tester_func FROM PUBLIC; +REVOKE ALL ON FUNCTION citus_internal.restore_isolation_tester_func FROM PUBLIC; +REVOKE ALL ON FUNCTION citus_internal.start_management_transaction FROM PUBLIC; + +#include "udfs/citus_internal_add_colocation_metadata/12.2-1.sql" +#include "udfs/citus_internal_add_object_metadata/12.2-1.sql" +#include "udfs/citus_internal_add_partition_metadata/12.2-1.sql" +#include "udfs/citus_internal_add_placement_metadata/12.2-1.sql" 
+#include "udfs/citus_internal_add_shard_metadata/12.2-1.sql" +#include "udfs/citus_internal_add_tenant_schema/12.2-1.sql" +#include "udfs/citus_internal_adjust_local_clock_to_remote/12.2-1.sql" +#include "udfs/citus_internal_delete_colocation_metadata/12.2-1.sql" +#include "udfs/citus_internal_delete_partition_metadata/12.2-1.sql" +#include "udfs/citus_internal_delete_placement_metadata/12.2-1.sql" +#include "udfs/citus_internal_delete_shard_metadata/12.2-1.sql" +#include "udfs/citus_internal_delete_tenant_schema/12.2-1.sql" +#include "udfs/citus_internal_local_blocked_processes/12.2-1.sql" +#include "udfs/citus_internal_global_blocked_processes/12.2-1.sql" +#include "udfs/citus_blocking_pids/12.2-1.sql" +#include "udfs/citus_isolation_test_session_is_blocked/12.2-1.sql" +DROP VIEW IF EXISTS pg_catalog.citus_lock_waits; +#include "udfs/citus_lock_waits/12.2-1.sql" + +#include "udfs/citus_internal_mark_node_not_synced/12.2-1.sql" +#include "udfs/citus_internal_unregister_tenant_schema_globally/12.2-1.sql" +#include "udfs/citus_drop_trigger/12.2-1.sql" +#include "udfs/citus_internal_update_none_dist_table_metadata/12.2-1.sql" +#include "udfs/citus_internal_update_placement_metadata/12.2-1.sql" +#include "udfs/citus_internal_update_relation_colocation/12.2-1.sql" +#include "udfs/repl_origin_helper/12.2-1.sql" +#include "udfs/citus_finish_pg_upgrade/12.2-1.sql" diff --git a/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql b/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql index b26fc16bc52..099bf8d875a 100644 --- a/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql +++ b/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql @@ -1,2 +1,57 @@ -- citus--12.2-1--12.1-1 --- this is an empty downgrade path since citus--12.2-1--12.1-1.sql is empty for now + +DROP FUNCTION citus_internal.database_command(text); +DROP FUNCTION citus_internal.acquire_citus_advisory_object_class_lock(int, cstring); + +#include "../udfs/citus_add_rebalance_strategy/10.1-1.sql" + +DROP FUNCTION citus_internal.start_management_transaction( + outer_xid xid8 +); + +DROP FUNCTION citus_internal.execute_command_on_remote_nodes_as_user( + query text, + username text +); + +DROP FUNCTION citus_internal.mark_object_distributed( + classId Oid, objectName text, objectId Oid, connectionUser text +); + +DROP FUNCTION pg_catalog.citus_unmark_object_distributed(oid,oid,int,boolean); +#include "../udfs/citus_unmark_object_distributed/10.0-1.sql" + +DROP FUNCTION citus_internal.commit_management_command_2pc(); + +ALTER TABLE pg_catalog.pg_dist_transaction DROP COLUMN outer_xid; +REVOKE USAGE ON SCHEMA citus_internal FROM PUBLIC; + +DROP FUNCTION citus_internal.add_colocation_metadata(int, int, int, regtype, oid); +DROP FUNCTION citus_internal.add_object_metadata(text, text[], text[], integer, integer, boolean); +DROP FUNCTION citus_internal.add_partition_metadata(regclass, "char", text, integer, "char"); +DROP FUNCTION citus_internal.add_placement_metadata(bigint, bigint, integer, bigint); +DROP FUNCTION citus_internal.add_shard_metadata(regclass, bigint, "char", text, text); +DROP FUNCTION citus_internal.add_tenant_schema(oid, integer); +DROP FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock); +DROP FUNCTION citus_internal.delete_colocation_metadata(int); +DROP FUNCTION citus_internal.delete_partition_metadata(regclass); +DROP FUNCTION citus_internal.delete_placement_metadata(bigint); +DROP FUNCTION citus_internal.delete_shard_metadata(bigint); +DROP 
FUNCTION citus_internal.delete_tenant_schema(oid); +DROP FUNCTION citus_internal.local_blocked_processes(); +#include "../udfs/citus_blocking_pids/11.0-1.sql" +#include "../udfs/citus_isolation_test_session_is_blocked/11.1-1.sql" +DROP VIEW IF EXISTS pg_catalog.citus_lock_waits; +#include "../udfs/citus_lock_waits/11.0-1.sql" +DROP FUNCTION citus_internal.global_blocked_processes(); + +DROP FUNCTION citus_internal.mark_node_not_synced(int, int); +DROP FUNCTION citus_internal.unregister_tenant_schema_globally(oid, text); +#include "../udfs/citus_drop_trigger/12.0-1.sql" +DROP FUNCTION citus_internal.update_none_dist_table_metadata(oid, "char", bigint, boolean); +DROP FUNCTION citus_internal.update_placement_metadata(bigint, integer, integer); +DROP FUNCTION citus_internal.update_relation_colocation(oid, int); +DROP FUNCTION citus_internal.start_replication_origin_tracking(); +DROP FUNCTION citus_internal.stop_replication_origin_tracking(); +DROP FUNCTION citus_internal.is_replication_origin_tracking_active(); +#include "../udfs/citus_finish_pg_upgrade/12.1-1.sql" diff --git a/src/backend/distributed/sql/udfs/citus_add_rebalance_strategy/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_add_rebalance_strategy/12.2-1.sql new file mode 100644 index 00000000000..c4f157c2e4f --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_add_rebalance_strategy/12.2-1.sql @@ -0,0 +1,32 @@ +DROP FUNCTION pg_catalog.citus_add_rebalance_strategy; +CREATE OR REPLACE FUNCTION pg_catalog.citus_add_rebalance_strategy( + name name, + shard_cost_function regproc, + node_capacity_function regproc, + shard_allowed_on_node_function regproc, + default_threshold float4, + minimum_threshold float4 DEFAULT 0, + improvement_threshold float4 DEFAULT 0 +) + RETURNS VOID AS $$ + INSERT INTO + pg_catalog.pg_dist_rebalance_strategy( + name, + shard_cost_function, + node_capacity_function, + shard_allowed_on_node_function, + default_threshold, + minimum_threshold, + improvement_threshold + ) VALUES ( + name, + shard_cost_function, + node_capacity_function, + shard_allowed_on_node_function, + default_threshold, + minimum_threshold, + improvement_threshold + ); + $$ LANGUAGE sql; +COMMENT ON FUNCTION pg_catalog.citus_add_rebalance_strategy(name,regproc,regproc,regproc,float4, float4, float4) + IS 'adds a new rebalance strategy which can be used when rebalancing shards or draining nodes'; diff --git a/src/backend/distributed/sql/udfs/citus_add_rebalance_strategy/latest.sql b/src/backend/distributed/sql/udfs/citus_add_rebalance_strategy/latest.sql index 4c5f8ba79c2..c4f157c2e4f 100644 --- a/src/backend/distributed/sql/udfs/citus_add_rebalance_strategy/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_add_rebalance_strategy/latest.sql @@ -16,14 +16,16 @@ CREATE OR REPLACE FUNCTION pg_catalog.citus_add_rebalance_strategy( node_capacity_function, shard_allowed_on_node_function, default_threshold, - minimum_threshold + minimum_threshold, + improvement_threshold ) VALUES ( name, shard_cost_function, node_capacity_function, shard_allowed_on_node_function, default_threshold, - minimum_threshold + minimum_threshold, + improvement_threshold ); $$ LANGUAGE sql; COMMENT ON FUNCTION pg_catalog.citus_add_rebalance_strategy(name,regproc,regproc,regproc,float4, float4, float4) diff --git a/src/backend/distributed/sql/udfs/citus_blocking_pids/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_blocking_pids/12.2-1.sql new file mode 100644 index 00000000000..4e747ff4fc9 --- /dev/null +++ 
b/src/backend/distributed/sql/udfs/citus_blocking_pids/12.2-1.sql @@ -0,0 +1,34 @@ +DROP FUNCTION pg_catalog.citus_blocking_pids; +CREATE FUNCTION pg_catalog.citus_blocking_pids(pBlockedPid integer) +RETURNS int4[] AS $$ + DECLARE + mLocalBlockingPids int4[]; + mRemoteBlockingPids int4[]; + mLocalGlobalPid int8; + BEGIN + SELECT pg_catalog.old_pg_blocking_pids(pBlockedPid) INTO mLocalBlockingPids; + + IF (array_length(mLocalBlockingPids, 1) > 0) THEN + RETURN mLocalBlockingPids; + END IF; + + -- pg says we're not blocked locally; check whether we're blocked globally. + SELECT global_pid INTO mLocalGlobalPid + FROM get_all_active_transactions() WHERE process_id = pBlockedPid; + + SELECT array_agg(global_pid) INTO mRemoteBlockingPids FROM ( + WITH activeTransactions AS ( + SELECT global_pid FROM get_all_active_transactions() + ), blockingTransactions AS ( + SELECT blocking_global_pid FROM citus_internal.global_blocked_processes() + WHERE waiting_global_pid = mLocalGlobalPid + ) + SELECT activeTransactions.global_pid FROM activeTransactions, blockingTransactions + WHERE activeTransactions.global_pid = blockingTransactions.blocking_global_pid + ) AS sub; + + RETURN mRemoteBlockingPids; + END; +$$ LANGUAGE plpgsql; + +REVOKE ALL ON FUNCTION citus_blocking_pids(integer) FROM PUBLIC; diff --git a/src/backend/distributed/sql/udfs/citus_blocking_pids/latest.sql b/src/backend/distributed/sql/udfs/citus_blocking_pids/latest.sql index c7e607c1c34..4e747ff4fc9 100644 --- a/src/backend/distributed/sql/udfs/citus_blocking_pids/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_blocking_pids/latest.sql @@ -20,7 +20,7 @@ RETURNS int4[] AS $$ WITH activeTransactions AS ( SELECT global_pid FROM get_all_active_transactions() ), blockingTransactions AS ( - SELECT blocking_global_pid FROM citus_internal_global_blocked_processes() + SELECT blocking_global_pid FROM citus_internal.global_blocked_processes() WHERE waiting_global_pid = mLocalGlobalPid ) SELECT activeTransactions.global_pid FROM activeTransactions, blockingTransactions diff --git a/src/backend/distributed/sql/udfs/citus_drop_trigger/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_drop_trigger/12.2-1.sql new file mode 100644 index 00000000000..6e4c52209a6 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_drop_trigger/12.2-1.sql @@ -0,0 +1,68 @@ +CREATE OR REPLACE FUNCTION pg_catalog.citus_drop_trigger() + RETURNS event_trigger + LANGUAGE plpgsql + SET search_path = pg_catalog + AS $cdbdt$ +DECLARE + constraint_event_count INTEGER; + v_obj record; + dropped_table_is_a_partition boolean := false; +BEGIN + FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects() + WHERE object_type IN ('table', 'foreign table') + LOOP + -- first drop the table and metadata on the workers + -- then drop all the shards on the workers + -- finally remove the pg_dist_partition entry on the coordinator + PERFORM master_remove_distributed_table_metadata_from_workers(v_obj.objid, v_obj.schema_name, v_obj.object_name); + + -- If both original and normal values are false, the dropped table was a partition + -- that was dropped as a result of its parent being dropped + -- NOTE: the other way around is not true: + -- the table being a partition doesn't imply both original and normal values are false + SELECT (v_obj.original = false AND v_obj.normal = false) INTO dropped_table_is_a_partition; + + -- The partition's shards will be dropped when dropping the parent's shards, so we can skip: + -- i.e. 
we call citus_drop_all_shards with drop_shards_metadata_only parameter set to true + IF dropped_table_is_a_partition + THEN + PERFORM citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := true); + ELSE + PERFORM citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false); + END IF; + + PERFORM master_remove_partition_metadata(v_obj.objid, v_obj.schema_name, v_obj.object_name); + END LOOP; + + FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects() + LOOP + -- Remove entries from pg_catalog.pg_dist_schema for all dropped tenant schemas. + -- Also delete the corresponding colocation group from pg_catalog.pg_dist_colocation. + -- + -- Although normally we automatically delete the colocation groups when they become empty, + -- we don't do so for the colocation groups that are created for tenant schemas. For this + -- reason, here we need to delete the colocation group when the tenant schema is dropped. + IF v_obj.object_type = 'schema' AND EXISTS (SELECT 1 FROM pg_catalog.pg_dist_schema WHERE schemaid = v_obj.objid) + THEN + PERFORM citus_internal.unregister_tenant_schema_globally(v_obj.objid, v_obj.object_name); + END IF; + + -- remove entries from citus.pg_dist_object for all dropped root (objsubid = 0) objects + PERFORM master_unmark_object_distributed(v_obj.classid, v_obj.objid, v_obj.objsubid); + END LOOP; + + SELECT COUNT(*) INTO constraint_event_count + FROM pg_event_trigger_dropped_objects() + WHERE object_type IN ('table constraint'); + + IF constraint_event_count > 0 + THEN + -- Tell utility hook that a table constraint is dropped so we might + -- need to undistribute some of the citus local tables that are not + -- connected to any reference tables. + PERFORM notify_constraint_dropped(); + END IF; +END; +$cdbdt$; +COMMENT ON FUNCTION pg_catalog.citus_drop_trigger() + IS 'perform checks and actions at the end of DROP actions'; diff --git a/src/backend/distributed/sql/udfs/citus_drop_trigger/latest.sql b/src/backend/distributed/sql/udfs/citus_drop_trigger/latest.sql index 312099aeb54..6e4c52209a6 100644 --- a/src/backend/distributed/sql/udfs/citus_drop_trigger/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_drop_trigger/latest.sql @@ -44,7 +44,7 @@ BEGIN -- reason, here we need to delete the colocation group when the tenant schema is dropped. 
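The comment above captures the key subtlety in citus_drop_trigger: colocation groups that were created for tenant schemas are not garbage-collected when they become empty, so the drop trigger has to call citus_internal.unregister_tenant_schema_globally to remove both the pg_dist_schema row and its colocation group. A minimal verification sketch follows; it is illustrative only and not part of this migration, and it simply checks that no pg_dist_schema entry points at a namespace that no longer exists:

-- Illustrative check (not part of the migration): after dropping a tenant
-- schema, no pg_dist_schema row should reference a missing namespace.
SELECT s.schemaid, s.colocationid
FROM pg_catalog.pg_dist_schema s
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = s.schemaid
WHERE n.oid IS NULL;  -- expect zero rows
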
IF v_obj.object_type = 'schema' AND EXISTS (SELECT 1 FROM pg_catalog.pg_dist_schema WHERE schemaid = v_obj.objid) THEN - PERFORM pg_catalog.citus_internal_unregister_tenant_schema_globally(v_obj.objid, v_obj.object_name); + PERFORM citus_internal.unregister_tenant_schema_globally(v_obj.objid, v_obj.object_name); END IF; -- remove entries from citus.pg_dist_object for all dropped root (objsubid = 0) objects diff --git a/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/12.2-1.sql new file mode 100644 index 00000000000..4d3a17bd47a --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/12.2-1.sql @@ -0,0 +1,227 @@ +CREATE OR REPLACE FUNCTION pg_catalog.citus_finish_pg_upgrade() + RETURNS void + LANGUAGE plpgsql + SET search_path = pg_catalog + AS $cppu$ +DECLARE + table_name regclass; + command text; + trigger_name text; +BEGIN + + + IF substring(current_Setting('server_version'), '\d+')::int >= 14 THEN + EXECUTE $cmd$ + -- disable propagation to prevent EnsureCoordinator errors + -- the aggregate created here does not depend on Citus extension (yet) + -- since we add the dependency with the next command + SET citus.enable_ddl_propagation TO OFF; + CREATE AGGREGATE array_cat_agg(anycompatiblearray) (SFUNC = array_cat, STYPE = anycompatiblearray); + COMMENT ON AGGREGATE array_cat_agg(anycompatiblearray) + IS 'concatenate input arrays into a single array'; + RESET citus.enable_ddl_propagation; + $cmd$; + ELSE + EXECUTE $cmd$ + SET citus.enable_ddl_propagation TO OFF; + CREATE AGGREGATE array_cat_agg(anyarray) (SFUNC = array_cat, STYPE = anyarray); + COMMENT ON AGGREGATE array_cat_agg(anyarray) + IS 'concatenate input arrays into a single array'; + RESET citus.enable_ddl_propagation; + $cmd$; + END IF; + + -- + -- Citus creates the array_cat_agg but because of a compatibility + -- issue between pg13-pg14, we drop and create it during upgrade. + -- And as Citus creates it, there needs to be a dependency to the + -- Citus extension, so we create that dependency here. + -- We are not using: + -- ALTER EXENSION citus DROP/CREATE AGGREGATE array_cat_agg + -- because we don't have an easy way to check if the aggregate + -- exists with anyarray type or anycompatiblearray type. + + INSERT INTO pg_depend + SELECT + 'pg_proc'::regclass::oid as classid, + (SELECT oid FROM pg_proc WHERE proname = 'array_cat_agg') as objid, + 0 as objsubid, + 'pg_extension'::regclass::oid as refclassid, + (select oid from pg_extension where extname = 'citus') as refobjid, + 0 as refobjsubid , + 'e' as deptype; + + -- PG16 has its own any_value, so only create it pre PG16. + -- We can remove this part when we drop support for PG16 + IF substring(current_Setting('server_version'), '\d+')::int < 16 THEN + EXECUTE $cmd$ + -- disable propagation to prevent EnsureCoordinator errors + -- the aggregate created here does not depend on Citus extension (yet) + -- since we add the dependency with the next command + SET citus.enable_ddl_propagation TO OFF; + CREATE OR REPLACE FUNCTION pg_catalog.any_value_agg ( anyelement, anyelement ) + RETURNS anyelement AS $$ + SELECT CASE WHEN $1 IS NULL THEN $2 ELSE $1 END; + $$ LANGUAGE SQL STABLE; + + CREATE AGGREGATE pg_catalog.any_value ( + sfunc = pg_catalog.any_value_agg, + combinefunc = pg_catalog.any_value_agg, + basetype = anyelement, + stype = anyelement + ); + COMMENT ON AGGREGATE pg_catalog.any_value(anyelement) IS + 'Returns the value of any row in the group. 
It is mostly useful when you know there will be only 1 element.'; + RESET citus.enable_ddl_propagation; + -- + -- Citus creates the any_value aggregate but because of a compatibility + -- issue between pg15-pg16 -- any_value is created in PG16, we drop + -- and create it during upgrade IF upgraded version is less than 16. + -- And as Citus creates it, there needs to be a dependency to the + -- Citus extension, so we create that dependency here. + + INSERT INTO pg_depend + SELECT + 'pg_proc'::regclass::oid as classid, + (SELECT oid FROM pg_proc WHERE proname = 'any_value_agg') as objid, + 0 as objsubid, + 'pg_extension'::regclass::oid as refclassid, + (select oid from pg_extension where extname = 'citus') as refobjid, + 0 as refobjsubid , + 'e' as deptype; + + INSERT INTO pg_depend + SELECT + 'pg_proc'::regclass::oid as classid, + (SELECT oid FROM pg_proc WHERE proname = 'any_value') as objid, + 0 as objsubid, + 'pg_extension'::regclass::oid as refclassid, + (select oid from pg_extension where extname = 'citus') as refobjid, + 0 as refobjsubid , + 'e' as deptype; + $cmd$; + END IF; + + -- + -- restore citus catalog tables + -- + INSERT INTO pg_catalog.pg_dist_partition SELECT * FROM public.pg_dist_partition; + + -- if we are upgrading from PG14/PG15 to PG16+, + -- we need to regenerate the partkeys because they will include varnullingrels as well. + UPDATE pg_catalog.pg_dist_partition + SET partkey = column_name_to_column(pg_dist_partkeys_pre_16_upgrade.logicalrelid, col_name) + FROM public.pg_dist_partkeys_pre_16_upgrade + WHERE pg_dist_partkeys_pre_16_upgrade.logicalrelid = pg_dist_partition.logicalrelid; + DROP TABLE public.pg_dist_partkeys_pre_16_upgrade; + + INSERT INTO pg_catalog.pg_dist_shard SELECT * FROM public.pg_dist_shard; + INSERT INTO pg_catalog.pg_dist_placement SELECT * FROM public.pg_dist_placement; + INSERT INTO pg_catalog.pg_dist_node_metadata SELECT * FROM public.pg_dist_node_metadata; + INSERT INTO pg_catalog.pg_dist_node SELECT * FROM public.pg_dist_node; + INSERT INTO pg_catalog.pg_dist_local_group SELECT * FROM public.pg_dist_local_group; + INSERT INTO pg_catalog.pg_dist_transaction SELECT * FROM public.pg_dist_transaction; + INSERT INTO pg_catalog.pg_dist_colocation SELECT * FROM public.pg_dist_colocation; + INSERT INTO pg_catalog.pg_dist_cleanup SELECT * FROM public.pg_dist_cleanup; + INSERT INTO pg_catalog.pg_dist_schema SELECT schemaname::regnamespace, colocationid FROM public.pg_dist_schema; + -- enterprise catalog tables + INSERT INTO pg_catalog.pg_dist_authinfo SELECT * FROM public.pg_dist_authinfo; + INSERT INTO pg_catalog.pg_dist_poolinfo SELECT * FROM public.pg_dist_poolinfo; + + -- Temporarily disable trigger to check for validity of functions while + -- inserting. The current contents of the table might be invalid if one of + -- the functions was removed by the user without also removing the + -- rebalance strategy. Obviously that's not great, but it should be no + -- reason to fail the upgrade. 
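The trigger handling described above deserves a note: pg_dist_rebalance_strategy carries a validation trigger that rejects rows whose cost or capacity functions no longer exist, and a user may have dropped such a function without dropping the strategy. Disabling the trigger only for the duration of the restore keeps pg_upgrade from failing on those rows. A minimal sketch of the same disable/insert/enable pattern, with hypothetical table and trigger names, looks like this:

-- Generic sketch of the pattern used immediately below; the table, trigger,
-- and backup-table names here are hypothetical placeholders.
ALTER TABLE my_schema.my_catalog DISABLE TRIGGER my_validation_trigger;
INSERT INTO my_schema.my_catalog SELECT * FROM public.my_catalog_backup;
ALTER TABLE my_schema.my_catalog ENABLE TRIGGER my_validation_trigger;
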
+ ALTER TABLE pg_catalog.pg_dist_rebalance_strategy DISABLE TRIGGER pg_dist_rebalance_strategy_validation_trigger; + INSERT INTO pg_catalog.pg_dist_rebalance_strategy SELECT + name, + default_strategy, + shard_cost_function::regprocedure::regproc, + node_capacity_function::regprocedure::regproc, + shard_allowed_on_node_function::regprocedure::regproc, + default_threshold, + minimum_threshold, + improvement_threshold + FROM public.pg_dist_rebalance_strategy; + ALTER TABLE pg_catalog.pg_dist_rebalance_strategy ENABLE TRIGGER pg_dist_rebalance_strategy_validation_trigger; + + -- + -- drop backup tables + -- + DROP TABLE public.pg_dist_authinfo; + DROP TABLE public.pg_dist_colocation; + DROP TABLE public.pg_dist_local_group; + DROP TABLE public.pg_dist_node; + DROP TABLE public.pg_dist_node_metadata; + DROP TABLE public.pg_dist_partition; + DROP TABLE public.pg_dist_placement; + DROP TABLE public.pg_dist_poolinfo; + DROP TABLE public.pg_dist_shard; + DROP TABLE public.pg_dist_transaction; + DROP TABLE public.pg_dist_rebalance_strategy; + DROP TABLE public.pg_dist_cleanup; + DROP TABLE public.pg_dist_schema; + -- + -- reset sequences + -- + PERFORM setval('pg_catalog.pg_dist_shardid_seq', (SELECT MAX(shardid)+1 AS max_shard_id FROM pg_dist_shard), false); + PERFORM setval('pg_catalog.pg_dist_placement_placementid_seq', (SELECT MAX(placementid)+1 AS max_placement_id FROM pg_dist_placement), false); + PERFORM setval('pg_catalog.pg_dist_groupid_seq', (SELECT MAX(groupid)+1 AS max_group_id FROM pg_dist_node), false); + PERFORM setval('pg_catalog.pg_dist_node_nodeid_seq', (SELECT MAX(nodeid)+1 AS max_node_id FROM pg_dist_node), false); + PERFORM setval('pg_catalog.pg_dist_colocationid_seq', (SELECT MAX(colocationid)+1 AS max_colocation_id FROM pg_dist_colocation), false); + PERFORM setval('pg_catalog.pg_dist_operationid_seq', (SELECT MAX(operation_id)+1 AS max_operation_id FROM pg_dist_cleanup), false); + PERFORM setval('pg_catalog.pg_dist_cleanup_recordid_seq', (SELECT MAX(record_id)+1 AS max_record_id FROM pg_dist_cleanup), false); + PERFORM setval('pg_catalog.pg_dist_clock_logical_seq', (SELECT last_value FROM public.pg_dist_clock_logical_seq), false); + DROP TABLE public.pg_dist_clock_logical_seq; + + + + -- + -- register triggers + -- + FOR table_name IN SELECT logicalrelid FROM pg_catalog.pg_dist_partition JOIN pg_class ON (logicalrelid = oid) WHERE relkind <> 'f' + LOOP + trigger_name := 'truncate_trigger_' || table_name::oid; + command := 'create trigger ' || trigger_name || ' after truncate on ' || table_name || ' execute procedure pg_catalog.citus_truncate_trigger()'; + EXECUTE command; + command := 'update pg_trigger set tgisinternal = true where tgname = ' || quote_literal(trigger_name); + EXECUTE command; + END LOOP; + + -- + -- set dependencies + -- + INSERT INTO pg_depend + SELECT + 'pg_class'::regclass::oid as classid, + p.logicalrelid::regclass::oid as objid, + 0 as objsubid, + 'pg_extension'::regclass::oid as refclassid, + (select oid from pg_extension where extname = 'citus') as refobjid, + 0 as refobjsubid , + 'n' as deptype + FROM pg_catalog.pg_dist_partition p; + + -- set dependencies for columnar table access method + PERFORM columnar_internal.columnar_ensure_am_depends_catalog(); + + -- restore pg_dist_object from the stable identifiers + TRUNCATE pg_catalog.pg_dist_object; + INSERT INTO pg_catalog.pg_dist_object (classid, objid, objsubid, distribution_argument_index, colocationid) + SELECT + address.classid, + address.objid, + address.objsubid, + 
naming.distribution_argument_index, + naming.colocationid + FROM + public.pg_dist_object naming, + pg_catalog.pg_get_object_address(naming.type, naming.object_names, naming.object_args) address; + + DROP TABLE public.pg_dist_object; +END; +$cppu$; + +COMMENT ON FUNCTION pg_catalog.citus_finish_pg_upgrade() + IS 'perform tasks to restore citus settings from a location that has been prepared before pg_upgrade'; diff --git a/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/latest.sql b/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/latest.sql index 766e86a2e29..4d3a17bd47a 100644 --- a/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_finish_pg_upgrade/latest.sql @@ -128,6 +128,12 @@ BEGIN INSERT INTO pg_catalog.pg_dist_authinfo SELECT * FROM public.pg_dist_authinfo; INSERT INTO pg_catalog.pg_dist_poolinfo SELECT * FROM public.pg_dist_poolinfo; + -- Temporarily disable trigger to check for validity of functions while + -- inserting. The current contents of the table might be invalid if one of + -- the functions was removed by the user without also removing the + -- rebalance strategy. Obviously that's not great, but it should be no + -- reason to fail the upgrade. + ALTER TABLE pg_catalog.pg_dist_rebalance_strategy DISABLE TRIGGER pg_dist_rebalance_strategy_validation_trigger; INSERT INTO pg_catalog.pg_dist_rebalance_strategy SELECT name, default_strategy, @@ -138,6 +144,7 @@ BEGIN minimum_threshold, improvement_threshold FROM public.pg_dist_rebalance_strategy; + ALTER TABLE pg_catalog.pg_dist_rebalance_strategy ENABLE TRIGGER pg_dist_rebalance_strategy_validation_trigger; -- -- drop backup tables diff --git a/src/backend/distributed/sql/udfs/citus_internal_acquire_citus_advisory_object_class_lock/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_acquire_citus_advisory_object_class_lock/12.2-1.sql new file mode 100644 index 00000000000..9f32b67d41f --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_acquire_citus_advisory_object_class_lock/12.2-1.sql @@ -0,0 +1,5 @@ +CREATE OR REPLACE FUNCTION citus_internal.acquire_citus_advisory_object_class_lock(objectClass int, qualifiedObjectName cstring) + RETURNS void + LANGUAGE C + VOLATILE +AS 'MODULE_PATHNAME', $$citus_internal_acquire_citus_advisory_object_class_lock$$; diff --git a/src/backend/distributed/sql/udfs/citus_internal_acquire_citus_advisory_object_class_lock/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_acquire_citus_advisory_object_class_lock/latest.sql new file mode 100644 index 00000000000..9f32b67d41f --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_acquire_citus_advisory_object_class_lock/latest.sql @@ -0,0 +1,5 @@ +CREATE OR REPLACE FUNCTION citus_internal.acquire_citus_advisory_object_class_lock(objectClass int, qualifiedObjectName cstring) + RETURNS void + LANGUAGE C + VOLATILE +AS 'MODULE_PATHNAME', $$citus_internal_acquire_citus_advisory_object_class_lock$$; diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_colocation_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_add_colocation_metadata/12.2-1.sql new file mode 100644 index 00000000000..e054448011c --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_add_colocation_metadata/12.2-1.sql @@ -0,0 +1,27 @@ +CREATE OR REPLACE FUNCTION citus_internal.add_colocation_metadata( + colocation_id int, + shard_count int, + replication_factor int, + distribution_column_type regtype, + 
distribution_column_collation oid) + RETURNS void + LANGUAGE C + STRICT + AS 'MODULE_PATHNAME', $$citus_internal_add_colocation_metadata$$; + +COMMENT ON FUNCTION citus_internal.add_colocation_metadata(int,int,int,regtype,oid) IS + 'Inserts a co-location group into pg_dist_colocation'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_colocation_metadata( + colocation_id int, + shard_count int, + replication_factor int, + distribution_column_type regtype, + distribution_column_collation oid) + RETURNS void + LANGUAGE C + STRICT + AS 'MODULE_PATHNAME'; + +COMMENT ON FUNCTION pg_catalog.citus_internal_add_colocation_metadata(int,int,int,regtype,oid) IS + 'Inserts a co-location group into pg_dist_colocation'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_colocation_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_add_colocation_metadata/latest.sql index 823f455699d..e054448011c 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_add_colocation_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_add_colocation_metadata/latest.sql @@ -1,3 +1,17 @@ +CREATE OR REPLACE FUNCTION citus_internal.add_colocation_metadata( + colocation_id int, + shard_count int, + replication_factor int, + distribution_column_type regtype, + distribution_column_collation oid) + RETURNS void + LANGUAGE C + STRICT + AS 'MODULE_PATHNAME', $$citus_internal_add_colocation_metadata$$; + +COMMENT ON FUNCTION citus_internal.add_colocation_metadata(int,int,int,regtype,oid) IS + 'Inserts a co-location group into pg_dist_colocation'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_colocation_metadata( colocation_id int, shard_count int, diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_object_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_add_object_metadata/12.2-1.sql new file mode 100644 index 00000000000..560bce69072 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_add_object_metadata/12.2-1.sql @@ -0,0 +1,29 @@ +CREATE OR REPLACE FUNCTION citus_internal.add_object_metadata( + typeText text, + objNames text[], + objArgs text[], + distribution_argument_index int, + colocationid int, + force_delegation bool) + RETURNS void + LANGUAGE C + STRICT + AS 'MODULE_PATHNAME', $$citus_internal_add_object_metadata$$; + +COMMENT ON FUNCTION citus_internal.add_object_metadata(text,text[],text[],int,int,bool) IS + 'Inserts distributed object into pg_dist_object'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_object_metadata( + typeText text, + objNames text[], + objArgs text[], + distribution_argument_index int, + colocationid int, + force_delegation bool) + RETURNS void + LANGUAGE C + STRICT + AS 'MODULE_PATHNAME'; + +COMMENT ON FUNCTION pg_catalog.citus_internal_add_object_metadata(text,text[],text[],int,int,bool) IS + 'Inserts distributed object into pg_dist_object'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_object_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_add_object_metadata/latest.sql index d35198f9005..560bce69072 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_add_object_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_add_object_metadata/latest.sql @@ -1,3 +1,18 @@ +CREATE OR REPLACE FUNCTION citus_internal.add_object_metadata( + typeText text, + objNames text[], + objArgs text[], + distribution_argument_index int, + colocationid int, + force_delegation bool) + RETURNS void + LANGUAGE C 
+ STRICT + AS 'MODULE_PATHNAME', $$citus_internal_add_object_metadata$$; + +COMMENT ON FUNCTION citus_internal.add_object_metadata(text,text[],text[],int,int,bool) IS + 'Inserts distributed object into pg_dist_object'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_object_metadata( typeText text, objNames text[], diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_partition_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_add_partition_metadata/12.2-1.sql new file mode 100644 index 00000000000..511a5b1c171 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_add_partition_metadata/12.2-1.sql @@ -0,0 +1,22 @@ +CREATE OR REPLACE FUNCTION citus_internal.add_partition_metadata( + relation_id regclass, distribution_method "char", + distribution_column text, colocation_id integer, + replication_model "char") + RETURNS void + LANGUAGE C + AS 'MODULE_PATHNAME', $$citus_internal_add_partition_metadata$$; + +COMMENT ON FUNCTION citus_internal.add_partition_metadata(regclass, "char", text, integer, "char") IS + 'Inserts into pg_dist_partition with user checks'; + + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_partition_metadata( + relation_id regclass, distribution_method "char", + distribution_column text, colocation_id integer, + replication_model "char") + RETURNS void + LANGUAGE C + AS 'MODULE_PATHNAME'; + +COMMENT ON FUNCTION pg_catalog.citus_internal_add_partition_metadata(regclass, "char", text, integer, "char") IS + 'Inserts into pg_dist_partition with user checks'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_partition_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_add_partition_metadata/latest.sql index ed4f853a68c..511a5b1c171 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_add_partition_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_add_partition_metadata/latest.sql @@ -1,3 +1,15 @@ +CREATE OR REPLACE FUNCTION citus_internal.add_partition_metadata( + relation_id regclass, distribution_method "char", + distribution_column text, colocation_id integer, + replication_model "char") + RETURNS void + LANGUAGE C + AS 'MODULE_PATHNAME', $$citus_internal_add_partition_metadata$$; + +COMMENT ON FUNCTION citus_internal.add_partition_metadata(regclass, "char", text, integer, "char") IS + 'Inserts into pg_dist_partition with user checks'; + + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_partition_metadata( relation_id regclass, distribution_method "char", distribution_column text, colocation_id integer, diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/12.2-1.sql new file mode 100644 index 00000000000..339fc2948c5 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/12.2-1.sql @@ -0,0 +1,36 @@ +-- create a new function, without shardstate +CREATE OR REPLACE FUNCTION citus_internal.add_placement_metadata( + shard_id bigint, + shard_length bigint, group_id integer, + placement_id bigint) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_add_placement_metadata$$; + +COMMENT ON FUNCTION citus_internal.add_placement_metadata(bigint, bigint, integer, bigint) IS + 'Inserts into pg_dist_shard_placement with user checks'; + +-- create a new function, without shardstate +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_placement_metadata( + shard_id bigint, + 
shard_length bigint, group_id integer, + placement_id bigint) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_add_placement_metadata$$; + +COMMENT ON FUNCTION pg_catalog.citus_internal_add_placement_metadata(bigint, bigint, integer, bigint) IS + 'Inserts into pg_dist_shard_placement with user checks'; + +-- replace the old one so it would call the old C function with shard_state +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_placement_metadata( + shard_id bigint, shard_state integer, + shard_length bigint, group_id integer, + placement_id bigint) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_add_placement_metadata_legacy$$; + +COMMENT ON FUNCTION pg_catalog.citus_internal_add_placement_metadata(bigint, integer, bigint, integer, bigint) IS + 'Inserts into pg_dist_shard_placement with user checks'; + diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/latest.sql index 9d1dd4ffa36..339fc2948c5 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/latest.sql @@ -1,3 +1,15 @@ +-- create a new function, without shardstate +CREATE OR REPLACE FUNCTION citus_internal.add_placement_metadata( + shard_id bigint, + shard_length bigint, group_id integer, + placement_id bigint) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_add_placement_metadata$$; + +COMMENT ON FUNCTION citus_internal.add_placement_metadata(bigint, bigint, integer, bigint) IS + 'Inserts into pg_dist_shard_placement with user checks'; + -- create a new function, without shardstate CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_placement_metadata( shard_id bigint, diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/12.2-1.sql new file mode 100644 index 00000000000..82c29f054dc --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/12.2-1.sql @@ -0,0 +1,21 @@ +CREATE OR REPLACE FUNCTION citus_internal.add_shard_metadata( + relation_id regclass, shard_id bigint, + storage_type "char", shard_min_value text, + shard_max_value text + ) + RETURNS void + LANGUAGE C + AS 'MODULE_PATHNAME', $$citus_internal_add_shard_metadata$$; +COMMENT ON FUNCTION citus_internal.add_shard_metadata(regclass, bigint, "char", text, text) IS + 'Inserts into pg_dist_shard with user checks'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_shard_metadata( + relation_id regclass, shard_id bigint, + storage_type "char", shard_min_value text, + shard_max_value text + ) + RETURNS void + LANGUAGE C + AS 'MODULE_PATHNAME'; +COMMENT ON FUNCTION pg_catalog.citus_internal_add_shard_metadata(regclass, bigint, "char", text, text) IS + 'Inserts into pg_dist_shard with user checks'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/latest.sql index 7411d917981..82c29f054dc 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/latest.sql @@ -1,3 +1,14 @@ +CREATE OR REPLACE FUNCTION citus_internal.add_shard_metadata( + relation_id regclass, shard_id bigint, + storage_type "char", 
shard_min_value text, + shard_max_value text + ) + RETURNS void + LANGUAGE C + AS 'MODULE_PATHNAME', $$citus_internal_add_shard_metadata$$; +COMMENT ON FUNCTION citus_internal.add_shard_metadata(regclass, bigint, "char", text, text) IS + 'Inserts into pg_dist_shard with user checks'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_shard_metadata( relation_id regclass, shard_id bigint, storage_type "char", shard_min_value text, diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_tenant_schema/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_add_tenant_schema/12.2-1.sql new file mode 100644 index 00000000000..028848f90c3 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_add_tenant_schema/12.2-1.sql @@ -0,0 +1,17 @@ +CREATE OR REPLACE FUNCTION citus_internal.add_tenant_schema(schema_id Oid, colocation_id int) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME', $$citus_internal_add_tenant_schema$$; + +COMMENT ON FUNCTION citus_internal.add_tenant_schema(Oid, int) IS + 'insert given tenant schema into pg_dist_schema with given colocation id'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_tenant_schema(schema_id Oid, colocation_id int) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME'; + +COMMENT ON FUNCTION pg_catalog.citus_internal_add_tenant_schema(Oid, int) IS + 'insert given tenant schema into pg_dist_schema with given colocation id'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_tenant_schema/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_add_tenant_schema/latest.sql index 56b3cae8449..028848f90c3 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_add_tenant_schema/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_add_tenant_schema/latest.sql @@ -1,3 +1,12 @@ +CREATE OR REPLACE FUNCTION citus_internal.add_tenant_schema(schema_id Oid, colocation_id int) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME', $$citus_internal_add_tenant_schema$$; + +COMMENT ON FUNCTION citus_internal.add_tenant_schema(Oid, int) IS + 'insert given tenant schema into pg_dist_schema with given colocation id'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_tenant_schema(schema_id Oid, colocation_id int) RETURNS void LANGUAGE C diff --git a/src/backend/distributed/sql/udfs/citus_internal_adjust_local_clock_to_remote/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_adjust_local_clock_to_remote/12.2-1.sql new file mode 100644 index 00000000000..36d37a9e66d --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_adjust_local_clock_to_remote/12.2-1.sql @@ -0,0 +1,17 @@ +CREATE OR REPLACE FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock) + RETURNS void + LANGUAGE C STABLE PARALLEL SAFE STRICT + AS 'MODULE_PATHNAME', $$citus_internal_adjust_local_clock_to_remote$$; +COMMENT ON FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock) + IS 'Internal UDF used to adjust the local clock to the maximum of nodes in the cluster'; + +REVOKE ALL ON FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock) FROM PUBLIC; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_adjust_local_clock_to_remote(pg_catalog.cluster_clock) + RETURNS void + LANGUAGE C STABLE PARALLEL SAFE STRICT + AS 'MODULE_PATHNAME', $$citus_internal_adjust_local_clock_to_remote$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_adjust_local_clock_to_remote(pg_catalog.cluster_clock) + IS 'Internal 
UDF used to adjust the local clock to the maximum of nodes in the cluster'; + +REVOKE ALL ON FUNCTION pg_catalog.citus_internal_adjust_local_clock_to_remote(pg_catalog.cluster_clock) FROM PUBLIC; diff --git a/src/backend/distributed/sql/udfs/citus_internal_adjust_local_clock_to_remote/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_adjust_local_clock_to_remote/latest.sql index 240f7a9b74b..36d37a9e66d 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_adjust_local_clock_to_remote/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_adjust_local_clock_to_remote/latest.sql @@ -1,3 +1,12 @@ +CREATE OR REPLACE FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock) + RETURNS void + LANGUAGE C STABLE PARALLEL SAFE STRICT + AS 'MODULE_PATHNAME', $$citus_internal_adjust_local_clock_to_remote$$; +COMMENT ON FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock) + IS 'Internal UDF used to adjust the local clock to the maximum of nodes in the cluster'; + +REVOKE ALL ON FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock) FROM PUBLIC; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_adjust_local_clock_to_remote(pg_catalog.cluster_clock) RETURNS void LANGUAGE C STABLE PARALLEL SAFE STRICT diff --git a/src/backend/distributed/sql/udfs/citus_internal_database_command/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_database_command/12.2-1.sql new file mode 100644 index 00000000000..2c6e916c016 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_database_command/12.2-1.sql @@ -0,0 +1,10 @@ +-- +-- citus_internal.database_command run given database command without transaction block restriction. + +CREATE OR REPLACE FUNCTION citus_internal.database_command(command text) + RETURNS void + LANGUAGE C + VOLATILE +AS 'MODULE_PATHNAME', $$citus_internal_database_command$$; +COMMENT ON FUNCTION citus_internal.database_command(text) IS + 'run a database command without transaction block restrictions'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_database_command/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_database_command/latest.sql new file mode 100644 index 00000000000..2c6e916c016 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_database_command/latest.sql @@ -0,0 +1,10 @@ +-- +-- citus_internal.database_command run given database command without transaction block restriction. 
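citus_internal.database_command exists because statements such as CREATE DATABASE and DROP DATABASE refuse to run inside a transaction block, which is exactly the context in which Citus propagates DDL to other nodes. It is an internal function that Citus normally invokes itself rather than something users call directly; a hedged illustration of the call shape (the database name is made up) is:

-- Illustration only: runs the given command without the usual
-- transaction-block restriction. The database name is a placeholder.
SELECT citus_internal.database_command('CREATE DATABASE analytics_db');
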
+ +CREATE OR REPLACE FUNCTION citus_internal.database_command(command text) + RETURNS void + LANGUAGE C + VOLATILE +AS 'MODULE_PATHNAME', $$citus_internal_database_command$$; +COMMENT ON FUNCTION citus_internal.database_command(text) IS + 'run a database command without transaction block restrictions'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_colocation_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_colocation_metadata/12.2-1.sql new file mode 100644 index 00000000000..cb56a25cd3e --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_colocation_metadata/12.2-1.sql @@ -0,0 +1,19 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_colocation_metadata( + colocation_id int) + RETURNS void + LANGUAGE C + STRICT + AS 'MODULE_PATHNAME', $$citus_internal_delete_colocation_metadata$$; + +COMMENT ON FUNCTION citus_internal.delete_colocation_metadata(int) IS + 'deletes a co-location group from pg_dist_colocation'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_colocation_metadata( + colocation_id int) + RETURNS void + LANGUAGE C + STRICT + AS 'MODULE_PATHNAME'; + +COMMENT ON FUNCTION pg_catalog.citus_internal_delete_colocation_metadata(int) IS + 'deletes a co-location group from pg_dist_colocation'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_colocation_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_colocation_metadata/latest.sql index d4c3f1be957..cb56a25cd3e 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_delete_colocation_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_colocation_metadata/latest.sql @@ -1,3 +1,13 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_colocation_metadata( + colocation_id int) + RETURNS void + LANGUAGE C + STRICT + AS 'MODULE_PATHNAME', $$citus_internal_delete_colocation_metadata$$; + +COMMENT ON FUNCTION citus_internal.delete_colocation_metadata(int) IS + 'deletes a co-location group from pg_dist_colocation'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_colocation_metadata( colocation_id int) RETURNS void diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_partition_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_partition_metadata/12.2-1.sql new file mode 100644 index 00000000000..693815abf3e --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_partition_metadata/12.2-1.sql @@ -0,0 +1,14 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_partition_metadata(table_name regclass) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_delete_partition_metadata$$; +COMMENT ON FUNCTION citus_internal.delete_partition_metadata(regclass) IS + 'Deletes a row from pg_dist_partition with table ownership checks'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_partition_metadata(table_name regclass) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME'; +COMMENT ON FUNCTION pg_catalog.citus_internal_delete_partition_metadata(regclass) IS + 'Deletes a row from pg_dist_partition with table ownership checks'; + diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_partition_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_partition_metadata/latest.sql index c7cb5455d6b..693815abf3e 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_delete_partition_metadata/latest.sql +++ 
b/src/backend/distributed/sql/udfs/citus_internal_delete_partition_metadata/latest.sql @@ -1,3 +1,10 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_partition_metadata(table_name regclass) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_delete_partition_metadata$$; +COMMENT ON FUNCTION citus_internal.delete_partition_metadata(regclass) IS + 'Deletes a row from pg_dist_partition with table ownership checks'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_partition_metadata(table_name regclass) RETURNS void LANGUAGE C STRICT diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/12.2-1.sql new file mode 100644 index 00000000000..f78c9a08e98 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/12.2-1.sql @@ -0,0 +1,19 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_placement_metadata( + placement_id bigint) +RETURNS void +LANGUAGE C +VOLATILE +AS 'MODULE_PATHNAME', +$$citus_internal_delete_placement_metadata$$; +COMMENT ON FUNCTION citus_internal.delete_placement_metadata(bigint) + IS 'Delete placement with given id from pg_dist_placement metadata table.'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_placement_metadata( + placement_id bigint) +RETURNS void +LANGUAGE C +VOLATILE +AS 'MODULE_PATHNAME', +$$citus_internal_delete_placement_metadata$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_delete_placement_metadata(bigint) + IS 'Delete placement with given id from pg_dist_placement metadata table.'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/latest.sql index 5af65f0bebb..f78c9a08e98 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/latest.sql @@ -1,3 +1,13 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_placement_metadata( + placement_id bigint) +RETURNS void +LANGUAGE C +VOLATILE +AS 'MODULE_PATHNAME', +$$citus_internal_delete_placement_metadata$$; +COMMENT ON FUNCTION citus_internal.delete_placement_metadata(bigint) + IS 'Delete placement with given id from pg_dist_placement metadata table.'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_placement_metadata( placement_id bigint) RETURNS void diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_shard_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_shard_metadata/12.2-1.sql new file mode 100644 index 00000000000..bcd121b0de6 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_shard_metadata/12.2-1.sql @@ -0,0 +1,14 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_shard_metadata(shard_id bigint) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_delete_shard_metadata$$; +COMMENT ON FUNCTION citus_internal.delete_shard_metadata(bigint) IS + 'Deletes rows from pg_dist_shard and pg_dist_shard_placement with user checks'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_shard_metadata(shard_id bigint) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME'; +COMMENT ON FUNCTION pg_catalog.citus_internal_delete_shard_metadata(bigint) IS + 'Deletes rows from pg_dist_shard and pg_dist_shard_placement with user checks'; + diff --git 
a/src/backend/distributed/sql/udfs/citus_internal_delete_shard_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_shard_metadata/latest.sql index 7bfd86bdd9a..bcd121b0de6 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_delete_shard_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_shard_metadata/latest.sql @@ -1,3 +1,10 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_shard_metadata(shard_id bigint) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_delete_shard_metadata$$; +COMMENT ON FUNCTION citus_internal.delete_shard_metadata(bigint) IS + 'Deletes rows from pg_dist_shard and pg_dist_shard_placement with user checks'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_shard_metadata(shard_id bigint) RETURNS void LANGUAGE C STRICT diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_tenant_schema/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_tenant_schema/12.2-1.sql new file mode 100644 index 00000000000..2c36108b4c6 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_tenant_schema/12.2-1.sql @@ -0,0 +1,17 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_tenant_schema(schema_id Oid) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME', $$citus_internal_delete_tenant_schema$$; + +COMMENT ON FUNCTION citus_internal.delete_tenant_schema(Oid) IS + 'delete given tenant schema from pg_dist_schema'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_tenant_schema(schema_id Oid) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME'; + +COMMENT ON FUNCTION pg_catalog.citus_internal_delete_tenant_schema(Oid) IS + 'delete given tenant schema from pg_dist_schema'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_tenant_schema/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_tenant_schema/latest.sql index 4a2bf00675d..2c36108b4c6 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_delete_tenant_schema/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_tenant_schema/latest.sql @@ -1,3 +1,12 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_tenant_schema(schema_id Oid) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME', $$citus_internal_delete_tenant_schema$$; + +COMMENT ON FUNCTION citus_internal.delete_tenant_schema(Oid) IS + 'delete given tenant schema from pg_dist_schema'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_tenant_schema(schema_id Oid) RETURNS void LANGUAGE C diff --git a/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/12.2-1.sql new file mode 100644 index 00000000000..da8e98c20cf --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/12.2-1.sql @@ -0,0 +1,35 @@ +CREATE OR REPLACE FUNCTION citus_internal.global_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_global_blocked_processes$$; +COMMENT ON FUNCTION 
citus_internal.global_blocked_processes() +IS 'returns a global list of blocked backends originating from this node'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_global_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_global_blocked_processes$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_global_blocked_processes() +IS 'returns a global list of blocked backends originating from this node'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/latest.sql index 510cdf93d4d..da8e98c20cf 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/latest.sql @@ -1,3 +1,21 @@ +CREATE OR REPLACE FUNCTION citus_internal.global_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_global_blocked_processes$$; +COMMENT ON FUNCTION citus_internal.global_blocked_processes() +IS 'returns a global list of blocked backends originating from this node'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_global_blocked_processes( OUT waiting_global_pid int8, OUT waiting_pid int4, diff --git a/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/12.2-1.sql new file mode 100644 index 00000000000..b27f16d5316 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/12.2-1.sql @@ -0,0 +1,35 @@ +CREATE OR REPLACE FUNCTION citus_internal.local_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_local_blocked_processes$$; +COMMENT ON FUNCTION citus_internal.local_blocked_processes() +IS 'returns all local lock wait chains, that start from any citus backend'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_local_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT 
blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_local_blocked_processes$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_local_blocked_processes() +IS 'returns all local lock wait chains, that start from any citus backend'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/latest.sql index 3157a9aad83..b27f16d5316 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/latest.sql @@ -1,3 +1,21 @@ +CREATE OR REPLACE FUNCTION citus_internal.local_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_local_blocked_processes$$; +COMMENT ON FUNCTION citus_internal.local_blocked_processes() +IS 'returns all local lock wait chains, that start from any citus backend'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_local_blocked_processes( OUT waiting_global_pid int8, OUT waiting_pid int4, diff --git a/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/12.2-1.sql new file mode 100644 index 00000000000..8635b969946 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/12.2-1.sql @@ -0,0 +1,13 @@ +CREATE OR REPLACE FUNCTION citus_internal.mark_node_not_synced(parent_pid int, nodeid int) + RETURNS VOID + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_mark_node_not_synced$$; +COMMENT ON FUNCTION citus_internal.mark_node_not_synced(int, int) + IS 'marks given node not synced by unsetting metadatasynced column at the start of the nontransactional sync.'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_mark_node_not_synced(parent_pid int, nodeid int) + RETURNS VOID + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_mark_node_not_synced$$; +COMMENT ON FUNCTION citus_internal_mark_node_not_synced(int, int) + IS 'marks given node not synced by unsetting metadatasynced column at the start of the nontransactional sync.'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/latest.sql index 0d90c8f1afe..8635b969946 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/latest.sql @@ -1,3 +1,10 @@ +CREATE OR REPLACE FUNCTION citus_internal.mark_node_not_synced(parent_pid int, nodeid int) + RETURNS VOID + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_mark_node_not_synced$$; +COMMENT ON FUNCTION citus_internal.mark_node_not_synced(int, int) + IS 'marks given node not synced by unsetting metadatasynced column at the start of the nontransactional sync.'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_mark_node_not_synced(parent_pid int, nodeid int) RETURNS VOID LANGUAGE C STRICT diff --git 
a/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/12.2-1.sql new file mode 100644 index 00000000000..b07eb425d9e --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/12.2-1.sql @@ -0,0 +1,15 @@ +CREATE OR REPLACE FUNCTION citus_internal.unregister_tenant_schema_globally(schema_id Oid, schema_name text) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME', $$citus_internal_unregister_tenant_schema_globally$$; +COMMENT ON FUNCTION citus_internal.unregister_tenant_schema_globally(schema_id Oid, schema_name text) IS + 'Delete a tenant schema and the corresponding colocation group from metadata tables.'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_unregister_tenant_schema_globally(schema_id Oid, schema_name text) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME'; +COMMENT ON FUNCTION pg_catalog.citus_internal_unregister_tenant_schema_globally(schema_id Oid, schema_name text) IS + 'Delete a tenant schema and the corresponding colocation group from metadata tables.'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/latest.sql index 1863f1ddf6f..b07eb425d9e 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/latest.sql @@ -1,3 +1,11 @@ +CREATE OR REPLACE FUNCTION citus_internal.unregister_tenant_schema_globally(schema_id Oid, schema_name text) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME', $$citus_internal_unregister_tenant_schema_globally$$; +COMMENT ON FUNCTION citus_internal.unregister_tenant_schema_globally(schema_id Oid, schema_name text) IS + 'Delete a tenant schema and the corresponding colocation group from metadata tables.'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_unregister_tenant_schema_globally(schema_id Oid, schema_name text) RETURNS void LANGUAGE C diff --git a/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/12.2-1.sql new file mode 100644 index 00000000000..cab96054412 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/12.2-1.sql @@ -0,0 +1,23 @@ +CREATE OR REPLACE FUNCTION citus_internal.update_none_dist_table_metadata( + relation_id oid, + replication_model "char", + colocation_id bigint, + auto_converted boolean) +RETURNS void +LANGUAGE C +VOLATILE +AS 'MODULE_PATHNAME', $$citus_internal_update_none_dist_table_metadata$$; +COMMENT ON FUNCTION citus_internal.update_none_dist_table_metadata(oid, "char", bigint, boolean) + IS 'Update pg_dist_partition metadata table for given none-distributed table, to convert it to another type of none-distributed table.'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_none_dist_table_metadata( + relation_id oid, + replication_model "char", + colocation_id bigint, + auto_converted boolean) +RETURNS void +LANGUAGE C +VOLATILE +AS 'MODULE_PATHNAME'; +COMMENT ON FUNCTION pg_catalog.citus_internal_update_none_dist_table_metadata(oid, "char", bigint, boolean) + IS 'Update pg_dist_partition metadata table for given none-distributed table, to convert it 
to another type of none-distributed table.'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/latest.sql index bcd05d8d014..cab96054412 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/latest.sql @@ -1,3 +1,15 @@ +CREATE OR REPLACE FUNCTION citus_internal.update_none_dist_table_metadata( + relation_id oid, + replication_model "char", + colocation_id bigint, + auto_converted boolean) +RETURNS void +LANGUAGE C +VOLATILE +AS 'MODULE_PATHNAME', $$citus_internal_update_none_dist_table_metadata$$; +COMMENT ON FUNCTION citus_internal.update_none_dist_table_metadata(oid, "char", bigint, boolean) + IS 'Update pg_dist_partition metadata table for given none-distributed table, to convert it to another type of none-distributed table.'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_none_dist_table_metadata( relation_id oid, replication_model "char", diff --git a/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/12.2-1.sql new file mode 100644 index 00000000000..b7c47364744 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/12.2-1.sql @@ -0,0 +1,19 @@ +CREATE OR REPLACE FUNCTION citus_internal.update_placement_metadata( + shard_id bigint, source_group_id integer, + target_group_id integer) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_update_placement_metadata$$; + +COMMENT ON FUNCTION citus_internal.update_placement_metadata(bigint, integer, integer) IS + 'Updates into pg_dist_placement with user checks'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_placement_metadata( + shard_id bigint, source_group_id integer, + target_group_id integer) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME'; + +COMMENT ON FUNCTION pg_catalog.citus_internal_update_placement_metadata(bigint, integer, integer) IS + 'Updates into pg_dist_placement with user checks'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/latest.sql index 7cb71774011..b7c47364744 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/latest.sql @@ -1,3 +1,13 @@ +CREATE OR REPLACE FUNCTION citus_internal.update_placement_metadata( + shard_id bigint, source_group_id integer, + target_group_id integer) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_update_placement_metadata$$; + +COMMENT ON FUNCTION citus_internal.update_placement_metadata(bigint, integer, integer) IS + 'Updates into pg_dist_placement with user checks'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_placement_metadata( shard_id bigint, source_group_id integer, target_group_id integer) diff --git a/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/12.2-1.sql new file mode 100644 index 00000000000..2266895296f --- /dev/null +++ 
b/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/12.2-1.sql @@ -0,0 +1,14 @@ +CREATE OR REPLACE FUNCTION citus_internal.update_relation_colocation(relation_id Oid, target_colocation_id int) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_update_relation_colocation$$; +COMMENT ON FUNCTION citus_internal.update_relation_colocation(oid, int) IS + 'Updates colocationId field of pg_dist_partition for the relation_id'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_relation_colocation(relation_id Oid, target_colocation_id int) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME'; +COMMENT ON FUNCTION pg_catalog.citus_internal_update_relation_colocation(oid, int) IS + 'Updates colocationId field of pg_dist_partition for the relation_id'; + diff --git a/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/latest.sql index a7f2ec1c604..2266895296f 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/latest.sql @@ -1,3 +1,10 @@ +CREATE OR REPLACE FUNCTION citus_internal.update_relation_colocation(relation_id Oid, target_colocation_id int) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_update_relation_colocation$$; +COMMENT ON FUNCTION citus_internal.update_relation_colocation(oid, int) IS + 'Updates colocationId field of pg_dist_partition for the relation_id'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_relation_colocation(relation_id Oid, target_colocation_id int) RETURNS void LANGUAGE C STRICT diff --git a/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/12.2-1.sql new file mode 100644 index 00000000000..6f494fa74a5 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/12.2-1.sql @@ -0,0 +1,45 @@ +CREATE OR REPLACE FUNCTION pg_catalog.citus_isolation_test_session_is_blocked(pBlockedPid integer, pInterestingPids integer[]) +RETURNS boolean AS $$ + DECLARE + mBlockedGlobalPid int8; + workerProcessId integer := current_setting('citus.isolation_test_session_remote_process_id'); + coordinatorProcessId integer := current_setting('citus.isolation_test_session_process_id'); + BEGIN + IF pg_catalog.old_pg_isolation_test_session_is_blocked(pBlockedPid, pInterestingPids) THEN + RETURN true; + END IF; + + -- pg says we're not blocked locally; check whether we're blocked globally. + -- Note that worker process may be blocked or waiting for a lock. So we need to + -- get transaction number for both of them. Following IF provides the transaction + -- number when the worker process waiting for other session. 
+ IF EXISTS (SELECT 1 FROM get_global_active_transactions() + WHERE process_id = workerProcessId AND pBlockedPid = coordinatorProcessId) THEN + SELECT global_pid INTO mBlockedGlobalPid FROM get_global_active_transactions() + WHERE process_id = workerProcessId AND pBlockedPid = coordinatorProcessId; + ELSE + -- Check whether transactions initiated from the coordinator get locked + SELECT global_pid INTO mBlockedGlobalPid + FROM get_all_active_transactions() WHERE process_id = pBlockedPid; + END IF; + + -- We convert the blocking_global_pid to a regular pid and only look at + -- blocks caused by the interesting pids, or the workerProcessPid. If we + -- don't do that we might find unrelated blocks caused by some random + -- other processes that are not involved in this isolation test. Because we + -- run our isolation tests on a single physical machine, the PID part of + -- the GPID is known to be unique within the whole cluster. + RETURN EXISTS ( + SELECT 1 FROM citus_internal.global_blocked_processes() + WHERE waiting_global_pid = mBlockedGlobalPid + AND ( + citus_pid_for_gpid(blocking_global_pid) in ( + select * from unnest(pInterestingPids) + ) + OR citus_pid_for_gpid(blocking_global_pid) = workerProcessId + ) + ); + END; +$$ LANGUAGE plpgsql; + +REVOKE ALL ON FUNCTION citus_isolation_test_session_is_blocked(integer,integer[]) FROM PUBLIC; diff --git a/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/latest.sql b/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/latest.sql index ff098391002..6f494fa74a5 100644 --- a/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/latest.sql @@ -30,7 +30,7 @@ RETURNS boolean AS $$ -- run our isolation tests on a single physical machine, the PID part of -- the GPID is known to be unique within the whole cluster. 
RETURN EXISTS ( - SELECT 1 FROM citus_internal_global_blocked_processes() + SELECT 1 FROM citus_internal.global_blocked_processes() WHERE waiting_global_pid = mBlockedGlobalPid AND ( citus_pid_for_gpid(blocking_global_pid) in ( diff --git a/src/backend/distributed/sql/udfs/citus_lock_waits/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_lock_waits/12.2-1.sql new file mode 100644 index 00000000000..880306b992f --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_lock_waits/12.2-1.sql @@ -0,0 +1,47 @@ +SET search_path = 'pg_catalog'; + +CREATE VIEW citus.citus_lock_waits AS +WITH +unique_global_wait_edges_with_calculated_gpids AS ( +SELECT + -- if global_pid is NULL, it is most likely that a backend is blocked on a DDL + -- also for legacy reasons citus_internal.global_blocked_processes() returns groupId, we replace that with nodeIds + case WHEN waiting_global_pid !=0 THEN waiting_global_pid ELSE citus_calculate_gpid(get_nodeid_for_groupid(waiting_node_id), waiting_pid) END waiting_global_pid, + case WHEN blocking_global_pid !=0 THEN blocking_global_pid ELSE citus_calculate_gpid(get_nodeid_for_groupid(blocking_node_id), blocking_pid) END blocking_global_pid, + + -- citus_internal.global_blocked_processes returns groupId, we replace it here with actual + -- nodeId to be consisten with the other views + get_nodeid_for_groupid(blocking_node_id) as blocking_node_id, + get_nodeid_for_groupid(waiting_node_id) as waiting_node_id, + + blocking_transaction_waiting + + FROM citus_internal.global_blocked_processes() +), +unique_global_wait_edges AS +( + SELECT DISTINCT ON(waiting_global_pid, blocking_global_pid) * FROM unique_global_wait_edges_with_calculated_gpids +), +citus_dist_stat_activity_with_calculated_gpids AS +( + -- if global_pid is NULL, it is most likely that a backend is blocked on a DDL + SELECT CASE WHEN global_pid != 0 THEN global_pid ELSE citus_calculate_gpid(nodeid, pid) END global_pid, nodeid, pid, query FROM citus_dist_stat_activity +) +SELECT + waiting.global_pid as waiting_gpid, + blocking.global_pid as blocking_gpid, + waiting.query AS blocked_statement, + blocking.query AS current_statement_in_blocking_process, + waiting.nodeid AS waiting_nodeid, + blocking.nodeid AS blocking_nodeid +FROM + unique_global_wait_edges + JOIN + citus_dist_stat_activity_with_calculated_gpids waiting ON (unique_global_wait_edges.waiting_global_pid = waiting.global_pid) + JOIN + citus_dist_stat_activity_with_calculated_gpids blocking ON (unique_global_wait_edges.blocking_global_pid = blocking.global_pid); + +ALTER VIEW citus.citus_lock_waits SET SCHEMA pg_catalog; +GRANT SELECT ON pg_catalog.citus_lock_waits TO PUBLIC; + +RESET search_path; diff --git a/src/backend/distributed/sql/udfs/citus_lock_waits/latest.sql b/src/backend/distributed/sql/udfs/citus_lock_waits/latest.sql index b3de12632b5..880306b992f 100644 --- a/src/backend/distributed/sql/udfs/citus_lock_waits/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_lock_waits/latest.sql @@ -5,18 +5,18 @@ WITH unique_global_wait_edges_with_calculated_gpids AS ( SELECT -- if global_pid is NULL, it is most likely that a backend is blocked on a DDL - -- also for legacy reasons citus_internal_global_blocked_processes() returns groupId, we replace that with nodeIds + -- also for legacy reasons citus_internal.global_blocked_processes() returns groupId, we replace that with nodeIds case WHEN waiting_global_pid !=0 THEN waiting_global_pid ELSE citus_calculate_gpid(get_nodeid_for_groupid(waiting_node_id), waiting_pid) END waiting_global_pid, 
case WHEN blocking_global_pid !=0 THEN blocking_global_pid ELSE citus_calculate_gpid(get_nodeid_for_groupid(blocking_node_id), blocking_pid) END blocking_global_pid, - -- citus_internal_global_blocked_processes returns groupId, we replace it here with actual + -- citus_internal.global_blocked_processes returns groupId, we replace it here with actual -- nodeId to be consisten with the other views get_nodeid_for_groupid(blocking_node_id) as blocking_node_id, get_nodeid_for_groupid(waiting_node_id) as waiting_node_id, blocking_transaction_waiting - FROM citus_internal_global_blocked_processes() + FROM citus_internal.global_blocked_processes() ), unique_global_wait_edges AS ( diff --git a/src/backend/distributed/sql/udfs/citus_unmark_object_distributed/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_unmark_object_distributed/12.2-1.sql new file mode 100644 index 00000000000..3c1b1bdec0a --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_unmark_object_distributed/12.2-1.sql @@ -0,0 +1,7 @@ +CREATE FUNCTION pg_catalog.citus_unmark_object_distributed(classid oid, objid oid, objsubid int, checkobjectexistence boolean DEFAULT true) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_unmark_object_distributed$$; +COMMENT ON FUNCTION pg_catalog.citus_unmark_object_distributed(classid oid, objid oid, objsubid int, checkobjectexistence boolean) + IS 'Removes an object from citus.pg_dist_object after deletion. If checkobjectexistence is true, object existence check performed.' + 'Otherwise, object existence check is skipped.'; diff --git a/src/backend/distributed/sql/udfs/citus_unmark_object_distributed/latest.sql b/src/backend/distributed/sql/udfs/citus_unmark_object_distributed/latest.sql index 3f60c60c3cb..3c1b1bdec0a 100644 --- a/src/backend/distributed/sql/udfs/citus_unmark_object_distributed/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_unmark_object_distributed/latest.sql @@ -1,6 +1,7 @@ -CREATE FUNCTION pg_catalog.citus_unmark_object_distributed(classid oid, objid oid, objsubid int) +CREATE FUNCTION pg_catalog.citus_unmark_object_distributed(classid oid, objid oid, objsubid int, checkobjectexistence boolean DEFAULT true) RETURNS void LANGUAGE C STRICT AS 'MODULE_PATHNAME', $$citus_unmark_object_distributed$$; -COMMENT ON FUNCTION pg_catalog.citus_unmark_object_distributed(classid oid, objid oid, objsubid int) - IS 'remove an object address from citus.pg_dist_object once the object has been deleted'; +COMMENT ON FUNCTION pg_catalog.citus_unmark_object_distributed(classid oid, objid oid, objsubid int, checkobjectexistence boolean) + IS 'Removes an object from citus.pg_dist_object after deletion. If checkobjectexistence is true, object existence check performed.' 
+ 'Otherwise, object existence check is skipped.'; diff --git a/src/backend/distributed/sql/udfs/commit_management_command_2pc/12.2-1.sql b/src/backend/distributed/sql/udfs/commit_management_command_2pc/12.2-1.sql new file mode 100644 index 00000000000..8c24e6dd47d --- /dev/null +++ b/src/backend/distributed/sql/udfs/commit_management_command_2pc/12.2-1.sql @@ -0,0 +1,7 @@ +CREATE OR REPLACE FUNCTION citus_internal.commit_management_command_2pc() + RETURNS VOID + LANGUAGE C +AS 'MODULE_PATHNAME', $$commit_management_command_2pc$$; + +COMMENT ON FUNCTION citus_internal.commit_management_command_2pc() + IS 'commits the coordinated remote transactions, is a wrapper function for CoordinatedRemoteTransactionsCommit'; diff --git a/src/backend/distributed/sql/udfs/commit_management_command_2pc/latest.sql b/src/backend/distributed/sql/udfs/commit_management_command_2pc/latest.sql new file mode 100644 index 00000000000..8c24e6dd47d --- /dev/null +++ b/src/backend/distributed/sql/udfs/commit_management_command_2pc/latest.sql @@ -0,0 +1,7 @@ +CREATE OR REPLACE FUNCTION citus_internal.commit_management_command_2pc() + RETURNS VOID + LANGUAGE C +AS 'MODULE_PATHNAME', $$commit_management_command_2pc$$; + +COMMENT ON FUNCTION citus_internal.commit_management_command_2pc() + IS 'commits the coordinated remote transactions, is a wrapper function for CoordinatedRemoteTransactionsCommit'; diff --git a/src/backend/distributed/sql/udfs/execute_command_on_remote_nodes_as_user/12.2-1.sql b/src/backend/distributed/sql/udfs/execute_command_on_remote_nodes_as_user/12.2-1.sql new file mode 100644 index 00000000000..fc1076e9c0a --- /dev/null +++ b/src/backend/distributed/sql/udfs/execute_command_on_remote_nodes_as_user/12.2-1.sql @@ -0,0 +1,7 @@ +CREATE OR REPLACE FUNCTION citus_internal.execute_command_on_remote_nodes_as_user(query text, username text) + RETURNS VOID + LANGUAGE C +AS 'MODULE_PATHNAME', $$execute_command_on_remote_nodes_as_user$$; + +COMMENT ON FUNCTION citus_internal.execute_command_on_remote_nodes_as_user(query text, username text) + IS 'executes a query on the nodes other than the current one'; diff --git a/src/backend/distributed/sql/udfs/execute_command_on_remote_nodes_as_user/latest.sql b/src/backend/distributed/sql/udfs/execute_command_on_remote_nodes_as_user/latest.sql new file mode 100644 index 00000000000..fc1076e9c0a --- /dev/null +++ b/src/backend/distributed/sql/udfs/execute_command_on_remote_nodes_as_user/latest.sql @@ -0,0 +1,7 @@ +CREATE OR REPLACE FUNCTION citus_internal.execute_command_on_remote_nodes_as_user(query text, username text) + RETURNS VOID + LANGUAGE C +AS 'MODULE_PATHNAME', $$execute_command_on_remote_nodes_as_user$$; + +COMMENT ON FUNCTION citus_internal.execute_command_on_remote_nodes_as_user(query text, username text) + IS 'executes a query on the nodes other than the current one'; diff --git a/src/backend/distributed/sql/udfs/mark_object_distributed/12.2-1.sql b/src/backend/distributed/sql/udfs/mark_object_distributed/12.2-1.sql new file mode 100644 index 00000000000..25d35c028a3 --- /dev/null +++ b/src/backend/distributed/sql/udfs/mark_object_distributed/12.2-1.sql @@ -0,0 +1,7 @@ +CREATE OR REPLACE FUNCTION citus_internal.mark_object_distributed(classId Oid, objectName text, objectId Oid, connectionUser text) + RETURNS VOID + LANGUAGE C +AS 'MODULE_PATHNAME', $$mark_object_distributed$$; + +COMMENT ON FUNCTION citus_internal.mark_object_distributed(classId Oid, objectName text, objectId Oid, connectionUser text) + IS 'adds an object to pg_dist_object on all nodes'; 
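For orientation, the citus_internal management helpers introduced in this group (start_management_transaction, execute_command_on_remote_nodes_as_user and commit_management_command_2pc) are only ever invoked by Citus itself: the C changes to remote_transaction.c and transaction_management.c later in this diff open a superuser connection to the Citus main database and issue them in sequence. The SQL below is a non-authoritative sketch of that call order only, not part of the patch; the transaction id, command text and role name are placeholder values.

    -- Illustrative only: Citus issues these over a main-database connection as superuser;
    -- they are not meant to be run by hand.
    SELECT citus_internal.start_management_transaction('123'::xid8);     -- record the outer transaction id, switch to 2PC
    SELECT citus_internal.execute_command_on_remote_nodes_as_user(
               'CREATE ROLE app_reader', 'postgres');                    -- propagate the command to remote nodes as that user
    SELECT citus_internal.commit_management_command_2pc();               -- commit the prepared remote transactions

mark_object_distributed above follows the same internal-only pattern: it is called from the main database to record a newly created object in pg_dist_object on all nodes using the original connection user.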
diff --git a/src/backend/distributed/sql/udfs/mark_object_distributed/latest.sql b/src/backend/distributed/sql/udfs/mark_object_distributed/latest.sql new file mode 100644 index 00000000000..25d35c028a3 --- /dev/null +++ b/src/backend/distributed/sql/udfs/mark_object_distributed/latest.sql @@ -0,0 +1,7 @@ +CREATE OR REPLACE FUNCTION citus_internal.mark_object_distributed(classId Oid, objectName text, objectId Oid, connectionUser text) + RETURNS VOID + LANGUAGE C +AS 'MODULE_PATHNAME', $$mark_object_distributed$$; + +COMMENT ON FUNCTION citus_internal.mark_object_distributed(classId Oid, objectName text, objectId Oid, connectionUser text) + IS 'adds an object to pg_dist_object on all nodes'; diff --git a/src/backend/distributed/sql/udfs/repl_origin_helper/12.2-1.sql b/src/backend/distributed/sql/udfs/repl_origin_helper/12.2-1.sql new file mode 100644 index 00000000000..8c6d175d074 --- /dev/null +++ b/src/backend/distributed/sql/udfs/repl_origin_helper/12.2-1.sql @@ -0,0 +1,41 @@ +CREATE OR REPLACE FUNCTION citus_internal.start_replication_origin_tracking() +RETURNS void +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_start_replication_origin_tracking$$; +COMMENT ON FUNCTION citus_internal.start_replication_origin_tracking() + IS 'To start replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION citus_internal.stop_replication_origin_tracking() +RETURNS void +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_stop_replication_origin_tracking$$; +COMMENT ON FUNCTION citus_internal.stop_replication_origin_tracking() + IS 'To stop replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION citus_internal.is_replication_origin_tracking_active() +RETURNS boolean +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_is_replication_origin_tracking_active$$; +COMMENT ON FUNCTION citus_internal.is_replication_origin_tracking_active() + IS 'To check if replication origin tracking is active for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_start_replication_origin_tracking() +RETURNS void +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_start_replication_origin_tracking$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_start_replication_origin_tracking() + IS 'To start replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_stop_replication_origin_tracking() +RETURNS void +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_stop_replication_origin_tracking$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_stop_replication_origin_tracking() + IS 'To stop replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_is_replication_origin_tracking_active() +RETURNS boolean +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_is_replication_origin_tracking_active$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_is_replication_origin_tracking_active() + IS 'To check if replication origin tracking is active for skipping publishing of duplicated events during internal data movements for CDC'; diff --git a/src/backend/distributed/sql/udfs/repl_origin_helper/latest.sql 
b/src/backend/distributed/sql/udfs/repl_origin_helper/latest.sql index 5fe5a3bb92d..8c6d175d074 100644 --- a/src/backend/distributed/sql/udfs/repl_origin_helper/latest.sql +++ b/src/backend/distributed/sql/udfs/repl_origin_helper/latest.sql @@ -1,3 +1,24 @@ +CREATE OR REPLACE FUNCTION citus_internal.start_replication_origin_tracking() +RETURNS void +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_start_replication_origin_tracking$$; +COMMENT ON FUNCTION citus_internal.start_replication_origin_tracking() + IS 'To start replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION citus_internal.stop_replication_origin_tracking() +RETURNS void +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_stop_replication_origin_tracking$$; +COMMENT ON FUNCTION citus_internal.stop_replication_origin_tracking() + IS 'To stop replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION citus_internal.is_replication_origin_tracking_active() +RETURNS boolean +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_is_replication_origin_tracking_active$$; +COMMENT ON FUNCTION citus_internal.is_replication_origin_tracking_active() + IS 'To check if replication origin tracking is active for skipping publishing of duplicated events during internal data movements for CDC'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_start_replication_origin_tracking() RETURNS void LANGUAGE C STRICT diff --git a/src/backend/distributed/sql/udfs/start_management_transaction/12.2-1.sql b/src/backend/distributed/sql/udfs/start_management_transaction/12.2-1.sql new file mode 100644 index 00000000000..ec1f416d0e6 --- /dev/null +++ b/src/backend/distributed/sql/udfs/start_management_transaction/12.2-1.sql @@ -0,0 +1,7 @@ +CREATE OR REPLACE FUNCTION citus_internal.start_management_transaction(outer_xid xid8) + RETURNS VOID + LANGUAGE C +AS 'MODULE_PATHNAME', $$start_management_transaction$$; + +COMMENT ON FUNCTION citus_internal.start_management_transaction(outer_xid xid8) + IS 'internal Citus function that starts a management transaction in the main database'; diff --git a/src/backend/distributed/sql/udfs/start_management_transaction/latest.sql b/src/backend/distributed/sql/udfs/start_management_transaction/latest.sql new file mode 100644 index 00000000000..ec1f416d0e6 --- /dev/null +++ b/src/backend/distributed/sql/udfs/start_management_transaction/latest.sql @@ -0,0 +1,7 @@ +CREATE OR REPLACE FUNCTION citus_internal.start_management_transaction(outer_xid xid8) + RETURNS VOID + LANGUAGE C +AS 'MODULE_PATHNAME', $$start_management_transaction$$; + +COMMENT ON FUNCTION citus_internal.start_management_transaction(outer_xid xid8) + IS 'internal Citus function that starts a management transaction in the main database'; diff --git a/src/backend/distributed/test/backend_counter.c b/src/backend/distributed/test/backend_counter.c index 1b9984ac9e0..f3f19f0d312 100644 --- a/src/backend/distributed/test/backend_counter.c +++ b/src/backend/distributed/test/backend_counter.c @@ -11,6 +11,7 @@ */ #include "postgres.h" + #include "fmgr.h" #include "distributed/backend_data.h" diff --git a/src/backend/distributed/test/citus_depended_object.c b/src/backend/distributed/test/citus_depended_object.c index 4e1e919e814..77fc2e4827c 100644 --- a/src/backend/distributed/test/citus_depended_object.c +++ b/src/backend/distributed/test/citus_depended_object.c @@ -13,17 
+13,17 @@ #include "catalog/pg_am.h" #include "catalog/pg_amop.h" #include "catalog/pg_amproc.h" -#include "catalog/pg_attribute.h" #include "catalog/pg_attrdef.h" -#include "catalog/pg_constraint.h" +#include "catalog/pg_attribute.h" #include "catalog/pg_class.h" +#include "catalog/pg_constraint.h" #include "catalog/pg_depend.h" #include "catalog/pg_enum.h" #include "catalog/pg_event_trigger.h" #include "catalog/pg_language.h" #include "catalog/pg_namespace.h" -#include "catalog/pg_operator.h" #include "catalog/pg_opclass.h" +#include "catalog/pg_operator.h" #include "catalog/pg_opfamily.h" #include "catalog/pg_proc.h" #include "catalog/pg_rewrite.h" @@ -34,11 +34,12 @@ #include "catalog/pg_ts_dict.h" #include "catalog/pg_ts_template.h" #include "catalog/pg_type.h" + #include "distributed/citus_depended_object.h" #include "distributed/listutils.h" -#include "distributed/metadata_cache.h" #include "distributed/metadata/dependency.h" #include "distributed/metadata/distobject.h" +#include "distributed/metadata_cache.h" static bool IsCitusDependentObject(ObjectAddress objectAddress); diff --git a/src/backend/distributed/test/citus_stat_tenants.c b/src/backend/distributed/test/citus_stat_tenants.c index 2cfe0029b6e..b8fe305c634 100644 --- a/src/backend/distributed/test/citus_stat_tenants.c +++ b/src/backend/distributed/test/citus_stat_tenants.c @@ -10,11 +10,13 @@ */ #include "postgres.h" + #include "fmgr.h" -#include "distributed/utils/citus_stat_tenants.h" #include "sys/time.h" +#include "distributed/utils/citus_stat_tenants.h" + PG_FUNCTION_INFO_V1(sleep_until_next_period); /* diff --git a/src/backend/distributed/test/colocation_utils.c b/src/backend/distributed/test/colocation_utils.c index 19a4e166461..6a87539c46b 100644 --- a/src/backend/distributed/test/colocation_utils.c +++ b/src/backend/distributed/test/colocation_utils.c @@ -11,9 +11,11 @@ */ #include "postgres.h" + #include "fmgr.h" #include "catalog/pg_type.h" + #include "distributed/colocation_utils.h" #include "distributed/listutils.h" #include "distributed/metadata_cache.h" diff --git a/src/backend/distributed/test/create_shards.c b/src/backend/distributed/test/create_shards.c index 4ed1db7c712..4ef13f1cb78 100644 --- a/src/backend/distributed/test/create_shards.c +++ b/src/backend/distributed/test/create_shards.c @@ -10,16 +10,18 @@ *------------------------------------------------------------------------- */ +#include + #include "postgres.h" + #include "c.h" #include "fmgr.h" -#include - -#include "distributed/listutils.h" #include "lib/stringinfo.h" #include "nodes/pg_list.h" +#include "distributed/listutils.h" + /* local function forward declarations */ static int CompareStrings(const void *leftElement, const void *rightElement); diff --git a/src/backend/distributed/test/deparse_function_query.c b/src/backend/distributed/test/deparse_function_query.c index 7a6e54424ac..8971f597a7d 100644 --- a/src/backend/distributed/test/deparse_function_query.c +++ b/src/backend/distributed/test/deparse_function_query.c @@ -13,9 +13,10 @@ #include "postgres.h" +#include "utils/builtins.h" + #include "distributed/deparser.h" #include "distributed/multi_executor.h" -#include "utils/builtins.h" /* declarations for dynamic loading */ PG_FUNCTION_INFO_V1(deparse_test); diff --git a/src/backend/distributed/test/deparse_shard_query.c b/src/backend/distributed/test/deparse_shard_query.c index a6196146fef..a9b4ced1da4 100644 --- a/src/backend/distributed/test/deparse_shard_query.c +++ b/src/backend/distributed/test/deparse_shard_query.c @@ -10,18 
+10,14 @@ *------------------------------------------------------------------------- */ +#include + #include "postgres.h" + #include "c.h" #include "fmgr.h" -#include - #include "catalog/pg_type.h" -#include "distributed/listutils.h" -#include "distributed/coordinator_protocol.h" -#include "distributed/citus_ruleutils.h" -#include "distributed/insert_select_planner.h" -#include "distributed/multi_router_planner.h" #include "lib/stringinfo.h" #include "nodes/makefuncs.h" #include "nodes/nodes.h" @@ -33,6 +29,12 @@ #include "utils/builtins.h" #include "utils/palloc.h" +#include "distributed/citus_ruleutils.h" +#include "distributed/coordinator_protocol.h" +#include "distributed/insert_select_planner.h" +#include "distributed/listutils.h" +#include "distributed/multi_router_planner.h" + /* declarations for dynamic loading */ PG_FUNCTION_INFO_V1(deparse_shard_query_test); diff --git a/src/backend/distributed/test/dependency.c b/src/backend/distributed/test/dependency.c index 82e818b8ce1..7afbfdec732 100644 --- a/src/backend/distributed/test/dependency.c +++ b/src/backend/distributed/test/dependency.c @@ -9,6 +9,7 @@ *------------------------------------------------------------------------- */ #include "postgres.h" + #include "c.h" #include "fmgr.h" diff --git a/src/backend/distributed/test/distributed_deadlock_detection.c b/src/backend/distributed/test/distributed_deadlock_detection.c index d3fa34db284..68b5622a72d 100644 --- a/src/backend/distributed/test/distributed_deadlock_detection.c +++ b/src/backend/distributed/test/distributed_deadlock_detection.c @@ -10,10 +10,15 @@ *------------------------------------------------------------------------- */ #include "postgres.h" + #include "funcapi.h" #include "miscadmin.h" #include "access/hash.h" +#include "nodes/pg_list.h" +#include "utils/hsearch.h" +#include "utils/timestamp.h" + #include "distributed/backend_data.h" #include "distributed/distributed_deadlock_detection.h" #include "distributed/hash_helpers.h" @@ -22,9 +27,6 @@ #include "distributed/metadata_cache.h" #include "distributed/transaction_identifier.h" #include "distributed/tuplestore.h" -#include "nodes/pg_list.h" -#include "utils/hsearch.h" -#include "utils/timestamp.h" PG_FUNCTION_INFO_V1(get_adjacency_list_wait_graph); diff --git a/src/backend/distributed/test/distributed_intermediate_results.c b/src/backend/distributed/test/distributed_intermediate_results.c index c3b286f526b..843bda476aa 100644 --- a/src/backend/distributed/test/distributed_intermediate_results.c +++ b/src/backend/distributed/test/distributed_intermediate_results.c @@ -13,12 +13,15 @@ #include #include "postgres.h" + #include "funcapi.h" #include "libpq-fe.h" #include "miscadmin.h" #include "pgstat.h" #include "catalog/pg_type.h" +#include "tcop/tcopprot.h" + #include "distributed/commands/multi_copy.h" #include "distributed/connection_management.h" #include "distributed/intermediate_results.h" @@ -26,10 +29,8 @@ #include "distributed/multi_executor.h" #include "distributed/remote_commands.h" #include "distributed/tuplestore.h" -#include "distributed/listutils.h" #include "distributed/utils/array_type.h" #include "distributed/version_compat.h" -#include "tcop/tcopprot.h" PG_FUNCTION_INFO_V1(partition_task_list_results); PG_FUNCTION_INFO_V1(redistribute_task_list_results); diff --git a/src/backend/distributed/test/distribution_metadata.c b/src/backend/distributed/test/distribution_metadata.c index c3bc7fb5189..01117922e36 100644 --- a/src/backend/distributed/test/distribution_metadata.c +++ 
b/src/backend/distributed/test/distribution_metadata.c @@ -10,37 +10,39 @@ *------------------------------------------------------------------------- */ +#include +#include + #include "postgres.h" + #include "c.h" #include "fmgr.h" -#include -#include - #include "access/heapam.h" #include "catalog/pg_type.h" -#include "distributed/distribution_column.h" -#include "distributed/listutils.h" -#include "distributed/metadata_utility.h" -#include "distributed/coordinator_protocol.h" -#include "distributed/metadata_cache.h" -#include "distributed/multi_join_order.h" -#include "distributed/multi_physical_planner.h" -#include "distributed/pg_dist_shard.h" -#include "distributed/query_utils.h" -#include "distributed/resource_lock.h" -#include "distributed/utils/array_type.h" #include "lib/stringinfo.h" #include "nodes/pg_list.h" #include "nodes/primnodes.h" #include "storage/lock.h" #include "tcop/tcopprot.h" #include "utils/array.h" +#include "utils/builtins.h" #include "utils/elog.h" #include "utils/errcodes.h" -#include "utils/builtins.h" #include "utils/palloc.h" +#include "distributed/coordinator_protocol.h" +#include "distributed/distribution_column.h" +#include "distributed/listutils.h" +#include "distributed/metadata_cache.h" +#include "distributed/metadata_utility.h" +#include "distributed/multi_join_order.h" +#include "distributed/multi_physical_planner.h" +#include "distributed/pg_dist_shard.h" +#include "distributed/query_utils.h" +#include "distributed/resource_lock.h" +#include "distributed/utils/array_type.h" + /* declarations for dynamic loading */ PG_FUNCTION_INFO_V1(load_shard_id_array); diff --git a/src/backend/distributed/test/fake_am.c b/src/backend/distributed/test/fake_am.c index 8a723e4c44f..cff124961ac 100644 --- a/src/backend/distributed/test/fake_am.c +++ b/src/backend/distributed/test/fake_am.c @@ -19,14 +19,10 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" -#include "pg_version_compat.h" - - #include "access/amapi.h" #include "access/heapam.h" -#include "access/tableam.h" #include "access/multixact.h" +#include "access/tableam.h" #include "access/xact.h" #include "catalog/index.h" #include "catalog/storage.h" @@ -36,6 +32,9 @@ #include "storage/smgr.h" #include "utils/snapmgr.h" +#include "pg_version_compat.h" +#include "pg_version_constants.h" + PG_FUNCTION_INFO_V1(fake_am_handler); static const TableAmRoutine fake_methods; diff --git a/src/backend/distributed/test/fake_fdw.c b/src/backend/distributed/test/fake_fdw.c index 4784248c0bd..585e61d4108 100644 --- a/src/backend/distributed/test/fake_fdw.c +++ b/src/backend/distributed/test/fake_fdw.c @@ -10,27 +10,27 @@ *------------------------------------------------------------------------- */ -#include "postgres.h" +#include -#include "distributed/pg_version_constants.h" +#include "postgres.h" #include "c.h" #include "fmgr.h" -#include - #include "executor/tuptable.h" #include "foreign/fdwapi.h" #include "nodes/execnodes.h" #include "nodes/nodes.h" +#include "nodes/pathnodes.h" #include "nodes/pg_list.h" #include "nodes/plannodes.h" -#include "nodes/pathnodes.h" #include "optimizer/pathnode.h" #include "optimizer/planmain.h" #include "optimizer/restrictinfo.h" #include "utils/palloc.h" +#include "pg_version_constants.h" + /* local function forward declarations */ static void FakeGetForeignRelSize(PlannerInfo *root, RelOptInfo *baserel, Oid foreigntableid); diff --git a/src/backend/distributed/test/foreign_key_relationship_query.c b/src/backend/distributed/test/foreign_key_relationship_query.c 
index 545c2e97006..af187111a70 100644 --- a/src/backend/distributed/test/foreign_key_relationship_query.c +++ b/src/backend/distributed/test/foreign_key_relationship_query.c @@ -11,18 +11,20 @@ */ #include "postgres.h" + #include "fmgr.h" #include "funcapi.h" #include "catalog/dependency.h" #include "catalog/pg_constraint.h" -#include "distributed/foreign_key_relationship.h" +#include "utils/builtins.h" + #include "distributed/coordinator_protocol.h" +#include "distributed/foreign_key_relationship.h" #include "distributed/listutils.h" #include "distributed/metadata_cache.h" #include "distributed/tuplestore.h" #include "distributed/version_compat.h" -#include "utils/builtins.h" #define GET_FKEY_CONNECTED_RELATIONS_COLUMNS 1 diff --git a/src/backend/distributed/test/global_pid.c b/src/backend/distributed/test/global_pid.c index de54f1929f2..b63b39b444e 100644 --- a/src/backend/distributed/test/global_pid.c +++ b/src/backend/distributed/test/global_pid.c @@ -10,6 +10,7 @@ */ #include "postgres.h" + #include "fmgr.h" #include "distributed/backend_data.h" diff --git a/src/backend/distributed/test/hide_shards.c b/src/backend/distributed/test/hide_shards.c index 59e738c360e..b1adf61b40a 100644 --- a/src/backend/distributed/test/hide_shards.c +++ b/src/backend/distributed/test/hide_shards.c @@ -10,6 +10,7 @@ *------------------------------------------------------------------------- */ #include "postgres.h" + #include "funcapi.h" #include "miscadmin.h" #include "pgstat.h" diff --git a/src/backend/distributed/test/intermediate_results.c b/src/backend/distributed/test/intermediate_results.c index b4f14bca6af..8681a6ca928 100644 --- a/src/backend/distributed/test/intermediate_results.c +++ b/src/backend/distributed/test/intermediate_results.c @@ -13,6 +13,7 @@ #include #include "postgres.h" + #include "funcapi.h" #include "libpq-fe.h" #include "miscadmin.h" diff --git a/src/backend/distributed/test/make_external_connection.c b/src/backend/distributed/test/make_external_connection.c index 424793deadd..14be057ab0b 100644 --- a/src/backend/distributed/test/make_external_connection.c +++ b/src/backend/distributed/test/make_external_connection.c @@ -11,10 +11,17 @@ */ #include "postgres.h" -#include "miscadmin.h" + #include "libpq-fe.h" +#include "miscadmin.h" #include "access/xact.h" +#include "executor/spi.h" +#include "lib/stringinfo.h" +#include "postmaster/postmaster.h" +#include "utils/builtins.h" +#include "utils/memutils.h" + #include "distributed/connection_management.h" #include "distributed/coordinator_protocol.h" #include "distributed/function_utils.h" @@ -23,13 +30,7 @@ #include "distributed/metadata_cache.h" #include "distributed/remote_commands.h" #include "distributed/run_from_same_connection.h" - #include "distributed/version_compat.h" -#include "executor/spi.h" -#include "lib/stringinfo.h" -#include "postmaster/postmaster.h" -#include "utils/builtins.h" -#include "utils/memutils.h" PG_FUNCTION_INFO_V1(make_external_connection_to_node); diff --git a/src/backend/distributed/test/metadata_sync.c b/src/backend/distributed/test/metadata_sync.c index 46d2303d621..ce025cff9bd 100644 --- a/src/backend/distributed/test/metadata_sync.c +++ b/src/backend/distributed/test/metadata_sync.c @@ -10,10 +10,17 @@ *------------------------------------------------------------------------- */ #include "postgres.h" + #include "c.h" #include "fmgr.h" +#include "miscadmin.h" #include "catalog/pg_type.h" +#include "postmaster/postmaster.h" +#include "storage/latch.h" +#include "utils/array.h" +#include 
"utils/builtins.h" + #include "distributed/connection_management.h" #include "distributed/intermediate_result_pruning.h" #include "distributed/listutils.h" @@ -22,11 +29,6 @@ #include "distributed/remote_commands.h" #include "distributed/utils/array_type.h" #include "distributed/worker_manager.h" -#include "postmaster/postmaster.h" -#include "miscadmin.h" -#include "storage/latch.h" -#include "utils/array.h" -#include "utils/builtins.h" /* declarations for dynamic loading */ @@ -48,6 +50,13 @@ activate_node_snapshot(PG_FUNCTION_ARGS) * so we are using first primary worker node just for test purposes. */ WorkerNode *dummyWorkerNode = GetFirstPrimaryWorkerNode(); + if (dummyWorkerNode == NULL) + { + ereport(ERROR, (errmsg("no worker nodes found"), + errdetail("Function activate_node_snapshot is meant to be " + "used when running tests on a multi-node cluster " + "with workers."))); + } /* * Create MetadataSyncContext which is used throughout nodes' activation. @@ -91,43 +100,53 @@ activate_node_snapshot(PG_FUNCTION_ARGS) /* - * wait_until_metadata_sync waits until the maintenance daemon does a metadata - * sync, or times out. + * IsMetadataSynced checks the workers to see if all workers with metadata are + * synced. */ -Datum -wait_until_metadata_sync(PG_FUNCTION_ARGS) +static bool +IsMetadataSynced(void) { - uint32 timeout = PG_GETARG_UINT32(0); - List *workerList = ActivePrimaryNonCoordinatorNodeList(NoLock); - bool waitNotifications = false; WorkerNode *workerNode = NULL; foreach_ptr(workerNode, workerList) { - /* if already has metadata, no need to do it again */ if (workerNode->hasMetadata && !workerNode->metadataSynced) { - waitNotifications = true; - break; + return false; } } + return true; +} + + +/* + * wait_until_metadata_sync waits until the maintenance daemon does a metadata + * sync, or times out. + */ +Datum +wait_until_metadata_sync(PG_FUNCTION_ARGS) +{ + uint32 timeout = PG_GETARG_UINT32(0); + + /* First we start listening. */ + MultiConnection *connection = GetNodeConnection(FORCE_NEW_CONNECTION, + LocalHostName, PostPortNumber); + ExecuteCriticalRemoteCommand(connection, "LISTEN " METADATA_SYNC_CHANNEL); + /* * If all the metadata nodes have already been synced, we should not wait. * That's primarily because the maintenance deamon might have already sent * the notification and we'd wait unnecessarily here. Worse, the test outputs * might be inconsistent across executions due to the warning. 
*/ - if (!waitNotifications) + if (IsMetadataSynced()) { + CloseConnection(connection); PG_RETURN_VOID(); } - MultiConnection *connection = GetNodeConnection(FORCE_NEW_CONNECTION, - LOCAL_HOST_NAME, PostPortNumber); - ExecuteCriticalRemoteCommand(connection, "LISTEN " METADATA_SYNC_CHANNEL); - int waitFlags = WL_SOCKET_READABLE | WL_TIMEOUT | WL_POSTMASTER_DEATH; int waitResult = WaitLatchOrSocket(NULL, waitFlags, PQsocket(connection->pgConn), timeout, 0); @@ -139,7 +158,7 @@ wait_until_metadata_sync(PG_FUNCTION_ARGS) { ClearResults(connection, true); } - else if (waitResult & WL_TIMEOUT) + else if (waitResult & WL_TIMEOUT && !IsMetadataSynced()) { elog(WARNING, "waiting for metadata sync timed out"); } diff --git a/src/backend/distributed/test/partitioning_utils.c b/src/backend/distributed/test/partitioning_utils.c index 95adaddf6fd..be916356145 100644 --- a/src/backend/distributed/test/partitioning_utils.c +++ b/src/backend/distributed/test/partitioning_utils.c @@ -10,16 +10,18 @@ *------------------------------------------------------------------------- */ #include "postgres.h" + #include "fmgr.h" #include "catalog/pg_type.h" -#include "distributed/listutils.h" -#include "distributed/multi_partitioning_utils.h" -#include "distributed/reference_table_utils.h" #include "lib/stringinfo.h" #include "utils/builtins.h" #include "utils/lsyscache.h" +#include "distributed/listutils.h" +#include "distributed/multi_partitioning_utils.h" +#include "distributed/reference_table_utils.h" + PG_FUNCTION_INFO_V1(generate_alter_table_detach_partition_command); PG_FUNCTION_INFO_V1(generate_alter_table_attach_partition_command); diff --git a/src/backend/distributed/test/progress_utils.c b/src/backend/distributed/test/progress_utils.c index 42b065dae21..e1ea09e3d2f 100644 --- a/src/backend/distributed/test/progress_utils.c +++ b/src/backend/distributed/test/progress_utils.c @@ -11,18 +11,20 @@ */ +#include + #include "postgres.h" -#include "miscadmin.h" + #include "fmgr.h" #include "funcapi.h" +#include "miscadmin.h" -#include +#include "nodes/execnodes.h" +#include "utils/tuplestore.h" #include "distributed/listutils.h" #include "distributed/multi_progress.h" #include "distributed/tuplestore.h" -#include "nodes/execnodes.h" -#include "utils/tuplestore.h" PG_FUNCTION_INFO_V1(create_progress); diff --git a/src/backend/distributed/test/prune_shard_list.c b/src/backend/distributed/test/prune_shard_list.c index a9f5e4a880b..f972281ecc6 100644 --- a/src/backend/distributed/test/prune_shard_list.c +++ b/src/backend/distributed/test/prune_shard_list.c @@ -10,25 +10,15 @@ *------------------------------------------------------------------------- */ -#include "postgres.h" +#include -#include "distributed/pg_version_constants.h" +#include "postgres.h" #include "c.h" #include "fmgr.h" -#include - #include "access/stratnum.h" #include "catalog/pg_type.h" -#include "distributed/listutils.h" -#include "distributed/metadata_cache.h" -#include "distributed/metadata_utility.h" -#include "distributed/multi_join_order.h" -#include "distributed/multi_physical_planner.h" -#include "distributed/resource_lock.h" -#include "distributed/shard_pruning.h" -#include "distributed/utils/array_type.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" #include "nodes/nodes.h" @@ -38,6 +28,17 @@ #include "utils/array.h" #include "utils/palloc.h" +#include "pg_version_constants.h" + +#include "distributed/listutils.h" +#include "distributed/metadata_cache.h" +#include "distributed/metadata_utility.h" +#include 
"distributed/multi_join_order.h" +#include "distributed/multi_physical_planner.h" +#include "distributed/resource_lock.h" +#include "distributed/shard_pruning.h" +#include "distributed/utils/array_type.h" + /* local function forward declarations */ static Expr * MakeTextPartitionExpression(Oid distributedTableId, text *value); diff --git a/src/backend/distributed/test/relation_access_tracking.c b/src/backend/distributed/test/relation_access_tracking.c index 5715bd03d61..85c0ff2aacf 100644 --- a/src/backend/distributed/test/relation_access_tracking.c +++ b/src/backend/distributed/test/relation_access_tracking.c @@ -10,6 +10,7 @@ */ #include "postgres.h" + #include "c.h" #include "fmgr.h" diff --git a/src/backend/distributed/test/run_from_same_connection.c b/src/backend/distributed/test/run_from_same_connection.c index 04a3149dafa..52b2e0b181b 100644 --- a/src/backend/distributed/test/run_from_same_connection.c +++ b/src/backend/distributed/test/run_from_same_connection.c @@ -12,10 +12,17 @@ */ #include "postgres.h" -#include "miscadmin.h" + #include "libpq-fe.h" +#include "miscadmin.h" #include "access/xact.h" +#include "executor/spi.h" +#include "lib/stringinfo.h" +#include "postmaster/postmaster.h" +#include "utils/builtins.h" +#include "utils/memutils.h" + #include "distributed/connection_management.h" #include "distributed/coordinator_protocol.h" #include "distributed/function_utils.h" @@ -24,13 +31,7 @@ #include "distributed/metadata_cache.h" #include "distributed/remote_commands.h" #include "distributed/run_from_same_connection.h" - #include "distributed/version_compat.h" -#include "executor/spi.h" -#include "lib/stringinfo.h" -#include "postmaster/postmaster.h" -#include "utils/builtins.h" -#include "utils/memutils.h" #define ALTER_CURRENT_PROCESS_ID \ @@ -154,7 +155,7 @@ run_commands_on_session_level_connection_to_node(PG_FUNCTION_ARGS) StringInfo processStringInfo = makeStringInfo(); StringInfo workerProcessStringInfo = makeStringInfo(); - MultiConnection *localConnection = GetNodeConnection(0, LOCAL_HOST_NAME, + MultiConnection *localConnection = GetNodeConnection(0, LocalHostName, PostPortNumber); if (!singleConnection) diff --git a/src/backend/distributed/test/sequential_execution.c b/src/backend/distributed/test/sequential_execution.c index 9b88e3b7a5f..f967eb75fc4 100644 --- a/src/backend/distributed/test/sequential_execution.c +++ b/src/backend/distributed/test/sequential_execution.c @@ -11,6 +11,7 @@ */ #include "postgres.h" + #include "fmgr.h" #include "distributed/multi_executor.h" diff --git a/src/backend/distributed/test/shard_rebalancer.c b/src/backend/distributed/test/shard_rebalancer.c index 56a06398232..32bfd9f463c 100644 --- a/src/backend/distributed/test/shard_rebalancer.c +++ b/src/backend/distributed/test/shard_rebalancer.c @@ -11,26 +11,27 @@ */ #include "postgres.h" -#include "libpq-fe.h" +#include "funcapi.h" +#include "libpq-fe.h" +#include "miscadmin.h" #include "safe_lib.h" #include "catalog/pg_type.h" -#include "distributed/citus_safe_lib.h" +#include "utils/builtins.h" +#include "utils/json.h" +#include "utils/lsyscache.h" +#include "utils/memutils.h" + #include "distributed/citus_ruleutils.h" +#include "distributed/citus_safe_lib.h" #include "distributed/connection_management.h" #include "distributed/listutils.h" #include "distributed/metadata_utility.h" #include "distributed/multi_physical_planner.h" +#include "distributed/relay_utility.h" #include "distributed/shard_cleaner.h" #include "distributed/shard_rebalancer.h" -#include 
"distributed/relay_utility.h" -#include "funcapi.h" -#include "miscadmin.h" -#include "utils/builtins.h" -#include "utils/json.h" -#include "utils/lsyscache.h" -#include "utils/memutils.h" /* static declarations for json conversion */ static List * JsonArrayToShardPlacementTestInfoList( diff --git a/src/backend/distributed/test/shared_connection_counters.c b/src/backend/distributed/test/shared_connection_counters.c index 641cfd314c2..c596028876a 100644 --- a/src/backend/distributed/test/shared_connection_counters.c +++ b/src/backend/distributed/test/shared_connection_counters.c @@ -11,14 +11,16 @@ */ #include "postgres.h" -#include "miscadmin.h" + #include "fmgr.h" +#include "miscadmin.h" -#include "distributed/shared_connection_stats.h" -#include "distributed/listutils.h" #include "nodes/parsenodes.h" #include "utils/guc.h" +#include "distributed/listutils.h" +#include "distributed/shared_connection_stats.h" + /* exports for SQL callable functions */ PG_FUNCTION_INFO_V1(wake_up_connection_pool_waiters); PG_FUNCTION_INFO_V1(set_max_shared_pool_size); diff --git a/src/backend/distributed/test/xact_stats.c b/src/backend/distributed/test/xact_stats.c index 87e15aa646c..a968f8cb66e 100644 --- a/src/backend/distributed/test/xact_stats.c +++ b/src/backend/distributed/test/xact_stats.c @@ -13,6 +13,7 @@ #include #include "postgres.h" + #include "funcapi.h" #include "libpq-fe.h" #include "miscadmin.h" diff --git a/src/backend/distributed/transaction/backend_data.c b/src/backend/distributed/transaction/backend_data.c index 3e2ea5ca108..67acadd2940 100644 --- a/src/backend/distributed/transaction/backend_data.c +++ b/src/backend/distributed/transaction/backend_data.c @@ -12,18 +12,29 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" - +#include "funcapi.h" #include "miscadmin.h" -#include "unistd.h" - #include "safe_lib.h" +#include "unistd.h" -#include "funcapi.h" #include "access/htup_details.h" #include "catalog/pg_authid.h" #include "catalog/pg_type.h" #include "datatype/timestamp.h" +#include "nodes/execnodes.h" +#include "postmaster/autovacuum.h" /* to access autovacuum_max_workers */ +#include "replication/walsender.h" +#include "storage/ipc.h" +#include "storage/lmgr.h" +#include "storage/lwlock.h" +#include "storage/proc.h" +#include "storage/procarray.h" +#include "storage/s_lock.h" +#include "storage/spin.h" +#include "utils/timestamp.h" + +#include "pg_version_constants.h" + #include "distributed/backend_data.h" #include "distributed/connection_management.h" #include "distributed/listutils.h" @@ -34,17 +45,6 @@ #include "distributed/transaction_identifier.h" #include "distributed/tuplestore.h" #include "distributed/worker_manager.h" -#include "nodes/execnodes.h" -#include "postmaster/autovacuum.h" /* to access autovacuum_max_workers */ -#include "replication/walsender.h" -#include "storage/ipc.h" -#include "storage/lmgr.h" -#include "storage/lwlock.h" -#include "storage/procarray.h" -#include "storage/proc.h" -#include "storage/spin.h" -#include "storage/s_lock.h" -#include "utils/timestamp.h" #define GET_ACTIVE_TRANSACTION_QUERY "SELECT * FROM get_all_active_transactions();" @@ -395,7 +395,7 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto bool showCurrentBackendDetails = showAllBackends; BackendData *currentBackend = &backendManagementShmemData->backends[backendIndex]; - PGPROC *currentProc = &ProcGlobal->allProcs[backendIndex]; + PGPROC *currentProc = GetPGProcByNumber(backendIndex); /* to work on data after releasing g spinlock 
to protect against errors */ uint64 transactionNumber = 0; @@ -420,7 +420,7 @@ StoreAllActiveTransactions(Tuplestorestate *tupleStore, TupleDesc tupleDescripto } Oid databaseId = currentBackend->databaseId; - int backendPid = ProcGlobal->allProcs[backendIndex].pid; + int backendPid = GetPGProcByNumber(backendIndex)->pid; /* * We prefer to use worker_query instead of distributedCommandOriginator in @@ -1279,7 +1279,7 @@ ActiveDistributedTransactionNumbers(void) /* build list of starting procs */ for (int curBackend = 0; curBackend < MaxBackends; curBackend++) { - PGPROC *currentProc = &ProcGlobal->allProcs[curBackend]; + PGPROC *currentProc = GetPGProcByNumber(curBackend); BackendData currentBackendData; if (currentProc->pid == 0) diff --git a/src/backend/distributed/transaction/citus_dist_stat_activity.c b/src/backend/distributed/transaction/citus_dist_stat_activity.c index 3aa6372e6e7..b0ffc05eb0b 100644 --- a/src/backend/distributed/transaction/citus_dist_stat_activity.c +++ b/src/backend/distributed/transaction/citus_dist_stat_activity.c @@ -10,8 +10,9 @@ */ #include "postgres.h" -#include "miscadmin.h" + #include "funcapi.h" +#include "miscadmin.h" PG_FUNCTION_INFO_V1(citus_dist_stat_activity); PG_FUNCTION_INFO_V1(citus_worker_stat_activity); diff --git a/src/backend/distributed/transaction/distributed_deadlock_detection.c b/src/backend/distributed/transaction/distributed_deadlock_detection.c index cf8dd43f5ba..5e8060a4f63 100644 --- a/src/backend/distributed/transaction/distributed_deadlock_detection.c +++ b/src/backend/distributed/transaction/distributed_deadlock_detection.c @@ -15,8 +15,11 @@ #include "pgstat.h" #include "access/hash.h" +#include "nodes/pg_list.h" +#include "utils/hsearch.h" +#include "utils/timestamp.h" + #include "distributed/backend_data.h" -#include "distributed/errormessage.h" #include "distributed/distributed_deadlock_detection.h" #include "distributed/errormessage.h" #include "distributed/hash_helpers.h" @@ -25,9 +28,6 @@ #include "distributed/log_utils.h" #include "distributed/metadata_cache.h" #include "distributed/transaction_identifier.h" -#include "nodes/pg_list.h" -#include "utils/hsearch.h" -#include "utils/timestamp.h" /* used only for finding the deadlock cycle path */ @@ -375,7 +375,7 @@ AssociateDistributedTransactionWithBackendProc(TransactionNode *transactionNode) for (int backendIndex = 0; backendIndex < MaxBackends; ++backendIndex) { - PGPROC *currentProc = &ProcGlobal->allProcs[backendIndex]; + PGPROC *currentProc = GetPGProcByNumber(backendIndex); BackendData currentBackendData; /* we're not interested in processes that are not active or waiting on a lock */ diff --git a/src/backend/distributed/transaction/lock_graph.c b/src/backend/distributed/transaction/lock_graph.c index 0b4c0f02e9a..695df2bf4c2 100644 --- a/src/backend/distributed/transaction/lock_graph.c +++ b/src/backend/distributed/transaction/lock_graph.c @@ -18,6 +18,11 @@ #include "miscadmin.h" #include "access/hash.h" +#include "storage/proc.h" +#include "utils/builtins.h" +#include "utils/hsearch.h" +#include "utils/timestamp.h" + #include "distributed/backend_data.h" #include "distributed/connection_management.h" #include "distributed/hash_helpers.h" @@ -26,10 +31,6 @@ #include "distributed/metadata_cache.h" #include "distributed/remote_commands.h" #include "distributed/tuplestore.h" -#include "storage/proc.h" -#include "utils/builtins.h" -#include "utils/hsearch.h" -#include "utils/timestamp.h" /* @@ -191,7 +192,7 @@ BuildGlobalWaitGraph(bool onlyDistributedTx) 
"waiting_node_id, waiting_transaction_num, waiting_transaction_stamp, " "blocking_global_pid,blocking_pid, blocking_node_id, " "blocking_transaction_num, blocking_transaction_stamp, blocking_transaction_waiting " - "FROM citus_internal_local_blocked_processes()"); + "FROM citus_internal.local_blocked_processes()"); } int querySent = SendRemoteCommand(connection, queryString->data); @@ -225,7 +226,7 @@ BuildGlobalWaitGraph(bool onlyDistributedTx) else if (!onlyDistributedTx && colCount != 11) { ereport(WARNING, (errmsg("unexpected number of columns from " - "citus_internal_local_blocked_processes"))); + "citus_internal.local_blocked_processes"))); continue; } @@ -558,7 +559,7 @@ BuildLocalWaitGraph(bool onlyDistributedTx) /* build list of starting procs */ for (int curBackend = 0; curBackend < totalProcs; curBackend++) { - PGPROC *currentProc = &ProcGlobal->allProcs[curBackend]; + PGPROC *currentProc = GetPGProcByNumber(curBackend); BackendData currentBackendData; if (currentProc->pid == 0) diff --git a/src/backend/distributed/transaction/relation_access_tracking.c b/src/backend/distributed/transaction/relation_access_tracking.c index b0af4e476e7..5044941c471 100644 --- a/src/backend/distributed/transaction/relation_access_tracking.c +++ b/src/backend/distributed/transaction/relation_access_tracking.c @@ -15,22 +15,23 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" - #include "miscadmin.h" #include "access/xact.h" +#include "common/hashfn.h" +#include "utils/hsearch.h" +#include "utils/lsyscache.h" + +#include "pg_version_constants.h" + #include "distributed/colocation_utils.h" #include "distributed/hash_helpers.h" #include "distributed/listutils.h" +#include "distributed/metadata_cache.h" #include "distributed/multi_executor.h" #include "distributed/multi_join_order.h" #include "distributed/multi_partitioning_utils.h" -#include "distributed/metadata_cache.h" #include "distributed/relation_access_tracking.h" -#include "utils/hsearch.h" -#include "common/hashfn.h" -#include "utils/lsyscache.h" /* Config variables managed via guc.c */ diff --git a/src/backend/distributed/transaction/remote_transaction.c b/src/backend/distributed/transaction/remote_transaction.c index 0f62417931f..4c26e2478ca 100644 --- a/src/backend/distributed/transaction/remote_transaction.c +++ b/src/backend/distributed/transaction/remote_transaction.c @@ -16,15 +16,22 @@ #include "postgres.h" #include "libpq-fe.h" - #include "miscadmin.h" #include "access/xact.h" +#include "postmaster/postmaster.h" +#include "utils/builtins.h" +#include "utils/hsearch.h" +#include "utils/xid8.h" + #include "distributed/backend_data.h" #include "distributed/citus_safe_lib.h" +#include "distributed/commands/utility_hook.h" #include "distributed/connection_management.h" #include "distributed/listutils.h" +#include "distributed/metadata/distobject.h" #include "distributed/metadata_cache.h" +#include "distributed/metadata_sync.h" #include "distributed/placement_connection.h" #include "distributed/remote_commands.h" #include "distributed/remote_transaction.h" @@ -32,8 +39,6 @@ #include "distributed/transaction_management.h" #include "distributed/transaction_recovery.h" #include "distributed/worker_manager.h" -#include "utils/builtins.h" -#include "utils/hsearch.h" #define PREPARED_TRANSACTION_NAME_FORMAT "citus_%u_%u_"UINT64_FORMAT "_%u" @@ -56,6 +61,9 @@ static void FinishRemoteTransactionSavepointRollback(MultiConnection *connection static void Assign2PCIdentifier(MultiConnection *connection); 
+PG_FUNCTION_INFO_V1(start_management_transaction); +PG_FUNCTION_INFO_V1(execute_command_on_remote_nodes_as_user); +PG_FUNCTION_INFO_V1(commit_management_command_2pc); static char *IsolationLevelName[] = { "READ UNCOMMITTED", @@ -64,6 +72,164 @@ static char *IsolationLevelName[] = { "SERIALIZABLE" }; +/* + * These variables are necessary for running queries from a database that is not + * the Citus main database. Some of these queries need to be propagated to the + * workers and Citus main database will be used for these queries, such as + * CREATE ROLE. For that we create a connection to the Citus main database and + * run queries from there. + */ + +/* The MultiConnection used for connecting Citus main database. */ +MultiConnection *MainDBConnection = NULL; + +/* + * IsMainDBCommand is true if this is a query in the Citus main database that is started + * by a query from a different database. + */ +bool IsMainDBCommand = false; + +/* + * The transaction id of the query from the other database that started the + * main database query. + */ +FullTransactionId OuterXid; + +/* + * Shows if this is the Citus main database or not. We needed a variable instead of + * checking if this database's name is the same as MainDb because we sometimes need + * this value outside a transaction where we cannot reach the current database name. + */ +bool IsMainDB = true; + +/* + * Name of a superuser role to be used during main database connections. + */ +char *SuperuserRole = NULL; + +/* + * IsMainDBCommandInXact shows if the query sent to the main database requires + * a transaction + */ +bool IsMainDBCommandInXact = true; + + +/* + * start_management_transaction starts a management transaction + * in the main database by recording the outer transaction's transaction id and setting + * IsMainDBCommand to true. + */ +Datum +start_management_transaction(PG_FUNCTION_ARGS) +{ + CheckCitusVersion(ERROR); + EnsureSuperUser(); + + OuterXid = PG_GETARG_FULLTRANSACTIONID(0); + IsMainDBCommand = true; + + Use2PCForCoordinatedTransaction(); + + PG_RETURN_VOID(); +} + + +/* + * execute_command_on_remote_nodes_as_user executes the query on the nodes + * other than the current node, using the user passed. + */ +Datum +execute_command_on_remote_nodes_as_user(PG_FUNCTION_ARGS) +{ + CheckCitusVersion(ERROR); + EnsureSuperUser(); + + text *queryText = PG_GETARG_TEXT_P(0); + char *query = text_to_cstring(queryText); + + text *usernameText = PG_GETARG_TEXT_P(1); + char *username = text_to_cstring(usernameText); + + StringInfo queryToSend = makeStringInfo(); + + appendStringInfo(queryToSend, "%s;%s;%s", DISABLE_METADATA_SYNC, query, + ENABLE_METADATA_SYNC); + + SendCommandToWorkersAsUser(REMOTE_NODES, username, queryToSend->data); + PG_RETURN_VOID(); +} + + +/* + * commit_management_command_2pc is a wrapper UDF for + * CoordinatedRemoteTransactionsCommit + */ +Datum +commit_management_command_2pc(PG_FUNCTION_ARGS) +{ + CheckCitusVersion(ERROR); + EnsureSuperUser(); + + RecoverTwoPhaseCommits(); + + PG_RETURN_VOID(); +} + + +/* + * RunCitusMainDBQuery creates a connection to Citus main database if necessary + * and runs the query over the connection in the main database. 
+ */ +void +RunCitusMainDBQuery(char *query) +{ + if (MainDBConnection == NULL) + { + if (strlen(SuperuserRole) == 0) + { + ereport(ERROR, (errmsg("No superuser role is given for Citus main " + "database connection"), + errhint("Set citus.superuser to a superuser role name"))); + } + int flags = 0; + MainDBConnection = GetNodeUserDatabaseConnection(flags, LocalHostName, + PostPortNumber, + SuperuserRole, + MainDb); + + if (IsMainDBCommandInXact) + { + RemoteTransactionBegin(MainDBConnection); + } + } + + SendRemoteCommand(MainDBConnection, query); + + PGresult *result = GetRemoteCommandResult(MainDBConnection, true); + + if (!IsResponseOK(result)) + { + ReportResultError(MainDBConnection, result, ERROR); + } + + ForgetResults(MainDBConnection); +} + + +/* + * CleanCitusMainDBConnection closes and removes the connection to Citus main database. + */ +void +CleanCitusMainDBConnection(void) +{ + if (MainDBConnection == NULL) + { + return; + } + CloseConnection(MainDBConnection); + MainDBConnection = NULL; +} + /* * StartRemoteTransactionBegin initiates beginning the remote transaction in @@ -616,7 +782,7 @@ StartRemoteTransactionPrepare(struct MultiConnection *connection) WorkerNode *workerNode = FindWorkerNode(connection->hostname, connection->port); if (workerNode != NULL) { - LogTransactionRecord(workerNode->groupId, transaction->preparedName); + LogTransactionRecord(workerNode->groupId, transaction->preparedName, OuterXid); } /* diff --git a/src/backend/distributed/transaction/transaction_management.c b/src/backend/distributed/transaction/transaction_management.c index 9a7bd908918..9c7b456807e 100644 --- a/src/backend/distributed/transaction/transaction_management.c +++ b/src/backend/distributed/transaction/transaction_management.c @@ -14,15 +14,23 @@ #include "postgres.h" #include "libpq-fe.h" - #include "miscadmin.h" #include "access/twophase.h" #include "access/xact.h" #include "catalog/dependency.h" #include "common/hashfn.h" +#include "nodes/print.h" +#include "postmaster/postmaster.h" +#include "storage/fd.h" +#include "utils/datum.h" +#include "utils/guc.h" +#include "utils/hsearch.h" +#include "utils/memutils.h" + #include "distributed/backend_data.h" #include "distributed/citus_safe_lib.h" +#include "distributed/commands.h" #include "distributed/connection_management.h" #include "distributed/distributed_planner.h" #include "distributed/function_call_delegation.h" @@ -33,27 +41,24 @@ #include "distributed/locally_reserved_shared_connections.h" #include "distributed/maintenanced.h" #include "distributed/metadata/dependency.h" +#include "distributed/metadata_cache.h" #include "distributed/multi_executor.h" -#include "distributed/multi_logical_replication.h" #include "distributed/multi_explain.h" -#include "distributed/repartition_join_execution.h" -#include "distributed/replication_origin_session_utils.h" -#include "distributed/transaction_management.h" +#include "distributed/multi_logical_replication.h" #include "distributed/placement_connection.h" #include "distributed/relation_access_tracking.h" -#include "distributed/shared_connection_stats.h" +#include "distributed/remote_commands.h" +#include "distributed/repartition_join_execution.h" +#include "distributed/replication_origin_session_utils.h" #include "distributed/shard_cleaner.h" +#include "distributed/shared_connection_stats.h" #include "distributed/subplan_execution.h" +#include "distributed/transaction_management.h" #include "distributed/version_compat.h" #include "distributed/worker_log_messages.h" -#include 
"distributed/commands.h" -#include "distributed/metadata_cache.h" -#include "utils/hsearch.h" -#include "utils/guc.h" -#include "utils/memutils.h" -#include "utils/datum.h" -#include "storage/fd.h" -#include "nodes/print.h" + +#define COMMIT_MANAGEMENT_COMMAND_2PC \ + "SELECT citus_internal.commit_management_command_2pc()" CoordinatedTransactionState CurrentCoordinatedTransactionState = COORD_TRANS_NONE; @@ -317,12 +322,23 @@ CoordinatedTransactionCallback(XactEvent event, void *arg) MemoryContext previousContext = MemoryContextSwitchTo(CitusXactCallbackContext); - if (CurrentCoordinatedTransactionState == COORD_TRANS_PREPARED) + if (CurrentCoordinatedTransactionState == COORD_TRANS_PREPARED && + !IsMainDBCommand) { /* handles both already prepared and open transactions */ CoordinatedRemoteTransactionsCommit(); } + /* + * If this is a non-Citus main database we should try to commit the prepared + * transactions created by the Citus main database on the worker nodes. + */ + if (!IsMainDB && MainDBConnection != NULL && IsMainDBCommandInXact) + { + RunCitusMainDBQuery(COMMIT_MANAGEMENT_COMMAND_2PC); + CleanCitusMainDBConnection(); + } + /* close connections etc. */ if (CurrentCoordinatedTransactionState != COORD_TRANS_NONE) { @@ -378,6 +394,8 @@ CoordinatedTransactionCallback(XactEvent event, void *arg) RemoveIntermediateResultsDirectories(); + CleanCitusMainDBConnection(); + /* handles both already prepared and open transactions */ if (CurrentCoordinatedTransactionState > COORD_TRANS_IDLE) { @@ -509,6 +527,17 @@ CoordinatedTransactionCallback(XactEvent event, void *arg) break; } + + /* + * If this is a non-Citus main database we should commit the Citus + * main database query. So if some error happens on the distributed main + * database query we wouldn't have committed the current query. + */ + if (!IsMainDB && MainDBConnection != NULL && IsMainDBCommandInXact) + { + RunCitusMainDBQuery("COMMIT"); + } + /* * TODO: It'd probably be a good idea to force constraints and * such to 'immediate' here. Deferred triggers might try to send @@ -537,7 +566,10 @@ CoordinatedTransactionCallback(XactEvent event, void *arg) * us to mark failed placements as invalid. Better don't use * this for anything important (i.e. DDL/metadata). */ - CoordinatedRemoteTransactionsCommit(); + if (IsMainDB) + { + CoordinatedRemoteTransactionsCommit(); + } CurrentCoordinatedTransactionState = COORD_TRANS_COMMITTED; } @@ -1139,18 +1171,17 @@ ResetPropagatedObjects(void) /* - * HasAnyDependencyInPropagatedObjects decides if any dependency of given object is + * HasAnyObjectInPropagatedObjects decides if any of the objects in given list are * propagated in the current transaction. 
*/ bool -HasAnyDependencyInPropagatedObjects(const ObjectAddress *objectAddress) +HasAnyObjectInPropagatedObjects(List *objectList) { - List *dependencyList = GetAllSupportedDependenciesForObject(objectAddress); - ObjectAddress *dependency = NULL; - foreach_ptr(dependency, dependencyList) + ObjectAddress *object = NULL; + foreach_ptr(object, objectList) { /* first search in root transaction */ - if (DependencyInPropagatedObjectsHash(PropagatedObjectsInTx, dependency)) + if (DependencyInPropagatedObjectsHash(PropagatedObjectsInTx, object)) { return true; } @@ -1163,7 +1194,7 @@ HasAnyDependencyInPropagatedObjects(const ObjectAddress *objectAddress) SubXactContext *state = NULL; foreach_ptr(state, activeSubXactContexts) { - if (DependencyInPropagatedObjectsHash(state->propagatedObjects, dependency)) + if (DependencyInPropagatedObjectsHash(state->propagatedObjects, object)) { return true; } diff --git a/src/backend/distributed/transaction/transaction_recovery.c b/src/backend/distributed/transaction/transaction_recovery.c index b46419dc2d8..c31dc85a2a9 100644 --- a/src/backend/distributed/transaction/transaction_recovery.c +++ b/src/backend/distributed/transaction/transaction_recovery.c @@ -12,15 +12,13 @@ *------------------------------------------------------------------------- */ -#include "postgres.h" +#include +#include -#include "distributed/pg_version_constants.h" +#include "postgres.h" -#include "miscadmin.h" #include "libpq-fe.h" - -#include -#include +#include "miscadmin.h" #include "access/genam.h" #include "access/heapam.h" @@ -28,6 +26,19 @@ #include "access/relscan.h" #include "access/xact.h" #include "catalog/indexing.h" +#include "lib/stringinfo.h" +#include "storage/lmgr.h" +#include "storage/lock.h" +#include "storage/procarray.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/memutils.h" +#include "utils/rel.h" +#include "utils/syscache.h" +#include "utils/xid8.h" + +#include "pg_version_constants.h" + #include "distributed/backend_data.h" #include "distributed/connection_management.h" #include "distributed/listutils.h" @@ -36,15 +47,8 @@ #include "distributed/remote_commands.h" #include "distributed/resource_lock.h" #include "distributed/transaction_recovery.h" -#include "distributed/worker_manager.h" #include "distributed/version_compat.h" -#include "lib/stringinfo.h" -#include "storage/lmgr.h" -#include "storage/lock.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/memutils.h" -#include "utils/rel.h" +#include "distributed/worker_manager.h" /* exports for SQL callable functions */ @@ -81,7 +85,7 @@ recover_prepared_transactions(PG_FUNCTION_ARGS) * prepared transaction should be committed. 
*/ void -LogTransactionRecord(int32 groupId, char *transactionName) +LogTransactionRecord(int32 groupId, char *transactionName, FullTransactionId outerXid) { Datum values[Natts_pg_dist_transaction]; bool isNulls[Natts_pg_dist_transaction]; @@ -92,6 +96,7 @@ LogTransactionRecord(int32 groupId, char *transactionName) values[Anum_pg_dist_transaction_groupid - 1] = Int32GetDatum(groupId); values[Anum_pg_dist_transaction_gid - 1] = CStringGetTextDatum(transactionName); + values[Anum_pg_dist_transaction_outerxid - 1] = FullTransactionIdGetDatum(outerXid); /* open transaction relation and insert new tuple */ Relation pgDistTransaction = table_open(DistTransactionRelationId(), @@ -257,6 +262,71 @@ RecoverWorkerTransactions(WorkerNode *workerNode) continue; } + bool outerXidIsNull = false; + Datum outerXidDatum = 0; + if (EnableVersionChecks || + SearchSysCacheExistsAttName(DistTransactionRelationId(), "outer_xid")) + { + /* Check if the transaction is created by an outer transaction from a non-main database */ + outerXidDatum = heap_getattr(heapTuple, + Anum_pg_dist_transaction_outerxid, + tupleDescriptor, &outerXidIsNull); + } + else + { + /* + * Normally we don't try to recover prepared transactions when the + * binary version doesn't match the sql version. However, we skip + * those checks in regression tests by disabling + * citus.enable_version_checks. And when this is the case, while + * the C code looks for "outer_xid" attribute, pg_dist_transaction + * doesn't yet have it. + */ + Assert(!EnableVersionChecks); + } + + TransactionId outerXid = 0; + if (!outerXidIsNull) + { + FullTransactionId outerFullXid = DatumGetFullTransactionId(outerXidDatum); + outerXid = XidFromFullTransactionId(outerFullXid); + } + + if (outerXid != 0) + { + bool outerXactIsInProgress = TransactionIdIsInProgress(outerXid); + bool outerXactDidCommit = TransactionIdDidCommit(outerXid); + if (outerXactIsInProgress && !outerXactDidCommit) + { + /* + * The transaction is initiated from an outer transaction and the outer + * transaction is not yet committed, so we should not commit either. + * We remove this transaction from the pendingTransactionSet so it'll + * not be aborted by the loop below. + */ + hash_search(pendingTransactionSet, transactionName, HASH_REMOVE, + &foundPreparedTransactionBeforeCommit); + continue; + } + else if (!outerXactIsInProgress && !outerXactDidCommit) + { + /* + * Since outer transaction isn't in progress and did not commit we need to + * abort the prepared transaction too. We do this by simply doing the same + * thing we would do for transactions that are initiated from the main + * database. + */ + continue; + } + else + { + /* + * Outer transaction did commit, so we can try to commit the prepared + * transaction too. + */ + } + } + /* * Remove the transaction from the pending list such that only transactions * that need to be aborted remain at the end. 
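Before the worker_transaction.c changes, one note on the RecoverWorkerTransactions() hunk above: for a prepared transaction whose pg_dist_transaction record carries a non-zero outer_xid, the recovery decision depends only on the state of that outer transaction. A condensed, self-contained sketch of that decision (the enum and function names here are illustrative, not taken from the patch):

/* Sketch of the outer_xid decision made in RecoverWorkerTransactions() above. */
typedef enum RecoveryAction
{
	RECOVERY_SKIP,    /* leave the prepared transaction untouched for now */
	RECOVERY_ABORT,   /* keep it in the pending set so the loop below aborts it */
	RECOVERY_COMMIT   /* commit it like any other pg_dist_transaction record */
} RecoveryAction;

static RecoveryAction
DecideRecoveryAction(bool outerXactIsInProgress, bool outerXactDidCommit)
{
	if (outerXactIsInProgress && !outerXactDidCommit)
	{
		/* the originating transaction is still running; do not decide yet */
		return RECOVERY_SKIP;
	}
	else if (!outerXactIsInProgress && !outerXactDidCommit)
	{
		/* the originating transaction aborted; abort the prepared one too */
		return RECOVERY_ABORT;
	}

	/* the originating transaction committed; safe to commit the prepared one */
	return RECOVERY_COMMIT;
}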
diff --git a/src/backend/distributed/transaction/worker_transaction.c b/src/backend/distributed/transaction/worker_transaction.c index 03ecbea7233..c6fcee107d6 100644 --- a/src/backend/distributed/transaction/worker_transaction.c +++ b/src/backend/distributed/transaction/worker_transaction.c @@ -11,29 +11,33 @@ *------------------------------------------------------------------------- */ -#include "postgres.h" -#include "miscadmin.h" -#include "libpq-fe.h" - #include #include +#include "postgres.h" + +#include "libpq-fe.h" +#include "miscadmin.h" + #include "access/xact.h" +#include "utils/builtins.h" +#include "utils/memutils.h" + #include "distributed/connection_management.h" +#include "distributed/jsonbutils.h" #include "distributed/listutils.h" #include "distributed/metadata_cache.h" -#include "distributed/resource_lock.h" #include "distributed/metadata_sync.h" -#include "distributed/remote_commands.h" #include "distributed/pg_dist_node.h" #include "distributed/pg_dist_transaction.h" +#include "distributed/remote_commands.h" +#include "distributed/resource_lock.h" #include "distributed/transaction_recovery.h" #include "distributed/worker_manager.h" #include "distributed/worker_transaction.h" -#include "distributed/jsonbutils.h" -#include "utils/memutils.h" -#include "utils/builtins.h" +static void SendBareCommandListToMetadataNodesInternal(List *commandList, + TargetWorkerSet targetWorkerSet); static void SendCommandToMetadataWorkersParams(const char *command, const char *user, int parameterCount, const Oid *parameterTypes, @@ -150,6 +154,74 @@ SendCommandListToWorkersWithMetadata(List *commands) } +/* + * SendCommandToRemoteNodesWithMetadata sends a command to remote nodes in + * parallel. Commands are committed on the nodes when the local transaction + * commits. + */ +void +SendCommandToRemoteNodesWithMetadata(const char *command) +{ + SendCommandToRemoteMetadataNodesParams(command, CurrentUserName(), + 0, NULL, NULL); } + + +/* + * SendCommandToRemoteNodesWithMetadataViaSuperUser sends a command to remote + * nodes in parallel by opening a super user connection. Commands are committed + * on the nodes when the local transaction commits. The connections are made as + * the extension owner to ensure write access to the Citus metadata tables. + * + * Since we prevent opening superuser connections for metadata tables, using it + * is discouraged. Consider using it only for propagating pg_dist_object + * tuples for dependent objects. + */ +void +SendCommandToRemoteNodesWithMetadataViaSuperUser(const char *command) +{ + SendCommandToRemoteMetadataNodesParams(command, CitusExtensionOwnerName(), + 0, NULL, NULL); +} + + +/* + * SendCommandListToRemoteNodesWithMetadata sends all commands to remote nodes + * with the current user. See `SendCommandToRemoteNodesWithMetadata` for details. + */ +void +SendCommandListToRemoteNodesWithMetadata(List *commands) +{ + char *command = NULL; + foreach_ptr(command, commands) + { + SendCommandToRemoteNodesWithMetadata(command); + } +} + + +/* + * SendCommandToRemoteMetadataNodesParams is a wrapper around + * SendCommandToWorkersParamsInternal() that can be used to send commands + * to remote metadata nodes.
+ */ +void +SendCommandToRemoteMetadataNodesParams(const char *command, + const char *user, int parameterCount, + const Oid *parameterTypes, + const char *const *parameterValues) +{ + /* use METADATA_NODES so that ErrorIfAnyMetadataNodeOutOfSync checks local node as well */ + List *workerNodeList = TargetWorkerSetNodeList(METADATA_NODES, + RowShareLock); + + ErrorIfAnyMetadataNodeOutOfSync(workerNodeList); + + SendCommandToWorkersParamsInternal(REMOTE_METADATA_NODES, command, user, + parameterCount, parameterTypes, parameterValues); +} + + /* * TargetWorkerSetNodeList returns a list of WorkerNode's that satisfies the * TargetWorkerSet. @@ -158,21 +230,34 @@ List * TargetWorkerSetNodeList(TargetWorkerSet targetWorkerSet, LOCKMODE lockMode) { List *workerNodeList = NIL; - if (targetWorkerSet == ALL_SHARD_NODES || targetWorkerSet == METADATA_NODES) + if (targetWorkerSet == ALL_SHARD_NODES || + targetWorkerSet == METADATA_NODES) { workerNodeList = ActivePrimaryNodeList(lockMode); } - else + else if (targetWorkerSet == REMOTE_NODES || targetWorkerSet == REMOTE_METADATA_NODES) + { + workerNodeList = ActivePrimaryRemoteNodeList(lockMode); + } + else if (targetWorkerSet == NON_COORDINATOR_METADATA_NODES || + targetWorkerSet == NON_COORDINATOR_NODES) { workerNodeList = ActivePrimaryNonCoordinatorNodeList(lockMode); } + else + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid target worker set: %d", targetWorkerSet))); + } + List *result = NIL; WorkerNode *workerNode = NULL; foreach_ptr(workerNode, workerNodeList) { - if ((targetWorkerSet == NON_COORDINATOR_METADATA_NODES || targetWorkerSet == - METADATA_NODES) && + if ((targetWorkerSet == NON_COORDINATOR_METADATA_NODES || + targetWorkerSet == REMOTE_METADATA_NODES || + targetWorkerSet == METADATA_NODES) && !workerNode->hasMetadata) { continue; @@ -186,16 +271,42 @@ TargetWorkerSetNodeList(TargetWorkerSet targetWorkerSet, LOCKMODE lockMode) /* - * SendBareCommandListToMetadataWorkers sends a list of commands to metadata - * workers in serial. Commands are committed immediately: new connections are - * always used and no transaction block is used (hence "bare"). The connections - * are made as the extension owner to ensure write access to the Citus metadata - * tables. Primarly useful for INDEX commands using CONCURRENTLY. + * SendBareCommandListToRemoteMetadataNodes is a wrapper around + * SendBareCommandListToMetadataNodesInternal() that can be used to send + * bare commands to remote metadata nodes. + */ +void +SendBareCommandListToRemoteMetadataNodes(List *commandList) +{ + SendBareCommandListToMetadataNodesInternal(commandList, + REMOTE_METADATA_NODES); +} + + +/* + * SendBareCommandListToMetadataWorkers is a wrapper around + * SendBareCommandListToMetadataNodesInternal() that can be used to send + * bare commands to metadata workers. */ void SendBareCommandListToMetadataWorkers(List *commandList) { - TargetWorkerSet targetWorkerSet = NON_COORDINATOR_METADATA_NODES; + SendBareCommandListToMetadataNodesInternal(commandList, + NON_COORDINATOR_METADATA_NODES); +} + + +/* + * SendBareCommandListToMetadataNodesInternal sends a list of commands to given + * target worker set in serial. Commands are committed immediately: new connections + * are always used and no transaction block is used (hence "bare"). The connections + * are made as the extension owner to ensure write access to the Citus metadata + * tables. Primarly useful for INDEX commands using CONCURRENTLY. 
+ */ +static void +SendBareCommandListToMetadataNodesInternal(List *commandList, + TargetWorkerSet targetWorkerSet) +{ List *workerNodeList = TargetWorkerSetNodeList(targetWorkerSet, RowShareLock); char *nodeUser = CurrentUserName(); diff --git a/src/backend/distributed/utils/acquire_lock.c b/src/backend/distributed/utils/acquire_lock.c index f414167b3d7..d0f6193c272 100644 --- a/src/backend/distributed/utils/acquire_lock.c +++ b/src/backend/distributed/utils/acquire_lock.c @@ -22,12 +22,12 @@ #include "postgres.h" +#include "miscadmin.h" +#include "pgstat.h" #include "access/xact.h" #include "catalog/pg_type.h" #include "executor/spi.h" -#include "miscadmin.h" -#include "pgstat.h" #include "portability/instr_time.h" #include "storage/ipc.h" #include "storage/latch.h" diff --git a/src/backend/distributed/utils/aggregate_utils.c b/src/backend/distributed/utils/aggregate_utils.c index 773e0aa25cd..3fd584df9ae 100644 --- a/src/backend/distributed/utils/aggregate_utils.c +++ b/src/backend/distributed/utils/aggregate_utils.c @@ -16,11 +16,14 @@ #include "postgres.h" +#include "fmgr.h" +#include "miscadmin.h" +#include "pg_config_manual.h" + #include "access/htup_details.h" #include "catalog/pg_aggregate.h" #include "catalog/pg_proc.h" #include "catalog/pg_type.h" -#include "distributed/version_compat.h" #include "nodes/nodeFuncs.h" #include "utils/acl.h" #include "utils/builtins.h" @@ -28,9 +31,8 @@ #include "utils/lsyscache.h" #include "utils/syscache.h" #include "utils/typcache.h" -#include "fmgr.h" -#include "miscadmin.h" -#include "pg_config_manual.h" + +#include "distributed/version_compat.h" PG_FUNCTION_INFO_V1(worker_partial_agg_sfunc); PG_FUNCTION_INFO_V1(worker_partial_agg_ffunc); diff --git a/src/backend/distributed/utils/array_type.c b/src/backend/distributed/utils/array_type.c index 70c7dde141f..1c3663d4360 100644 --- a/src/backend/distributed/utils/array_type.c +++ b/src/backend/distributed/utils/array_type.c @@ -10,16 +10,19 @@ */ #include "postgres.h" + #include "miscadmin.h" -#include "pg_version_compat.h" #include "catalog/pg_type.h" #include "nodes/pg_list.h" -#include "distributed/utils/array_type.h" #include "utils/array.h" #include "utils/builtins.h" #include "utils/lsyscache.h" +#include "pg_version_compat.h" + +#include "distributed/utils/array_type.h" + /* * DeconstructArrayObject takes in a single dimensional array, and deserializes diff --git a/src/backend/distributed/utils/background_jobs.c b/src/backend/distributed/utils/background_jobs.c index 2b5ce2dca10..a7a124c7487 100644 --- a/src/backend/distributed/utils/background_jobs.c +++ b/src/backend/distributed/utils/background_jobs.c @@ -27,17 +27,17 @@ #include "postgres.h" +#include "libpq-fe.h" +#include "pgstat.h" #include "safe_mem_lib.h" #include "access/xact.h" #include "commands/dbcommands.h" #include "common/hashfn.h" -#include "libpq-fe.h" #include "libpq/pqformat.h" #include "libpq/pqmq.h" #include "libpq/pqsignal.h" #include "parser/analyze.h" -#include "pgstat.h" #include "storage/dsm.h" #include "storage/ipc.h" #include "storage/procarray.h" @@ -62,9 +62,9 @@ #include "distributed/maintenanced.h" #include "distributed/metadata_cache.h" #include "distributed/metadata_utility.h" +#include "distributed/resource_lock.h" #include "distributed/shard_cleaner.h" #include "distributed/shard_rebalancer.h" -#include "distributed/resource_lock.h" /* Table-of-contents constants for our dynamic shared memory segment. 
*/ #define CITUS_BACKGROUND_TASK_MAGIC 0x51028081 diff --git a/src/backend/distributed/utils/cancel_utils.c b/src/backend/distributed/utils/cancel_utils.c index 17383c03436..f135212e4ba 100644 --- a/src/backend/distributed/utils/cancel_utils.c +++ b/src/backend/distributed/utils/cancel_utils.c @@ -8,7 +8,9 @@ #include "postgres.h" + #include "miscadmin.h" + #include "distributed/cancel_utils.h" diff --git a/src/backend/distributed/utils/citus_clauses.c b/src/backend/distributed/utils/citus_clauses.c index 82900ea1a74..f88b173afe0 100644 --- a/src/backend/distributed/utils/citus_clauses.c +++ b/src/backend/distributed/utils/citus_clauses.c @@ -8,12 +8,6 @@ #include "postgres.h" -#include "distributed/citus_clauses.h" -#include "distributed/insert_select_planner.h" -#include "distributed/metadata_cache.h" -#include "distributed/multi_router_planner.h" -#include "distributed/version_compat.h" - #include "catalog/pg_proc.h" #include "catalog/pg_type.h" #include "executor/executor.h" @@ -28,6 +22,12 @@ #include "utils/lsyscache.h" #include "utils/syscache.h" +#include "distributed/citus_clauses.h" +#include "distributed/insert_select_planner.h" +#include "distributed/metadata_cache.h" +#include "distributed/multi_router_planner.h" +#include "distributed/version_compat.h" + /* private function declarations */ static bool IsVariableExpression(Node *node); diff --git a/src/backend/distributed/utils/citus_copyfuncs.c b/src/backend/distributed/utils/citus_copyfuncs.c index 7e1379ef32a..e283a3034c2 100644 --- a/src/backend/distributed/utils/citus_copyfuncs.c +++ b/src/backend/distributed/utils/citus_copyfuncs.c @@ -11,11 +11,11 @@ */ #include "postgres.h" +#include "utils/datum.h" #include "distributed/citus_nodefuncs.h" -#include "distributed/multi_server_executor.h" #include "distributed/listutils.h" -#include "utils/datum.h" +#include "distributed/multi_server_executor.h" /* @@ -326,7 +326,7 @@ CopyNodeTask(COPYFUNC_ARGS) COPY_STRING_FIELD(fetchedExplainAnalyzePlan); COPY_SCALAR_FIELD(fetchedExplainAnalyzeExecutionDuration); COPY_SCALAR_FIELD(isLocalTableModification); - COPY_SCALAR_FIELD(cannotBeExecutedInTransction); + COPY_SCALAR_FIELD(cannotBeExecutedInTransaction); } diff --git a/src/backend/distributed/utils/citus_depended_object.c b/src/backend/distributed/utils/citus_depended_object.c index 3b5a34b54f0..7588f85949c 100644 --- a/src/backend/distributed/utils/citus_depended_object.c +++ b/src/backend/distributed/utils/citus_depended_object.c @@ -7,6 +7,7 @@ */ #include "postgres.h" + #include "miscadmin.h" #include "catalog/namespace.h" @@ -14,17 +15,17 @@ #include "catalog/pg_am.h" #include "catalog/pg_amop.h" #include "catalog/pg_amproc.h" -#include "catalog/pg_attribute.h" #include "catalog/pg_attrdef.h" -#include "catalog/pg_constraint.h" +#include "catalog/pg_attribute.h" #include "catalog/pg_class.h" +#include "catalog/pg_constraint.h" #include "catalog/pg_depend.h" #include "catalog/pg_enum.h" #include "catalog/pg_event_trigger.h" #include "catalog/pg_language.h" #include "catalog/pg_namespace.h" -#include "catalog/pg_operator.h" #include "catalog/pg_opclass.h" +#include "catalog/pg_operator.h" #include "catalog/pg_opfamily.h" #include "catalog/pg_proc.h" #include "catalog/pg_rewrite.h" @@ -35,12 +36,6 @@ #include "catalog/pg_ts_dict.h" #include "catalog/pg_ts_template.h" #include "catalog/pg_type.h" -#include "distributed/citus_depended_object.h" -#include "distributed/metadata_cache.h" -#include "distributed/commands.h" -#include "distributed/listutils.h" -#include 
"distributed/log_utils.h" -#include "distributed/shared_library_init.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" #include "nodes/parsenodes.h" @@ -49,6 +44,13 @@ #include "utils/lsyscache.h" #include "utils/syscache.h" +#include "distributed/citus_depended_object.h" +#include "distributed/commands.h" +#include "distributed/listutils.h" +#include "distributed/log_utils.h" +#include "distributed/metadata_cache.h" +#include "distributed/shared_library_init.h" + /* * GUC hides any objects, which depends on citus extension, from pg meta class queries, * it is intended to be used in vanilla tests to not break postgres test logs @@ -463,8 +465,8 @@ static bool AnyObjectViolatesOwnership(DropStmt *dropStmt) { bool hasOwnershipViolation = false; - volatile ObjectAddress objectAddress = { 0 }; - Relation relation = NULL; + ObjectAddress objectAddress = { 0 }; + volatile Relation relation = NULL; ObjectType objectType = dropStmt->removeType; bool missingOk = dropStmt->missing_ok; @@ -478,8 +480,17 @@ AnyObjectViolatesOwnership(DropStmt *dropStmt) Node *object = NULL; foreach_ptr(object, dropStmt->objects) { + Relation rel = NULL; objectAddress = get_object_address(objectType, object, - &relation, AccessShareLock, missingOk); + &rel, AccessShareLock, missingOk); + + /* + * The object relation is qualified with volatile and its value is obtained from + * get_object_address(). Unless we can qualify the corresponding parameter of + * get_object_address() with volatile (this is a function defined in PostgreSQL), + * we cannot get rid of this assignment. + */ + relation = rel; if (OidIsValid(objectAddress.objectId)) { diff --git a/src/backend/distributed/utils/citus_nodefuncs.c b/src/backend/distributed/utils/citus_nodefuncs.c index aee1ff48ae3..0b03926f862 100644 --- a/src/backend/distributed/utils/citus_nodefuncs.c +++ b/src/backend/distributed/utils/citus_nodefuncs.c @@ -10,16 +10,17 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" - #include "catalog/pg_type.h" -#include "distributed/citus_nodes.h" + +#include "pg_version_constants.h" + #include "distributed/citus_nodefuncs.h" +#include "distributed/citus_nodes.h" #include "distributed/coordinator_protocol.h" +#include "distributed/distributed_planner.h" #include "distributed/errormessage.h" #include "distributed/log_utils.h" #include "distributed/metadata_cache.h" -#include "distributed/distributed_planner.h" #include "distributed/multi_router_planner.h" #include "distributed/multi_server_executor.h" @@ -141,7 +142,17 @@ SetRangeTblExtraData(RangeTblEntry *rte, CitusRTEKind rteKind, char *fragmentSch fauxFunction->funcexpr = (Node *) fauxFuncExpr; /* set the column count to pass ruleutils checks, not used elsewhere */ - fauxFunction->funccolcount = list_length(rte->eref->colnames); + if (rte->relid != 0) + { + Relation rel = RelationIdGetRelation(rte->relid); + fauxFunction->funccolcount = RelationGetNumberOfAttributes(rel); + RelationClose(rel); + } + else + { + fauxFunction->funccolcount = list_length(rte->eref->colnames); + } + fauxFunction->funccolnames = funcColumnNames; fauxFunction->funccoltypes = funcColumnTypes; fauxFunction->funccoltypmods = funcColumnTypeMods; diff --git a/src/backend/distributed/utils/citus_outfuncs.c b/src/backend/distributed/utils/citus_outfuncs.c index b4062751ace..751063789d7 100644 --- a/src/backend/distributed/utils/citus_outfuncs.c +++ b/src/backend/distributed/utils/citus_outfuncs.c @@ -18,7 +18,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" 
+#include "pg_version_constants.h" #include @@ -535,7 +535,7 @@ OutTask(OUTFUNC_ARGS) WRITE_STRING_FIELD(fetchedExplainAnalyzePlan); WRITE_FLOAT_FIELD(fetchedExplainAnalyzeExecutionDuration, "%.2f"); WRITE_BOOL_FIELD(isLocalTableModification); - WRITE_BOOL_FIELD(cannotBeExecutedInTransction); + WRITE_BOOL_FIELD(cannotBeExecutedInTransaction); } diff --git a/src/backend/distributed/utils/citus_safe_lib.c b/src/backend/distributed/utils/citus_safe_lib.c index 82fa8f6f225..2d504a644f1 100644 --- a/src/backend/distributed/utils/citus_safe_lib.c +++ b/src/backend/distributed/utils/citus_safe_lib.c @@ -12,16 +12,17 @@ *------------------------------------------------------------------------- */ -#include "postgres.h" +#include -#include "distributed/pg_version_constants.h" +#include "postgres.h" #include "safe_lib.h" -#include +#include "lib/stringinfo.h" + +#include "pg_version_constants.h" #include "distributed/citus_safe_lib.h" -#include "lib/stringinfo.h" /* diff --git a/src/backend/distributed/utils/citus_stat_tenants.c b/src/backend/distributed/utils/citus_stat_tenants.c index aa813e15243..6af5c0d586c 100644 --- a/src/backend/distributed/utils/citus_stat_tenants.c +++ b/src/backend/distributed/utils/citus_stat_tenants.c @@ -8,21 +8,13 @@ *------------------------------------------------------------------------- */ +#include + #include "postgres.h" + #include "unistd.h" #include "access/hash.h" -#include "distributed/citus_safe_lib.h" -#include "distributed/colocation_utils.h" -#include "distributed/distributed_planner.h" -#include "distributed/jsonbutils.h" -#include "distributed/log_utils.h" -#include "distributed/listutils.h" -#include "distributed/metadata_cache.h" -#include "distributed/multi_executor.h" -#include "distributed/tenant_schema_metadata.h" -#include "distributed/tuplestore.h" -#include "distributed/utils/citus_stat_tenants.h" #include "executor/execdesc.h" #include "storage/ipc.h" #include "storage/lwlock.h" @@ -34,7 +26,17 @@ #include "utils/lsyscache.h" #include "utils/syscache.h" -#include +#include "distributed/citus_safe_lib.h" +#include "distributed/colocation_utils.h" +#include "distributed/distributed_planner.h" +#include "distributed/jsonbutils.h" +#include "distributed/listutils.h" +#include "distributed/log_utils.h" +#include "distributed/metadata_cache.h" +#include "distributed/multi_executor.h" +#include "distributed/tenant_schema_metadata.h" +#include "distributed/tuplestore.h" +#include "distributed/utils/citus_stat_tenants.h" #if (PG_VERSION_NUM >= PG_VERSION_15) #include "common/pg_prng.h" diff --git a/src/backend/distributed/utils/citus_version.c b/src/backend/distributed/utils/citus_version.c index 95945a30f62..edae4f9273f 100644 --- a/src/backend/distributed/utils/citus_version.c +++ b/src/backend/distributed/utils/citus_version.c @@ -11,9 +11,10 @@ #include "postgres.h" -#include "citus_version.h" #include "utils/builtins.h" +#include "citus_version.h" + /* exports for SQL callable functions */ PG_FUNCTION_INFO_V1(citus_version); diff --git a/src/backend/distributed/utils/colocation_utils.c b/src/backend/distributed/utils/colocation_utils.c index e7007874bb3..c189195271d 100644 --- a/src/backend/distributed/utils/colocation_utils.c +++ b/src/backend/distributed/utils/colocation_utils.c @@ -10,6 +10,7 @@ */ #include "postgres.h" + #include "miscadmin.h" #include "access/genam.h" @@ -19,28 +20,29 @@ #include "catalog/indexing.h" #include "catalog/pg_type.h" #include "commands/sequence.h" +#include "storage/lmgr.h" +#include "utils/builtins.h" +#include 
"utils/fmgroids.h" +#include "utils/lsyscache.h" +#include "utils/rel.h" + #include "distributed/colocation_utils.h" #include "distributed/commands.h" -#include "distributed/listutils.h" -#include "distributed/metadata_utility.h" #include "distributed/coordinator_protocol.h" +#include "distributed/listutils.h" #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" +#include "distributed/metadata_utility.h" #include "distributed/multi_logical_planner.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/pg_dist_colocation.h" #include "distributed/resource_lock.h" #include "distributed/shardinterval_utils.h" #include "distributed/tenant_schema_metadata.h" -#include "distributed/version_compat.h" #include "distributed/utils/array_type.h" +#include "distributed/version_compat.h" #include "distributed/worker_protocol.h" #include "distributed/worker_transaction.h" -#include "storage/lmgr.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/lsyscache.h" -#include "utils/rel.h" /* local function forward declarations */ @@ -360,10 +362,8 @@ ErrorIfShardPlacementsNotColocated(Oid leftRelationId, Oid rightRelationId) leftRelationName, rightRelationName))); } - List *leftPlacementList = ShardPlacementListSortedByWorker( - leftShardId); - List *rightPlacementList = ShardPlacementListSortedByWorker( - rightShardId); + List *leftPlacementList = ShardPlacementList(leftShardId); + List *rightPlacementList = ShardPlacementList(rightShardId); if (list_length(leftPlacementList) != list_length(rightPlacementList)) { diff --git a/src/backend/distributed/utils/directory.c b/src/backend/distributed/utils/directory.c index b749b9cd607..6701bf8fb13 100644 --- a/src/backend/distributed/utils/directory.c +++ b/src/backend/distributed/utils/directory.c @@ -12,6 +12,7 @@ #include #include "postgres.h" + #include "funcapi.h" #include "miscadmin.h" @@ -28,7 +29,7 @@ static bool FileIsLink(const char *filename, struct stat filestat); void CitusCreateDirectory(StringInfo directoryName) { - int makeOK = mkdir(directoryName->data, S_IRWXU); + int makeOK = MakePGDirectory(directoryName->data); if (makeOK != 0) { ereport(ERROR, (errcode_for_file_access(), diff --git a/src/backend/distributed/utils/distribution_column.c b/src/backend/distributed/utils/distribution_column.c index 474133f73bc..5927be612fe 100644 --- a/src/backend/distributed/utils/distribution_column.c +++ b/src/backend/distributed/utils/distribution_column.c @@ -12,19 +12,14 @@ #include "postgres.h" - #include "access/attnum.h" #include "access/heapam.h" #include "access/htup_details.h" -#include "distributed/distribution_column.h" -#include "distributed/metadata_cache.h" -#include "distributed/multi_partitioning_utils.h" -#include "distributed/version_compat.h" #include "nodes/makefuncs.h" #include "nodes/nodes.h" #include "nodes/primnodes.h" -#include "parser/scansup.h" #include "parser/parse_relation.h" +#include "parser/scansup.h" #include "utils/builtins.h" #include "utils/elog.h" #include "utils/errcodes.h" @@ -33,6 +28,11 @@ #include "utils/relcache.h" #include "utils/syscache.h" +#include "distributed/distribution_column.h" +#include "distributed/metadata_cache.h" +#include "distributed/multi_partitioning_utils.h" +#include "distributed/version_compat.h" + /* exports for SQL callable functions */ PG_FUNCTION_INFO_V1(column_name_to_column); diff --git a/src/backend/distributed/utils/distribution_column_map.c b/src/backend/distributed/utils/distribution_column_map.c index 
c3c0db01f72..43f9939b1d2 100644 --- a/src/backend/distributed/utils/distribution_column_map.c +++ b/src/backend/distributed/utils/distribution_column_map.c @@ -11,12 +11,13 @@ #include "postgres.h" #include "common/hashfn.h" +#include "nodes/primnodes.h" + #include "distributed/distribution_column.h" #include "distributed/listutils.h" #include "distributed/multi_join_order.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/utils/distribution_column_map.h" -#include "nodes/primnodes.h" /* diff --git a/src/backend/distributed/utils/enable_ssl.c b/src/backend/distributed/utils/enable_ssl.c index cac32f74c1a..261225450c3 100644 --- a/src/backend/distributed/utils/enable_ssl.c +++ b/src/backend/distributed/utils/enable_ssl.c @@ -18,17 +18,19 @@ * it otherwise we get warnings about redefining this value. This needs to be * done before including libpq.h. */ -#include "distributed/pg_version_constants.h" +#include "miscadmin.h" -#include "distributed/connection_management.h" -#include "distributed/memutils.h" -#include "distributed/worker_protocol.h" #include "libpq/libpq.h" -#include "miscadmin.h" #include "nodes/parsenodes.h" #include "postmaster/postmaster.h" #include "utils/guc.h" +#include "pg_version_constants.h" + +#include "distributed/connection_management.h" +#include "distributed/memutils.h" +#include "distributed/worker_protocol.h" + #ifdef USE_OPENSSL #include "openssl/dsa.h" #include "openssl/err.h" diff --git a/src/backend/distributed/utils/errormessage.c b/src/backend/distributed/utils/errormessage.c index 72758f9cac8..dbc55019deb 100644 --- a/src/backend/distributed/utils/errormessage.c +++ b/src/backend/distributed/utils/errormessage.c @@ -6,13 +6,14 @@ */ #include "postgres.h" -#include "utils/memutils.h" #include "common/sha2.h" +#include "utils/builtins.h" +#include "utils/memutils.h" + #include "distributed/citus_nodes.h" #include "distributed/errormessage.h" #include "distributed/log_utils.h" -#include "utils/builtins.h" /* diff --git a/src/backend/distributed/utils/foreign_key_relationship.c b/src/backend/distributed/utils/foreign_key_relationship.c index 2858e6ed3ba..1abb7ae0717 100644 --- a/src/backend/distributed/utils/foreign_key_relationship.c +++ b/src/backend/distributed/utils/foreign_key_relationship.c @@ -12,28 +12,29 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" - #include "access/genam.h" #include "access/htup_details.h" #include "access/stratnum.h" #include "access/table.h" #include "catalog/pg_constraint.h" -#include "distributed/commands.h" -#include "distributed/hash_helpers.h" -#include "distributed/foreign_key_relationship.h" -#include "distributed/hash_helpers.h" -#include "distributed/listutils.h" -#include "distributed/metadata_cache.h" -#include "distributed/version_compat.h" +#include "common/hashfn.h" #include "nodes/pg_list.h" #include "storage/lockdefs.h" +#include "utils/catcache.h" #include "utils/fmgroids.h" #include "utils/hsearch.h" -#include "common/hashfn.h" #include "utils/inval.h" #include "utils/memutils.h" +#include "pg_version_constants.h" + +#include "distributed/commands.h" +#include "distributed/foreign_key_relationship.h" +#include "distributed/hash_helpers.h" +#include "distributed/listutils.h" +#include "distributed/metadata_cache.h" +#include "distributed/version_compat.h" + /* * ForeignConstraintRelationshipGraph holds the graph data structure for foreign constraint relationship @@ -96,6 +97,8 @@ static List * GetConnectedListHelper(ForeignConstraintRelationshipNode *node, bool 
isReferencing); static List * GetForeignConstraintRelationshipHelper(Oid relationId, bool isReferencing); +MemoryContext ForeignConstraintRelationshipMemoryContext = NULL; + /* * GetForeignKeyConnectedRelationIdList returns a list of relation id's for @@ -321,17 +324,36 @@ CreateForeignConstraintRelationshipGraph() return; } - ClearForeignConstraintRelationshipGraphContext(); + /* + * Lazily create our memory context once and reset it on every reuse. + * Since we have cleared and invalidated fConstraintRelationshipGraph right + * before this point, we can simply reset the context if it already exists. + */ + if (ForeignConstraintRelationshipMemoryContext == NULL) + { + /* make sure we've initialized CacheMemoryContext */ + if (CacheMemoryContext == NULL) + { + CreateCacheMemoryContext(); + } + + ForeignConstraintRelationshipMemoryContext = AllocSetContextCreate( + CacheMemoryContext, + "Foreign Constraint Relationship Graph Context", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + } + else + { + fConstraintRelationshipGraph = NULL; + MemoryContextReset(ForeignConstraintRelationshipMemoryContext); + } - MemoryContext fConstraintRelationshipMemoryContext = AllocSetContextCreateInternal( - CacheMemoryContext, - "Forign Constraint Relationship Graph Context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + Assert(fConstraintRelationshipGraph == NULL); MemoryContext oldContext = MemoryContextSwitchTo( - fConstraintRelationshipMemoryContext); + ForeignConstraintRelationshipMemoryContext); fConstraintRelationshipGraph = (ForeignConstraintRelationshipGraph *) palloc( sizeof(ForeignConstraintRelationshipGraph)); @@ -631,22 +653,3 @@ CreateOrFindNode(HTAB *adjacencyLists, Oid relid) return node; } - - -/* - * ClearForeignConstraintRelationshipGraphContext clear all the allocated memory obtained - * for foreign constraint relationship graph. Since all the variables of relationship - * graph was obtained within the same context, destroying hash map is enough as - * it deletes the context.
- */ -void -ClearForeignConstraintRelationshipGraphContext() -{ - if (fConstraintRelationshipGraph == NULL) - { - return; - } - - hash_destroy(fConstraintRelationshipGraph->nodeMap); - fConstraintRelationshipGraph = NULL; -} diff --git a/src/backend/distributed/utils/function.c b/src/backend/distributed/utils/function.c index bfb59181c75..dcfcff6fea9 100644 --- a/src/backend/distributed/utils/function.c +++ b/src/backend/distributed/utils/function.c @@ -10,13 +10,15 @@ */ #include "postgres.h" + #include "fmgr.h" #include "miscadmin.h" #include "commands/defrem.h" -#include "distributed/utils/function.h" #include "utils/lsyscache.h" +#include "distributed/utils/function.h" + /* * GetFunctionInfo first resolves the operator for the given data type, access diff --git a/src/backend/distributed/utils/function_utils.c b/src/backend/distributed/utils/function_utils.c index 48f878e1327..0770b8cb9b3 100644 --- a/src/backend/distributed/utils/function_utils.c +++ b/src/backend/distributed/utils/function_utils.c @@ -10,12 +10,13 @@ #include "postgres.h" #include "catalog/namespace.h" -#include "distributed/function_utils.h" -#include "distributed/version_compat.h" #include "executor/executor.h" #include "utils/builtins.h" #include "utils/regproc.h" +#include "distributed/function_utils.h" +#include "distributed/version_compat.h" + /* * FunctionOid searches for a function that has the given name and the given diff --git a/src/backend/distributed/utils/hash_helpers.c b/src/backend/distributed/utils/hash_helpers.c index d2bfe38fa21..2aaaaef12bf 100644 --- a/src/backend/distributed/utils/hash_helpers.c +++ b/src/backend/distributed/utils/hash_helpers.c @@ -11,9 +11,10 @@ #include "postgres.h" #include "common/hashfn.h" +#include "utils/hsearch.h" + #include "distributed/citus_safe_lib.h" #include "distributed/hash_helpers.h" -#include "utils/hsearch.h" /* diff --git a/src/backend/distributed/utils/jsonbutils.c b/src/backend/distributed/utils/jsonbutils.c index 4855ee00465..47e6aa2c7b8 100644 --- a/src/backend/distributed/utils/jsonbutils.c +++ b/src/backend/distributed/utils/jsonbutils.c @@ -1,21 +1,21 @@ #include "postgres.h" -#include "pg_version_compat.h" +#include "fmgr.h" #include "catalog/namespace.h" #include "catalog/pg_class.h" #include "catalog/pg_collation.h" #include "catalog/pg_type.h" - #include "utils/array.h" +#include "utils/builtins.h" #include "utils/json.h" +#include "utils/lsyscache.h" + +#include "pg_version_compat.h" + #include "distributed/jsonbutils.h" #include "distributed/metadata_cache.h" -#include "utils/builtins.h" -#include "utils/lsyscache.h" -#include "fmgr.h" - /* * ExtractFieldJsonb gets value of fieldName from jsonbDoc and puts it diff --git a/src/backend/distributed/utils/listutils.c b/src/backend/distributed/utils/listutils.c index dd54443c4f8..eddef1fea09 100644 --- a/src/backend/distributed/utils/listutils.c +++ b/src/backend/distributed/utils/listutils.c @@ -10,16 +10,18 @@ */ #include "postgres.h" + #include "c.h" #include "port.h" -#include "utils/lsyscache.h" #include "lib/stringinfo.h" -#include "distributed/citus_safe_lib.h" -#include "distributed/listutils.h" #include "nodes/pg_list.h" +#include "utils/lsyscache.h" #include "utils/memutils.h" +#include "distributed/citus_safe_lib.h" +#include "distributed/listutils.h" + /* * SortList takes in a list of void pointers, and sorts these pointers (and the diff --git a/src/backend/distributed/utils/log_utils.c b/src/backend/distributed/utils/log_utils.c index 59a090a16f6..2e9d94c440d 100644 --- 
a/src/backend/distributed/utils/log_utils.c +++ b/src/backend/distributed/utils/log_utils.c @@ -9,16 +9,15 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" - -#include "utils/guc.h" -#include "distributed/log_utils.h" -#include "distributed/errormessage.h" +#include "common/cryptohash.h" #include "common/sha2.h" - #include "utils/builtins.h" +#include "utils/guc.h" -#include "common/cryptohash.h" +#include "pg_version_constants.h" + +#include "distributed/errormessage.h" +#include "distributed/log_utils.h" /* diff --git a/src/backend/distributed/utils/maintenanced.c b/src/backend/distributed/utils/maintenanced.c index 5f49de20adf..9cef13539b4 100644 --- a/src/backend/distributed/utils/maintenanced.c +++ b/src/backend/distributed/utils/maintenanced.c @@ -14,52 +14,52 @@ *------------------------------------------------------------------------- */ -#include "postgres.h" - -#include "distributed/pg_version_constants.h" - #include +#include "postgres.h" + #include "miscadmin.h" #include "pgstat.h" #include "access/xact.h" #include "access/xlog.h" -#include "catalog/pg_extension.h" -#include "citus_version.h" +#include "catalog/namespace.h" #include "catalog/pg_authid.h" +#include "catalog/pg_extension.h" #include "catalog/pg_namespace.h" #include "commands/async.h" #include "commands/extension.h" +#include "common/hashfn.h" #include "libpq/pqsignal.h" -#include "catalog/namespace.h" -#include "distributed/background_jobs.h" -#include "distributed/citus_safe_lib.h" -#include "distributed/distributed_deadlock_detection.h" -#include "distributed/maintenanced.h" -#include "distributed/coordinator_protocol.h" -#include "distributed/metadata_cache.h" -#include "distributed/shard_cleaner.h" -#include "distributed/metadata_sync.h" -#include "distributed/query_stats.h" -#include "distributed/statistics_collection.h" -#include "distributed/transaction_recovery.h" -#include "distributed/version_compat.h" #include "nodes/makefuncs.h" #include "postmaster/bgworker.h" #include "postmaster/postmaster.h" -#include "nodes/makefuncs.h" #include "storage/ipc.h" -#include "storage/proc.h" #include "storage/latch.h" #include "storage/lmgr.h" #include "storage/lwlock.h" +#include "storage/proc.h" #include "tcop/tcopprot.h" -#include "common/hashfn.h" #include "utils/builtins.h" -#include "utils/memutils.h" #include "utils/lsyscache.h" +#include "utils/memutils.h" + +#include "citus_version.h" +#include "pg_version_constants.h" + +#include "distributed/background_jobs.h" +#include "distributed/citus_safe_lib.h" +#include "distributed/coordinator_protocol.h" +#include "distributed/distributed_deadlock_detection.h" +#include "distributed/maintenanced.h" +#include "distributed/metadata_cache.h" +#include "distributed/metadata_sync.h" +#include "distributed/query_stats.h" #include "distributed/resource_lock.h" +#include "distributed/shard_cleaner.h" +#include "distributed/statistics_collection.h" +#include "distributed/transaction_recovery.h" +#include "distributed/version_compat.h" /* * Shared memory data for all maintenance workers. @@ -99,6 +99,7 @@ int Recover2PCInterval = 60000; int DeferShardDeleteInterval = 15000; int BackgroundTaskQueueCheckInterval = 5000; int MaxBackgroundTaskExecutors = 4; +char *MainDb = ""; /* config variables for metadata sync timeout */ int MetadataSyncInterval = 60000; @@ -112,7 +113,7 @@ static MaintenanceDaemonControlData *MaintenanceDaemonControl = NULL; * activated. 
*/ static HTAB *MaintenanceDaemonDBHash; - +static ErrorContextCallback errorCallback = { 0 }; static volatile sig_atomic_t got_SIGHUP = false; static volatile sig_atomic_t got_SIGTERM = false; @@ -125,6 +126,8 @@ static void MaintenanceDaemonShmemExit(int code, Datum arg); static void MaintenanceDaemonErrorContext(void *arg); static bool MetadataSyncTriggeredCheckAndReset(MaintenanceDaemonDBData *dbData); static void WarnMaintenanceDaemonNotStarted(void); +static MaintenanceDaemonDBData * GetMaintenanceDaemonDBHashEntry(Oid databaseId, + bool *found); /* * InitializeMaintenanceDaemon, called at server start, is responsible for @@ -139,6 +142,82 @@ InitializeMaintenanceDaemon(void) } +/* + * GetMaintenanceDaemonDBHashEntry searches the MaintenanceDaemonDBHash for the + * databaseId. It returns the entry if found or creates a new entry and initializes + * the value with zeroes. + */ +MaintenanceDaemonDBData * +GetMaintenanceDaemonDBHashEntry(Oid databaseId, bool *found) { + MaintenanceDaemonDBData *dbData = (MaintenanceDaemonDBData *) hash_search( + MaintenanceDaemonDBHash, + &MyDatabaseId, + HASH_ENTER_NULL, + found); + + if (!dbData) + { + elog(LOG, + "cannot create or find the maintenance daemon hash entry for database %u", + databaseId); + return NULL; + } + + if (!*found) + { + /* ensure the values in MaintenanceDaemonDBData are zero */ + memset(((char *) dbData) + sizeof(Oid), 0, + sizeof(MaintenanceDaemonDBData) - sizeof(Oid)); + } + + return dbData; +} + + +/* + * InitializeMaintenanceDaemonForMainDb is called in _PG_init, + * at which stage we are not in a transaction and do not have a databaseOid + */ +void +InitializeMaintenanceDaemonForMainDb(void) +{ + if (strcmp(MainDb, "") == 0) + { + elog(LOG, "There is no designated Main database."); + return; + } + + BackgroundWorker worker; + + memset(&worker, 0, sizeof(worker)); + + + strcpy_s(worker.bgw_name, sizeof(worker.bgw_name), + "Citus Maintenance Daemon for Main DB"); + + /* request ability to connect to target database */ + worker.bgw_flags = BGWORKER_SHMEM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION; + + /* + * No point in getting started before able to run query, but we do + * want to get started on Hot-Standby. + */ + worker.bgw_start_time = BgWorkerStart_ConsistentState; + + /* Restart a bit after errors, but don't bog the system.
*/ + worker.bgw_restart_time = 5; + strcpy_s(worker.bgw_library_name, + sizeof(worker.bgw_library_name), "citus"); + strcpy_s(worker.bgw_function_name, sizeof(worker.bgw_function_name), + "CitusMaintenanceDaemonMain"); + + worker.bgw_main_arg = (Datum) 0; + + RegisterBackgroundWorker(&worker); +} + + /* * InitializeMaintenanceDaemonBackend, called at backend start and * configuration changes, is responsible for starting a per-database @@ -148,31 +227,20 @@ void InitializeMaintenanceDaemonBackend(void) { Oid extensionOwner = CitusExtensionOwner(); - bool found; + bool found = false; LWLockAcquire(&MaintenanceDaemonControl->lock, LW_EXCLUSIVE); - MaintenanceDaemonDBData *dbData = (MaintenanceDaemonDBData *) hash_search( - MaintenanceDaemonDBHash, - &MyDatabaseId, - HASH_ENTER_NULL, - &found); + MaintenanceDaemonDBData *dbData = GetMaintenanceDaemonDBHashEntry(MyDatabaseId, + &found); if (dbData == NULL) { WarnMaintenanceDaemonNotStarted(); LWLockRelease(&MaintenanceDaemonControl->lock); - return; } - if (!found) - { - /* ensure the values in MaintenanceDaemonDBData are zero */ - memset(((char *) dbData) + sizeof(Oid), 0, - sizeof(MaintenanceDaemonDBData) - sizeof(Oid)); - } - if (IsMaintenanceDaemon) { /* @@ -271,66 +339,97 @@ WarnMaintenanceDaemonNotStarted(void) /* - * CitusMaintenanceDaemonMain is the maintenance daemon's main routine, it'll - * be started by the background worker infrastructure. If it errors out, - * it'll be restarted after a few seconds. + * ConnectToDatabase connects to the database for the given databaseOid. + * If databaseOid is 0, connects to MainDb and then creates a hash entry. + * If a hash entry cannot be created for MainDb it exits the process requesting a restart. + * However for regular databases, it exits without requesting a restart since another + * subsequent backend is expected to start the Maintenance Daemon. + * If the found hash entry has a valid workerPid, it exits + * without requesting a restart since there is already a daemon running. */ -void -CitusMaintenanceDaemonMain(Datum main_arg) +static MaintenanceDaemonDBData * +ConnectToDatabase(Oid databaseOid) { - Oid databaseOid = DatumGetObjectId(main_arg); - TimestampTz nextStatsCollectionTime USED_WITH_LIBCURL_ONLY = - TimestampTzPlusMilliseconds(GetCurrentTimestamp(), 60 * 1000); - bool retryStatsCollection USED_WITH_LIBCURL_ONLY = false; - TimestampTz lastRecoveryTime = 0; - TimestampTz lastShardCleanTime = 0; - TimestampTz lastStatStatementsPurgeTime = 0; - TimestampTz nextMetadataSyncTime = 0; + MaintenanceDaemonDBData *myDbData = NULL; - /* state kept for the background tasks queue monitor */ - TimestampTz lastBackgroundTaskQueueCheck = GetCurrentTimestamp(); - BackgroundWorkerHandle *backgroundTasksQueueBgwHandle = NULL; - bool backgroundTasksQueueWarnedForLock = false; - /* - * We do metadata sync in a separate background worker. We need its - * handle to be able to check its status. - */ - BackgroundWorkerHandle *metadataSyncBgwHandle = NULL; + bool isMainDb = false; - /* - * Look up this worker's configuration. - */ LWLockAcquire(&MaintenanceDaemonControl->lock, LW_EXCLUSIVE); - MaintenanceDaemonDBData *myDbData = (MaintenanceDaemonDBData *) - hash_search(MaintenanceDaemonDBHash, &databaseOid, - HASH_FIND, NULL); - if (!myDbData) + + if (databaseOid == 0) { + char *databaseName = MainDb; + /* - * When the database crashes, background workers are restarted, but - * the state in shared memory is lost.
In that case, we exit and - wait for a session to call InitializeMaintenanceDaemonBackend - to properly add it to the hash. + * Since we cannot query databaseOid without initializing Postgres + * first, connect to the database by name. */ + BackgroundWorkerInitializeConnection(databaseName, NULL, 0); - proc_exit(0); - } - - if (myDbData->workerPid != 0) - { /* - * Another maintenance daemon is running. This usually happens because - * postgres restarts the daemon after an non-zero exit, and - * InitializeMaintenanceDaemonBackend started one before postgres did. - * In that case, the first one stays and the last one exits. + * Now we have a valid MyDatabaseId. + * Insert the hash entry for the database to the Maintenance Daemon Hash. */ + bool found = false; + + myDbData = GetMaintenanceDaemonDBHashEntry(MyDatabaseId, &found); + + if (!myDbData) + { + /* + * If an entry cannot be created, exit with + * return code 1 so the worker is restarted. + * Since the BackgroundWorker for the MainDb is only registered + * once during server startup, we need to retry. + */ + proc_exit(1); + } + + if (found && myDbData->workerPid != 0) + { + /* Another maintenance daemon is running. */ + + proc_exit(0); + } - proc_exit(0); + databaseOid = MyDatabaseId; + myDbData->userOid = GetSessionUserId(); + isMainDb = true; } + else + { + myDbData = (MaintenanceDaemonDBData *) + hash_search(MaintenanceDaemonDBHash, &databaseOid, + HASH_FIND, NULL); - before_shmem_exit(MaintenanceDaemonShmemExit, main_arg); + if (!myDbData) + { + /* + * When the database crashes, background workers are restarted, but + * the state in shared memory is lost. In that case, we exit and + * wait for a session to call InitializeMaintenanceDaemonBackend + * to properly add it to the hash. + */ + + proc_exit(0); + } + + if (myDbData->workerPid != 0) + { + /* + * Another maintenance daemon is running. This usually happens because + * postgres restarts the daemon after a non-zero exit, and + * InitializeMaintenanceDaemonBackend started one before postgres did. + * In that case, the first one stays and the last one exits. + */ + + proc_exit(0); + } + } + + before_shmem_exit(MaintenanceDaemonShmemExit, ObjectIdGetDatum(databaseOid)); /* * Signal that I am the maintenance daemon now. @@ -356,25 +455,55 @@ CitusMaintenanceDaemonMain(Datum main_arg) LWLockRelease(&MaintenanceDaemonControl->lock); - /* - * Setup error context so log messages can be properly attributed. Some of - * them otherwise sound like they might be from a normal user connection. - * Do so before setting up signals etc, so we never exit without the - * context setup. - */ - ErrorContextCallback errorCallback = { 0 }; memset(&errorCallback, 0, sizeof(errorCallback)); errorCallback.callback = MaintenanceDaemonErrorContext; errorCallback.arg = (void *) myDbData; errorCallback.previous = error_context_stack; error_context_stack = &errorCallback; - elog(LOG, "starting maintenance daemon on database %u user %u", databaseOid, myDbData->userOid); - /* connect to database, after that we can actually access catalogs */ - BackgroundWorkerInitializeConnectionByOid(databaseOid, myDbData->userOid, 0); + if (!isMainDb) + { + /* connect to database, after that we can actually access catalogs */ + BackgroundWorkerInitializeConnectionByOid(databaseOid, myDbData->userOid, 0); + } + + return myDbData; +} + + +/* + * CitusMaintenanceDaemonMain is the maintenance daemon's main routine, it'll + * be started by the background worker infrastructure. If it errors out, + * it'll be restarted after a few seconds.
+ */ +void +CitusMaintenanceDaemonMain(Datum main_arg) +{ + Oid databaseOid = DatumGetObjectId(main_arg); + TimestampTz nextStatsCollectionTime USED_WITH_LIBCURL_ONLY = + TimestampTzPlusMilliseconds(GetCurrentTimestamp(), 60 * 1000); + bool retryStatsCollection USED_WITH_LIBCURL_ONLY = false; + TimestampTz lastRecoveryTime = 0; + TimestampTz lastShardCleanTime = 0; + TimestampTz lastStatStatementsPurgeTime = 0; + TimestampTz nextMetadataSyncTime = 0; + + /* state kept for the background tasks queue monitor */ + TimestampTz lastBackgroundTaskQueueCheck = GetCurrentTimestamp(); + BackgroundWorkerHandle *backgroundTasksQueueBgwHandle = NULL; + bool backgroundTasksQueueWarnedForLock = false; + + + /* + * We do metadata sync in a separate background worker. We need its + * handle to be able to check its status. + */ + BackgroundWorkerHandle *metadataSyncBgwHandle = NULL; + + MaintenanceDaemonDBData *myDbData = ConnectToDatabase(databaseOid); /* make worker recognizable in pg_stat_activity */ pgstat_report_appname("Citus Maintenance Daemon"); @@ -383,7 +512,7 @@ CitusMaintenanceDaemonMain(Datum main_arg) * Terminate orphaned metadata sync daemons spawned from previously terminated * or crashed maintenanced instances. */ - SignalMetadataSyncDaemon(databaseOid, SIGTERM); + SignalMetadataSyncDaemon(MyDatabaseId, SIGTERM); /* enter main loop */ while (!got_SIGTERM) @@ -945,7 +1074,7 @@ MaintenanceDaemonShmemExit(int code, Datum arg) } -/* MaintenanceDaemonSigTermHandler calls proc_exit(0) */ +/* MaintenanceDaemonSigTermHandler sets the got_SIGTERM flag.*/ static void MaintenanceDaemonSigTermHandler(SIGNAL_ARGS) { diff --git a/src/backend/distributed/utils/multi_partitioning_utils.c b/src/backend/distributed/utils/multi_partitioning_utils.c index 924ba4c54d2..ede2008cabc 100644 --- a/src/backend/distributed/utils/multi_partitioning_utils.c +++ b/src/backend/distributed/utils/multi_partitioning_utils.c @@ -6,7 +6,7 @@ */ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pgstat.h" #include "access/genam.h" #include "access/heapam.h" @@ -19,8 +19,21 @@ #include "catalog/pg_inherits.h" #include "commands/tablecmds.h" #include "common/string.h" -#include "distributed/citus_nodes.h" +#include "lib/stringinfo.h" +#include "nodes/makefuncs.h" +#include "nodes/pg_list.h" +#include "partitioning/partdesc.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/lsyscache.h" +#include "utils/rel.h" +#include "utils/syscache.h" +#include "utils/varlena.h" + +#include "pg_version_constants.h" + #include "distributed/adaptive_executor.h" +#include "distributed/citus_nodes.h" #include "distributed/citus_ruleutils.h" #include "distributed/colocation_utils.h" #include "distributed/commands.h" @@ -36,17 +49,6 @@ #include "distributed/shardinterval_utils.h" #include "distributed/version_compat.h" #include "distributed/worker_protocol.h" -#include "lib/stringinfo.h" -#include "nodes/makefuncs.h" -#include "nodes/pg_list.h" -#include "pgstat.h" -#include "partitioning/partdesc.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/lsyscache.h" -#include "utils/rel.h" -#include "utils/syscache.h" -#include "utils/varlena.h" static char * PartitionBound(Oid partitionId); static Relation try_relation_open_nolock(Oid relationId); diff --git a/src/backend/distributed/utils/namespace_utils.c b/src/backend/distributed/utils/namespace_utils.c index 4f822b7d2b8..a5401b00c1f 100644 --- a/src/backend/distributed/utils/namespace_utils.c +++ 
b/src/backend/distributed/utils/namespace_utils.c @@ -11,10 +11,11 @@ #include "postgres.h" -#include "distributed/namespace_utils.h" #include "utils/guc.h" #include "utils/regproc.h" +#include "distributed/namespace_utils.h" + /* * We use the equivalent of a function SET option to allow the setting to * persist for the exact duration of the transaction, guc.c takes care of diff --git a/src/backend/distributed/utils/param_utils.c b/src/backend/distributed/utils/param_utils.c index 8aefecb7ddf..a500b5b6535 100644 --- a/src/backend/distributed/utils/param_utils.c +++ b/src/backend/distributed/utils/param_utils.c @@ -9,12 +9,13 @@ #include "postgres.h" -#include -#include -#include -#include -#include -#include +#include "nodes/bitmapset.h" +#include "nodes/nodeFuncs.h" +#include "nodes/nodes.h" +#include "nodes/params.h" +#include "nodes/parsenodes.h" +#include "nodes/primnodes.h" + #include "distributed/param_utils.h" /* diff --git a/src/backend/distributed/utils/priority.c b/src/backend/distributed/utils/priority.c index 2e7972d2d62..ceb75ac2668 100644 --- a/src/backend/distributed/utils/priority.c +++ b/src/backend/distributed/utils/priority.c @@ -7,13 +7,13 @@ *------------------------------------------------------------------------- */ -#include "postgres.h" - -#include #include #include #include #include +#include + +#include "postgres.h" #include "distributed/priority.h" diff --git a/src/backend/distributed/utils/query_utils.c b/src/backend/distributed/utils/query_utils.c index 4ae49ed817b..ac33bdd523f 100644 --- a/src/backend/distributed/utils/query_utils.c +++ b/src/backend/distributed/utils/query_utils.c @@ -11,13 +11,14 @@ */ #include "postgres.h" -#include "nodes/primnodes.h" #include "catalog/pg_class.h" +#include "nodes/nodeFuncs.h" +#include "nodes/primnodes.h" + +#include "distributed/listutils.h" #include "distributed/query_utils.h" #include "distributed/version_compat.h" -#include "distributed/listutils.h" -#include "nodes/nodeFuncs.h" static bool CitusQueryableRangeTableRelation(RangeTblEntry *rangeTableEntry); diff --git a/src/backend/distributed/utils/reference_table_utils.c b/src/backend/distributed/utils/reference_table_utils.c index 314044ab5cb..b1710c1d6d2 100644 --- a/src/backend/distributed/utils/reference_table_utils.c +++ b/src/backend/distributed/utils/reference_table_utils.c @@ -10,19 +10,27 @@ */ #include "postgres.h" + #include "miscadmin.h" +#include "access/genam.h" #include "access/heapam.h" #include "access/htup_details.h" -#include "access/genam.h" +#include "postmaster/postmaster.h" +#include "storage/lmgr.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/lsyscache.h" +#include "utils/rel.h" + #include "distributed/backend_data.h" #include "distributed/colocation_utils.h" #include "distributed/commands.h" -#include "distributed/listutils.h" #include "distributed/coordinator_protocol.h" -#include "distributed/metadata_utility.h" +#include "distributed/listutils.h" #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" +#include "distributed/metadata_utility.h" #include "distributed/multi_executor.h" #include "distributed/multi_logical_planner.h" #include "distributed/reference_table_utils.h" @@ -33,12 +41,6 @@ #include "distributed/transaction_management.h" #include "distributed/worker_manager.h" #include "distributed/worker_transaction.h" -#include "postmaster/postmaster.h" -#include "storage/lmgr.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/lsyscache.h" -#include 
"utils/rel.h" /* local function forward declarations */ static List * WorkersWithoutReferenceTablePlacement(uint64 shardId, LOCKMODE lockMode); diff --git a/src/backend/distributed/utils/replication_origin_session_utils.c b/src/backend/distributed/utils/replication_origin_session_utils.c index 800d82ef77a..f96e23f8f26 100644 --- a/src/backend/distributed/utils/replication_origin_session_utils.c +++ b/src/backend/distributed/utils/replication_origin_session_utils.c @@ -7,13 +7,16 @@ * *------------------------------------------------------------------------- */ +#include "postgres.h" -#include "distributed/replication_origin_session_utils.h" -#include "distributed/remote_commands.h" -#include "distributed/metadata_cache.h" -#include "utils/builtins.h" #include "miscadmin.h" +#include "utils/builtins.h" + +#include "distributed/metadata_cache.h" +#include "distributed/remote_commands.h" +#include "distributed/replication_origin_session_utils.h" + static bool IsRemoteReplicationOriginSessionSetup(MultiConnection *connection); static void SetupMemoryContextResetReplicationOriginHandler(void); @@ -183,7 +186,7 @@ SetupReplicationOriginRemoteSession(MultiConnection *connection) { StringInfo replicationOriginSessionSetupQuery = makeStringInfo(); appendStringInfo(replicationOriginSessionSetupQuery, - "select pg_catalog.citus_internal_start_replication_origin_tracking();"); + "select citus_internal.start_replication_origin_tracking();"); ExecuteCriticalRemoteCommand(connection, replicationOriginSessionSetupQuery->data); connection->isReplicationOriginSessionSetup = true; @@ -202,7 +205,7 @@ ResetReplicationOriginRemoteSession(MultiConnection *connection) { StringInfo replicationOriginSessionResetQuery = makeStringInfo(); appendStringInfo(replicationOriginSessionResetQuery, - "select pg_catalog.citus_internal_stop_replication_origin_tracking();"); + "select citus_internal.stop_replication_origin_tracking();"); ExecuteCriticalRemoteCommand(connection, replicationOriginSessionResetQuery->data); connection->isReplicationOriginSessionSetup = false; @@ -226,7 +229,7 @@ IsRemoteReplicationOriginSessionSetup(MultiConnection *connection) StringInfo isReplicationOriginSessionSetupQuery = makeStringInfo(); appendStringInfo(isReplicationOriginSessionSetupQuery, - "SELECT pg_catalog.citus_internal_is_replication_origin_tracking_active()"); + "SELECT citus_internal.is_replication_origin_tracking_active()"); bool result = ExecuteRemoteCommandAndCheckResult(connection, isReplicationOriginSessionSetupQuery->data, diff --git a/src/backend/distributed/utils/resource_lock.c b/src/backend/distributed/utils/resource_lock.c index c76830c1d34..8ac269e4314 100644 --- a/src/backend/distributed/utils/resource_lock.c +++ b/src/backend/distributed/utils/resource_lock.c @@ -14,38 +14,40 @@ */ #include "postgres.h" + #include "c.h" #include "miscadmin.h" #include "access/xact.h" #include "catalog/namespace.h" #include "commands/tablecmds.h" +#include "storage/lmgr.h" +#include "utils/builtins.h" +#include "utils/lsyscache.h" +#include "utils/varlena.h" + #include "distributed/colocation_utils.h" #include "distributed/commands.h" -#include "distributed/listutils.h" -#include "distributed/metadata_utility.h" #include "distributed/coordinator_protocol.h" +#include "distributed/distributed_planner.h" +#include "distributed/listutils.h" +#include "distributed/local_executor.h" #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" +#include "distributed/metadata_utility.h" #include 
"distributed/multi_executor.h" #include "distributed/multi_join_order.h" #include "distributed/multi_partitioning_utils.h" -#include "distributed/distributed_planner.h" -#include "distributed/relay_utility.h" #include "distributed/reference_table_utils.h" +#include "distributed/relay_utility.h" #include "distributed/remote_commands.h" #include "distributed/resource_lock.h" #include "distributed/shardinterval_utils.h" -#include "distributed/worker_protocol.h" -#include "distributed/worker_transaction.h" #include "distributed/utils/array_type.h" #include "distributed/version_compat.h" -#include "distributed/local_executor.h" +#include "distributed/worker_protocol.h" #include "distributed/worker_shard_visibility.h" -#include "storage/lmgr.h" -#include "utils/builtins.h" -#include "utils/lsyscache.h" -#include "utils/varlena.h" +#include "distributed/worker_transaction.h" #define LOCK_RELATION_IF_EXISTS \ "SELECT pg_catalog.lock_relation_if_exists(%s, %s);" @@ -705,13 +707,27 @@ SerializeNonCommutativeWrites(List *shardIntervalList, LOCKMODE lockMode) } List *replicatedShardList = NIL; - if (AnyTableReplicated(shardIntervalList, &replicatedShardList)) + bool anyTableReplicated = AnyTableReplicated(shardIntervalList, &replicatedShardList); + + /* + * Acquire locks on the modified table. + * If the table is replicated, the locks are first acquired on the first worker node then locally. + * But if we're already on the first worker, acquiring on the first worker node and locally are the same operation. + * So we only acquire locally in that case. + */ + if (anyTableReplicated && ClusterHasKnownMetadataWorkers() && !IsFirstWorkerNode()) { - if (ClusterHasKnownMetadataWorkers() && !IsFirstWorkerNode()) - { - LockShardListResourcesOnFirstWorker(lockMode, replicatedShardList); - } + LockShardListResourcesOnFirstWorker(lockMode, replicatedShardList); + } + LockShardListResources(shardIntervalList, lockMode); + /* + * Next, acquire locks on the reference tables that are referenced by a foreign key if there are any. + * Note that LockReferencedReferenceShardResources() first acquires locks on the first worker, + * then locally. 
+ */ + if (anyTableReplicated) + { ShardInterval *firstShardInterval = (ShardInterval *) linitial(replicatedShardList); if (ReferenceTableShardId(firstShardInterval->shardId)) @@ -726,8 +742,6 @@ SerializeNonCommutativeWrites(List *shardIntervalList, LOCKMODE lockMode) LockReferencedReferenceShardResources(firstShardInterval->shardId, lockMode); } } - - LockShardListResources(shardIntervalList, lockMode); } diff --git a/src/backend/distributed/utils/role.c b/src/backend/distributed/utils/role.c index 3a9a90f9fca..9e92a3290a1 100644 --- a/src/backend/distributed/utils/role.c +++ b/src/backend/distributed/utils/role.c @@ -10,13 +10,15 @@ */ #include "postgres.h" -#include "distributed/commands/utility_hook.h" -#include "distributed/worker_protocol.h" #include "fmgr.h" + #include "tcop/dest.h" #include "tcop/utility.h" #include "utils/builtins.h" +#include "distributed/commands/utility_hook.h" +#include "distributed/worker_protocol.h" + PG_FUNCTION_INFO_V1(alter_role_if_exists); PG_FUNCTION_INFO_V1(worker_create_or_alter_role); diff --git a/src/backend/distributed/utils/shard_utils.c b/src/backend/distributed/utils/shard_utils.c index d6d41f19223..cd688b745b0 100644 --- a/src/backend/distributed/utils/shard_utils.c +++ b/src/backend/distributed/utils/shard_utils.c @@ -12,9 +12,11 @@ #include "postgres.h" #include "miscadmin.h" + #include "utils/builtins.h" #include "utils/fmgrprotos.h" #include "utils/lsyscache.h" + #include "distributed/coordinator_protocol.h" #include "distributed/listutils.h" #include "distributed/log_utils.h" diff --git a/src/backend/distributed/utils/shardinterval_utils.c b/src/backend/distributed/utils/shardinterval_utils.c index 6c18e201ec7..124bfbdf1c6 100644 --- a/src/backend/distributed/utils/shardinterval_utils.c +++ b/src/backend/distributed/utils/shardinterval_utils.c @@ -8,23 +8,25 @@ * *------------------------------------------------------------------------- */ -#include "stdint.h" #include "postgres.h" +#include "stdint.h" + #include "access/nbtree.h" #include "catalog/pg_am.h" #include "catalog/pg_collation.h" #include "catalog/pg_type.h" +#include "utils/catcache.h" +#include "utils/memutils.h" + +#include "distributed/distributed_planner.h" #include "distributed/listutils.h" #include "distributed/metadata_cache.h" #include "distributed/multi_join_order.h" -#include "distributed/distributed_planner.h" +#include "distributed/pg_dist_partition.h" #include "distributed/shard_pruning.h" #include "distributed/shardinterval_utils.h" -#include "distributed/pg_dist_partition.h" #include "distributed/worker_protocol.h" -#include "utils/catcache.h" -#include "utils/memutils.h" /* @@ -468,12 +470,11 @@ SingleReplicatedTable(Oid relationId) return false; } - List *shardIntervalList = LoadShardList(relationId); uint64 *shardIdPointer = NULL; - foreach_ptr(shardIdPointer, shardIntervalList) + foreach_ptr(shardIdPointer, shardList) { uint64 shardId = *shardIdPointer; - shardPlacementList = ShardPlacementListSortedByWorker(shardId); + shardPlacementList = ShardPlacementList(shardId); if (list_length(shardPlacementList) != 1) { diff --git a/src/backend/distributed/utils/statistics_collection.c b/src/backend/distributed/utils/statistics_collection.c index a442aac9544..1cadea968c3 100644 --- a/src/backend/distributed/utils/statistics_collection.c +++ b/src/backend/distributed/utils/statistics_collection.c @@ -10,10 +10,12 @@ #include "postgres.h" -#include "citus_version.h" #include "fmgr.h" + #include "utils/uuid.h" +#include "citus_version.h" + #if defined(HAVE_LIBCURL) && 
defined(ENABLE_CITUS_STATISTICS_COLLECTION) bool EnableStatisticsCollection = true; /* send basic usage statistics to Citus */ #else @@ -28,18 +30,19 @@ PG_FUNCTION_INFO_V1(citus_server_id); #include #include "access/xact.h" +#include "lib/stringinfo.h" +#include "utils/builtins.h" +#include "utils/fmgrprotos.h" +#include "utils/json.h" +#include "utils/jsonb.h" + #include "distributed/listutils.h" #include "distributed/metadata_cache.h" #include "distributed/multi_join_order.h" #include "distributed/shardinterval_utils.h" #include "distributed/statistics_collection.h" -#include "distributed/worker_manager.h" #include "distributed/version_compat.h" -#include "lib/stringinfo.h" -#include "utils/builtins.h" -#include "utils/json.h" -#include "utils/jsonb.h" -#include "utils/fmgrprotos.h" +#include "distributed/worker_manager.h" static size_t StatisticsCallback(char *contents, size_t size, size_t count, void *userData); diff --git a/src/backend/distributed/utils/task_execution_utils.c b/src/backend/distributed/utils/task_execution_utils.c index 50652b6bd78..5a6f74283d8 100644 --- a/src/backend/distributed/utils/task_execution_utils.c +++ b/src/backend/distributed/utils/task_execution_utils.c @@ -1,16 +1,21 @@ -#include "postgres.h" -#include "miscadmin.h" - +#include #include #include -#include -#include "distributed/pg_version_constants.h" +#include "postgres.h" -#include "common/hashfn.h" +#include "miscadmin.h" #include "commands/dbcommands.h" +#include "common/hashfn.h" +#include "storage/fd.h" +#include "utils/builtins.h" +#include "utils/hsearch.h" +#include "utils/timestamp.h" + +#include "pg_version_constants.h" + #include "distributed/citus_custom_scan.h" #include "distributed/citus_nodes.h" #include "distributed/connection_management.h" @@ -27,12 +32,8 @@ #include "distributed/resource_lock.h" #include "distributed/subplan_execution.h" #include "distributed/task_execution_utils.h" -#include "distributed/worker_protocol.h" #include "distributed/version_compat.h" -#include "storage/fd.h" -#include "utils/builtins.h" -#include "utils/hsearch.h" -#include "utils/timestamp.h" +#include "distributed/worker_protocol.h" /* TaskMapKey is used as a key in task hash */ typedef struct TaskMapKey diff --git a/src/backend/distributed/utils/tenant_schema_metadata.c b/src/backend/distributed/utils/tenant_schema_metadata.c index e634795a217..57ae1d15199 100644 --- a/src/backend/distributed/utils/tenant_schema_metadata.c +++ b/src/backend/distributed/utils/tenant_schema_metadata.c @@ -14,14 +14,15 @@ #include "access/genam.h" #include "access/htup.h" #include "access/table.h" +#include "storage/lockdefs.h" +#include "utils/fmgroids.h" +#include "utils/relcache.h" + #include "distributed/colocation_utils.h" #include "distributed/metadata_cache.h" #include "distributed/metadata_sync.h" #include "distributed/pg_dist_schema.h" #include "distributed/tenant_schema_metadata.h" -#include "storage/lockdefs.h" -#include "utils/relcache.h" -#include "utils/fmgroids.h" /* diff --git a/src/backend/distributed/utils/tuplestore.c b/src/backend/distributed/utils/tuplestore.c index 4473c1f3eb1..ea59e70405a 100644 --- a/src/backend/distributed/utils/tuplestore.c +++ b/src/backend/distributed/utils/tuplestore.c @@ -10,9 +10,10 @@ #include "postgres.h" -#include "distributed/tuplestore.h" #include "miscadmin.h" +#include "distributed/tuplestore.h" + /* * CheckTuplestoreReturn checks if a tuplestore can be returned in the callsite * of the UDF. 
diff --git a/src/backend/distributed/utils/type_utils.c b/src/backend/distributed/utils/type_utils.c index 66a924a02ac..fca3313741e 100644 --- a/src/backend/distributed/utils/type_utils.c +++ b/src/backend/distributed/utils/type_utils.c @@ -10,13 +10,14 @@ */ #include "postgres.h" + #include "fmgr.h" #include "libpq-fe.h" #include "catalog/pg_type.h" +#include "libpq/pqformat.h" #include "nodes/pg_list.h" #include "utils/syscache.h" -#include "libpq/pqformat.h" #include "distributed/causal_clock.h" diff --git a/src/backend/distributed/worker/task_tracker_protocol.c b/src/backend/distributed/worker/task_tracker_protocol.c index 9b2016f67ab..abe1f765e1e 100644 --- a/src/backend/distributed/worker/task_tracker_protocol.c +++ b/src/backend/distributed/worker/task_tracker_protocol.c @@ -12,6 +12,7 @@ */ #include "postgres.h" + #include "funcapi.h" #include "miscadmin.h" diff --git a/src/backend/distributed/worker/worker_create_or_replace.c b/src/backend/distributed/worker/worker_create_or_replace.c index 804e711259a..2fab84ac6b6 100644 --- a/src/backend/distributed/worker/worker_create_or_replace.c +++ b/src/backend/distributed/worker/worker_create_or_replace.c @@ -9,23 +9,24 @@ #include "postgres.h" +#include "fmgr.h" +#include "funcapi.h" + #include "access/htup_details.h" #include "catalog/dependency.h" #include "catalog/pg_collation.h" #include "catalog/pg_proc.h" #include "catalog/pg_ts_config.h" #include "catalog/pg_type.h" -#include "fmgr.h" -#include "funcapi.h" #include "nodes/makefuncs.h" #include "nodes/nodes.h" #include "parser/parse_type.h" #include "tcop/dest.h" #include "tcop/utility.h" #include "utils/builtins.h" -#include "utils/syscache.h" #include "utils/lsyscache.h" #include "utils/regproc.h" +#include "utils/syscache.h" #include "distributed/commands.h" #include "distributed/commands/utility_hook.h" diff --git a/src/backend/distributed/worker/worker_data_fetch_protocol.c b/src/backend/distributed/worker/worker_data_fetch_protocol.c index 11fdda28715..f51d9c80c31 100644 --- a/src/backend/distributed/worker/worker_data_fetch_protocol.c +++ b/src/backend/distributed/worker/worker_data_fetch_protocol.c @@ -12,12 +12,14 @@ *------------------------------------------------------------------------- */ +#include +#include + #include "postgres.h" + #include "funcapi.h" #include "libpq-fe.h" #include "miscadmin.h" -#include -#include #include "access/xact.h" #include "catalog/dependency.h" @@ -27,6 +29,17 @@ #include "commands/dbcommands.h" #include "commands/extension.h" #include "commands/sequence.h" +#include "executor/spi.h" +#include "nodes/makefuncs.h" +#include "parser/parse_relation.h" +#include "storage/lmgr.h" +#include "tcop/tcopprot.h" +#include "tcop/utility.h" +#include "utils/builtins.h" +#include "utils/lsyscache.h" +#include "utils/regproc.h" +#include "utils/varlena.h" + #include "distributed/citus_ruleutils.h" #include "distributed/commands.h" #include "distributed/commands/multi_copy.h" @@ -44,20 +57,9 @@ #include "distributed/relay_utility.h" #include "distributed/remote_commands.h" #include "distributed/resource_lock.h" - +#include "distributed/version_compat.h" #include "distributed/worker_create_or_replace.h" #include "distributed/worker_protocol.h" -#include "distributed/version_compat.h" -#include "executor/spi.h" -#include "nodes/makefuncs.h" -#include "parser/parse_relation.h" -#include "storage/lmgr.h" -#include "tcop/tcopprot.h" -#include "tcop/utility.h" -#include "utils/builtins.h" -#include "utils/lsyscache.h" -#include "utils/regproc.h" -#include 
"utils/varlena.h" /* Local functions forward declarations */ diff --git a/src/backend/distributed/worker/worker_drop_protocol.c b/src/backend/distributed/worker/worker_drop_protocol.c index 16b7bb66a75..280de4493bd 100644 --- a/src/backend/distributed/worker/worker_drop_protocol.c +++ b/src/backend/distributed/worker/worker_drop_protocol.c @@ -19,21 +19,22 @@ #include "catalog/dependency.h" #include "catalog/pg_depend.h" #include "catalog/pg_foreign_server.h" +#include "foreign/foreign.h" +#include "tcop/utility.h" +#include "utils/builtins.h" +#include "utils/fmgroids.h" +#include "utils/lsyscache.h" + #include "distributed/citus_ruleutils.h" +#include "distributed/commands/utility_hook.h" +#include "distributed/coordinator_protocol.h" #include "distributed/distribution_column.h" #include "distributed/listutils.h" -#include "distributed/metadata_utility.h" -#include "distributed/coordinator_protocol.h" -#include "distributed/commands/utility_hook.h" -#include "distributed/metadata_cache.h" #include "distributed/metadata/distobject.h" +#include "distributed/metadata_cache.h" +#include "distributed/metadata_utility.h" #include "distributed/multi_partitioning_utils.h" #include "distributed/worker_protocol.h" -#include "foreign/foreign.h" -#include "tcop/utility.h" -#include "utils/builtins.h" -#include "utils/fmgroids.h" -#include "utils/lsyscache.h" PG_FUNCTION_INFO_V1(worker_drop_distributed_table); PG_FUNCTION_INFO_V1(worker_drop_shell_table); @@ -169,14 +170,10 @@ WorkerDropDistributedTable(Oid relationId) */ if (!IsAnyObjectAddressOwnedByExtension(list_make1(distributedTableObject), NULL)) { - char *relName = get_rel_name(relationId); - Oid schemaId = get_rel_namespace(relationId); - char *schemaName = get_namespace_name(schemaId); - StringInfo dropCommand = makeStringInfo(); appendStringInfo(dropCommand, "DROP%sTABLE %s CASCADE", IsForeignTable(relationId) ? 
" FOREIGN " : " ", - quote_qualified_identifier(schemaName, relName)); + generate_qualified_relation_name(relationId)); Node *dropCommandNode = ParseTreeNode(dropCommand->data); diff --git a/src/backend/distributed/worker/worker_partition_protocol.c b/src/backend/distributed/worker/worker_partition_protocol.c index 2291633d470..cdbda6d3e07 100644 --- a/src/backend/distributed/worker/worker_partition_protocol.c +++ b/src/backend/distributed/worker/worker_partition_protocol.c @@ -10,6 +10,7 @@ */ #include "postgres.h" + #include "funcapi.h" #include "miscadmin.h" diff --git a/src/backend/distributed/worker/worker_shard_visibility.c b/src/backend/distributed/worker/worker_shard_visibility.c index 63a9cca348a..3725800c30b 100644 --- a/src/backend/distributed/worker/worker_shard_visibility.c +++ b/src/backend/distributed/worker/worker_shard_visibility.c @@ -8,25 +8,27 @@ */ #include "postgres.h" + #include "miscadmin.h" #include "catalog/index.h" #include "catalog/namespace.h" #include "catalog/pg_class.h" #include "catalog/pg_type.h" +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" +#include "utils/lsyscache.h" +#include "utils/syscache.h" +#include "utils/varlena.h" + #include "distributed/backend_data.h" -#include "distributed/metadata_cache.h" #include "distributed/coordinator_protocol.h" #include "distributed/listutils.h" #include "distributed/local_executor.h" +#include "distributed/metadata_cache.h" #include "distributed/query_colocation_checker.h" #include "distributed/worker_protocol.h" #include "distributed/worker_shard_visibility.h" -#include "nodes/makefuncs.h" -#include "nodes/nodeFuncs.h" -#include "utils/lsyscache.h" -#include "utils/syscache.h" -#include "utils/varlena.h" /* HideShardsMode is used to determine whether to hide shards */ @@ -52,6 +54,7 @@ static bool ShouldHideShardsInternal(void); static bool IsPgBgWorker(void); static bool FilterShardsFromPgclass(Node *node, void *context); static Node * CreateRelationIsAKnownShardFilter(int pgClassVarno); +static bool HasRangeTableRef(Node *node, int *varno); PG_FUNCTION_INFO_V1(citus_table_is_visible); PG_FUNCTION_INFO_V1(relation_is_a_known_shard); @@ -419,8 +422,8 @@ IsPgBgWorker(void) /* - * FilterShardsFromPgclass adds a NOT relation_is_a_known_shard(oid) filter - * to the security quals of pg_class RTEs. + * FilterShardsFromPgclass adds a "relation_is_a_known_shard(oid) IS NOT TRUE" + * filter to the quals of queries that query pg_class. */ static bool FilterShardsFromPgclass(Node *node, void *context) @@ -454,12 +457,35 @@ FilterShardsFromPgclass(Node *node, void *context) continue; } + /* + * Skip if pg_class is not actually queried. This is possible on + * INSERT statements that insert into pg_class. 
+ */ + if (!expression_tree_walker((Node *) query->jointree->fromlist, + HasRangeTableRef, &varno)) + { + /* the query does not reference pg_class in its fromlist */ + continue; + } + /* make sure the expression is in the right memory context */ MemoryContext originalContext = MemoryContextSwitchTo(queryContext); - /* add NOT relation_is_a_known_shard(oid) to the security quals of the RTE */ - rangeTableEntry->securityQuals = - list_make1(CreateRelationIsAKnownShardFilter(varno)); + + /* add relation_is_a_known_shard(oid) IS NOT TRUE to the quals of the query */ + Node *newQual = CreateRelationIsAKnownShardFilter(varno); + Node *oldQuals = query->jointree->quals; + if (oldQuals) + { + query->jointree->quals = (Node *) makeBoolExpr( + AND_EXPR, + list_make2(oldQuals, newQual), + -1); + } + else + { + query->jointree->quals = newQual; + } MemoryContextSwitchTo(originalContext); } @@ -471,9 +497,37 @@ } +/* + * HasRangeTableRef is passed to expression_tree_walker to check whether a + * RangeTblRef with the given varno is present in a fromlist. + */ +static bool +HasRangeTableRef(Node *node, int *varno) +{ + if (node == NULL) + { + return false; + } + + if (IsA(node, RangeTblRef)) + { + RangeTblRef *rangeTblRef = (RangeTblRef *) node; + return rangeTblRef->rtindex == *varno; + } + + return expression_tree_walker(node, HasRangeTableRef, varno); +} + + /* + * CreateRelationIsAKnownShardFilter constructs an expression of the form: - * NOT pg_catalog.relation_is_a_known_shard(oid) + * pg_catalog.relation_is_a_known_shard(oid) IS NOT TRUE + * + * The difference between "NOT pg_catalog.relation_is_a_known_shard(oid)" and + * "pg_catalog.relation_is_a_known_shard(oid) IS NOT TRUE" is that the former + * will return NULL (treated as false, so the row is filtered out) if the function returns NULL, + * while the latter will return TRUE. This difference is important in the case of outer joins, + * because this filter might be applied on an oid that is then NULL.
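+ * + * For illustration, write f for pg_catalog.relation_is_a_known_shard: when f(oid) returns true or false the two forms agree, but when f returns NULL (for example for a NULL oid coming from the nullable side of an outer join) "NOT f(oid)" evaluates to NULL and the row is filtered out, while "f(oid) IS NOT TRUE" evaluates to TRUE and the row is kept.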
*/ static Node * CreateRelationIsAKnownShardFilter(int pgClassVarno) @@ -494,9 +548,9 @@ CreateRelationIsAKnownShardFilter(int pgClassVarno) funcExpr->location = -1; funcExpr->args = list_make1(oidVar); - BoolExpr *notExpr = makeNode(BoolExpr); - notExpr->boolop = NOT_EXPR; - notExpr->args = list_make1(funcExpr); + BooleanTest *notExpr = makeNode(BooleanTest); + notExpr->booltesttype = IS_NOT_TRUE; + notExpr->arg = (Expr *) funcExpr; notExpr->location = -1; return (Node *) notExpr; diff --git a/src/backend/distributed/worker/worker_sql_task_protocol.c b/src/backend/distributed/worker/worker_sql_task_protocol.c index 38dba5e35f0..708fee15d02 100644 --- a/src/backend/distributed/worker/worker_sql_task_protocol.c +++ b/src/backend/distributed/worker/worker_sql_task_protocol.c @@ -9,20 +9,23 @@ *------------------------------------------------------------------------- */ +/* necessary to get S_IRUSR, S_IWUSR definitions on illumos */ +#include + #include "postgres.h" + #include "funcapi.h" #include "pgstat.h" +#include "utils/builtins.h" +#include "utils/memutils.h" + #include "distributed/commands/multi_copy.h" #include "distributed/multi_executor.h" #include "distributed/transmit.h" #include "distributed/version_compat.h" #include "distributed/worker_protocol.h" -#include "utils/builtins.h" -#include "utils/memutils.h" -/* necessary to get S_IRUSR, S_IWUSR definitions on illumos */ -#include #define COPY_BUFFER_SIZE (4 * 1024 * 1024) @@ -123,7 +126,6 @@ TaskFileDestReceiverStartup(DestReceiver *dest, int operation, const char *nullPrintCharacter = "\\N"; const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY); - const int fileMode = (S_IRUSR | S_IWUSR); /* use the memory context that was in place when the DestReceiver was created */ MemoryContext oldContext = MemoryContextSwitchTo(taskFileDest->memoryContext); @@ -145,8 +147,7 @@ TaskFileDestReceiverStartup(DestReceiver *dest, int operation, taskFileDest->fileCompat = FileCompatFromFileStart(FileOpenForTransmit( taskFileDest->filePath, - fileFlags, - fileMode)); + fileFlags)); if (copyOutState->binary) { diff --git a/src/backend/distributed/worker/worker_truncate_trigger_protocol.c b/src/backend/distributed/worker/worker_truncate_trigger_protocol.c index dc4e7ffd89d..3f8f96b2d70 100644 --- a/src/backend/distributed/worker/worker_truncate_trigger_protocol.c +++ b/src/backend/distributed/worker/worker_truncate_trigger_protocol.c @@ -12,16 +12,18 @@ */ #include "postgres.h" + #include "fmgr.h" -#include "distributed/citus_ruleutils.h" -#include "distributed/metadata_utility.h" -#include "distributed/metadata_cache.h" -#include "distributed/metadata_sync.h" #include "utils/elog.h" #include "utils/fmgroids.h" #include "utils/lsyscache.h" +#include "distributed/citus_ruleutils.h" +#include "distributed/metadata_cache.h" +#include "distributed/metadata_sync.h" +#include "distributed/metadata_utility.h" + PG_FUNCTION_INFO_V1(worker_create_truncate_trigger); diff --git a/src/include/columnar/columnar.h b/src/include/columnar/columnar.h index 64cf745e101..66413dddd38 100644 --- a/src/include/columnar/columnar.h +++ b/src/include/columnar/columnar.h @@ -14,22 +14,25 @@ #include "postgres.h" #include "fmgr.h" + #include "lib/stringinfo.h" #include "nodes/parsenodes.h" -#include "pg_version_compat.h" #include "storage/bufpage.h" #include "storage/lockdefs.h" -#if PG_VERSION_NUM >= PG_VERSION_16 -#include "storage/relfilelocator.h" -#else -#include "storage/relfilenode.h" -#endif #include "utils/relcache.h" #include "utils/snapmgr.h" 
+#include "pg_version_compat.h" + #include "columnar/columnar_compression.h" #include "columnar/columnar_metadata.h" +#if PG_VERSION_NUM >= PG_VERSION_16 +#include "storage/relfilelocator.h" +#else +#include "storage/relfilenode.h" +#endif + #define COLUMNAR_AM_NAME "columnar" #define COLUMNAR_MODULE_NAME "citus_columnar" diff --git a/src/include/columnar/columnar_metadata.h b/src/include/columnar/columnar_metadata.h index 2af4354a005..64867ae2fbb 100644 --- a/src/include/columnar/columnar_metadata.h +++ b/src/include/columnar/columnar_metadata.h @@ -12,7 +12,17 @@ #ifndef COLUMNAR_METADATA_H #define COLUMNAR_METADATA_H +#include "postgres.h" + #include "pg_version_compat.h" +#include "pg_version_constants.h" + +#if PG_VERSION_NUM >= PG_VERSION_16 +#include "storage/relfilelocator.h" +#else +#include "storage/relfilenode.h" +#endif + /* * StripeMetadata represents information about a stripe. This information is diff --git a/src/include/columnar/columnar_tableam.h b/src/include/columnar/columnar_tableam.h index 657491ef87a..18331bd70b4 100644 --- a/src/include/columnar/columnar_tableam.h +++ b/src/include/columnar/columnar_tableam.h @@ -1,16 +1,18 @@ #ifndef COLUMNAR_TABLEAM_H #define COLUMNAR_TABLEAM_H -#include "citus_version.h" - #include "postgres.h" + #include "fmgr.h" -#include "access/tableam.h" -#include "access/skey.h" -#include "nodes/bitmapset.h" + #include "access/heapam.h" +#include "access/skey.h" +#include "access/tableam.h" #include "catalog/indexing.h" +#include "nodes/bitmapset.h" #include "utils/acl.h" +#include "citus_version.h" + /* * Number of valid ItemPointer Offset's for "row number" <> "ItemPointer" * mapping. diff --git a/src/include/columnar/columnar_version_compat.h b/src/include/columnar/columnar_version_compat.h index 0e0ae311264..d9b29cdb0c4 100644 --- a/src/include/columnar/columnar_version_compat.h +++ b/src/include/columnar/columnar_version_compat.h @@ -12,7 +12,7 @@ #ifndef COLUMNAR_COMPAT_H #define COLUMNAR_COMPAT_H -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #if PG_VERSION_NUM >= PG_VERSION_15 #define ExecARDeleteTriggers_compat(a, b, c, d, e, f) \ diff --git a/src/include/distributed/backend_data.h b/src/include/distributed/backend_data.h index 1fcd3114176..8014fe5a6a9 100644 --- a/src/include/distributed/backend_data.h +++ b/src/include/distributed/backend_data.h @@ -15,12 +15,13 @@ #include "access/twophase.h" #include "datatype/timestamp.h" -#include "distributed/transaction_identifier.h" #include "nodes/pg_list.h" #include "storage/lwlock.h" #include "storage/proc.h" #include "storage/s_lock.h" +#include "distributed/transaction_identifier.h" + /* * Each backend's active distributed transaction information is tracked via diff --git a/src/include/distributed/citus_custom_scan.h b/src/include/distributed/citus_custom_scan.h index a3da4958c8d..db1f0ce1f2a 100644 --- a/src/include/distributed/citus_custom_scan.h +++ b/src/include/distributed/citus_custom_scan.h @@ -10,11 +10,12 @@ #ifndef CITUS_CUSTOM_SCAN_H #define CITUS_CUSTOM_SCAN_H -#include "distributed/distributed_planner.h" -#include "distributed/multi_server_executor.h" #include "executor/execdesc.h" #include "nodes/plannodes.h" +#include "distributed/distributed_planner.h" +#include "distributed/multi_server_executor.h" + typedef struct CitusScanState { CustomScanState customScanState; /* underlying custom scan node */ diff --git a/src/include/distributed/citus_depended_object.h b/src/include/distributed/citus_depended_object.h index 
b520184119e..1efbe4e2a1d 100644 --- a/src/include/distributed/citus_depended_object.h +++ b/src/include/distributed/citus_depended_object.h @@ -12,10 +12,11 @@ #ifndef CITUS_DEPENDED_OBJECT_H #define CITUS_DEPENDED_OBJECT_H -#include "distributed/commands.h" #include "nodes/nodes.h" #include "nodes/parsenodes.h" +#include "distributed/commands.h" + extern bool HideCitusDependentObjects; /* DistOpsValidationState to be used to determine validity of dist ops */ diff --git a/src/include/distributed/citus_nodefuncs.h b/src/include/distributed/citus_nodefuncs.h index caeda3a728c..f7c0061b977 100644 --- a/src/include/distributed/citus_nodefuncs.h +++ b/src/include/distributed/citus_nodefuncs.h @@ -11,10 +11,11 @@ #ifndef CITUS_NODEFUNCS_H #define CITUS_NODEFUNCS_H -#include "distributed/multi_physical_planner.h" #include "nodes/nodes.h" #include "nodes/parsenodes.h" +#include "distributed/multi_physical_planner.h" + /* citus_nodefuncs.c */ extern void SetRangeTblExtraData(RangeTblEntry *rte, CitusRTEKind rteKind, char *fragmentSchemaName, char *fragmentTableName, diff --git a/src/include/distributed/citus_nodes.h b/src/include/distributed/citus_nodes.h index 888133a8978..16df367aa75 100644 --- a/src/include/distributed/citus_nodes.h +++ b/src/include/distributed/citus_nodes.h @@ -92,38 +92,21 @@ CitusNodeTagI(Node *node) return ((CitusNode*)(node))->citus_tag; } -/* - * Postgres's nodes/nodes.h has more information on why we do this. - */ -#ifdef __GNUC__ /* Citus variant of newNode(), don't use directly. */ -#define CitusNewNode(size, tag) \ -({ CitusNode *_result; \ - AssertMacro((size) >= sizeof(CitusNode)); /* need the tag, at least */ \ - _result = (CitusNode *) palloc0fast(size); \ - _result->extensible.type = T_ExtensibleNode; \ - _result->extensible.extnodename = CitusNodeTagNames[tag - CITUS_NODE_TAG_START]; \ - _result->citus_tag =(int) (tag); \ - _result; \ -}) - -#else - -extern CitusNode *newCitusNodeMacroHolder; - -#define CitusNewNode(size, tag) \ -( \ - AssertMacro((size) >= sizeof(CitusNode)), /* need the tag, at least */ \ - newCitusNodeMacroHolder = (CitusNode *) palloc0fast(size), \ - newCitusNodeMacroHolder->extensible.type = T_ExtensibleNode, \ - newCitusNodeMacroHolder->extensible.extnodename = CitusNodeTagNames[tag - CITUS_NODE_TAG_START], \ - newCitusNodeMacroHolder->citus_tag =(int) (tag), \ - newCitusNodeMacroHolder \ -) - -#endif +static inline CitusNode * +CitusNewNode(size_t size, CitusNodeTag tag) +{ + CitusNode *result; + Assert(size >= sizeof(CitusNode)); /* need the ExtensibleNode and the tag, at least */ + result = (CitusNode *) palloc0(size); + result->extensible.type = T_ExtensibleNode; + result->extensible.extnodename = CitusNodeTagNames[tag - CITUS_NODE_TAG_START]; + result->citus_tag = (int) (tag); + + return result; +} /* * IsA equivalent that compares node tags, including Citus-specific nodes. 
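The GNU-C statement-expression macro above becomes a portable static inline with the same calling convention; a minimal usage sketch (DistributedPlan and T_DistributedPlan are existing Citus node names, shown here purely for illustration):

    /* illustrative only, not part of this patch */
    DistributedPlan *distributedPlan =
        (DistributedPlan *) CitusNewNode(sizeof(DistributedPlan), T_DistributedPlan);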
diff --git a/src/include/distributed/citus_ruleutils.h b/src/include/distributed/citus_ruleutils.h index e45ddb26978..3a9c364824f 100644 --- a/src/include/distributed/citus_ruleutils.h +++ b/src/include/distributed/citus_ruleutils.h @@ -15,11 +15,12 @@ #include "catalog/pg_sequence.h" #include "commands/sequence.h" -#include "distributed/coordinator_protocol.h" #include "lib/stringinfo.h" #include "nodes/parsenodes.h" #include "nodes/pg_list.h" +#include "distributed/coordinator_protocol.h" + /* Function declarations for version independent Citus ruleutils wrapper functions */ extern char * pg_get_extensiondef_string(Oid tableRelationId); extern Oid get_extension_schema(Oid ext_oid); diff --git a/src/include/distributed/colocation_utils.h b/src/include/distributed/colocation_utils.h index bba78afd1ef..018f9757078 100644 --- a/src/include/distributed/colocation_utils.h +++ b/src/include/distributed/colocation_utils.h @@ -12,9 +12,10 @@ #ifndef COLOCATION_UTILS_H_ #define COLOCATION_UTILS_H_ -#include "distributed/shardinterval_utils.h" #include "nodes/pg_list.h" +#include "distributed/shardinterval_utils.h" + #define INVALID_COLOCATION_ID 0 extern uint32 TableColocationId(Oid distributedTableId); diff --git a/src/include/distributed/combine_query_planner.h b/src/include/distributed/combine_query_planner.h index 71001091355..2afc8aa5f53 100644 --- a/src/include/distributed/combine_query_planner.h +++ b/src/include/distributed/combine_query_planner.h @@ -14,9 +14,8 @@ #include "lib/stringinfo.h" #include "nodes/parsenodes.h" -#include "nodes/plannodes.h" - #include "nodes/pathnodes.h" +#include "nodes/plannodes.h" /* Function declarations for building local plans on the coordinator node */ diff --git a/src/include/distributed/commands.h b/src/include/distributed/commands.h index 43429278f1a..084308a8f3e 100644 --- a/src/include/distributed/commands.h +++ b/src/include/distributed/commands.h @@ -15,12 +15,13 @@ #include "postgres.h" -#include "distributed/metadata_utility.h" -#include "utils/rel.h" #include "nodes/parsenodes.h" #include "tcop/dest.h" #include "tcop/utility.h" #include "utils/acl.h" +#include "utils/rel.h" + +#include "distributed/metadata_utility.h" extern bool AddAllLocalTablesToMetadata; @@ -103,6 +104,10 @@ typedef struct DistributeObjectOps const DistributeObjectOps * GetDistributeObjectOps(Node *node); +/* functions to support node-wide object management commands from non-main dbs */ +extern bool RunPreprocessNonMainDBCommand(Node *parsetree); +extern void RunPostprocessNonMainDBCommand(Node *parsetree); + /* * Flags that can be passed to GetForeignKeyOids to indicate * which foreign key constraint OIDs are to be extracted @@ -229,11 +234,29 @@ extern List * PreprocessAlterDatabaseStmt(Node *node, const char *queryString, extern List * PreprocessAlterDatabaseRefreshCollStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); +extern List * GetDatabaseMetadataSyncCommands(Oid dbOid); extern List * PreprocessAlterDatabaseSetStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); +extern List * PreprocessCreateDatabaseStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext); +extern List * PostprocessCreateDatabaseStmt(Node *node, const char *queryString); +extern List * PreprocessDropDatabaseStmt(Node *node, const char *queryString, + ProcessUtilityContext processUtilityContext); +extern List * DropDatabaseStmtObjectAddress(Node *node, bool missingOk, + bool isPostprocess); 
+extern List * CreateDatabaseStmtObjectAddress(Node *node, bool missingOk, + bool isPostprocess); +extern List * GenerateGrantDatabaseCommandList(void); +extern List * PreprocessAlterDatabaseRenameStmt(Node *node, const char *queryString, + ProcessUtilityContext + processUtilityContext); +extern List * PostprocessAlterDatabaseRenameStmt(Node *node, const char *queryString); +extern void EnsureSupportedCreateDatabaseCommand(CreatedbStmt *stmt); +extern char * CreateDatabaseDDLCommand(Oid dbId); + /* domain.c - forward declarations */ extern List * CreateDomainStmtObjectAddress(Node *node, bool missing_ok, bool @@ -427,6 +450,7 @@ extern List * CreateExtensionStmtObjectAddress(Node *stmt, bool missing_ok, bool /* owned.c - forward declarations */ extern List * PreprocessDropOwnedStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); +extern List * PostprocessReassignOwnedStmt(Node *node, const char *queryString); /* policy.c - forward declarations */ extern List * CreatePolicyCommands(Oid relationId); @@ -521,6 +545,11 @@ extern List * AlterSchemaOwnerStmtObjectAddress(Node *node, bool missing_ok, extern List * AlterSchemaRenameStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess); +/* seclabel.c - forward declarations*/ +extern List * PostprocessSecLabelStmt(Node *node, const char *queryString); +extern List * SecLabelStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess); +extern void citus_test_object_relabel(const ObjectAddress *object, const char *seclabel); + /* sequence.c - forward declarations */ extern List * PreprocessAlterSequenceStmt(Node *node, const char *queryString, ProcessUtilityContext processUtilityContext); @@ -669,11 +698,6 @@ extern List * AlterTextSearchConfigurationSchemaStmtObjectAddress(Node *node, extern List * AlterTextSearchDictionarySchemaStmtObjectAddress(Node *node, bool missing_ok, bool isPostprocess); -extern List * TextSearchConfigurationCommentObjectAddress(Node *node, - bool missing_ok, bool - isPostprocess); -extern List * TextSearchDictCommentObjectAddress(Node *node, - bool missing_ok, bool isPostprocess); extern List * AlterTextSearchConfigurationOwnerObjectAddress(Node *node, bool missing_ok, bool isPostprocess); diff --git a/src/include/distributed/commands/multi_copy.h b/src/include/distributed/commands/multi_copy.h index fa59894ad87..1fc42df60ca 100644 --- a/src/include/distributed/commands/multi_copy.h +++ b/src/include/distributed/commands/multi_copy.h @@ -13,14 +13,15 @@ #define MULTI_COPY_H -#include "distributed/metadata_utility.h" -#include "distributed/metadata_cache.h" -#include "distributed/version_compat.h" #include "nodes/execnodes.h" #include "nodes/parsenodes.h" #include "parser/parse_coerce.h" #include "tcop/dest.h" +#include "distributed/metadata_cache.h" +#include "distributed/metadata_utility.h" +#include "distributed/version_compat.h" + #define INVALID_PARTITION_COLUMN_INDEX -1 diff --git a/src/include/distributed/commands/utility_hook.h b/src/include/distributed/commands/utility_hook.h index f02f83fe315..52fcf70912c 100644 --- a/src/include/distributed/commands/utility_hook.h +++ b/src/include/distributed/commands/utility_hook.h @@ -10,12 +10,12 @@ #ifndef MULTI_UTILITY_H #define MULTI_UTILITY_H -#include "distributed/pg_version_constants.h" - #include "postgres.h" -#include "utils/relcache.h" #include "tcop/utility.h" +#include "utils/relcache.h" + +#include "pg_version_constants.h" #include "distributed/coordinator_protocol.h" #include 
"distributed/function_call_delegation.h" @@ -40,6 +40,7 @@ typedef enum extern PropSetCmdBehavior PropagateSetCommands; extern bool EnableDDLPropagation; extern int CreateObjectPropagationMode; +extern bool EnableCreateDatabasePropagation; extern bool EnableCreateTypePropagation; extern bool EnableCreateRolePropagation; extern bool EnableAlterRolePropagation; @@ -74,11 +75,20 @@ typedef struct DDLJob const char *metadataSyncCommand; List *taskList; /* worker DDL tasks to execute */ + + /* + * Only applicable when any of the tasks cannot be executed in a + * transaction block. + * + * Controls whether to emit a warning within the utility hook in case of a + * failure. + */ + bool warnForPartialFailure; } DDLJob; extern ProcessUtility_hook_type PrevProcessUtility; -extern void multi_ProcessUtility(PlannedStmt *pstmt, const char *queryString, +extern void citus_ProcessUtility(PlannedStmt *pstmt, const char *queryString, bool readOnlyTree, ProcessUtilityContext context, ParamListInfo params, struct QueryEnvironment *queryEnv, DestReceiver *dest, @@ -93,6 +103,8 @@ extern void ProcessUtilityParseTree(Node *node, const char *queryString, extern void MarkInvalidateForeignKeyGraph(void); extern void InvalidateForeignKeyGraphForDDL(void); extern List * DDLTaskList(Oid relationId, const char *commandString); +extern List * NontransactionalNodeDDLTaskList(TargetWorkerSet targets, List *commands, + bool warnForPartialFailure); extern List * NodeDDLTaskList(TargetWorkerSet targets, List *commands); extern bool AlterTableInProgress(void); extern bool DropSchemaOrDBInProgress(void); diff --git a/src/include/distributed/comment.h b/src/include/distributed/comment.h new file mode 100644 index 00000000000..bef216ae488 --- /dev/null +++ b/src/include/distributed/comment.h @@ -0,0 +1,26 @@ +/*------------------------------------------------------------------------- + * + * comment.h + * Declarations for comment related operations. + * + * Copyright (c) Citus Data, Inc. + * + *------------------------------------------------------------------------- + */ + +#ifndef COMMENT_H +#define COMMENT_H + +#include "postgres.h" + +#include "nodes/parsenodes.h" + + +extern const char *ObjectTypeNames[]; + + +extern List * GetCommentPropagationCommands(Oid classOid, Oid oid, char *objectName, + ObjectType objectType); +extern List * CommentObjectAddress(Node *node, bool missing_ok, bool isPostprocess); + +# endif /* COMMENT_H */ diff --git a/src/include/distributed/connection_management.h b/src/include/distributed/connection_management.h index 158f0b1ce3d..d93e4483abf 100644 --- a/src/include/distributed/connection_management.h +++ b/src/include/distributed/connection_management.h @@ -13,16 +13,18 @@ #include "postgres.h" -#include "distributed/transaction_management.h" -#include "distributed/remote_transaction.h" -#include "lib/ilist.h" #include "pg_config.h" + +#include "lib/ilist.h" #include "portability/instr_time.h" #include "storage/latch.h" #include "utils/guc.h" #include "utils/hsearch.h" #include "utils/timestamp.h" +#include "distributed/remote_transaction.h" +#include "distributed/transaction_management.h" + /* maximum (textual) lengths of hostname and port */ #define MAX_NODE_LENGTH 255 /* includes 0 byte */ @@ -59,14 +61,6 @@ */ #define LOCAL_NODE_ID UINT32_MAX -/* - * If you want to connect to the current node use `LocalHostName`, which is a GUC, instead - * of the hardcoded loopback hostname. Only if you really need the loopback hostname use - * this define. 
- */ -#define LOCAL_HOST_NAME "localhost" - - /* forward declare, to avoid forcing large headers on everyone */ struct pg_conn; /* target of the PGconn typedef */ struct MemoryContextData; diff --git a/src/include/distributed/coordinator_protocol.h b/src/include/distributed/coordinator_protocol.h index 0dcc6614118..b2170fd2edc 100644 --- a/src/include/distributed/coordinator_protocol.h +++ b/src/include/distributed/coordinator_protocol.h @@ -13,16 +13,18 @@ #define COORDINATOR_PROTOCOL_H #include "postgres.h" + #include "c.h" #include "fmgr.h" -#include "distributed/connection_management.h" -#include "distributed/shardinterval_utils.h" #include "nodes/pg_list.h" -#include "distributed/metadata_utility.h" #include "columnar/columnar.h" +#include "distributed/connection_management.h" +#include "distributed/metadata_utility.h" +#include "distributed/shardinterval_utils.h" + /* * In our distributed database, we need a mechanism to make remote procedure * calls between clients, the coordinator node, and worker nodes. These remote calls diff --git a/src/include/distributed/deparse_shard_query.h b/src/include/distributed/deparse_shard_query.h index 9370e51e225..8fb012588de 100644 --- a/src/include/distributed/deparse_shard_query.h +++ b/src/include/distributed/deparse_shard_query.h @@ -18,6 +18,7 @@ #include "nodes/nodes.h" #include "nodes/parsenodes.h" #include "nodes/pg_list.h" + #include "distributed/citus_custom_scan.h" diff --git a/src/include/distributed/deparser.h b/src/include/distributed/deparser.h index 95d948bc9e9..4d4005c1958 100644 --- a/src/include/distributed/deparser.h +++ b/src/include/distributed/deparser.h @@ -15,10 +15,10 @@ #include "postgres.h" -#include "nodes/nodes.h" -#include "nodes/parsenodes.h" #include "catalog/objectaddress.h" #include "lib/stringinfo.h" +#include "nodes/nodes.h" +#include "nodes/parsenodes.h" /* forward declarations for format_collate.c */ /* Control flags for FormatCollateExtended, compatible with format_type_extended */ @@ -121,6 +121,33 @@ extern void AppendGrantedByInGrant(StringInfo buf, GrantStmt *stmt); extern void AppendGrantSharedPrefix(StringInfo buf, GrantStmt *stmt); extern void AppendGrantSharedSuffix(StringInfo buf, GrantStmt *stmt); +extern void AppendColumnNameList(StringInfo buf, List *columns); + +/* Common deparser utils */ + +typedef struct DefElemOptionFormat +{ + char *name; + char *format; + int type; +} DefElemOptionFormat; + +typedef enum OptionFormatType +{ + OPTION_FORMAT_STRING, + OPTION_FORMAT_LITERAL_CSTR, + OPTION_FORMAT_BOOLEAN, + OPTION_FORMAT_INTEGER +} OptionFormatType; + + +extern void DefElemOptionToStatement(StringInfo buf, DefElem *option, + const DefElemOptionFormat *opt_formats, + int opt_formats_len); + +/* forward declarations for deparse_comment_stmts.c */ +extern char * DeparseCommentStmt(Node *node); + /* forward declarations for deparse_statistics_stmts.c */ extern char * DeparseCreateStatisticsStmt(Node *node); @@ -209,6 +236,7 @@ extern void QualifyAlterRoleSetStmt(Node *stmt); extern char * DeparseCreateRoleStmt(Node *stmt); extern char * DeparseDropRoleStmt(Node *stmt); extern char * DeparseGrantRoleStmt(Node *stmt); +extern char * DeparseReassignOwnedStmt(Node *node); /* forward declarations for deparse_owned_stmts.c */ extern char * DeparseDropOwnedStmt(Node *node); @@ -227,6 +255,9 @@ extern char * DeparseGrantOnDatabaseStmt(Node *node); extern char * DeparseAlterDatabaseStmt(Node *node); extern char * DeparseAlterDatabaseRefreshCollStmt(Node *node); extern char * DeparseAlterDatabaseSetStmt(Node 
*node); +extern char * DeparseCreateDatabaseStmt(Node *node); +extern char * DeparseDropDatabaseStmt(Node *node); +extern char * DeparseAlterDatabaseRenameStmt(Node *node); /* forward declaration for deparse_publication_stmts.c */ @@ -260,6 +291,9 @@ extern void QualifyRenameTextSearchDictionaryStmt(Node *node); extern void QualifyTextSearchConfigurationCommentStmt(Node *node); extern void QualifyTextSearchDictionaryCommentStmt(Node *node); +/* forward declarations for deparse_seclabel_stmts.c */ +extern char * DeparseSecLabelStmt(Node *node); + /* forward declarations for deparse_sequence_stmts.c */ extern char * DeparseDropSequenceStmt(Node *node); extern char * DeparseRenameSequenceStmt(Node *node); diff --git a/src/include/distributed/distributed_deadlock_detection.h b/src/include/distributed/distributed_deadlock_detection.h index 23f6554efe7..5f391cc7073 100644 --- a/src/include/distributed/distributed_deadlock_detection.h +++ b/src/include/distributed/distributed_deadlock_detection.h @@ -14,11 +14,12 @@ #include "postgres.h" #include "access/hash.h" +#include "nodes/pg_list.h" + #include "distributed/backend_data.h" #include "distributed/listutils.h" #include "distributed/lock_graph.h" #include "distributed/transaction_identifier.h" -#include "nodes/pg_list.h" typedef struct TransactionNode { diff --git a/src/include/distributed/distributed_execution_locks.h b/src/include/distributed/distributed_execution_locks.h index e789843ae33..3ca31b3308e 100644 --- a/src/include/distributed/distributed_execution_locks.h +++ b/src/include/distributed/distributed_execution_locks.h @@ -14,6 +14,7 @@ #include "nodes/pg_list.h" #include "storage/lockdefs.h" + #include "distributed/multi_physical_planner.h" extern void AcquireExecutorShardLocksForExecution(RowModifyLevel modLevel, diff --git a/src/include/distributed/distributed_planner.h b/src/include/distributed/distributed_planner.h index d46fbf2e639..23540f6f683 100644 --- a/src/include/distributed/distributed_planner.h +++ b/src/include/distributed/distributed_planner.h @@ -12,11 +12,10 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" - +#include "nodes/pathnodes.h" #include "nodes/plannodes.h" -#include "nodes/pathnodes.h" +#include "pg_version_constants.h" #include "distributed/citus_nodes.h" #include "distributed/errormessage.h" @@ -105,7 +104,7 @@ typedef struct FastPathRestrictionContext * Set to true when distKey = Param; in the queryTree */ bool distributionKeyHasParam; -}FastPathRestrictionContext; +} FastPathRestrictionContext; typedef struct PlannerRestrictionContext { diff --git a/src/include/distributed/enterprise.h b/src/include/distributed/enterprise.h index 26a882bd688..2ba2fa1ffb2 100644 --- a/src/include/distributed/enterprise.h +++ b/src/include/distributed/enterprise.h @@ -13,6 +13,7 @@ #define CITUS_ENTERPRISE_H #include "postgres.h" + #include "fmgr.h" diff --git a/src/include/distributed/errormessage.h b/src/include/distributed/errormessage.h index 3c19a9c834f..7a38d513c29 100644 --- a/src/include/distributed/errormessage.h +++ b/src/include/distributed/errormessage.h @@ -11,9 +11,11 @@ #define ERRORMESSAGE_H #include "c.h" -#include "distributed/citus_nodes.h" + #include "pg_version_compat.h" +#include "distributed/citus_nodes.h" + typedef struct DeferredErrorMessage { diff --git a/src/include/distributed/executor_util.h b/src/include/distributed/executor_util.h index 8560c6dfd2e..b39122d263b 100644 --- a/src/include/distributed/executor_util.h +++ b/src/include/distributed/executor_util.h @@ -12,10 
+12,11 @@ #include "funcapi.h" #include "access/tupdesc.h" -#include "distributed/multi_physical_planner.h" #include "nodes/params.h" #include "nodes/pg_list.h" +#include "distributed/multi_physical_planner.h" + /* utility functions for dealing with tasks in the executor */ extern bool TaskListModifiesDatabase(RowModifyLevel modLevel, List *taskList); diff --git a/src/include/distributed/foreign_key_relationship.h b/src/include/distributed/foreign_key_relationship.h index ef2c5be33a8..bbaf8be7353 100644 --- a/src/include/distributed/foreign_key_relationship.h +++ b/src/include/distributed/foreign_key_relationship.h @@ -10,17 +10,18 @@ #define FOREIGN_KEY_RELATIONSHIP_H #include "postgres.h" + #include "postgres_ext.h" -#include "utils/relcache.h" -#include "utils/hsearch.h" + #include "nodes/primnodes.h" +#include "utils/hsearch.h" +#include "utils/relcache.h" extern List * GetForeignKeyConnectedRelationIdList(Oid relationId); extern bool ShouldUndistributeCitusLocalTable(Oid relationId); extern List * ReferencedRelationIdList(Oid relationId); extern List * ReferencingRelationIdList(Oid relationId); extern void SetForeignConstraintRelationshipGraphInvalid(void); -extern void ClearForeignConstraintRelationshipGraphContext(void); extern bool OidVisited(HTAB *oidVisitedMap, Oid oid); extern void VisitOid(HTAB *oidVisitedMap, Oid oid); diff --git a/src/include/distributed/hash_helpers.h b/src/include/distributed/hash_helpers.h index 2b16d110c50..b64bfde71ae 100644 --- a/src/include/distributed/hash_helpers.h +++ b/src/include/distributed/hash_helpers.h @@ -11,10 +11,10 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" - #include "utils/hsearch.h" +#include "pg_version_constants.h" + /* * assert_valid_hash_key2 checks if a type that contains 2 fields contains no * padding bytes. This is necessary to use a type as a hash key with tag_hash. diff --git a/src/include/distributed/insert_select_planner.h b/src/include/distributed/insert_select_planner.h index 771d1d60f39..a9100b02dfd 100644 --- a/src/include/distributed/insert_select_planner.h +++ b/src/include/distributed/insert_select_planner.h @@ -16,12 +16,13 @@ #include "postgres.h" -#include "distributed/multi_physical_planner.h" -#include "distributed/distributed_planner.h" #include "nodes/execnodes.h" #include "nodes/parsenodes.h" #include "nodes/plannodes.h" +#include "distributed/distributed_planner.h" +#include "distributed/multi_physical_planner.h" + extern bool InsertSelectIntoCitusTable(Query *query); extern bool CheckInsertSelectQuery(Query *query); diff --git a/src/include/distributed/intermediate_results.h b/src/include/distributed/intermediate_results.h index 63eca5ad1a4..ca4fa581e05 100644 --- a/src/include/distributed/intermediate_results.h +++ b/src/include/distributed/intermediate_results.h @@ -14,13 +14,14 @@ #include "fmgr.h" -#include "distributed/commands/multi_copy.h" #include "nodes/execnodes.h" #include "nodes/pg_list.h" #include "tcop/dest.h" #include "utils/builtins.h" #include "utils/palloc.h" +#include "distributed/commands/multi_copy.h" + /* * DistributedResultFragment represents a fragment of a distributed result. 
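Most of the header hunks in this patch only regroup #include directives rather than change behavior. The ordering they appear to converge on is: "postgres.h" (or "c.h" / "postgres_ext.h") first, then PostgreSQL headers alphabetized, then the Citus version shims ("pg_version_constants.h" / "pg_version_compat.h", now living outside distributed/), then the "distributed/..." headers, with blank lines between the groups. A minimal sketch of a header laid out that way; the file name and the declaration are illustrative only and not part of the patch:

#ifndef EXAMPLE_HEADER_H
#define EXAMPLE_HEADER_H

#include "postgres.h"

/* PostgreSQL headers, alphabetized */
#include "nodes/pg_list.h"
#include "utils/hsearch.h"

/* Citus version shims come before the other Citus headers */
#include "pg_version_constants.h"

/* Citus headers, alphabetized */
#include "distributed/errormessage.h"
#include "distributed/metadata_utility.h"

/* hypothetical declaration, only here to complete the sketch */
extern List * ExampleRelationIdList(Oid relationId);

#endif /* EXAMPLE_HEADER_H */
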
diff --git a/src/include/distributed/listutils.h b/src/include/distributed/listutils.h index 833c77d2263..2a52cbc7527 100644 --- a/src/include/distributed/listutils.h +++ b/src/include/distributed/listutils.h @@ -13,13 +13,15 @@ #define CITUS_LISTUTILS_H #include "postgres.h" + #include "c.h" #include "nodes/pg_list.h" -#include "pg_version_compat.h" #include "utils/array.h" #include "utils/hsearch.h" +#include "pg_version_compat.h" + /* * ListCellAndListWrapper stores a list and list cell. diff --git a/src/include/distributed/local_distributed_join_planner.h b/src/include/distributed/local_distributed_join_planner.h index dfb45f149ae..3390ab213eb 100644 --- a/src/include/distributed/local_distributed_join_planner.h +++ b/src/include/distributed/local_distributed_join_planner.h @@ -14,6 +14,7 @@ #define LOCAL_DISTRIBUTED_JOIN_PLANNER_H #include "postgres.h" + #include "distributed/recursive_planning.h" /* managed via guc.c */ diff --git a/src/include/distributed/lock_graph.h b/src/include/distributed/lock_graph.h index f204ebb034b..e14a515803c 100644 --- a/src/include/distributed/lock_graph.h +++ b/src/include/distributed/lock_graph.h @@ -14,12 +14,14 @@ #include "postgres.h" + #include "libpq-fe.h" #include "datatype/timestamp.h" -#include "distributed/backend_data.h" #include "storage/lock.h" +#include "distributed/backend_data.h" + /* * Describes an edge in a waiting-for graph of locks. This isn't used for diff --git a/src/include/distributed/maintenanced.h b/src/include/distributed/maintenanced.h index de1e6888395..07387a7fd34 100644 --- a/src/include/distributed/maintenanced.h +++ b/src/include/distributed/maintenanced.h @@ -20,6 +20,7 @@ /* config variable for */ extern double DistributedDeadlockDetectionTimeoutFactor; +extern char *MainDb; extern void StopMaintenanceDaemon(Oid databaseId); extern void TriggerNodeMetadataSync(Oid databaseId); @@ -27,6 +28,7 @@ extern void InitializeMaintenanceDaemon(void); extern size_t MaintenanceDaemonShmemSize(void); extern void MaintenanceDaemonShmemInit(void); extern void InitializeMaintenanceDaemonBackend(void); +extern void InitializeMaintenanceDaemonForMainDb(void); extern bool LockCitusExtension(void); extern PGDLLEXPORT void CitusMaintenanceDaemonMain(Datum main_arg); diff --git a/src/include/distributed/merge_planner.h b/src/include/distributed/merge_planner.h index 89829260327..b6636687aa1 100644 --- a/src/include/distributed/merge_planner.h +++ b/src/include/distributed/merge_planner.h @@ -15,6 +15,7 @@ #include "c.h" #include "nodes/parsenodes.h" + #include "distributed/distributed_planner.h" #include "distributed/errormessage.h" #include "distributed/multi_physical_planner.h" diff --git a/src/include/distributed/metadata/dependency.h b/src/include/distributed/metadata/dependency.h index 2d3759e1f08..2cfefc87eb0 100644 --- a/src/include/distributed/metadata/dependency.h +++ b/src/include/distributed/metadata/dependency.h @@ -16,9 +16,10 @@ #include "catalog/objectaddress.h" #include "catalog/pg_depend.h" -#include "distributed/errormessage.h" #include "nodes/pg_list.h" +#include "distributed/errormessage.h" + typedef bool (*AddressPredicate)(const ObjectAddress *); extern List * GetUniqueDependenciesList(List *objectAddressesList); diff --git a/src/include/distributed/metadata/distobject.h b/src/include/distributed/metadata/distobject.h index de56c0e1fb6..e98e6ee8698 100644 --- a/src/include/distributed/metadata/distobject.h +++ b/src/include/distributed/metadata/distobject.h @@ -23,9 +23,13 @@ extern bool CitusExtensionObject(const 
ObjectAddress *objectAddress); extern bool IsAnyObjectDistributed(const List *addresses); extern bool ClusterHasDistributedFunctionWithDistArgument(void); extern void MarkObjectDistributed(const ObjectAddress *distAddress); +extern void MarkObjectDistributedWithName(const ObjectAddress *distAddress, char *name, + bool useConnectionForLocalQuery, + char *connectionUser); extern void MarkObjectDistributedViaSuperUser(const ObjectAddress *distAddress); extern void MarkObjectDistributedLocally(const ObjectAddress *distAddress); extern void UnmarkObjectDistributed(const ObjectAddress *address); +extern void UnmarkNodeWideObjectsDistributed(Node *node); extern bool IsTableOwnedByExtension(Oid relationId); extern bool ObjectAddressDependsOnExtension(const ObjectAddress *target); extern bool IsAnyObjectAddressOwnedByExtension(const List *targets, diff --git a/src/include/distributed/metadata_cache.h b/src/include/distributed/metadata_cache.h index 34b95b859e4..f1120497b72 100644 --- a/src/include/distributed/metadata_cache.h +++ b/src/include/distributed/metadata_cache.h @@ -14,10 +14,12 @@ #include "postgres.h" #include "fmgr.h" + +#include "utils/hsearch.h" + #include "distributed/metadata_utility.h" #include "distributed/pg_dist_partition.h" #include "distributed/worker_manager.h" -#include "utils/hsearch.h" extern bool EnableVersionChecks; diff --git a/src/include/distributed/metadata_sync.h b/src/include/distributed/metadata_sync.h index 237df363a13..617eed7059b 100644 --- a/src/include/distributed/metadata_sync.h +++ b/src/include/distributed/metadata_sync.h @@ -13,10 +13,11 @@ #define METADATA_SYNC_H +#include "nodes/pg_list.h" + #include "distributed/commands/utility_hook.h" #include "distributed/coordinator_protocol.h" #include "distributed/metadata_cache.h" -#include "nodes/pg_list.h" /* managed via guc.c */ typedef enum @@ -88,6 +89,7 @@ extern List * NodeMetadataCreateCommands(void); extern List * CitusTableMetadataCreateCommandList(Oid relationId); extern List * NodeMetadataDropCommands(void); extern char * MarkObjectsDistributedCreateCommand(List *addresses, + List *names, List *distributionArgumentIndexes, List *colocationIds, List *forceDelegations); @@ -107,6 +109,7 @@ extern char * ColocationIdUpdateCommand(Oid relationId, uint32 colocationId); extern char * CreateSchemaDDLCommand(Oid schemaId); extern List * GrantOnSchemaDDLCommands(Oid schemaId); extern List * GrantOnFunctionDDLCommands(Oid functionOid); +extern List * GrantOnDatabaseDDLCommands(Oid databaseOid); extern List * GrantOnForeignServerDDLCommands(Oid serverId); extern List * GenerateGrantOnForeignServerQueriesFromAclItem(Oid serverId, AclItem *aclItem); @@ -127,8 +130,13 @@ extern List * IdentitySequenceDependencyCommandList(Oid targetRelationId); extern List * DDLCommandsForSequence(Oid sequenceOid, char *ownerName); extern List * GetSequencesFromAttrDef(Oid attrdefOid); +#if PG_VERSION_NUM < PG_VERSION_15 +ObjectAddress GetAttrDefaultColumnAddress(Oid attrdefoid); +#endif +extern List * GetAttrDefsFromSequence(Oid seqOid); extern void GetDependentSequencesWithRelation(Oid relationId, List **seqInfoList, AttrNumber attnum, char depType); +extern List * GetDependentRelationsWithSequence(Oid seqId, char depType); extern List * GetDependentFunctionsWithRelation(Oid relationId); extern Oid GetAttributeTypeOid(Oid relationId, AttrNumber attnum); extern void SetLocalEnableMetadataSync(bool state); @@ -186,7 +194,7 @@ extern void SendInterTableRelationshipCommands(MetadataSyncContext *context); #define 
WORKER_DROP_ALL_SHELL_TABLES \ "CALL pg_catalog.worker_drop_all_shell_tables(%s)" #define CITUS_INTERNAL_MARK_NODE_NOT_SYNCED \ - "SELECT citus_internal_mark_node_not_synced(%d, %d)" + "SELECT citus_internal.mark_node_not_synced(%d, %d)" #define REMOVE_ALL_CITUS_TABLES_COMMAND \ "SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition" diff --git a/src/include/distributed/metadata_utility.h b/src/include/distributed/metadata_utility.h index 9234adc7648..737e1283ba0 100644 --- a/src/include/distributed/metadata_utility.h +++ b/src/include/distributed/metadata_utility.h @@ -21,13 +21,14 @@ #include "access/tupdesc.h" #include "catalog/indexing.h" #include "catalog/objectaddress.h" +#include "utils/acl.h" +#include "utils/relcache.h" + #include "distributed/citus_nodes.h" #include "distributed/connection_management.h" #include "distributed/errormessage.h" #include "distributed/relay_utility.h" #include "distributed/worker_manager.h" -#include "utils/acl.h" -#include "utils/relcache.h" /* total number of hash tokens (2^32) */ @@ -342,6 +343,7 @@ extern void LookupTaskPlacementHostAndPort(ShardPlacement *taskPlacement, char * int *nodePort); extern bool IsDummyPlacement(ShardPlacement *taskPlacement); extern StringInfo GenerateSizeQueryOnMultiplePlacements(List *shardIntervalList, + Oid indexId, SizeQueryType sizeQueryType, bool optimizePartitionCalculations); extern List * RemoveCoordinatorPlacementIfNotSingleNode(List *placementList); @@ -384,6 +386,7 @@ extern void EnsureUndistributeTenantTableSafe(Oid relationId, const char *operat extern TableConversionReturn * UndistributeTable(TableConversionParameters *params); extern void UndistributeTables(List *relationIdList); +extern void EnsureObjectAndDependenciesExistOnAllNodes(const ObjectAddress *target); extern void EnsureAllObjectDependenciesExistOnAllNodes(const List *targets); extern DeferredErrorMessage * DeferErrorIfCircularDependencyExists(const ObjectAddress * diff --git a/src/include/distributed/multi_executor.h b/src/include/distributed/multi_executor.h index 5ae010d870d..6708d9a6445 100644 --- a/src/include/distributed/multi_executor.h +++ b/src/include/distributed/multi_executor.h @@ -11,8 +11,8 @@ #define MULTI_EXECUTOR_H #include "executor/execdesc.h" -#include "nodes/parsenodes.h" #include "nodes/execnodes.h" +#include "nodes/parsenodes.h" #include "distributed/citus_custom_scan.h" #include "distributed/multi_physical_planner.h" diff --git a/src/include/distributed/multi_explain.h b/src/include/distributed/multi_explain.h index 29663490544..f6dad83c2f3 100644 --- a/src/include/distributed/multi_explain.h +++ b/src/include/distributed/multi_explain.h @@ -10,9 +10,10 @@ #ifndef MULTI_EXPLAIN_H #define MULTI_EXPLAIN_H -#include "executor/executor.h" #include "tuple_destination.h" +#include "executor/executor.h" + typedef enum { EXPLAIN_ANALYZE_SORT_BY_TIME = 0, diff --git a/src/include/distributed/multi_logical_planner.h b/src/include/distributed/multi_logical_planner.h index de4901ea2d0..f68fd3ed5b8 100644 --- a/src/include/distributed/multi_logical_planner.h +++ b/src/include/distributed/multi_logical_planner.h @@ -14,15 +14,16 @@ #ifndef MULTI_LOGICAL_PLANNER_H #define MULTI_LOGICAL_PLANNER_H +#include "nodes/nodes.h" +#include "nodes/parsenodes.h" +#include "nodes/pg_list.h" +#include "nodes/primnodes.h" + #include "distributed/citus_nodes.h" #include "distributed/errormessage.h" #include "distributed/log_utils.h" #include "distributed/multi_join_order.h" #include 
"distributed/relation_restriction_equivalence.h" -#include "nodes/nodes.h" -#include "nodes/primnodes.h" -#include "nodes/parsenodes.h" -#include "nodes/pg_list.h" #define SUBQUERY_RANGE_TABLE_ID -1 diff --git a/src/include/distributed/multi_logical_replication.h b/src/include/distributed/multi_logical_replication.h index f5a9dc342a5..2a57c02241a 100644 --- a/src/include/distributed/multi_logical_replication.h +++ b/src/include/distributed/multi_logical_replication.h @@ -15,6 +15,7 @@ #include "c.h" #include "nodes/pg_list.h" + #include "distributed/connection_management.h" #include "distributed/hash_helpers.h" #include "distributed/shard_cleaner.h" diff --git a/src/include/distributed/multi_partitioning_utils.h b/src/include/distributed/multi_partitioning_utils.h index b8cfe38c050..7d76b9aa3d3 100644 --- a/src/include/distributed/multi_partitioning_utils.h +++ b/src/include/distributed/multi_partitioning_utils.h @@ -8,9 +8,10 @@ #define MULTI_PARTITIONING_UTILS_H_ -#include "distributed/metadata_utility.h" #include "nodes/pg_list.h" +#include "distributed/metadata_utility.h" + extern bool PartitionedTable(Oid relationId); extern bool PartitionedTableNoLock(Oid relationId); diff --git a/src/include/distributed/multi_physical_planner.h b/src/include/distributed/multi_physical_planner.h index b7acc057412..475a41b37b8 100644 --- a/src/include/distributed/multi_physical_planner.h +++ b/src/include/distributed/multi_physical_planner.h @@ -16,21 +16,22 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" - #include "c.h" #include "datatype/timestamp.h" +#include "lib/stringinfo.h" +#include "nodes/parsenodes.h" +#include "utils/array.h" + +#include "pg_version_constants.h" + #include "distributed/citus_nodes.h" +#include "distributed/distributed_planner.h" #include "distributed/errormessage.h" #include "distributed/log_utils.h" #include "distributed/metadata_utility.h" -#include "distributed/worker_manager.h" #include "distributed/multi_logical_planner.h" -#include "distributed/distributed_planner.h" -#include "lib/stringinfo.h" -#include "nodes/parsenodes.h" -#include "utils/array.h" +#include "distributed/worker_manager.h" /* Definitions local to the physical planner */ @@ -237,8 +238,8 @@ typedef struct Task TaskQuery taskQuery; /* - * A task can have multiple queries, in which case queryCount will be > 1. If - * a task has more one query, then taskQuery->queryType == TASK_QUERY_TEXT_LIST. + * A task can have multiple queries, in which case queryCount will be > 1, and + * taskQuery->queryType == TASK_QUERY_TEXT_LIST. */ int queryCount; @@ -289,7 +290,7 @@ typedef struct Task /* * When we evaluate functions and parameters in the query string then - * we should no longer send the list of parameters long with the + * we should no longer send the list of parameters along with the * query. */ bool parametersInQueryStringResolved; @@ -329,7 +330,7 @@ typedef struct Task /* * Vacuum, create/drop/reindex concurrently cannot be executed in a transaction. 
*/ - bool cannotBeExecutedInTransction; + bool cannotBeExecutedInTransaction; Const *partitionKeyValue; int colocationId; diff --git a/src/include/distributed/multi_progress.h b/src/include/distributed/multi_progress.h index 64bad527f73..2a9cf1cbb5c 100644 --- a/src/include/distributed/multi_progress.h +++ b/src/include/distributed/multi_progress.h @@ -16,6 +16,7 @@ #include "postgres.h" #include "fmgr.h" + #include "nodes/pg_list.h" #include "storage/dsm.h" diff --git a/src/include/distributed/multi_router_planner.h b/src/include/distributed/multi_router_planner.h index 160cf6605ff..ae75ee63147 100644 --- a/src/include/distributed/multi_router_planner.h +++ b/src/include/distributed/multi_router_planner.h @@ -14,12 +14,13 @@ #include "c.h" +#include "nodes/parsenodes.h" + +#include "distributed/distributed_planner.h" #include "distributed/errormessage.h" #include "distributed/log_utils.h" #include "distributed/multi_logical_planner.h" #include "distributed/multi_physical_planner.h" -#include "distributed/distributed_planner.h" -#include "nodes/parsenodes.h" /* reserved alias name for UPSERTs */ diff --git a/src/include/distributed/pg_dist_transaction.h b/src/include/distributed/pg_dist_transaction.h index 815633b7030..95658f782ca 100644 --- a/src/include/distributed/pg_dist_transaction.h +++ b/src/include/distributed/pg_dist_transaction.h @@ -35,9 +35,10 @@ typedef FormData_pg_dist_transaction *Form_pg_dist_transaction; * compiler constants for pg_dist_transaction * ---------------- */ -#define Natts_pg_dist_transaction 2 +#define Natts_pg_dist_transaction 3 #define Anum_pg_dist_transaction_groupid 1 #define Anum_pg_dist_transaction_gid 2 +#define Anum_pg_dist_transaction_outerxid 3 #endif /* PG_DIST_TRANSACTION_H */ diff --git a/src/include/distributed/placement_access.h b/src/include/distributed/placement_access.h index 28b05baae8d..0eafa678a4c 100644 --- a/src/include/distributed/placement_access.h +++ b/src/include/distributed/placement_access.h @@ -11,7 +11,9 @@ #define PLACEMENT_ACCESS_H #include "postgres.h" + #include "nodes/pg_list.h" + #include "distributed/multi_physical_planner.h" /* forward declare, to avoid dependency on ShardPlacement definition */ diff --git a/src/include/distributed/query_colocation_checker.h b/src/include/distributed/query_colocation_checker.h index 562869a9248..2a46d364cfe 100644 --- a/src/include/distributed/query_colocation_checker.h +++ b/src/include/distributed/query_colocation_checker.h @@ -11,10 +11,11 @@ #define QUERY_COLOCATION_CHECKER_H -#include "distributed/distributed_planner.h" #include "nodes/parsenodes.h" #include "nodes/primnodes.h" +#include "distributed/distributed_planner.h" + /* * ColocatedJoinChecker is a helper structure that is used to decide diff --git a/src/include/distributed/query_pushdown_planning.h b/src/include/distributed/query_pushdown_planning.h index 061a4a73082..e0d4f25ddc5 100644 --- a/src/include/distributed/query_pushdown_planning.h +++ b/src/include/distributed/query_pushdown_planning.h @@ -13,10 +13,10 @@ #include "postgres.h" #include "distributed/distributed_planner.h" -#include "distributed/multi_logical_planner.h" -#include "distributed/multi_physical_planner.h" #include "distributed/errormessage.h" #include "distributed/log_utils.h" +#include "distributed/multi_logical_planner.h" +#include "distributed/multi_physical_planner.h" /* Config variables managed via guc.c */ diff --git a/src/include/distributed/query_utils.h b/src/include/distributed/query_utils.h index 7e1ba54e6bd..0b216d158c7 100644 --- 
a/src/include/distributed/query_utils.h +++ b/src/include/distributed/query_utils.h @@ -12,6 +12,7 @@ #define QUERY_UTILS_H #include "postgres.h" + #include "nodes/pg_list.h" #include "nodes/primnodes.h" diff --git a/src/include/distributed/recursive_planning.h b/src/include/distributed/recursive_planning.h index a883047f6cb..c37eba34334 100644 --- a/src/include/distributed/recursive_planning.h +++ b/src/include/distributed/recursive_planning.h @@ -10,13 +10,15 @@ #ifndef RECURSIVE_PLANNING_H #define RECURSIVE_PLANNING_H -#include "distributed/pg_version_constants.h" +#include "nodes/pathnodes.h" +#include "nodes/pg_list.h" +#include "nodes/primnodes.h" + +#include "pg_version_constants.h" + #include "distributed/errormessage.h" #include "distributed/log_utils.h" #include "distributed/relation_restriction_equivalence.h" -#include "nodes/pg_list.h" -#include "nodes/primnodes.h" -#include "nodes/pathnodes.h" typedef struct RecursivePlanningContextInternal RecursivePlanningContext; diff --git a/src/include/distributed/relation_utils.h b/src/include/distributed/relation_utils.h index acf84a9dad3..d3a5ab105f0 100644 --- a/src/include/distributed/relation_utils.h +++ b/src/include/distributed/relation_utils.h @@ -13,7 +13,7 @@ #include "postgres.h" -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #if PG_VERSION_NUM >= PG_VERSION_16 #include "parser/parse_relation.h" #endif diff --git a/src/include/distributed/relay_utility.h b/src/include/distributed/relay_utility.h index f5a37da45a5..6fa2172e315 100644 --- a/src/include/distributed/relay_utility.h +++ b/src/include/distributed/relay_utility.h @@ -16,6 +16,7 @@ #define RELAY_UTILITY_H #include "fmgr.h" + #include "lib/stringinfo.h" #include "nodes/nodes.h" diff --git a/src/include/distributed/remote_transaction.h b/src/include/distributed/remote_transaction.h index 6136f25c9f8..45e2eba70f5 100644 --- a/src/include/distributed/remote_transaction.h +++ b/src/include/distributed/remote_transaction.h @@ -12,8 +12,9 @@ #include "libpq-fe.h" -#include "nodes/pg_list.h" + #include "lib/ilist.h" +#include "nodes/pg_list.h" /* forward declare, to avoid recursive includes */ @@ -143,4 +144,14 @@ extern void CoordinatedRemoteTransactionsSavepointBegin(SubTransactionId subId); extern void CoordinatedRemoteTransactionsSavepointRelease(SubTransactionId subId); extern void CoordinatedRemoteTransactionsSavepointRollback(SubTransactionId subId); +extern void RunCitusMainDBQuery(char *query); +extern void CleanCitusMainDBConnection(void); + +extern bool IsMainDBCommand; +extern bool IsMainDB; +extern char *SuperuserRole; +extern char *MainDb; +extern struct MultiConnection *MainDBConnection; +extern bool IsMainDBCommandInXact; + #endif /* REMOTE_TRANSACTION_H */ diff --git a/src/include/distributed/replication_origin_session_utils.h b/src/include/distributed/replication_origin_session_utils.h index e90bd8ab87b..b11d56ffc4d 100644 --- a/src/include/distributed/replication_origin_session_utils.h +++ b/src/include/distributed/replication_origin_session_utils.h @@ -12,7 +12,9 @@ #define REPLICATION_ORIGIN_SESSION_UTILS_H #include "postgres.h" + #include "replication/origin.h" + #include "distributed/connection_management.h" extern void InitializeReplicationOriginSessionUtils(void); diff --git a/src/include/distributed/resource_lock.h b/src/include/distributed/resource_lock.h index 9efa1b7672c..576d2bf1516 100644 --- a/src/include/distributed/resource_lock.h +++ b/src/include/distributed/resource_lock.h @@ -11,13 +11,16 @@ #define 
RESOURCE_LOCK_H #include "postgres.h" /* IWYU pragma: keep */ + #include "c.h" -#include "distributed/worker_transaction.h" +#include "catalog/dependency.h" #include "nodes/pg_list.h" #include "storage/lock.h" #include "tcop/utility.h" +#include "distributed/worker_transaction.h" + /* * Postgres' advisory locks use 'field4' to discern between different kind of @@ -43,7 +46,8 @@ typedef enum AdvisoryLocktagClass ADV_LOCKTAG_CLASS_CITUS_CLEANUP_OPERATION_ID = 10, ADV_LOCKTAG_CLASS_CITUS_LOGICAL_REPLICATION = 12, ADV_LOCKTAG_CLASS_CITUS_REBALANCE_PLACEMENT_COLOCATION = 13, - ADV_LOCKTAG_CLASS_CITUS_BACKGROUND_TASK = 14 + ADV_LOCKTAG_CLASS_CITUS_BACKGROUND_TASK = 14, + ADV_LOCKTAG_CLASS_CITUS_GLOBAL_DDL_SERIALIZATION = 15 } AdvisoryLocktagClass; /* CitusOperations has constants for citus operations */ @@ -140,6 +144,72 @@ typedef enum CitusOperations (uint32) (taskId), \ ADV_LOCKTAG_CLASS_CITUS_BACKGROUND_TASK) +/* + * IsNodeWideObjectClass returns true if the given object class is node-wide, + * i.e., that is not bound to a particular database but to whole server. + * + * Defined here as an inlined function so that SET_LOCKTAG_GLOBAL_DDL_SERIALIZATION + * macro can use it. + */ +static inline bool +IsNodeWideObjectClass(ObjectClass objectClass) +{ + if ((int) objectClass < 0 || objectClass > LAST_OCLASS) + { + elog(ERROR, "invalid object class: %d", objectClass); + } + + /* + * We don't expect Postgres to change an object class to a node-wide one in the + * future, but a newly added object class may be node-wide. + * + * So we put a static assert here to make sure that the developer who adds support + * for a new Postgres version is aware of this. + * + * If new object classes are added and none of them are node-wide, then update + * this assertion check based on latest supported major Postgres version. + */ + StaticAssertStmt(PG_MAJORVERSION_NUM <= 16, + "better to check if any of newly added ObjectClass'es are node-wide"); + + switch (objectClass) + { + case OCLASS_ROLE: + case OCLASS_DATABASE: + case OCLASS_TBLSPACE: +#if PG_VERSION_NUM >= PG_VERSION_15 + case OCLASS_PARAMETER_ACL: +#endif +#if PG_VERSION_NUM >= PG_VERSION_16 + case OCLASS_ROLE_MEMBERSHIP: +#endif + { + return true; + } + + default: + return false; + } +} + + +/* + * Automatically sets databaseId to InvalidOid if the object class is + * node-wide, i.e., that is not bound to a particular database but to + * whole server. If the object class is not node-wide, sets databaseId + * to MyDatabaseId. + * + * That way, the lock is local to each database if the object class is + * not node-wide, and global if it is. + */ +#define SET_LOCKTAG_GLOBAL_DDL_SERIALIZATION(tag, objectClass, oid) \ + SET_LOCKTAG_ADVISORY(tag, \ + (uint32) (IsNodeWideObjectClass(objectClass) ? InvalidOid : \ + MyDatabaseId), \ + (uint32) objectClass, \ + (uint32) oid, \ + ADV_LOCKTAG_CLASS_CITUS_GLOBAL_DDL_SERIALIZATION) + /* * DistLockConfigs are used to configure the locking behaviour of AcquireDistributedLockOnRelations */ diff --git a/src/include/distributed/serialize_distributed_ddls.h b/src/include/distributed/serialize_distributed_ddls.h new file mode 100644 index 00000000000..c62c617d137 --- /dev/null +++ b/src/include/distributed/serialize_distributed_ddls.h @@ -0,0 +1,37 @@ +/*------------------------------------------------------------------------- + * + * serialize_distributed_ddls.h + * + * Declarations for public functions related to serializing distributed + * DDLs. 
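The IsNodeWideObjectClass() helper and the SET_LOCKTAG_GLOBAL_DDL_SERIALIZATION macro added to resource_lock.h above scope the advisory lock tag to MyDatabaseId for database-local object classes and to InvalidOid for node-wide ones (roles, databases, tablespaces, and so on), so DDL against node-wide objects is serialized across every database on the node. A minimal sketch of how a caller might take such a lock; the helper function and the chosen lock mode are assumptions, only the macro and IsNodeWideObjectClass() come from the patch:

#include "postgres.h"

#include "catalog/dependency.h" /* ObjectClass, OCLASS_DATABASE */
#include "miscadmin.h"          /* MyDatabaseId, referenced by the macro */
#include "storage/lock.h"       /* LOCKTAG, LockAcquire */
#include "storage/lockdefs.h"   /* AccessExclusiveLock */

#include "distributed/resource_lock.h"

/* hypothetical helper, not part of the patch */
static void
AcquireDDLSerializationLock(ObjectClass objectClass, Oid objectOid)
{
	LOCKTAG locktag;

	/*
	 * For a node-wide class such as OCLASS_DATABASE the macro stores
	 * InvalidOid in the database field, so every database on the node
	 * contends on the same tag; otherwise it stores MyDatabaseId and the
	 * lock stays local to the current database.
	 */
	SET_LOCKTAG_GLOBAL_DDL_SERIALIZATION(locktag, objectClass, objectOid);

	(void) LockAcquire(&locktag, AccessExclusiveLock, false, false);
}
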
+ * + *------------------------------------------------------------------------- + */ + +#ifndef SERIALIZE_DDLS_OVER_CATALOG_H +#define SERIALIZE_DDLS_OVER_CATALOG_H + +#include "postgres.h" + +#include "catalog/dependency.h" + +/* + * Note that those two lock types don't conflict with each other and are + * acquired for different purposes. The lock on the object class + * --SerializeDistributedDDLsOnObjectClass()-- is used to serialize DDLs + * that target the object class itself, e.g., when creating a new object + * of that class, and the latter one --SerializeDistributedDDLsOnObjectClassObject()-- + * is used to serialize DDLs that target a specific object of that class, + * e.g., when altering an object. + * + * In some cases, we may want to acquire both locks at the same time. For + * example, when renaming a database, we want to acquire both lock types + * because while the object class lock is used to ensure that another session + * doesn't create a new database with the same name, the object lock is used + * to ensure that another session doesn't alter the same database. + */ +extern void SerializeDistributedDDLsOnObjectClass(ObjectClass objectClass); +extern void SerializeDistributedDDLsOnObjectClassObject(ObjectClass objectClass, + char *qualifiedObjectName); + +#endif /* SERIALIZE_DDLS_OVER_CATALOG_H */ diff --git a/src/include/distributed/shard_cleaner.h b/src/include/distributed/shard_cleaner.h index e7d3dea1bf7..7609bd90024 100644 --- a/src/include/distributed/shard_cleaner.h +++ b/src/include/distributed/shard_cleaner.h @@ -41,7 +41,8 @@ typedef enum CleanupObject CLEANUP_OBJECT_SUBSCRIPTION = 2, CLEANUP_OBJECT_REPLICATION_SLOT = 3, CLEANUP_OBJECT_PUBLICATION = 4, - CLEANUP_OBJECT_USER = 5 + CLEANUP_OBJECT_USER = 5, + CLEANUP_OBJECT_DATABASE = 6 } CleanupObject; /* @@ -81,16 +82,16 @@ typedef enum CleanupPolicy extern OperationId RegisterOperationNeedingCleanup(void); /* - * InsertCleanupRecordInCurrentTransaction inserts a new pg_dist_cleanup entry + * InsertCleanupOnSuccessRecordInCurrentTransaction inserts a new pg_dist_cleanup entry * as part of the current transaction. * * This is primarily useful for deferred cleanup (CLEANUP_DEFERRED_ON_SUCCESS) - * scenarios, since the records would roll back in case of failure. + * scenarios, since the records would roll back in case of failure. And for the + * same reason, always sets the policy type to CLEANUP_DEFERRED_ON_SUCCESS. */ -extern void InsertCleanupRecordInCurrentTransaction(CleanupObject objectType, - char *objectName, - int nodeGroupId, - CleanupPolicy policy); +extern void InsertCleanupOnSuccessRecordInCurrentTransaction(CleanupObject objectType, + char *objectName, + int nodeGroupId); /* * InsertCleanupRecordInSeparateTransaction inserts a new pg_dist_cleanup entry @@ -99,10 +100,10 @@ extern void InsertCleanupRecordInCurrentTransaction(CleanupObject objectType, * This is used in scenarios where we need to cleanup resources on operation * completion (CLEANUP_ALWAYS) or on failure (CLEANUP_ON_FAILURE). 
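The new serialize_distributed_ddls.h above distinguishes a lock taken on an object class (serializing DDLs that target the class itself, such as creating a new object of that class) from a lock taken on a single object of that class (serializing DDLs that alter that object), and notes that a rename needs both. A minimal sketch of call sites that follow that description for database commands; the two wrapper functions are hypothetical, only SerializeDistributedDDLsOnObjectClass() and SerializeDistributedDDLsOnObjectClassObject() come from the patch:

#include "postgres.h"

#include "catalog/dependency.h" /* OCLASS_DATABASE */

#include "distributed/serialize_distributed_ddls.h"

/* hypothetical call site for propagating CREATE DATABASE */
static void
SerializeCreateDatabase(void)
{
	/* keep concurrent sessions from creating or dropping databases under us */
	SerializeDistributedDDLsOnObjectClass(OCLASS_DATABASE);
}

/* hypothetical call site for propagating ALTER DATABASE ... RENAME TO */
static void
SerializeAlterDatabaseRename(char *databaseName)
{
	/*
	 * Per the header comment: the class-level lock keeps another session
	 * from creating a database with the same name, while the object-level
	 * lock keeps another session from altering this same database.
	 */
	SerializeDistributedDDLsOnObjectClass(OCLASS_DATABASE);
	SerializeDistributedDDLsOnObjectClassObject(OCLASS_DATABASE, databaseName);
}
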
*/ -extern void InsertCleanupRecordInSubtransaction(CleanupObject objectType, - char *objectName, - int nodeGroupId, - CleanupPolicy policy); +extern void InsertCleanupRecordOutsideTransaction(CleanupObject objectType, + char *objectName, + int nodeGroupId, + CleanupPolicy policy); /* * FinalizeOperationNeedingCleanupOnSuccess is be called by an operation to signal diff --git a/src/include/distributed/shard_pruning.h b/src/include/distributed/shard_pruning.h index 04176314e81..1b1ffce8abc 100644 --- a/src/include/distributed/shard_pruning.h +++ b/src/include/distributed/shard_pruning.h @@ -11,9 +11,10 @@ #ifndef SHARD_PRUNING_H_ #define SHARD_PRUNING_H_ -#include "distributed/metadata_cache.h" #include "nodes/primnodes.h" +#include "distributed/metadata_cache.h" + #define INVALID_SHARD_INDEX -1 /* Function declarations for shard pruning */ diff --git a/src/include/distributed/shard_rebalancer.h b/src/include/distributed/shard_rebalancer.h index 38ce4f48562..79414eb3c88 100644 --- a/src/include/distributed/shard_rebalancer.h +++ b/src/include/distributed/shard_rebalancer.h @@ -17,7 +17,9 @@ #include "postgres.h" #include "fmgr.h" + #include "nodes/pg_list.h" + #include "distributed/coordinator_protocol.h" #include "distributed/worker_manager.h" @@ -189,7 +191,7 @@ typedef struct RebalancePlanFunctions extern char *VariablesToBePassedToNewConnections; extern int MaxRebalancerLoggedIgnoredMoves; extern int RebalancerByDiskSizeBaseCost; -extern bool RunningUnderIsolationTest; +extern bool RunningUnderCitusTestSuite; extern bool PropagateSessionSettingsForLoopbackConnection; extern int MaxBackgroundTaskExecutorsPerNode; diff --git a/src/include/distributed/shard_transfer.h b/src/include/distributed/shard_transfer.h index a6d024a2e97..c1621879bb8 100644 --- a/src/include/distributed/shard_transfer.h +++ b/src/include/distributed/shard_transfer.h @@ -9,9 +9,10 @@ #include "postgres.h" -#include "distributed/shard_rebalancer.h" #include "nodes/pg_list.h" +#include "distributed/shard_rebalancer.h" + extern Datum citus_move_shard_placement(PG_FUNCTION_ARGS); extern Datum citus_move_shard_placement_with_nodeid(PG_FUNCTION_ARGS); diff --git a/src/include/distributed/shardinterval_utils.h b/src/include/distributed/shardinterval_utils.h index 4cc99e6d53c..ed5600a11d8 100644 --- a/src/include/distributed/shardinterval_utils.h +++ b/src/include/distributed/shardinterval_utils.h @@ -12,10 +12,11 @@ #ifndef SHARDINTERVAL_UTILS_H_ #define SHARDINTERVAL_UTILS_H_ -#include "distributed/metadata_utility.h" -#include "distributed/metadata_cache.h" #include "nodes/primnodes.h" +#include "distributed/metadata_cache.h" +#include "distributed/metadata_utility.h" + #define INVALID_SHARD_INDEX -1 /* OperatorCacheEntry contains information for each element in OperatorCache */ diff --git a/src/include/distributed/transaction_management.h b/src/include/distributed/transaction_management.h index ca4e632a93a..ee3153d10b6 100644 --- a/src/include/distributed/transaction_management.h +++ b/src/include/distributed/transaction_management.h @@ -14,7 +14,6 @@ #include "lib/ilist.h" #include "lib/stringinfo.h" #include "nodes/pg_list.h" -#include "lib/stringinfo.h" #include "nodes/primnodes.h" #include "utils/hsearch.h" @@ -164,7 +163,7 @@ extern bool MaybeExecutingUDF(void); extern void TrackPropagatedObject(const ObjectAddress *objectAddress); extern void TrackPropagatedTableAndSequences(Oid relationId); extern void ResetPropagatedObjects(void); -extern bool HasAnyDependencyInPropagatedObjects(const ObjectAddress 
*objectAddress); +extern bool HasAnyObjectInPropagatedObjects(List *objectList); /* initialization function(s) */ extern void InitializeTransactionManagement(void); diff --git a/src/include/distributed/transaction_recovery.h b/src/include/distributed/transaction_recovery.h index 811dbb949f0..a4073875aa2 100644 --- a/src/include/distributed/transaction_recovery.h +++ b/src/include/distributed/transaction_recovery.h @@ -17,7 +17,8 @@ extern int Recover2PCInterval; /* Functions declarations for worker transactions */ -extern void LogTransactionRecord(int32 groupId, char *transactionName); +extern void LogTransactionRecord(int32 groupId, char *transactionName, + FullTransactionId outerXid); extern int RecoverTwoPhaseCommits(void); extern void DeleteWorkerTransactions(WorkerNode *workerNode); diff --git a/src/include/distributed/transmit.h b/src/include/distributed/transmit.h index b86fd91506b..9c2ab87ab65 100644 --- a/src/include/distributed/transmit.h +++ b/src/include/distributed/transmit.h @@ -21,7 +21,8 @@ /* Function declarations for transmitting files between two nodes */ extern void RedirectCopyDataToRegularFile(const char *filename); extern void SendRegularFile(const char *filename); -extern File FileOpenForTransmit(const char *filename, int fileFlags, int fileMode); +extern File FileOpenForTransmit(const char *filename, int fileFlags); +extern File FileOpenForTransmitPerm(const char *filename, int fileFlags, int fileMode); #endif /* TRANSMIT_H */ diff --git a/src/include/distributed/tuple_destination.h b/src/include/distributed/tuple_destination.h index 0480ffdc465..5b4f649835f 100644 --- a/src/include/distributed/tuple_destination.h +++ b/src/include/distributed/tuple_destination.h @@ -11,10 +11,11 @@ #define TUPLE_DESTINATION_H #include "access/tupdesc.h" -#include "distributed/multi_physical_planner.h" #include "tcop/dest.h" #include "utils/tuplestore.h" +#include "distributed/multi_physical_planner.h" + typedef struct TupleDestination TupleDestination; diff --git a/src/include/distributed/utils/citus_stat_tenants.h b/src/include/distributed/utils/citus_stat_tenants.h index 0a482b2417b..573502606eb 100644 --- a/src/include/distributed/utils/citus_stat_tenants.h +++ b/src/include/distributed/utils/citus_stat_tenants.h @@ -11,13 +11,14 @@ #ifndef CITUS_ATTRIBUTE_H #define CITUS_ATTRIBUTE_H -#include "distributed/hash_helpers.h" #include "executor/execdesc.h" #include "executor/executor.h" #include "storage/lwlock.h" #include "utils/datetime.h" #include "utils/hsearch.h" +#include "distributed/hash_helpers.h" + #define MAX_TENANT_ATTRIBUTE_LENGTH 100 /* diff --git a/src/include/distributed/utils/directory.h b/src/include/distributed/utils/directory.h index 7ed8a3f95a1..76b6cf1dfdc 100644 --- a/src/include/distributed/utils/directory.h +++ b/src/include/distributed/utils/directory.h @@ -12,6 +12,7 @@ #define CITUS_DIRECTORY_H #include "postgres.h" + #include "lib/stringinfo.h" diff --git a/src/include/distributed/utils/function.h b/src/include/distributed/utils/function.h index 91d4ab84b94..6f527218c27 100644 --- a/src/include/distributed/utils/function.h +++ b/src/include/distributed/utils/function.h @@ -12,6 +12,7 @@ #define CITUS_FUNCTION_H #include "postgres.h" + #include "fmgr.h" diff --git a/src/include/distributed/version_compat.h b/src/include/distributed/version_compat.h index b990b82ef39..f450dc1cee8 100644 --- a/src/include/distributed/version_compat.h +++ b/src/include/distributed/version_compat.h @@ -13,21 +13,21 @@ #include "postgres.h" -#include "access/sdir.h" #include 
"access/heapam.h" -#include "commands/explain.h" +#include "access/sdir.h" #include "catalog/namespace.h" -#include "distributed/citus_ruleutils.h" -#include "distributed/citus_safe_lib.h" +#include "commands/explain.h" #include "executor/tuptable.h" #include "nodes/parsenodes.h" -#include "parser/parse_func.h" #include "optimizer/optimizer.h" - +#include "parser/parse_func.h" #include "tcop/tcopprot.h" #include "pg_version_compat.h" +#include "distributed/citus_ruleutils.h" +#include "distributed/citus_safe_lib.h" + typedef struct { File fd; diff --git a/src/include/distributed/worker_manager.h b/src/include/distributed/worker_manager.h index 5ad7f496294..02a43fe0b99 100644 --- a/src/include/distributed/worker_manager.h +++ b/src/include/distributed/worker_manager.h @@ -16,9 +16,9 @@ #include "postgres.h" +#include "nodes/pg_list.h" #include "storage/lmgr.h" #include "storage/lockdefs.h" -#include "nodes/pg_list.h" /* Worker nodeName's, nodePort's, and nodeCluster's maximum length */ @@ -87,6 +87,7 @@ extern WorkerNode * FindNodeWithNodeId(int nodeId, bool missingOk); extern WorkerNode * ModifiableWorkerNode(const char *nodeName, int32 nodePort); extern List * ReadDistNode(bool includeNodesFromOtherClusters); extern void EnsureCoordinator(void); +extern void EnsurePropagationToCoordinator(void); extern void EnsureCoordinatorIsInMetadata(void); extern void InsertCoordinatorIfClusterEmpty(void); extern uint32 GroupForNode(char *nodeName, int32 nodePort); diff --git a/src/include/distributed/worker_protocol.h b/src/include/distributed/worker_protocol.h index 29d364247b0..21c0c44c87e 100644 --- a/src/include/distributed/worker_protocol.h +++ b/src/include/distributed/worker_protocol.h @@ -17,11 +17,13 @@ #include "postgres.h" #include "fmgr.h" -#include "distributed/shardinterval_utils.h" + #include "lib/stringinfo.h" #include "nodes/parsenodes.h" #include "storage/fd.h" #include "utils/array.h" + +#include "distributed/shardinterval_utils.h" #include "distributed/version_compat.h" diff --git a/src/include/distributed/worker_transaction.h b/src/include/distributed/worker_transaction.h index 631940edf1d..1b3809a0eb2 100644 --- a/src/include/distributed/worker_transaction.h +++ b/src/include/distributed/worker_transaction.h @@ -12,9 +12,10 @@ #ifndef WORKER_TRANSACTION_H #define WORKER_TRANSACTION_H +#include "storage/lockdefs.h" + #include "distributed/connection_management.h" #include "distributed/worker_manager.h" -#include "storage/lockdefs.h" /* @@ -29,11 +30,22 @@ typedef enum TargetWorkerSet */ NON_COORDINATOR_METADATA_NODES, + /* + * All the active primary nodes in the metadata which have metadata + * except the local node + */ + REMOTE_METADATA_NODES, + /* * All the active primary nodes in the metadata except the coordinator */ NON_COORDINATOR_NODES, + /* + * All the active primary nodes in the metadata except the local node + */ + REMOTE_NODES, + /* * All active primary nodes in the metadata */ @@ -56,6 +68,10 @@ extern void SendCommandToWorkersAsUser(TargetWorkerSet targetWorkerSet, const char *nodeUser, const char *command); extern void SendCommandToWorkerAsUser(const char *nodeName, int32 nodePort, const char *nodeUser, const char *command); +extern void SendCommandToRemoteMetadataNodesParams(const char *command, + const char *user, int parameterCount, + const Oid *parameterTypes, + const char *const *parameterValues); extern bool SendOptionalCommandListToWorkerOutsideTransaction(const char *nodeName, int32 nodePort, const char *nodeUser, @@ -74,6 +90,10 @@ extern bool 
SendOptionalMetadataCommandListToWorkerInCoordinatedTransaction(cons extern void SendCommandToWorkersWithMetadata(const char *command); extern void SendCommandToWorkersWithMetadataViaSuperUser(const char *command); extern void SendCommandListToWorkersWithMetadata(List *commands); +extern void SendCommandToRemoteNodesWithMetadata(const char *command); +extern void SendCommandToRemoteNodesWithMetadataViaSuperUser(const char *command); +extern void SendCommandListToRemoteNodesWithMetadata(List *commands); +extern void SendBareCommandListToRemoteMetadataNodes(List *commandList); extern void SendBareCommandListToMetadataWorkers(List *commandList); extern void EnsureNoModificationsHaveBeenDone(void); extern void SendCommandListToWorkerOutsideTransaction(const char *nodeName, diff --git a/src/include/pg_version_compat.h b/src/include/pg_version_compat.h index 1bdbae58016..665cd30c264 100644 --- a/src/include/pg_version_compat.h +++ b/src/include/pg_version_compat.h @@ -11,7 +11,7 @@ #ifndef PG_VERSION_COMPAT_H #define PG_VERSION_COMPAT_H -#include "distributed/pg_version_constants.h" +#include "pg_version_constants.h" #if PG_VERSION_NUM >= PG_VERSION_16 @@ -48,14 +48,21 @@ get_guc_variables_compat(int *gucCount) #define pgstat_fetch_stat_local_beentry(a) pgstat_get_local_beentry_by_index(a) +#define have_createdb_privilege() have_createdb_privilege() + #else +#include "miscadmin.h" + +#include "catalog/pg_authid.h" #include "catalog/pg_class_d.h" +#include "catalog/pg_database_d.h" #include "catalog/pg_namespace.h" #include "catalog/pg_proc_d.h" #include "storage/relfilenode.h" #include "utils/guc.h" #include "utils/guc_tables.h" +#include "utils/syscache.h" #define pg_clean_ascii_compat(a, b) pg_clean_ascii(a) @@ -105,6 +112,11 @@ object_ownercheck(Oid classid, Oid objectid, Oid roleid) return pg_proc_ownercheck(objectid, roleid); } + case DatabaseRelationId: + { + return pg_database_ownercheck(objectid, roleid); + } + default: { ereport(ERROR, @@ -140,6 +152,28 @@ object_aclcheck(Oid classid, Oid objectid, Oid roleid, AclMode mode) } +static inline bool +have_createdb_privilege(void) +{ + bool result = false; + HeapTuple utup; + + /* Superusers can always do everything */ + if (superuser()) + { + return true; + } + + utup = SearchSysCache1(AUTHOID, ObjectIdGetDatum(GetUserId())); + if (HeapTupleIsValid(utup)) + { + result = ((Form_pg_authid) GETSTRUCT(utup))->rolcreatedb; + ReleaseSysCache(utup); + } + return result; +} + + typedef bool TU_UpdateIndexes; /* diff --git a/src/include/distributed/pg_version_constants.h b/src/include/pg_version_constants.h similarity index 100% rename from src/include/distributed/pg_version_constants.h rename to src/include/pg_version_constants.h diff --git a/src/test/cdc/t/016_cdc_wal2json.pl b/src/test/cdc/t/016_cdc_wal2json.pl index 10475ba859d..ab384df6433 100644 --- a/src/test/cdc/t/016_cdc_wal2json.pl +++ b/src/test/cdc/t/016_cdc_wal2json.pl @@ -9,13 +9,6 @@ use threads; -my $pg_major_version = int($ENV{'pg_major_version'}); -print("working with PG major version : $pg_major_version\n"); -if ($pg_major_version >= 16) { - plan skip_all => 'wal2json is not available for PG16 yet'; - exit 0; -} - # Initialize co-ordinator node my $select_stmt = qq(SELECT * FROM data_100008 ORDER BY id;); my $result = 0; diff --git a/src/test/regress/Pipfile b/src/test/regress/Pipfile index 5bce63004cf..a863d795ec3 100644 --- a/src/test/regress/Pipfile +++ b/src/test/regress/Pipfile @@ -7,7 +7,7 @@ verify_ssl = true mitmproxy = {editable = true, ref = "main", git = 
"https://github.com/citusdata/mitmproxy.git"} construct = "==2.9.45" docopt = "==0.6.2" -cryptography = ">=39.0.1" +cryptography = ">=41.0.4" pytest = "*" psycopg = "*" filelock = "*" @@ -16,6 +16,7 @@ pytest-timeout = "*" pytest-xdist = "*" pytest-repeat = "*" pyyaml = "*" +werkzeug = "==2.3.7" [dev-packages] black = "*" diff --git a/src/test/regress/Pipfile.lock b/src/test/regress/Pipfile.lock index e6717e5fc0e..c0f8734a02e 100644 --- a/src/test/regress/Pipfile.lock +++ b/src/test/regress/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "9568b1f3e4d4fd408e5e263f6346b0a4f479ac88e02f64bb79a9d482096e6a03" + "sha256": "bf20354a2d9c93d46041ac4c6fa427588ebfe29343ea0b02138b9079f2d82f18" }, "pipfile-spec": 6, "requires": { @@ -21,6 +21,7 @@ "sha256:4ef1ab46b484e3c706329cedeff284a5d40824200638503f5768edb6de7d58e9", "sha256:ffc141aa908e6f175673e7b1b3b7af4fdb0ecb738fc5c8b88f69f055c2415214" ], + "markers": "python_version >= '3.6'", "version": "==3.4.1" }, "blinker": { @@ -118,85 +119,76 @@ }, "certifi": { "hashes": [ - "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082", - "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9" + "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f", + "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1" ], - "version": "==2023.7.22" + "markers": "python_version >= '3.6'", + "version": "==2024.2.2" }, "cffi": { "hashes": [ - "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5", - "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef", - "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104", - "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426", - "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405", - "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375", - "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a", - "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e", - "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc", - "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf", - "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185", - "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497", - "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3", - "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35", - "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c", - "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83", - "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21", - "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca", - "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984", - "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac", - "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd", - "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee", - "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a", - "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2", - "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192", - "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7", - "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585", - 
"sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f", - "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e", - "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27", - "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b", - "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e", - "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e", - "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d", - "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c", - "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415", - "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82", - "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02", - "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314", - "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325", - "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c", - "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3", - "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914", - "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045", - "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d", - "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9", - "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5", - "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2", - "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c", - "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3", - "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2", - "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8", - "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d", - "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d", - "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9", - "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162", - "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76", - "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4", - "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e", - "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9", - "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6", - "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b", - "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01", - "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0" - ], - "version": "==1.15.1" + "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc", + "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a", + "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417", + "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab", + "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520", + "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36", + "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743", + "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8", + 
"sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed", + "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684", + "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56", + "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324", + "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d", + "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235", + "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e", + "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088", + "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000", + "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7", + "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e", + "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673", + "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c", + "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe", + "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2", + "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098", + "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8", + "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a", + "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0", + "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b", + "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896", + "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e", + "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9", + "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2", + "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b", + "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6", + "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404", + "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f", + "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0", + "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4", + "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc", + "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936", + "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba", + "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872", + "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb", + "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614", + "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1", + "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d", + "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969", + "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b", + "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4", + "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627", + "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956", + "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357" + ], + "markers": "platform_python_implementation != 'PyPy'", + "version": "==1.16.0" }, "click": { "hashes": [ 
"sha256:6a7a62563bbfabfda3a38f3023a1db4a35978c0abd76f6c9605ecd6554d6d9b1", "sha256:8458d7b1287c5fb128c90e23381cf99dcde74beaf6c7ff6384ce84d6fe090adb" ], + "markers": "python_version >= '3.6'", "version": "==8.0.4" }, "construct": { @@ -208,32 +200,42 @@ }, "cryptography": { "hashes": [ - "sha256:0d09fb5356f975974dbcb595ad2d178305e5050656affb7890a1583f5e02a306", - "sha256:23c2d778cf829f7d0ae180600b17e9fceea3c2ef8b31a99e3c694cbbf3a24b84", - "sha256:3fb248989b6363906827284cd20cca63bb1a757e0a2864d4c1682a985e3dca47", - "sha256:41d7aa7cdfded09b3d73a47f429c298e80796c8e825ddfadc84c8a7f12df212d", - "sha256:42cb413e01a5d36da9929baa9d70ca90d90b969269e5a12d39c1e0d475010116", - "sha256:4c2f0d35703d61002a2bbdcf15548ebb701cfdd83cdc12471d2bae80878a4207", - "sha256:4fd871184321100fb400d759ad0cddddf284c4b696568204d281c902fc7b0d81", - "sha256:5259cb659aa43005eb55a0e4ff2c825ca111a0da1814202c64d28a985d33b087", - "sha256:57a51b89f954f216a81c9d057bf1a24e2f36e764a1ca9a501a6964eb4a6800dd", - "sha256:652627a055cb52a84f8c448185922241dd5217443ca194d5739b44612c5e6507", - "sha256:67e120e9a577c64fe1f611e53b30b3e69744e5910ff3b6e97e935aeb96005858", - "sha256:6af1c6387c531cd364b72c28daa29232162010d952ceb7e5ca8e2827526aceae", - "sha256:6d192741113ef5e30d89dcb5b956ef4e1578f304708701b8b73d38e3e1461f34", - "sha256:7efe8041897fe7a50863e51b77789b657a133c75c3b094e51b5e4b5cec7bf906", - "sha256:84537453d57f55a50a5b6835622ee405816999a7113267739a1b4581f83535bd", - "sha256:8f09daa483aedea50d249ef98ed500569841d6498aa9c9f4b0531b9964658922", - "sha256:95dd7f261bb76948b52a5330ba5202b91a26fbac13ad0e9fc8a3ac04752058c7", - "sha256:a74fbcdb2a0d46fe00504f571a2a540532f4c188e6ccf26f1f178480117b33c4", - "sha256:a983e441a00a9d57a4d7c91b3116a37ae602907a7618b882c8013b5762e80574", - "sha256:ab8de0d091acbf778f74286f4989cf3d1528336af1b59f3e5d2ebca8b5fe49e1", - "sha256:aeb57c421b34af8f9fe830e1955bf493a86a7996cc1338fe41b30047d16e962c", - "sha256:ce785cf81a7bdade534297ef9e490ddff800d956625020ab2ec2780a556c313e", - "sha256:d0d651aa754ef58d75cec6edfbd21259d93810b73f6ec246436a21b7841908de" + "sha256:04859aa7f12c2b5f7e22d25198ddd537391f1695df7057c8700f71f26f47a129", + "sha256:069d2ce9be5526a44093a0991c450fe9906cdf069e0e7cd67d9dee49a62b9ebe", + "sha256:0d3ec384058b642f7fb7e7bff9664030011ed1af8f852540c76a1317a9dd0d20", + "sha256:0fab2a5c479b360e5e0ea9f654bcebb535e3aa1e493a715b13244f4e07ea8eec", + "sha256:0fea01527d4fb22ffe38cd98951c9044400f6eff4788cf52ae116e27d30a1ba3", + "sha256:1b797099d221df7cce5ff2a1d272761d1554ddf9a987d3e11f6459b38cd300fd", + "sha256:1e935c2900fb53d31f491c0de04f41110351377be19d83d908c1fd502ae8daa5", + "sha256:20100c22b298c9eaebe4f0b9032ea97186ac2555f426c3e70670f2517989543b", + "sha256:20180da1b508f4aefc101cebc14c57043a02b355d1a652b6e8e537967f1e1b46", + "sha256:25b09b73db78facdfd7dd0fa77a3f19e94896197c86e9f6dc16bce7b37a96504", + "sha256:2619487f37da18d6826e27854a7f9d4d013c51eafb066c80d09c63cf24505306", + "sha256:2eb6368d5327d6455f20327fb6159b97538820355ec00f8cc9464d617caecead", + "sha256:35772a6cffd1f59b85cb670f12faba05513446f80352fe811689b4e439b5d89e", + "sha256:39d5c93e95bcbc4c06313fc6a500cee414ee39b616b55320c1904760ad686938", + "sha256:3d96ea47ce6d0055d5b97e761d37b4e84195485cb5a38401be341fabf23bc32a", + "sha256:4dcab7c25e48fc09a73c3e463d09ac902a932a0f8d0c568238b3696d06bf377b", + "sha256:5fbf0f3f0fac7c089308bd771d2c6c7b7d53ae909dce1db52d8e921f6c19bb3a", + "sha256:6c25e1e9c2ce682d01fc5e2dde6598f7313027343bd14f4049b82ad0402e52cd", + "sha256:762f3771ae40e111d78d77cbe9c1035e886ac04a234d3ee0856bf4ecb3749d54", + 
"sha256:90147dad8c22d64b2ff7331f8d4cddfdc3ee93e4879796f837bdbb2a0b141e0c", + "sha256:935cca25d35dda9e7bd46a24831dfd255307c55a07ff38fd1a92119cffc34857", + "sha256:93fbee08c48e63d5d1b39ab56fd3fdd02e6c2431c3da0f4edaf54954744c718f", + "sha256:9541c69c62d7446539f2c1c06d7046aef822940d248fa4b8962ff0302862cc1f", + "sha256:c23f03cfd7d9826cdcbad7850de67e18b4654179e01fe9bc623d37c2638eb4ef", + "sha256:c3d1f5a1d403a8e640fa0887e9f7087331abb3f33b0f2207d2cc7f213e4a864c", + "sha256:d1998e545081da0ab276bcb4b33cce85f775adb86a516e8f55b3dac87f469548", + "sha256:d5cf11bc7f0b71fb71af26af396c83dfd3f6eed56d4b6ef95d57867bf1e4ba65", + "sha256:db0480ffbfb1193ac4e1e88239f31314fe4c6cdcf9c0b8712b55414afbf80db4", + "sha256:de4ae486041878dc46e571a4c70ba337ed5233a1344c14a0790c4c4be4bbb8b4", + "sha256:de5086cd475d67113ccb6f9fae6d8fe3ac54a4f9238fd08bfdb07b03d791ff0a", + "sha256:df34312149b495d9d03492ce97471234fd9037aa5ba217c2a6ea890e9166f151", + "sha256:ead69ba488f806fe1b1b4050febafdbf206b81fa476126f3e16110c818bac396" ], "index": "pypi", - "version": "==41.0.3" + "markers": "python_version >= '3.7'", + "version": "==42.0.3" }, "docopt": { "hashes": [ @@ -244,32 +246,35 @@ }, "exceptiongroup": { "hashes": [ - "sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9", - "sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3" + "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14", + "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68" ], "markers": "python_version < '3.11'", - "version": "==1.1.3" + "version": "==1.2.0" }, "execnet": { "hashes": [ "sha256:88256416ae766bc9e8895c76a87928c0012183da3cc4fc18016e6f050e025f41", "sha256:cc59bc4423742fd71ad227122eb0dd44db51efb3dc4095b45ac9a08c770096af" ], + "markers": "python_version >= '3.7'", "version": "==2.0.2" }, "filelock": { "hashes": [ - "sha256:002740518d8aa59a26b0c76e10fb8c6e15eae825d34b6fdf670333fd7b938d81", - "sha256:cbb791cdea2a72f23da6ac5b5269ab0a0d161e9ef0100e653b69049a7706d1ec" + "sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e", + "sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c" ], "index": "pypi", - "version": "==3.12.2" + "markers": "python_version >= '3.8'", + "version": "==3.13.1" }, "flask": { "hashes": [ "sha256:59da8a3170004800a2837844bfa84d49b022550616070f7cb1a659682b2e7c9f", "sha256:e1120c228ca2f553b470df4a5fa927ab66258467526069981b3eb0a91902687d" ], + "markers": "python_version >= '3.6'", "version": "==2.0.3" }, "h11": { @@ -277,6 +282,7 @@ "sha256:36a3cb8c0a032f56e2da7084577878a035d3b61d104230d4bd49c0c6b555a9c6", "sha256:47222cb6067e4a307d535814917cd98fd0a57b6788ce715755fa2b6c28b56042" ], + "markers": "python_version >= '3.6'", "version": "==0.12.0" }, "h2": { @@ -284,6 +290,7 @@ "sha256:03a46bcf682256c95b5fd9e9a99c1323584c3eec6440d379b9903d709476bc6d", "sha256:a83aca08fbe7aacb79fec788c9c0bac936343560ed9ec18b82a13a12c28d2abb" ], + "markers": "python_full_version >= '3.6.1'", "version": "==4.1.0" }, "hpack": { @@ -291,6 +298,7 @@ "sha256:84a076fad3dc9a9f8063ccb8041ef100867b1878b25ef0ee63847a5d53818a6c", "sha256:fc41de0c63e687ebffde81187a948221294896f6bdc0ae2312708df339430095" ], + "markers": "python_full_version >= '3.6.1'", "version": "==4.0.0" }, "hyperframe": { @@ -298,6 +306,7 @@ "sha256:0ec6bafd80d8ad2195c4f03aacba3a8265e57bc4cff261e802bf39970ed02a15", "sha256:ae510046231dc8e9ecb1a6586f63d2347bf4c8905914aa84ba585ae85f28a914" ], + "markers": "python_full_version >= '3.6.1'", "version": "==6.0.1" }, "iniconfig": { @@ -305,6 +314,7 @@ 
"sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374" ], + "markers": "python_version >= '3.7'", "version": "==2.0.0" }, "itsdangerous": { @@ -312,14 +322,16 @@ "sha256:2c2349112351b88699d8d4b6b075022c0808887cb7ad10069318a8b0bc88db44", "sha256:5dbbc68b317e5e42f327f9021763545dc3fc3bfe22e6deb96aaf1fc38874156a" ], + "markers": "python_version >= '3.7'", "version": "==2.1.2" }, "jinja2": { "hashes": [ - "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852", - "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61" + "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa", + "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90" ], - "version": "==3.1.2" + "markers": "python_version >= '3.7'", + "version": "==3.1.3" }, "kaitaistruct": { "hashes": [ @@ -339,138 +351,145 @@ }, "markupsafe": { "hashes": [ - "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e", - "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e", - "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431", - "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686", - "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559", - "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc", - "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c", - "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0", - "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4", - "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9", - "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575", - "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba", - "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d", - "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3", - "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00", - "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155", - "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac", - "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52", - "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f", - "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8", - "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b", - "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24", - "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea", - "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198", - "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0", - "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee", - "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be", - "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2", - "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707", - "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6", - "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58", - "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779", - "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636", - 
"sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c", - "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad", - "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee", - "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc", - "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2", - "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48", - "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7", - "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e", - "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b", - "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa", - "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5", - "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e", - "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb", - "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9", - "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57", - "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc", - "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2" - ], - "version": "==2.1.3" + "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf", + "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff", + "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f", + "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3", + "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532", + "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f", + "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617", + "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df", + "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4", + "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906", + "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f", + "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4", + "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8", + "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371", + "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2", + "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465", + "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52", + "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6", + "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169", + "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad", + "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2", + "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0", + "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029", + "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f", + "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a", + "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced", + "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5", + "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c", + 
"sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf", + "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9", + "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb", + "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad", + "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3", + "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1", + "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46", + "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc", + "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a", + "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee", + "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900", + "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5", + "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea", + "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f", + "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5", + "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e", + "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a", + "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f", + "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50", + "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a", + "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b", + "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4", + "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff", + "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2", + "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46", + "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b", + "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf", + "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5", + "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5", + "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab", + "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd", + "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68" + ], + "markers": "python_version >= '3.7'", + "version": "==2.1.5" }, "mitmproxy": { "editable": true, "git": "https://github.com/citusdata/mitmproxy.git", + "markers": "python_version >= '3.9'", "ref": "2fd18ef051b987925a36337ab1d61aa674353b44" }, "msgpack": { "hashes": [ - "sha256:06f5174b5f8ed0ed919da0e62cbd4ffde676a374aba4020034da05fab67b9164", - "sha256:0c05a4a96585525916b109bb85f8cb6511db1c6f5b9d9cbcbc940dc6b4be944b", - "sha256:137850656634abddfb88236008339fdaba3178f4751b28f270d2ebe77a563b6c", - "sha256:17358523b85973e5f242ad74aa4712b7ee560715562554aa2134d96e7aa4cbbf", - "sha256:18334484eafc2b1aa47a6d42427da7fa8f2ab3d60b674120bce7a895a0a85bdd", - "sha256:1835c84d65f46900920b3708f5ba829fb19b1096c1800ad60bae8418652a951d", - "sha256:1967f6129fc50a43bfe0951c35acbb729be89a55d849fab7686004da85103f1c", - "sha256:1ab2f3331cb1b54165976a9d976cb251a83183631c88076613c6c780f0d6e45a", - "sha256:1c0f7c47f0087ffda62961d425e4407961a7ffd2aa004c81b9c07d9269512f6e", - "sha256:20a97bf595a232c3ee6d57ddaadd5453d174a52594bf9c21d10407e2a2d9b3bd", - 
"sha256:20c784e66b613c7f16f632e7b5e8a1651aa5702463d61394671ba07b2fc9e025", - "sha256:266fa4202c0eb94d26822d9bfd7af25d1e2c088927fe8de9033d929dd5ba24c5", - "sha256:28592e20bbb1620848256ebc105fc420436af59515793ed27d5c77a217477705", - "sha256:288e32b47e67f7b171f86b030e527e302c91bd3f40fd9033483f2cacc37f327a", - "sha256:3055b0455e45810820db1f29d900bf39466df96ddca11dfa6d074fa47054376d", - "sha256:332360ff25469c346a1c5e47cbe2a725517919892eda5cfaffe6046656f0b7bb", - "sha256:362d9655cd369b08fda06b6657a303eb7172d5279997abe094512e919cf74b11", - "sha256:366c9a7b9057e1547f4ad51d8facad8b406bab69c7d72c0eb6f529cf76d4b85f", - "sha256:36961b0568c36027c76e2ae3ca1132e35123dcec0706c4b7992683cc26c1320c", - "sha256:379026812e49258016dd84ad79ac8446922234d498058ae1d415f04b522d5b2d", - "sha256:382b2c77589331f2cb80b67cc058c00f225e19827dbc818d700f61513ab47bea", - "sha256:476a8fe8fae289fdf273d6d2a6cb6e35b5a58541693e8f9f019bfe990a51e4ba", - "sha256:48296af57cdb1d885843afd73c4656be5c76c0c6328db3440c9601a98f303d87", - "sha256:4867aa2df9e2a5fa5f76d7d5565d25ec76e84c106b55509e78c1ede0f152659a", - "sha256:4c075728a1095efd0634a7dccb06204919a2f67d1893b6aa8e00497258bf926c", - "sha256:4f837b93669ce4336e24d08286c38761132bc7ab29782727f8557e1eb21b2080", - "sha256:4f8d8b3bf1ff2672567d6b5c725a1b347fe838b912772aa8ae2bf70338d5a198", - "sha256:525228efd79bb831cf6830a732e2e80bc1b05436b086d4264814b4b2955b2fa9", - "sha256:5494ea30d517a3576749cad32fa27f7585c65f5f38309c88c6d137877fa28a5a", - "sha256:55b56a24893105dc52c1253649b60f475f36b3aa0fc66115bffafb624d7cb30b", - "sha256:56a62ec00b636583e5cb6ad313bbed36bb7ead5fa3a3e38938503142c72cba4f", - "sha256:57e1f3528bd95cc44684beda696f74d3aaa8a5e58c816214b9046512240ef437", - "sha256:586d0d636f9a628ddc6a17bfd45aa5b5efaf1606d2b60fa5d87b8986326e933f", - "sha256:5cb47c21a8a65b165ce29f2bec852790cbc04936f502966768e4aae9fa763cb7", - "sha256:6c4c68d87497f66f96d50142a2b73b97972130d93677ce930718f68828b382e2", - "sha256:821c7e677cc6acf0fd3f7ac664c98803827ae6de594a9f99563e48c5a2f27eb0", - "sha256:916723458c25dfb77ff07f4c66aed34e47503b2eb3188b3adbec8d8aa6e00f48", - "sha256:9e6ca5d5699bcd89ae605c150aee83b5321f2115695e741b99618f4856c50898", - "sha256:9f5ae84c5c8a857ec44dc180a8b0cc08238e021f57abdf51a8182e915e6299f0", - "sha256:a2b031c2e9b9af485d5e3c4520f4220d74f4d222a5b8dc8c1a3ab9448ca79c57", - "sha256:a61215eac016f391129a013c9e46f3ab308db5f5ec9f25811e811f96962599a8", - "sha256:a740fa0e4087a734455f0fc3abf5e746004c9da72fbd541e9b113013c8dc3282", - "sha256:a9985b214f33311df47e274eb788a5893a761d025e2b92c723ba4c63936b69b1", - "sha256:ab31e908d8424d55601ad7075e471b7d0140d4d3dd3272daf39c5c19d936bd82", - "sha256:ac9dd47af78cae935901a9a500104e2dea2e253207c924cc95de149606dc43cc", - "sha256:addab7e2e1fcc04bd08e4eb631c2a90960c340e40dfc4a5e24d2ff0d5a3b3edb", - "sha256:b1d46dfe3832660f53b13b925d4e0fa1432b00f5f7210eb3ad3bb9a13c6204a6", - "sha256:b2de4c1c0538dcb7010902a2b97f4e00fc4ddf2c8cda9749af0e594d3b7fa3d7", - "sha256:b5ef2f015b95f912c2fcab19c36814963b5463f1fb9049846994b007962743e9", - "sha256:b72d0698f86e8d9ddf9442bdedec15b71df3598199ba33322d9711a19f08145c", - "sha256:bae7de2026cbfe3782c8b78b0db9cbfc5455e079f1937cb0ab8d133496ac55e1", - "sha256:bf22a83f973b50f9d38e55c6aade04c41ddda19b00c4ebc558930d78eecc64ed", - "sha256:c075544284eadc5cddc70f4757331d99dcbc16b2bbd4849d15f8aae4cf36d31c", - "sha256:c396e2cc213d12ce017b686e0f53497f94f8ba2b24799c25d913d46c08ec422c", - "sha256:cb5aaa8c17760909ec6cb15e744c3ebc2ca8918e727216e79607b7bbce9c8f77", - "sha256:cdc793c50be3f01106245a61b739328f7dccc2c648b501e237f0699fe1395b81", - 
"sha256:d25dd59bbbbb996eacf7be6b4ad082ed7eacc4e8f3d2df1ba43822da9bfa122a", - "sha256:e42b9594cc3bf4d838d67d6ed62b9e59e201862a25e9a157019e171fbe672dd3", - "sha256:e57916ef1bd0fee4f21c4600e9d1da352d8816b52a599c46460e93a6e9f17086", - "sha256:ed40e926fa2f297e8a653c954b732f125ef97bdd4c889f243182299de27e2aa9", - "sha256:ef8108f8dedf204bb7b42994abf93882da1159728a2d4c5e82012edd92c9da9f", - "sha256:f933bbda5a3ee63b8834179096923b094b76f0c7a73c1cfe8f07ad608c58844b", - "sha256:fe5c63197c55bce6385d9aee16c4d0641684628f63ace85f73571e65ad1c1e8d" - ], - "version": "==1.0.5" + "sha256:04ad6069c86e531682f9e1e71b71c1c3937d6014a7c3e9edd2aa81ad58842862", + "sha256:0bfdd914e55e0d2c9e1526de210f6fe8ffe9705f2b1dfcc4aecc92a4cb4b533d", + "sha256:1dc93e8e4653bdb5910aed79f11e165c85732067614f180f70534f056da97db3", + "sha256:1e2d69948e4132813b8d1131f29f9101bc2c915f26089a6d632001a5c1349672", + "sha256:235a31ec7db685f5c82233bddf9858748b89b8119bf4538d514536c485c15fe0", + "sha256:27dcd6f46a21c18fa5e5deed92a43d4554e3df8d8ca5a47bf0615d6a5f39dbc9", + "sha256:28efb066cde83c479dfe5a48141a53bc7e5f13f785b92ddde336c716663039ee", + "sha256:3476fae43db72bd11f29a5147ae2f3cb22e2f1a91d575ef130d2bf49afd21c46", + "sha256:36e17c4592231a7dbd2ed09027823ab295d2791b3b1efb2aee874b10548b7524", + "sha256:384d779f0d6f1b110eae74cb0659d9aa6ff35aaf547b3955abf2ab4c901c4819", + "sha256:38949d30b11ae5f95c3c91917ee7a6b239f5ec276f271f28638dec9156f82cfc", + "sha256:3967e4ad1aa9da62fd53e346ed17d7b2e922cba5ab93bdd46febcac39be636fc", + "sha256:3e7bf4442b310ff154b7bb9d81eb2c016b7d597e364f97d72b1acc3817a0fdc1", + "sha256:3f0c8c6dfa6605ab8ff0611995ee30d4f9fcff89966cf562733b4008a3d60d82", + "sha256:484ae3240666ad34cfa31eea7b8c6cd2f1fdaae21d73ce2974211df099a95d81", + "sha256:4a7b4f35de6a304b5533c238bee86b670b75b03d31b7797929caa7a624b5dda6", + "sha256:4cb14ce54d9b857be9591ac364cb08dc2d6a5c4318c1182cb1d02274029d590d", + "sha256:4e71bc4416de195d6e9b4ee93ad3f2f6b2ce11d042b4d7a7ee00bbe0358bd0c2", + "sha256:52700dc63a4676669b341ba33520f4d6e43d3ca58d422e22ba66d1736b0a6e4c", + "sha256:572efc93db7a4d27e404501975ca6d2d9775705c2d922390d878fcf768d92c87", + "sha256:576eb384292b139821c41995523654ad82d1916da6a60cff129c715a6223ea84", + "sha256:5b0bf0effb196ed76b7ad883848143427a73c355ae8e569fa538365064188b8e", + "sha256:5b6ccc0c85916998d788b295765ea0e9cb9aac7e4a8ed71d12e7d8ac31c23c95", + "sha256:5ed82f5a7af3697b1c4786053736f24a0efd0a1b8a130d4c7bfee4b9ded0f08f", + "sha256:6d4c80667de2e36970ebf74f42d1088cc9ee7ef5f4e8c35eee1b40eafd33ca5b", + "sha256:730076207cb816138cf1af7f7237b208340a2c5e749707457d70705715c93b93", + "sha256:7687e22a31e976a0e7fc99c2f4d11ca45eff652a81eb8c8085e9609298916dcf", + "sha256:822ea70dc4018c7e6223f13affd1c5c30c0f5c12ac1f96cd8e9949acddb48a61", + "sha256:84b0daf226913133f899ea9b30618722d45feffa67e4fe867b0b5ae83a34060c", + "sha256:85765fdf4b27eb5086f05ac0491090fc76f4f2b28e09d9350c31aac25a5aaff8", + "sha256:8dd178c4c80706546702c59529ffc005681bd6dc2ea234c450661b205445a34d", + "sha256:8f5b234f567cf76ee489502ceb7165c2a5cecec081db2b37e35332b537f8157c", + "sha256:98bbd754a422a0b123c66a4c341de0474cad4a5c10c164ceed6ea090f3563db4", + "sha256:993584fc821c58d5993521bfdcd31a4adf025c7d745bbd4d12ccfecf695af5ba", + "sha256:a40821a89dc373d6427e2b44b572efc36a2778d3f543299e2f24eb1a5de65415", + "sha256:b291f0ee7961a597cbbcc77709374087fa2a9afe7bdb6a40dbbd9b127e79afee", + "sha256:b573a43ef7c368ba4ea06050a957c2a7550f729c31f11dd616d2ac4aba99888d", + "sha256:b610ff0f24e9f11c9ae653c67ff8cc03c075131401b3e5ef4b82570d1728f8a9", + 
"sha256:bdf38ba2d393c7911ae989c3bbba510ebbcdf4ecbdbfec36272abe350c454075", + "sha256:bfef2bb6ef068827bbd021017a107194956918ab43ce4d6dc945ffa13efbc25f", + "sha256:cab3db8bab4b7e635c1c97270d7a4b2a90c070b33cbc00c99ef3f9be03d3e1f7", + "sha256:cb70766519500281815dfd7a87d3a178acf7ce95390544b8c90587d76b227681", + "sha256:cca1b62fe70d761a282496b96a5e51c44c213e410a964bdffe0928e611368329", + "sha256:ccf9a39706b604d884d2cb1e27fe973bc55f2890c52f38df742bc1d79ab9f5e1", + "sha256:dc43f1ec66eb8440567186ae2f8c447d91e0372d793dfe8c222aec857b81a8cf", + "sha256:dd632777ff3beaaf629f1ab4396caf7ba0bdd075d948a69460d13d44357aca4c", + "sha256:e45ae4927759289c30ccba8d9fdce62bb414977ba158286b5ddaf8df2cddb5c5", + "sha256:e50ebce52f41370707f1e21a59514e3375e3edd6e1832f5e5235237db933c98b", + "sha256:ebbbba226f0a108a7366bf4b59bf0f30a12fd5e75100c630267d94d7f0ad20e5", + "sha256:ec79ff6159dffcc30853b2ad612ed572af86c92b5168aa3fc01a67b0fa40665e", + "sha256:f0936e08e0003f66bfd97e74ee530427707297b0d0361247e9b4f59ab78ddc8b", + "sha256:f26a07a6e877c76a88e3cecac8531908d980d3d5067ff69213653649ec0f60ad", + "sha256:f64e376cd20d3f030190e8c32e1c64582eba56ac6dc7d5b0b49a9d44021b52fd", + "sha256:f6ffbc252eb0d229aeb2f9ad051200668fc3a9aaa8994e49f0cb2ffe2b7867e7", + "sha256:f9a7c509542db4eceed3dcf21ee5267ab565a83555c9b88a8109dcecc4709002", + "sha256:ff1d0899f104f3921d94579a5638847f783c9b04f2d5f229392ca77fba5b82fc" + ], + "markers": "python_version >= '3.8'", + "version": "==1.0.7" }, "packaging": { "hashes": [ - "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61", - "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f" + "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5", + "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7" ], - "version": "==23.1" + "markers": "python_version >= '3.7'", + "version": "==23.2" }, "passlib": { "hashes": [ @@ -481,10 +500,11 @@ }, "pluggy": { "hashes": [ - "sha256:c2fd55a7d7a3863cba1a013e4e2414658b1d07b6bc57b3919e0c63c9abb99849", - "sha256:d12f0c4b579b15f5e054301bb226ee85eeeba08ffec228092f8defbaa3a4c4b3" + "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981", + "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be" ], - "version": "==1.2.0" + "markers": "python_version >= '3.8'", + "version": "==1.4.0" }, "protobuf": { "hashes": [ @@ -510,15 +530,17 @@ "sha256:e68ad00695547d9397dd14abd3efba23cb31cef67228f4512d41396971889812", "sha256:e9bffd52d6ee039a1cafb72475b2900c6fd0f0dca667fb7a09af0a3e119e78cb" ], + "markers": "python_version >= '3.5'", "version": "==3.18.3" }, "psycopg": { "hashes": [ - "sha256:15b25741494344c24066dc2479b0f383dd1b82fa5e75612fa4fa5bb30726e9b6", - "sha256:8bbeddae5075c7890b2fa3e3553440376d3c5e28418335dee3c3656b06fa2b52" + "sha256:31144d3fb4c17d78094d9e579826f047d4af1da6a10427d91dfcfb6ecdf6f12b", + "sha256:4d5a0a5a8590906daa58ebd5f3cfc34091377354a1acced269dd10faf55da60e" ], "index": "pypi", - "version": "==3.1.10" + "markers": "python_version >= '3.7'", + "version": "==3.1.18" }, "publicsuffix2": { "hashes": [ @@ -529,10 +551,11 @@ }, "pyasn1": { "hashes": [ - "sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57", - "sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde" + "sha256:4439847c58d40b1d0a573d07e3856e95333f1976294494c325775aeca506eb58", + "sha256:6d391a96e59b23130a5cfa74d6fd7f388dbbe26cc8f1edf39fdddf08d9d6676c" ], - "version": "==0.5.0" + "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 
3.4, 3.5'", + "version": "==0.5.1" }, "pycparser": { "hashes": [ @@ -543,16 +566,18 @@ }, "pyopenssl": { "hashes": [ - "sha256:24f0dc5227396b3e831f4c7f602b950a5e9833d292c8e4a2e06b709292806ae2", - "sha256:276f931f55a452e7dea69c7173e984eb2a4407ce413c918aa34b55f82f9b8bac" + "sha256:6aa33039a93fffa4563e655b61d11364d01264be8ccb49906101e02a334530bf", + "sha256:ba07553fb6fd6a7a2259adb9b84e12302a9a8a75c44046e8bb5d3e5ee887e3c3" ], - "version": "==23.2.0" + "markers": "python_version >= '3.7'", + "version": "==24.0.0" }, "pyparsing": { "hashes": [ "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" ], + "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", "version": "==2.4.7" }, "pyperclip": { @@ -563,47 +588,54 @@ }, "pytest": { "hashes": [ - "sha256:78bf16451a2eb8c7a2ea98e32dc119fd2aa758f1d5d66dbf0a59d69a3969df32", - "sha256:b4bf8c45bd59934ed84001ad51e11b4ee40d40a1229d2c79f9c592b0a3f6bd8a" + "sha256:249b1b0864530ba251b7438274c4d251c58d868edaaec8762893ad4a0d71c36c", + "sha256:50fb9cbe836c3f20f0dfa99c565201fb75dc54c8d76373cd1bde06b06657bdb6" ], "index": "pypi", - "version": "==7.4.0" + "markers": "python_version >= '3.8'", + "version": "==8.0.0" }, "pytest-asyncio": { "hashes": [ - "sha256:40a7eae6dded22c7b604986855ea48400ab15b069ae38116e8c01238e9eeb64d", - "sha256:8666c1c8ac02631d7c51ba282e0c69a8a452b211ffedf2599099845da5c5c37b" + "sha256:3a048872a9c4ba14c3e90cc1aa20cbc2def7d01c7c8db3777ec281ba9c057675", + "sha256:4e7093259ba018d58ede7d5315131d21923a60f8a6e9ee266ce1589685c89eac" ], "index": "pypi", - "version": "==0.21.1" + "markers": "python_version >= '3.8'", + "version": "==0.23.5" }, "pytest-repeat": { "hashes": [ - "sha256:4474a7d9e9137f6d8cc8ae297f8c4168d33c56dd740aa78cfffe562557e6b96e", - "sha256:5cd3289745ab3156d43eb9c8e7f7d00a926f3ae5c9cf425bec649b2fe15bad5b" + "sha256:26ab2df18226af9d5ce441c858f273121e92ff55f5bb311d25755b8d7abdd8ed", + "sha256:ffd3836dfcd67bb270bec648b330e20be37d2966448c4148c4092d1e8aba8185" ], "index": "pypi", - "version": "==0.9.1" + "markers": "python_version >= '3.7'", + "version": "==0.9.3" }, "pytest-timeout": { "hashes": [ - "sha256:c07ca07404c612f8abbe22294b23c368e2e5104b521c1790195561f37e1ac3d9", - "sha256:f6f50101443ce70ad325ceb4473c4255e9d74e3c7cd0ef827309dfa4c0d975c6" + "sha256:3b0b95dabf3cb50bac9ef5ca912fa0cfc286526af17afc806824df20c2f72c90", + "sha256:bde531e096466f49398a59f2dde76fa78429a09a12411466f88a07213e220de2" ], "index": "pypi", - "version": "==2.1.0" + "markers": "python_version >= '3.7'", + "version": "==2.2.0" }, "pytest-xdist": { "hashes": [ - "sha256:d5ee0520eb1b7bcca50a60a518ab7a7707992812c578198f8b44fdfac78e8c93", - "sha256:ff9daa7793569e6a68544850fd3927cd257cc03a7ef76c95e86915355e82b5f2" + "sha256:cbb36f3d67e0c478baa57fa4edc8843887e0f6cfc42d677530a36d7472b32d8a", + "sha256:d075629c7e00b611df89f490a5063944bee7a4362a5ff11c7cc7824a03dfce24" ], "index": "pypi", - "version": "==3.3.1" + "markers": "python_version >= '3.7'", + "version": "==3.5.0" }, "pyyaml": { "hashes": [ + "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5", "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc", + "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df", "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741", "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206", "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27", @@ 
-611,7 +643,10 @@ "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62", "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98", "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696", + "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290", + "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9", "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d", + "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6", "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867", "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47", "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486", @@ -619,11 +654,15 @@ "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3", "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007", "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938", + "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0", "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c", "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735", "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d", + "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28", + "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4", "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba", "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8", + "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef", "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5", "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd", "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3", @@ -636,7 +675,9 @@ "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43", "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859", "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673", + "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54", "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a", + "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b", "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab", "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa", "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c", @@ -645,6 +686,7 @@ "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f" ], "index": "pypi", + "markers": "python_version >= '3.6'", "version": "==6.0.1" }, "ruamel.yaml": { @@ -652,50 +694,64 @@ "sha256:1a771fc92d3823682b7f0893ad56cb5a5c87c48e62b5399d6f42c8759a583b33", "sha256:ea21da1198c4b41b8e7a259301cc9710d3b972bf8ba52f06218478e6802dd1f1" ], + "markers": "python_version >= '3'", "version": "==0.17.16" }, "ruamel.yaml.clib": { "hashes": [ - "sha256:045e0626baf1c52e5527bd5db361bc83180faaba2ff586e763d3d5982a876a9e", - "sha256:15910ef4f3e537eea7fe45f8a5d19997479940d9196f357152a09031c5be59f3", - "sha256:184faeaec61dbaa3cace407cffc5819f7b977e75360e8d5ca19461cd851a5fc5", - "sha256:1a6391a7cabb7641c32517539ca42cf84b87b667bad38b78d4d42dd23e957c81", - "sha256:1f08fd5a2bea9c4180db71678e850b995d2a5f4537be0e94557668cf0f5f9497", - 
"sha256:2aa261c29a5545adfef9296b7e33941f46aa5bbd21164228e833412af4c9c75f", - "sha256:3110a99e0f94a4a3470ff67fc20d3f96c25b13d24c6980ff841e82bafe827cac", - "sha256:3243f48ecd450eddadc2d11b5feb08aca941b5cd98c9b1db14b2fd128be8c697", - "sha256:370445fd795706fd291ab00c9df38a0caed0f17a6fb46b0f607668ecb16ce763", - "sha256:40d030e2329ce5286d6b231b8726959ebbe0404c92f0a578c0e2482182e38282", - "sha256:41d0f1fa4c6830176eef5b276af04c89320ea616655d01327d5ce65e50575c94", - "sha256:4a4d8d417868d68b979076a9be6a38c676eca060785abaa6709c7b31593c35d1", - "sha256:4b3a93bb9bc662fc1f99c5c3ea8e623d8b23ad22f861eb6fce9377ac07ad6072", - "sha256:5bc0667c1eb8f83a3752b71b9c4ba55ef7c7058ae57022dd9b29065186a113d9", - "sha256:763d65baa3b952479c4e972669f679fe490eee058d5aa85da483ebae2009d231", - "sha256:7bdb4c06b063f6fd55e472e201317a3bb6cdeeee5d5a38512ea5c01e1acbdd93", - "sha256:8831a2cedcd0f0927f788c5bdf6567d9dc9cc235646a434986a852af1cb54b4b", - "sha256:91a789b4aa0097b78c93e3dc4b40040ba55bef518f84a40d4442f713b4094acb", - "sha256:92460ce908546ab69770b2e576e4f99fbb4ce6ab4b245345a3869a0a0410488f", - "sha256:99e77daab5d13a48a4054803d052ff40780278240a902b880dd37a51ba01a307", - "sha256:9c7617df90c1365638916b98cdd9be833d31d337dbcd722485597b43c4a215bf", - "sha256:a234a20ae07e8469da311e182e70ef6b199d0fbeb6c6cc2901204dd87fb867e8", - "sha256:a7b301ff08055d73223058b5c46c55638917f04d21577c95e00e0c4d79201a6b", - "sha256:be2a7ad8fd8f7442b24323d24ba0b56c51219513cfa45b9ada3b87b76c374d4b", - "sha256:bf9a6bc4a0221538b1a7de3ed7bca4c93c02346853f44e1cd764be0023cd3640", - "sha256:c3ca1fbba4ae962521e5eb66d72998b51f0f4d0f608d3c0347a48e1af262efa7", - "sha256:d000f258cf42fec2b1bbf2863c61d7b8918d31ffee905da62dede869254d3b8a", - "sha256:d5859983f26d8cd7bb5c287ef452e8aacc86501487634573d260968f753e1d71", - "sha256:d5e51e2901ec2366b79f16c2299a03e74ba4531ddcfacc1416639c557aef0ad8", - "sha256:da538167284de58a52109a9b89b8f6a53ff8437dd6dc26d33b57bf6699153122", - "sha256:debc87a9516b237d0466a711b18b6ebeb17ba9f391eb7f91c649c5c4ec5006c7", - "sha256:df5828871e6648db72d1c19b4bd24819b80a755c4541d3409f0f7acd0f335c80", - "sha256:ecdf1a604009bd35c674b9225a8fa609e0282d9b896c03dd441a91e5f53b534e", - "sha256:efa08d63ef03d079dcae1dfe334f6c8847ba8b645d08df286358b1f5293d24ab", - "sha256:f01da5790e95815eb5a8a138508c01c758e5f5bc0ce4286c4f7028b8dd7ac3d0", - "sha256:f34019dced51047d6f70cb9383b2ae2853b7fc4dce65129a5acd49f4f9256646", - "sha256:f6d3d39611ac2e4f62c3128a9eed45f19a6608670c5a2f4f07f24e8de3441d38" - ], - "markers": "platform_python_implementation == 'CPython' and python_version < '3.10'", - "version": "==0.2.7" + "sha256:024cfe1fc7c7f4e1aff4a81e718109e13409767e4f871443cbff3dba3578203d", + "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001", + "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462", + "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9", + "sha256:1707814f0d9791df063f8c19bb51b0d1278b8e9a2353abbb676c2f685dee6afe", + "sha256:1758ce7d8e1a29d23de54a16ae867abd370f01b5a69e1a3ba75223eaa3ca1a1b", + "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b", + "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615", + "sha256:1dc67314e7e1086c9fdf2680b7b6c2be1c0d8e3a8279f2e993ca2a7545fecf62", + "sha256:25ac8c08322002b06fa1d49d1646181f0b2c72f5cbc15a85e80b4c30a544bb15", + "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b", + "sha256:305889baa4043a09e5b76f8e2a51d4ffba44259f6b4c72dec8ca56207d9c6fe1", + 
"sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9", + "sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675", + "sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899", + "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7", + "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7", + "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312", + "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa", + "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91", + "sha256:75e1ed13e1f9de23c5607fe6bd1aeaae21e523b32d83bb33918245361e9cc51b", + "sha256:77159f5d5b5c14f7c34073862a6b7d34944075d9f93e681638f6d753606c6ce6", + "sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3", + "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334", + "sha256:84b554931e932c46f94ab306913ad7e11bba988104c5cff26d90d03f68258cd5", + "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3", + "sha256:955eae71ac26c1ab35924203fda6220f84dce57d6d7884f189743e2abe3a9fbe", + "sha256:a1a45e0bb052edf6a1d3a93baef85319733a888363938e1fc9924cb00c8df24c", + "sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed", + "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337", + "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880", + "sha256:aa2267c6a303eb483de8d02db2871afb5c5fc15618d894300b88958f729ad74f", + "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d", + "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248", + "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d", + "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf", + "sha256:beb2e0404003de9a4cab9753a8805a8fe9320ee6673136ed7f04255fe60bb512", + "sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069", + "sha256:c2a72e9109ea74e511e29032f3b670835f8a59bbdc9ce692c5b4ed91ccf1eedb", + "sha256:c58ecd827313af6864893e7af0a3bb85fd529f862b6adbefe14643947cfe2942", + "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d", + "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31", + "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92", + "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5", + "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28", + "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d", + "sha256:ebc06178e8821efc9692ea7544aa5644217358490145629914d8020042c24aa1", + "sha256:edaef1c1200c4b4cb914583150dcaa3bc30e592e907c01117c08b13a07255ec2", + "sha256:f481f16baec5290e45aebdc2a5168ebc6d35189ae6fea7a58787613a25f6e875", + "sha256:fff3573c2db359f091e1589c3d7c5fc2f86f5bdb6f24252c2d8e539d4e45f412" + ], + "markers": "python_version < '3.10' and platform_python_implementation == 'CPython'", + "version": "==0.2.8" }, "sortedcontainers": { "hashes": [ @@ -714,26 +770,28 @@ }, "tornado": { "hashes": [ - "sha256:1bd19ca6c16882e4d37368e0152f99c099bad93e0950ce55e71daed74045908f", - "sha256:22d3c2fa10b5793da13c807e6fc38ff49a4f6e1e3868b0a6f4164768bb8e20f5", - "sha256:502fba735c84450974fec147340016ad928d29f1e91f49be168c0a4c18181e1d", - "sha256:65ceca9500383fbdf33a98c0087cb975b2ef3bfb874cb35b8de8740cf7f41bd3", - "sha256:71a8db65160a3c55d61839b7302a9a400074c9c753040455494e2af74e2501f2", - 
"sha256:7ac51f42808cca9b3613f51ffe2a965c8525cb1b00b7b2d56828b8045354f76a", - "sha256:7d01abc57ea0dbb51ddfed477dfe22719d376119844e33c661d873bf9c0e4a16", - "sha256:805d507b1f588320c26f7f097108eb4023bbaa984d63176d1652e184ba24270a", - "sha256:9dc4444c0defcd3929d5c1eb5706cbe1b116e762ff3e0deca8b715d14bf6ec17", - "sha256:ceb917a50cd35882b57600709dd5421a418c29ddc852da8bcdab1f0db33406b0", - "sha256:e7d8db41c0181c80d76c982aacc442c0783a2c54d6400fe028954201a2e032fe" - ], - "version": "==6.3.3" + "sha256:02ccefc7d8211e5a7f9e8bc3f9e5b0ad6262ba2fbb683a6443ecc804e5224ce0", + "sha256:10aeaa8006333433da48dec9fe417877f8bcc21f48dda8d661ae79da357b2a63", + "sha256:27787de946a9cffd63ce5814c33f734c627a87072ec7eed71f7fc4417bb16263", + "sha256:6f8a6c77900f5ae93d8b4ae1196472d0ccc2775cc1dfdc9e7727889145c45052", + "sha256:71ddfc23a0e03ef2df1c1397d859868d158c8276a0603b96cf86892bff58149f", + "sha256:72291fa6e6bc84e626589f1c29d90a5a6d593ef5ae68052ee2ef000dfd273dee", + "sha256:88b84956273fbd73420e6d4b8d5ccbe913c65d31351b4c004ae362eba06e1f78", + "sha256:e43bc2e5370a6a8e413e1e1cd0c91bedc5bd62a74a532371042a18ef19e10579", + "sha256:f0251554cdd50b4b44362f73ad5ba7126fc5b2c2895cc62b14a1c2d7ea32f212", + "sha256:f7894c581ecdcf91666a0912f18ce5e757213999e183ebfc2c3fdbf4d5bd764e", + "sha256:fd03192e287fbd0899dd8f81c6fb9cbbc69194d2074b38f384cb6fa72b80e9c2" + ], + "markers": "python_version >= '3.8'", + "version": "==6.4" }, "typing-extensions": { "hashes": [ - "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36", - "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2" + "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783", + "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd" ], - "version": "==4.7.1" + "markers": "python_version >= '3.8'", + "version": "==4.9.0" }, "urwid": { "hashes": [ @@ -746,6 +804,8 @@ "sha256:2b8c0e447b4b9dbcc85dd97b6eeb4dcbaf6c8b6c3be0bd654e25553e0a2157d8", "sha256:effc12dba7f3bd72e605ce49807bbe692bd729c3bb122a3b91747a6ae77df528" ], + "index": "pypi", + "markers": "python_version >= '3.8'", "version": "==2.3.7" }, "wsproto": { @@ -753,6 +813,7 @@ "sha256:868776f8456997ad0d9720f7322b746bbe9193751b5b290b7f924659377c8c38", "sha256:d8345d1808dd599b5ffb352c25a367adb6157e664e140dbecba3f9bc007edb9f" ], + "markers": "python_full_version >= '3.6.1'", "version": "==1.0.0" }, "zstandard": { @@ -806,81 +867,89 @@ "sha256:f98fc5750aac2d63d482909184aac72a979bfd123b112ec53fd365104ea15b1c", "sha256:ff5b75f94101beaa373f1511319580a010f6e03458ee51b1a386d7de5331440a" ], + "markers": "python_version >= '3.5'", "version": "==0.15.2" } }, "develop": { "attrs": { "hashes": [ - "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04", - "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015" + "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30", + "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1" ], - "version": "==23.1.0" + "markers": "python_version >= '3.7'", + "version": "==23.2.0" }, "black": { "hashes": [ - "sha256:01ede61aac8c154b55f35301fac3e730baf0c9cf8120f65a9cd61a81cfb4a0c3", - "sha256:022a582720b0d9480ed82576c920a8c1dde97cc38ff11d8d8859b3bd6ca9eedb", - "sha256:25cc308838fe71f7065df53aedd20327969d05671bac95b38fdf37ebe70ac087", - "sha256:27eb7a0c71604d5de083757fbdb245b1a4fae60e9596514c6ec497eb63f95320", - "sha256:327a8c2550ddc573b51e2c352adb88143464bb9d92c10416feb86b0f5aee5ff6", - "sha256:47e56d83aad53ca140da0af87678fb38e44fd6bc0af71eebab2d1f59b1acf1d3", - 
"sha256:501387a9edcb75d7ae8a4412bb8749900386eaef258f1aefab18adddea1936bc", - "sha256:552513d5cd5694590d7ef6f46e1767a4df9af168d449ff767b13b084c020e63f", - "sha256:5c4bc552ab52f6c1c506ccae05681fab58c3f72d59ae6e6639e8885e94fe2587", - "sha256:642496b675095d423f9b8448243336f8ec71c9d4d57ec17bf795b67f08132a91", - "sha256:6d1c6022b86f83b632d06f2b02774134def5d4d4f1dac8bef16d90cda18ba28a", - "sha256:7f3bf2dec7d541b4619b8ce526bda74a6b0bffc480a163fed32eb8b3c9aed8ad", - "sha256:831d8f54c3a8c8cf55f64d0422ee875eecac26f5f649fb6c1df65316b67c8926", - "sha256:8417dbd2f57b5701492cd46edcecc4f9208dc75529bcf76c514864e48da867d9", - "sha256:86cee259349b4448adb4ef9b204bb4467aae74a386bce85d56ba4f5dc0da27be", - "sha256:893695a76b140881531062d48476ebe4a48f5d1e9388177e175d76234ca247cd", - "sha256:9fd59d418c60c0348505f2ddf9609c1e1de8e7493eab96198fc89d9f865e7a96", - "sha256:ad0014efc7acf0bd745792bd0d8857413652979200ab924fbf239062adc12491", - "sha256:b5b0ee6d96b345a8b420100b7d71ebfdd19fab5e8301aff48ec270042cd40ac2", - "sha256:c333286dc3ddca6fdff74670b911cccedacb4ef0a60b34e491b8a67c833b343a", - "sha256:f9062af71c59c004cd519e2fb8f5d25d39e46d3af011b41ab43b9c74e27e236f", - "sha256:fb074d8b213749fa1d077d630db0d5f8cc3b2ae63587ad4116e8a436e9bbe995" + "sha256:057c3dc602eaa6fdc451069bd027a1b2635028b575a6c3acfd63193ced20d9c8", + "sha256:08654d0797e65f2423f850fc8e16a0ce50925f9337fb4a4a176a7aa4026e63f8", + "sha256:163baf4ef40e6897a2a9b83890e59141cc8c2a98f2dda5080dc15c00ee1e62cd", + "sha256:1e08fb9a15c914b81dd734ddd7fb10513016e5ce7e6704bdd5e1251ceee51ac9", + "sha256:4dd76e9468d5536abd40ffbc7a247f83b2324f0c050556d9c371c2b9a9a95e31", + "sha256:4f9de21bafcba9683853f6c96c2d515e364aee631b178eaa5145fc1c61a3cc92", + "sha256:61a0391772490ddfb8a693c067df1ef5227257e72b0e4108482b8d41b5aee13f", + "sha256:6981eae48b3b33399c8757036c7f5d48a535b962a7c2310d19361edeef64ce29", + "sha256:7e53a8c630f71db01b28cd9602a1ada68c937cbf2c333e6ed041390d6968faf4", + "sha256:810d445ae6069ce64030c78ff6127cd9cd178a9ac3361435708b907d8a04c693", + "sha256:93601c2deb321b4bad8f95df408e3fb3943d85012dddb6121336b8e24a0d1218", + "sha256:992e451b04667116680cb88f63449267c13e1ad134f30087dec8527242e9862a", + "sha256:9db528bccb9e8e20c08e716b3b09c6bdd64da0dd129b11e160bf082d4642ac23", + "sha256:a0057f800de6acc4407fe75bb147b0c2b5cbb7c3ed110d3e5999cd01184d53b0", + "sha256:ba15742a13de85e9b8f3239c8f807723991fbfae24bad92d34a2b12e81904982", + "sha256:bce4f25c27c3435e4dace4815bcb2008b87e167e3bf4ee47ccdc5ce906eb4894", + "sha256:ca610d29415ee1a30a3f30fab7a8f4144e9d34c89a235d81292a1edb2b55f540", + "sha256:d533d5e3259720fdbc1b37444491b024003e012c5173f7d06825a77508085430", + "sha256:d84f29eb3ee44859052073b7636533ec995bd0f64e2fb43aeceefc70090e752b", + "sha256:e37c99f89929af50ffaf912454b3e3b47fd64109659026b678c091a4cd450fb2", + "sha256:e8a6ae970537e67830776488bca52000eaa37fa63b9988e8c487458d9cd5ace6", + "sha256:faf2ee02e6612577ba0181f4347bcbcf591eb122f7841ae5ba233d12c39dcb4d" ], "index": "pypi", - "version": "==23.7.0" + "markers": "python_version >= '3.8'", + "version": "==24.2.0" }, "click": { "hashes": [ "sha256:6a7a62563bbfabfda3a38f3023a1db4a35978c0abd76f6c9605ecd6554d6d9b1", "sha256:8458d7b1287c5fb128c90e23381cf99dcde74beaf6c7ff6384ce84d6fe090adb" ], + "markers": "python_version >= '3.6'", "version": "==8.0.4" }, "flake8": { "hashes": [ - "sha256:d5b3857f07c030bdb5bf41c7f53799571d75c4491748a3adcd47de929e34cd23", - "sha256:ffdfce58ea94c6580c77888a86506937f9a1a227dfcd15f245d694ae20a6b6e5" + "sha256:33f96621059e65eec474169085dc92bf26e7b2d47366b70be2f67ab80dc25132", + 
"sha256:a6dfbb75e03252917f2473ea9653f7cd799c3064e54d4c8140044c5c065f53c3" ], "index": "pypi", - "version": "==6.1.0" + "markers": "python_full_version >= '3.8.1'", + "version": "==7.0.0" }, "flake8-bugbear": { "hashes": [ - "sha256:0ebdc7d8ec1ca8bd49347694562381f099f4de2f8ec6bda7a7dca65555d9e0d4", - "sha256:d99d005114020fbef47ed5e4aebafd22f167f9a0fbd0d8bf3c9e90612cb25c34" + "sha256:663ef5de80cd32aacd39d362212983bc4636435a6f83700b4ed35acbd0b7d1b8", + "sha256:f9cb5f2a9e792dd80ff68e89a14c12eed8620af8b41a49d823b7a33064ac9658" ], "index": "pypi", - "version": "==23.7.10" + "markers": "python_full_version >= '3.8.1'", + "version": "==24.2.6" }, "isort": { "hashes": [ - "sha256:8bef7dde241278824a6d83f44a544709b065191b95b6e50894bdc722fcba0504", - "sha256:f84c2818376e66cf843d497486ea8fed8700b340f308f076c6fb1229dff318b6" + "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109", + "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6" ], "index": "pypi", - "version": "==5.12.0" + "markers": "python_full_version >= '3.8.0'", + "version": "==5.13.2" }, "mccabe": { "hashes": [ "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325", "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e" ], + "markers": "python_version >= '3.6'", "version": "==0.7.0" }, "mypy-extensions": { @@ -888,42 +957,48 @@ "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d", "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782" ], + "markers": "python_version >= '3.5'", "version": "==1.0.0" }, "packaging": { "hashes": [ - "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61", - "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f" + "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5", + "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7" ], - "version": "==23.1" + "markers": "python_version >= '3.7'", + "version": "==23.2" }, "pathspec": { "hashes": [ - "sha256:1d6ed233af05e679efb96b1851550ea95bbb64b7c490b0f5aa52996c11e92a20", - "sha256:e0d8d0ac2f12da61956eb2306b69f9469b42f4deb0f3cb6ed47b9cce9996ced3" + "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", + "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712" ], - "version": "==0.11.2" + "markers": "python_version >= '3.8'", + "version": "==0.12.1" }, "platformdirs": { "hashes": [ - "sha256:b45696dab2d7cc691a3226759c0d3b00c47c8b6e293d96f6436f733303f77f6d", - "sha256:d7c24979f292f916dc9cbf8648319032f551ea8c49a4c9bf2fb556a02070ec1d" + "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068", + "sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768" ], - "version": "==3.10.0" + "markers": "python_version >= '3.8'", + "version": "==4.2.0" }, "pycodestyle": { "hashes": [ - "sha256:259bcc17857d8a8b3b4a2327324b79e5f020a13c16074670f9c8c8f872ea76d0", - "sha256:5d1013ba8dc7895b548be5afb05740ca82454fd899971563d2ef625d090326f8" + "sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f", + "sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67" ], - "version": "==2.11.0" + "markers": "python_version >= '3.8'", + "version": "==2.11.1" }, "pyflakes": { "hashes": [ - "sha256:4132f6d49cb4dae6819e5379898f2b8cce3c5f23994194c24b77d5da2e36f774", - "sha256:a0aae034c444db0071aa077972ba4768d40c830d9539fd45bf4cd3f8f6992efc" + 
"sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f", + "sha256:84b5be138a2dfbb40689ca07e2152deb896a65c3a3e24c251c5c62489568074a" ], - "version": "==3.1.0" + "markers": "python_version >= '3.8'", + "version": "==3.2.0" }, "tomli": { "hashes": [ @@ -935,10 +1010,11 @@ }, "typing-extensions": { "hashes": [ - "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36", - "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2" + "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783", + "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd" ], - "version": "==4.7.1" + "markers": "python_version >= '3.8'", + "version": "==4.9.0" } } } diff --git a/src/test/regress/after_citus_upgrade_coord_schedule b/src/test/regress/after_citus_upgrade_coord_schedule index f4f6bb29fda..83dd1d9ebc2 100644 --- a/src/test/regress/after_citus_upgrade_coord_schedule +++ b/src/test/regress/after_citus_upgrade_coord_schedule @@ -3,4 +3,5 @@ test: upgrade_citus_finish_citus_upgrade test: upgrade_pg_dist_cleanup_after test: upgrade_basic_after +test: upgrade_basic_after_non_mixed test: upgrade_post_11_after diff --git a/src/test/regress/after_pg_upgrade_schedule b/src/test/regress/after_pg_upgrade_schedule index b47763bdb3e..82e05cf3f79 100644 --- a/src/test/regress/after_pg_upgrade_schedule +++ b/src/test/regress/after_pg_upgrade_schedule @@ -1,4 +1,4 @@ -test: upgrade_basic_after upgrade_ref2ref_after upgrade_type_after upgrade_distributed_function_after upgrade_rebalance_strategy_after upgrade_list_citus_objects upgrade_autoconverted_after upgrade_citus_stat_activity upgrade_citus_locks upgrade_single_shard_table_after upgrade_schema_based_sharding_after +test: upgrade_basic_after upgrade_ref2ref_after upgrade_type_after upgrade_distributed_function_after upgrade_rebalance_strategy_after upgrade_list_citus_objects upgrade_autoconverted_after upgrade_citus_stat_activity upgrade_citus_locks upgrade_single_shard_table_after upgrade_schema_based_sharding_after upgrade_basic_after_non_mixed # This test cannot be run with run_test.py currently due to its dependence on # the specific PG versions that we use to run upgrade tests. 
For now we leave diff --git a/src/test/regress/before_citus_upgrade_coord_schedule b/src/test/regress/before_citus_upgrade_coord_schedule index 1195058d68d..cc6afd30d77 100644 --- a/src/test/regress/before_citus_upgrade_coord_schedule +++ b/src/test/regress/before_citus_upgrade_coord_schedule @@ -1,5 +1,5 @@ # this schedule is to be run on only coordinators -test: upgrade_basic_before +test: upgrade_basic_before upgrade_basic_before_non_mixed test: upgrade_pg_dist_cleanup_before test: upgrade_post_11_before diff --git a/src/test/regress/before_pg_upgrade_schedule b/src/test/regress/before_pg_upgrade_schedule index 05810d3d5dd..95957f8cee4 100644 --- a/src/test/regress/before_pg_upgrade_schedule +++ b/src/test/regress/before_pg_upgrade_schedule @@ -1,5 +1,5 @@ # The basic tests runs analyze which depends on shard numbers -test: multi_test_helpers multi_test_helpers_superuser +test: multi_test_helpers multi_test_helpers_superuser upgrade_basic_before_non_mixed test: multi_test_catalog_views test: upgrade_basic_before test: upgrade_ref2ref_before diff --git a/src/test/regress/bin/normalize.sed b/src/test/regress/bin/normalize.sed index efa9e310f3d..fb51bdc33ca 100644 --- a/src/test/regress/bin/normalize.sed +++ b/src/test/regress/bin/normalize.sed @@ -215,14 +215,14 @@ s/^(ERROR: The index name \(test_index_creation1_p2020_09_26)_([0-9])+_(tenant_ s/^(DEBUG: the index name on the shards of the partition is too long, switching to sequential and local execution mode to prevent self deadlocks: test_index_creation1_p2020_09_26)_([0-9])+_(tenant_id_timeperiod_idx)/\1_xxxxxx_\3/g # normalize errors for not being able to connect to a non-existing host -s/could not translate host name "foobar" to address: .*$/could not translate host name "foobar" to address: /g +s/could not translate host name "([A-Za-z0-9\.\-]+)" to address: .*$/could not translate host name "\1" to address: /g # ignore PL/pgSQL line numbers that differ on Mac builds s/(CONTEXT: PL\/pgSQL function .* line )([0-9]+)/\1XX/g s/^(PL\/pgSQL function .* line) [0-9]+ (.*)/\1 XX \2/g # normalize a test difference in multi_move_mx -s/ connection to server at "\w+" \(127\.0\.0\.1\), port [0-9]+ failed://g +s/ connection to server at "\w+" (\(127\.0\.0\.1\)|\(::1\)), port [0-9]+ failed://g # normalize differences in tablespace of new index s/pg14\.idx.*/pg14\.xxxxx/g diff --git a/src/test/regress/citus_tests/common.py b/src/test/regress/citus_tests/common.py index 907102482e1..6c09e0b3852 100644 --- a/src/test/regress/citus_tests/common.py +++ b/src/test/regress/citus_tests/common.py @@ -92,7 +92,7 @@ def get_pg_major_version(): OLDEST_SUPPORTED_CITUS_VERSION_MATRIX = { 14: "10.2.0", 15: "11.1.5", - 16: "12.1devel", + 16: "12.1.1", } OLDEST_SUPPORTED_CITUS_VERSION = OLDEST_SUPPORTED_CITUS_VERSION_MATRIX[PG_MAJOR_VERSION] @@ -294,6 +294,9 @@ def _run_pg_regress( output_dir, "--use-existing", ] + if PG_MAJOR_VERSION >= 16: + command.append("--expecteddir") + command.append(output_dir) if extra_tests != "": command.append(extra_tests) @@ -431,6 +434,12 @@ def sudo(command, *args, shell=True, **kwargs): def notice_handler(diag: psycopg.errors.Diagnostic): print(f"{diag.severity}: {diag.message_primary}") + if diag.message_detail: + print(f"DETAIL: {diag.message_detail}") + if diag.message_hint: + print(f"HINT: {diag.message_hint}") + if diag.context: + print(f"CONTEXT: {diag.context}") def cleanup_test_leftovers(nodes): @@ -453,6 +462,9 @@ def cleanup_test_leftovers(nodes): for node in nodes: node.cleanup_schemas() + for node in nodes: + 
node.cleanup_databases() + for node in nodes: node.cleanup_users() @@ -578,6 +590,14 @@ def sql(self, query, params=None, **kwargs): with self.cur(**kwargs) as cur: cur.execute(query, params=params) + def sql_prepared(self, query, params=None, **kwargs): + """Run an SQL query, with prepare=True + + This opens a new connection and closes it once the query is done + """ + with self.cur(**kwargs) as cur: + cur.execute(query, params=params, prepare=True) + def sql_row(self, query, params=None, allow_empty_result=False, **kwargs): """Run an SQL query that returns a single row and returns this row @@ -753,6 +773,7 @@ def __init__(self, pgdata): self.subscriptions = set() self.publications = set() self.replication_slots = set() + self.databases = set() self.schemas = set() self.users = set() @@ -993,6 +1014,10 @@ def create_user(self, name, args: typing.Optional[psycopg.sql.Composable] = None args = sql.SQL("") self.sql(sql.SQL("CREATE USER {} {}").format(sql.Identifier(name), args)) + def create_database(self, name): + self.databases.add(name) + self.sql(sql.SQL("CREATE DATABASE {}").format(sql.Identifier(name))) + def create_schema(self, name): self.schemas.add(name) self.sql(sql.SQL("CREATE SCHEMA {}").format(sql.Identifier(name))) @@ -1020,6 +1045,12 @@ def cleanup_users(self): for user in self.users: self.sql(sql.SQL("DROP USER IF EXISTS {}").format(sql.Identifier(user))) + def cleanup_databases(self): + for database in self.databases: + self.sql( + sql.SQL("DROP DATABASE IF EXISTS {}").format(sql.Identifier(database)) + ) + def cleanup_schemas(self): for schema in self.schemas: self.sql( diff --git a/src/test/regress/citus_tests/run_test.py b/src/test/regress/citus_tests/run_test.py index f1e1ec827d0..9a648c0ab8a 100755 --- a/src/test/regress/citus_tests/run_test.py +++ b/src/test/regress/citus_tests/run_test.py @@ -125,7 +125,6 @@ def extra_tests(self): "multi_mx_create_table": TestDeps( None, [ - "multi_test_helpers_superuser", "multi_mx_node_metadata", "multi_cluster_management", "multi_mx_function_table_reference", @@ -136,22 +135,13 @@ def extra_tests(self): ), "alter_role_propagation": TestDeps("minimal_schedule"), "background_rebalance": TestDeps( - None, - [ - "multi_test_helpers", - "multi_cluster_management", - ], - worker_count=3, + None, ["multi_test_helpers", "multi_cluster_management"], worker_count=3 ), "background_rebalance_parallel": TestDeps( - None, - [ - "multi_test_helpers", - "multi_cluster_management", - ], - worker_count=6, + None, ["multi_test_helpers", "multi_cluster_management"], worker_count=6 ), "function_propagation": TestDeps("minimal_schedule"), + "citus_shards": TestDeps("minimal_schedule"), "grant_on_foreign_server_propagation": TestDeps("minimal_schedule"), "multi_modifying_xacts": TestDeps("minimal_schedule"), "multi_mx_modifying_xacts": TestDeps(None, ["multi_mx_create_table"]), @@ -163,17 +153,77 @@ def extra_tests(self): "isolation_extension_commands": TestDeps( None, ["isolation_setup", "isolation_add_remove_node"] ), + "isolation_update_node": TestDeps( + None, ["isolation_setup", "isolation_add_remove_node"] + ), "schema_based_sharding": TestDeps("minimal_schedule"), "multi_sequence_default": TestDeps( + None, ["multi_test_helpers", "multi_cluster_management", "multi_table_ddl"] + ), + "grant_on_schema_propagation": TestDeps("minimal_schedule"), + "propagate_extension_commands": TestDeps("minimal_schedule"), + "multi_size_queries": TestDeps("base_schedule", ["multi_copy"]), + "multi_mx_node_metadata": TestDeps( + None, ["multi_extension", 
"multi_test_helpers", "multi_test_helpers_superuser"] + ), + "multi_mx_function_table_reference": TestDeps( + None, + ["multi_cluster_management", "remove_coordinator_from_metadata"], + # because it queries node group id and it changes as we add / remove nodes + repeatable=False, + ), + "multi_mx_add_coordinator": TestDeps( None, [ - "multi_test_helpers", "multi_cluster_management", - "multi_table_ddl", + "remove_coordinator_from_metadata", + "multi_mx_function_table_reference", ], ), - "grant_on_schema_propagation": TestDeps("minimal_schedule"), - "propagate_extension_commands": TestDeps("minimal_schedule"), + "metadata_sync_helpers": TestDeps( + None, ["multi_mx_node_metadata", "multi_cluster_management"] + ), + "multi_utilities": TestDeps("minimal_schedule", ["multi_data_types"]), + "multi_tenant_isolation_nonblocking": TestDeps( + "minimal_schedule", ["multi_data_types", "remove_coordinator_from_metadata"] + ), + "remove_non_default_nodes": TestDeps( + None, ["multi_mx_node_metadata", "multi_cluster_management"], repeatable=False + ), + "citus_split_shard_columnar_partitioned": TestDeps( + "minimal_schedule", ["remove_coordinator_from_metadata"] + ), + "add_coordinator": TestDeps( + "minimal_schedule", ["remove_coordinator_from_metadata"], repeatable=False + ), + "multi_multiuser_auth": TestDeps( + "minimal_schedule", + ["multi_create_table", "multi_create_users", "multi_multiuser_load_data"], + repeatable=False, + ), + "multi_prepare_plsql": TestDeps("base_schedule"), + "pg15": TestDeps("base_schedule"), + "foreign_key_to_reference_shard_rebalance": TestDeps( + "minimal_schedule", ["remove_coordinator_from_metadata"] + ), + "limit_intermediate_size": TestDeps("base_schedule"), + "columnar_drop": TestDeps( + "minimal_schedule", + ["columnar_create", "columnar_load"], + repeatable=False, + ), + "multi_metadata_sync": TestDeps( + None, + [ + "multi_sequence_default", + "alter_database_propagation", + "alter_role_propagation", + "grant_on_schema_propagation", + "multi_test_catalog_views", + "multi_drop_extension", + ], + repeatable=False, + ), } @@ -266,9 +316,13 @@ def run_schedule_with_multiregress(test_name, schedule, dependencies, args): worker_count = needed_worker_count(test_name, dependencies) # find suitable make recipe - if dependencies.schedule == "base_isolation_schedule" or "isolation" in test_name: + if dependencies.schedule == "base_isolation_schedule" or test_name.startswith( + "isolation" + ): make_recipe = "check-isolation-custom-schedule" - elif dependencies.schedule == "failure_base_schedule" or "failure" in test_name: + elif dependencies.schedule == "failure_base_schedule" or test_name.startswith( + "failure" + ): make_recipe = "check-failure-custom-schedule" else: make_recipe = "check-custom-schedule" @@ -381,10 +435,7 @@ def test_dependencies(test_name, test_schedule, schedule_line, args): if "upgrade_columnar_before" not in before_tests: before_tests.append("upgrade_columnar_before") - return TestDeps( - default_base_schedule(test_schedule, args), - before_tests, - ) + return TestDeps(default_base_schedule(test_schedule, args), before_tests) # before_ tests leave stuff around on purpose for the after tests. So they # are not repeatable by definition. diff --git a/src/test/regress/citus_tests/test/README.md b/src/test/regress/citus_tests/test/README.md index 6aac98e49aa..73435ecf646 100644 --- a/src/test/regress/citus_tests/test/README.md +++ b/src/test/regress/citus_tests/test/README.md @@ -82,6 +82,7 @@ the name of the fixture. 
For example: ```python def test_some_query(cluster): cluster.coordinator.sql("SELECT 1") + assert cluster.workers[0].sql_value('SELECT 2') == 2 ``` If you need a cluster of a specific size you can use the `cluster_factory` diff --git a/src/test/regress/citus_tests/test/test_maintenancedeamon.py b/src/test/regress/citus_tests/test/test_maintenancedeamon.py new file mode 100644 index 00000000000..3f6cb501ece --- /dev/null +++ b/src/test/regress/citus_tests/test/test_maintenancedeamon.py @@ -0,0 +1,74 @@ +# This test checks that once citus.main_db is set and the +# server is restarted, a Citus Maintenance Daemon for the main_db +# is launched. This should happen even if there is no query run +# in main_db yet. +import time + + +def wait_until_maintenance_deamons_start(deamoncount, cluster): + i = 0 + n = 0 + + while i < 10: + i += 1 + n = cluster.coordinator.sql_value( + "SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon';" + ) + + if n == deamoncount: + break + + time.sleep(0.1) + + assert n == deamoncount + + +def test_set_maindb(cluster_factory): + cluster = cluster_factory(0) + + # Test that once citus.main_db is set to a database name + # there are two maintenance daemons running upon restart. + # One maintenance daemon for the database of the current connection + # and one for the citus.main_db. + cluster.coordinator.create_database("mymaindb") + cluster.coordinator.configure("citus.main_db='mymaindb'") + cluster.coordinator.restart() + + assert cluster.coordinator.sql_value("SHOW citus.main_db;") == "mymaindb" + + wait_until_maintenance_deamons_start(2, cluster) + + assert ( + cluster.coordinator.sql_value( + "SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon' AND datname='mymaindb';" + ) + == 1 + ) + + # Test that once citus.main_db is set to empty string + # there is only one maintenance daemon for the database + # of the current connection. + cluster.coordinator.configure("citus.main_db=''") + cluster.coordinator.restart() + assert cluster.coordinator.sql_value("SHOW citus.main_db;") == "" + + wait_until_maintenance_deamons_start(1, cluster) + + # Test that after citus.main_db is dropped, the maintenance + # daemon for this database is terminated.
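The `wait_until_maintenance_deamons_start` helper above retries at most ten times with a 0.1 s sleep, i.e. it gives the daemons roughly one second to appear. A minimal, generalized sketch of the same polling pattern; the `wait_until` name and the commented usage are illustrative and not part of this patch:

```python
import time


def wait_until(condition, timeout=1.0, interval=0.1):
    # Poll `condition` (a zero-argument callable) until it returns a truthy
    # value or `timeout` seconds have elapsed; return the last result.
    deadline = time.monotonic() + timeout
    result = condition()
    while not result and time.monotonic() < deadline:
        time.sleep(interval)
        result = condition()
    return result


# Hypothetical usage against the coordinator fixture used in these tests:
# assert wait_until(lambda: cluster.coordinator.sql_value(
#     "SELECT count(*) FROM pg_stat_activity"
#     " WHERE application_name = 'Citus Maintenance Daemon'") == 2)
```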
+ cluster.coordinator.configure("citus.main_db='mymaindb'") + cluster.coordinator.restart() + assert cluster.coordinator.sql_value("SHOW citus.main_db;") == "mymaindb" + + wait_until_maintenance_deamons_start(2, cluster) + + cluster.coordinator.sql("DROP DATABASE mymaindb;") + + wait_until_maintenance_deamons_start(1, cluster) + + assert ( + cluster.coordinator.sql_value( + "SELECT count(*) FROM pg_stat_activity WHERE application_name = 'Citus Maintenance Daemon' AND datname='mymaindb';" + ) + == 0 + ) diff --git a/src/test/regress/citus_tests/test/test_other_databases.py b/src/test/regress/citus_tests/test/test_other_databases.py new file mode 100644 index 00000000000..4943016928f --- /dev/null +++ b/src/test/regress/citus_tests/test/test_other_databases.py @@ -0,0 +1,198 @@ +def test_main_commited_outer_not_yet(cluster): + c = cluster.coordinator + w0 = cluster.workers[0] + + # create a non-main database + c.sql("CREATE DATABASE db1") + + # we will use cur1 to simulate non-main database user and + # cur2 to manually do the steps we would do in the main database + with c.cur(dbname="db1") as cur1, c.cur() as cur2: + # let's start a transaction and find its transaction id + cur1.execute("BEGIN") + cur1.execute("SELECT txid_current()") + txid = cur1.fetchall() + + # using the transaction id of the cur1 simulate the main database commands manually + cur2.execute("BEGIN") + cur2.execute( + "SELECT citus_internal.start_management_transaction(%s)", (str(txid[0][0]),) + ) + cur2.execute( + "SELECT citus_internal.execute_command_on_remote_nodes_as_user('CREATE USER u1;', 'postgres')" + ) + cur2.execute( + "SELECT citus_internal.mark_object_distributed(1260, 'u1', 123123, 'postgres')" + ) + cur2.execute("COMMIT") + + # run the transaction recovery + c.sql("SELECT recover_prepared_transactions()") + + # user should not be created on the worker because outer transaction is not committed yet + role_before_commit = w0.sql_value( + "SELECT count(*) FROM pg_roles WHERE rolname = 'u1'" + ) + + assert ( + int(role_before_commit) == 0 + ), "role is in pg_dist_object despite not committing" + + # user should not be in pg_dist_object on the coordinator because outer transaction is not committed yet + pdo_coordinator_before_commit = c.sql_value( + "SELECT count(*) FROM pg_dist_object WHERE objid = 123123" + ) + + assert ( + int(pdo_coordinator_before_commit) == 0 + ), "role is in pg_dist_object on coordinator despite not committing" + + # user should not be in pg_dist_object on the worker because outer transaction is not committed yet + pdo_worker_before_commit = w0.sql_value( + "SELECT count(*) FROM pg_dist_object WHERE objid::regrole::text = 'u1'" + ) + + assert ( + int(pdo_worker_before_commit) == 0 + ), "role is in pg_dist_object on worker despite not committing" + + # commit in cur1 so the transaction recovery thinks this is a successful transaction + cur1.execute("COMMIT") + + # run the transaction recovery again after committing + c.sql("SELECT recover_prepared_transactions()") + + # check that the user is created by the transaction recovery on the worker + role_after_commit = w0.sql_value( + "SELECT count(*) FROM pg_roles WHERE rolname = 'u1'" + ) + + assert ( + int(role_after_commit) == 1 + ), "role is not created during recovery despite committing" + + # check that the user is in pg_dist_object on the coordinator after transaction recovery + pdo_coordinator_after_commit = c.sql_value( + "SELECT count(*) FROM pg_dist_object WHERE objid = 123123" + ) + + assert ( + int(pdo_coordinator_after_commit) == 
1 + ), "role is not in pg_dist_object on coordinator after recovery despite committing" + + # check that the user is in pg_dist_object on the worker after transaction recovery + pdo_worker_after_commit = w0.sql_value( + "SELECT count(*) FROM pg_dist_object WHERE objid::regrole::text = 'u1'" + ) + + assert ( + int(pdo_worker_after_commit) == 1 + ), "role is not in pg_dist_object on worker after recovery despite committing" + + c.sql("DROP DATABASE db1") + c.sql( + "SELECT citus_internal.execute_command_on_remote_nodes_as_user('DROP USER u1', 'postgres')" + ) + c.sql( + """ + SELECT run_command_on_workers($$ + DELETE FROM pg_dist_object + WHERE objid::regrole::text = 'u1' + $$) + """ + ) + c.sql( + """ + DELETE FROM pg_dist_object + WHERE objid = 123123 + """ + ) + + +def test_main_commited_outer_aborted(cluster): + c = cluster.coordinator + w0 = cluster.workers[0] + + # create a non-main database + c.sql("CREATE DATABASE db2") + + # we will use cur1 to simulate non-main database user and + # cur2 to manually do the steps we would do in the main database + with c.cur(dbname="db2") as cur1, c.cur() as cur2: + # let's start a transaction and find its transaction id + cur1.execute("BEGIN") + cur1.execute("SELECT txid_current()") + txid = cur1.fetchall() + + # using the transaction id of the cur1 simulate the main database commands manually + cur2.execute("BEGIN") + cur2.execute( + "SELECT citus_internal.start_management_transaction(%s)", (str(txid[0][0]),) + ) + cur2.execute( + "SELECT citus_internal.execute_command_on_remote_nodes_as_user('CREATE USER u2;', 'postgres')" + ) + cur2.execute( + "SELECT citus_internal.mark_object_distributed(1260, 'u2', 321321, 'postgres')" + ) + cur2.execute("COMMIT") + + # abort cur1 so the transaction recovery thinks this is an aborted transaction + cur1.execute("ABORT") + + # check that the user is not yet created on the worker + role_before_recovery = w0.sql_value( + "SELECT count(*) FROM pg_roles WHERE rolname = 'u2'" + ) + + assert int(role_before_recovery) == 0, "role is already created before recovery" + + # check that the user is not in pg_dist_object on the coordinator + pdo_coordinator_before_recovery = c.sql_value( + "SELECT count(*) FROM pg_dist_object WHERE objid = 321321" + ) + + assert ( + int(pdo_coordinator_before_recovery) == 0 + ), "role is already in pg_dist_object on coordinator before recovery" + + # check that the user is not in pg_dist_object on the worker + pdo_worker_before_recovery = w0.sql_value( + "SELECT count(*) FROM pg_dist_object WHERE objid::regrole::text = 'u2'" + ) + + assert ( + int(pdo_worker_before_recovery) == 0 + ), "role is already in pg_dist_object on worker before recovery" + + # run the transaction recovery + c.sql("SELECT recover_prepared_transactions()") + + # check that the user is not created by the transaction recovery on the worker + role_after_recovery = w0.sql_value( + "SELECT count(*) FROM pg_roles WHERE rolname = 'u2'" + ) + + assert ( + int(role_after_recovery) == 0 + ), "role is created during recovery despite aborting" + + # check that the user is not in pg_dist_object on the coordinator after transaction recovery + pdo_coordinator_after_recovery = c.sql_value( + "SELECT count(*) FROM pg_dist_object WHERE objid = 321321" + ) + + assert ( + int(pdo_coordinator_after_recovery) == 0 + ), "role is in pg_dist_object on coordinator after recovery despite aborting" + + # check that the user is not in pg_dist_object on the worker after transaction recovery + pdo_worker_after_recovery = w0.sql_value( + "SELECT 
count(*) FROM pg_dist_object WHERE objid::regrole::text = 'u2'" + ) + + assert ( + int(pdo_worker_after_recovery) == 0 + ), "role is in pg_dist_object on worker after recovery despite aborting" + + c.sql("DROP DATABASE db2") diff --git a/src/test/regress/citus_tests/test/test_prepared_statements.py b/src/test/regress/citus_tests/test/test_prepared_statements.py new file mode 100644 index 00000000000..761ecc30ce9 --- /dev/null +++ b/src/test/regress/citus_tests/test/test_prepared_statements.py @@ -0,0 +1,30 @@ +def test_call_param(cluster): + # create a distributed table and an associated distributed procedure + # to ensure parameterized CALL succeed, even when the param is the + # distribution key. + coord = cluster.coordinator + coord.sql("CREATE TABLE test(i int)") + coord.sql( + """ + CREATE PROCEDURE p(_i INT) LANGUAGE plpgsql AS $$ + BEGIN + INSERT INTO test(i) VALUES (_i); + END; $$ + """ + ) + sql = "CALL p(%s)" + + # prepare/exec before distributing + coord.sql_prepared(sql, (1,)) + + coord.sql("SELECT create_distributed_table('test', 'i')") + coord.sql( + "SELECT create_distributed_function('p(int)', distribution_arg_name := '_i', colocate_with := 'test')" + ) + + # prepare/exec after distribution + coord.sql_prepared(sql, (2,)) + + sum_i = coord.sql_value("select sum(i) from test;") + + assert sum_i == 3 diff --git a/src/test/regress/citus_tests/upgrade/citus_upgrade_test.py b/src/test/regress/citus_tests/upgrade/citus_upgrade_test.py index 87b00c83c22..1ab44803117 100755 --- a/src/test/regress/citus_tests/upgrade/citus_upgrade_test.py +++ b/src/test/regress/citus_tests/upgrade/citus_upgrade_test.py @@ -115,9 +115,9 @@ def remove_tar_files(tar_path): def restart_databases(pg_path, rel_data_path, mixed_mode, config): for node_name in config.node_name_to_ports.keys(): - if ( - mixed_mode - and config.node_name_to_ports[node_name] == config.chosen_random_worker_port + if mixed_mode and config.node_name_to_ports[node_name] in ( + config.chosen_random_worker_port, + config.coordinator_port(), ): continue abs_data_path = os.path.abspath(os.path.join(rel_data_path, node_name)) @@ -148,7 +148,10 @@ def restart_database(pg_path, abs_data_path, node_name, node_ports, logfile_pref def run_alter_citus(pg_path, mixed_mode, config): for port in config.node_name_to_ports.values(): - if mixed_mode and port == config.chosen_random_worker_port: + if mixed_mode and port in ( + config.chosen_random_worker_port, + config.coordinator_port(), + ): continue utils.psql(pg_path, port, "ALTER EXTENSION citus UPDATE;") @@ -158,7 +161,8 @@ def verify_upgrade(config, mixed_mode, node_ports): actual_citus_version = get_actual_citus_version(config.bindir, port) expected_citus_version = MASTER_VERSION if expected_citus_version != actual_citus_version and not ( - mixed_mode and port == config.chosen_random_worker_port + mixed_mode + and port in (config.chosen_random_worker_port, config.coordinator_port()) ): print( "port: {} citus version {} expected {}".format( diff --git a/src/test/regress/expected/add_coordinator.out b/src/test/regress/expected/add_coordinator.out index 49966938536..01f3a682d5f 100644 --- a/src/test/regress/expected/add_coordinator.out +++ b/src/test/regress/expected/add_coordinator.out @@ -2,13 +2,6 @@ -- ADD_COORDINATOR -- -- node trying to add itself without specifying groupid => 0 should error out --- first remove the coordinator to for testing master_add_node for coordinator -SELECT master_remove_node('localhost', :master_port); - master_remove_node 
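The two tests in test_other_databases.py above follow the same shape. The condensed sketch below is not part of the diff (`db1` and `u1` are reused from those tests purely for illustration); it shows the core of the flow: a transaction in a non-main database is paired with a management transaction in the main database, and `recover_prepared_transactions()` only applies the remote work once the outer transaction has committed.

```python
def run_management_pair(cluster, commit_outer):
    c = cluster.coordinator
    # cur1 simulates the non-main database session, cur2 the main-database
    # management transaction, exactly as in the tests above.
    with c.cur(dbname="db1") as cur1, c.cur() as cur2:
        cur1.execute("BEGIN")
        cur1.execute("SELECT txid_current()")
        txid = cur1.fetchall()[0][0]

        cur2.execute("BEGIN")
        cur2.execute(
            "SELECT citus_internal.start_management_transaction(%s)", (str(txid),)
        )
        cur2.execute(
            "SELECT citus_internal.execute_command_on_remote_nodes_as_user("
            "'CREATE USER u1;', 'postgres')"
        )
        cur2.execute("COMMIT")

        # COMMIT means recovery later creates u1 on the workers;
        # ABORT means the prepared remote transaction is rolled back instead.
        cur1.execute("COMMIT" if commit_outer else "ABORT")

    c.sql("SELECT recover_prepared_transactions()")
```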
---------------------------------------------------------------------- - -(1 row) - SELECT master_add_node('localhost', :master_port); ERROR: Node cannot add itself as a worker. HINT: Add the node as a coordinator by using: SELECT citus_set_coordinator_host('localhost', 57636); diff --git a/src/test/regress/expected/alter_database_propagation.out b/src/test/regress/expected/alter_database_propagation.out index 0ce21774956..5c45a25e29c 100644 --- a/src/test/regress/expected/alter_database_propagation.out +++ b/src/test/regress/expected/alter_database_propagation.out @@ -1,38 +1,30 @@ set citus.log_remote_commands = true; set citus.grep_remote_commands = '%ALTER DATABASE%'; --- since ALLOW_CONNECTIONS alter option should be executed in a different database --- and since we don't have a multiple database support for now, --- this statement will get error -alter database regression ALLOW_CONNECTIONS false; -ERROR: ALLOW_CONNECTIONS is not supported alter database regression with CONNECTION LIMIT 100; -NOTICE: issuing ALTER DATABASE regression WITH CONNECTION LIMIT 100; +NOTICE: issuing ALTER DATABASE regression WITH CONNECTION LIMIT 100; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing ALTER DATABASE regression WITH CONNECTION LIMIT 100; +NOTICE: issuing ALTER DATABASE regression WITH CONNECTION LIMIT 100; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx alter database regression with IS_TEMPLATE true CONNECTION LIMIT 50; -NOTICE: issuing ALTER DATABASE regression WITH IS_TEMPLATE 'true' CONNECTION LIMIT 50; +NOTICE: issuing ALTER DATABASE regression WITH IS_TEMPLATE true CONNECTION LIMIT 50; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing ALTER DATABASE regression WITH IS_TEMPLATE 'true' CONNECTION LIMIT 50; +NOTICE: issuing ALTER DATABASE regression WITH IS_TEMPLATE true CONNECTION LIMIT 50; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx alter database regression with CONNECTION LIMIT -1; -NOTICE: issuing ALTER DATABASE regression WITH CONNECTION LIMIT -1; +NOTICE: issuing ALTER DATABASE regression WITH CONNECTION LIMIT -1; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing ALTER DATABASE regression WITH CONNECTION LIMIT -1; +NOTICE: issuing ALTER DATABASE regression WITH CONNECTION LIMIT -1; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx alter database regression with IS_TEMPLATE true; -NOTICE: issuing ALTER DATABASE regression WITH IS_TEMPLATE 'true'; +NOTICE: issuing ALTER DATABASE regression WITH IS_TEMPLATE true; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing ALTER DATABASE regression WITH IS_TEMPLATE 'true'; +NOTICE: issuing ALTER DATABASE regression WITH IS_TEMPLATE true; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx alter database regression with IS_TEMPLATE false; -NOTICE: issuing ALTER DATABASE regression WITH IS_TEMPLATE 'false'; +NOTICE: issuing ALTER DATABASE regression WITH IS_TEMPLATE false; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing ALTER DATABASE regression WITH IS_TEMPLATE 'false'; +NOTICE: issuing ALTER DATABASE regression WITH IS_TEMPLATE false; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx --- this statement will get error since we don't have a multiple database support for now -alter database regression rename to regression2; -ERROR: current database cannot be renamed alter database regression set 
default_transaction_read_only = true; NOTICE: issuing ALTER DATABASE regression SET default_transaction_read_only = 'true' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx @@ -147,4 +139,96 @@ NOTICE: issuing ALTER DATABASE regression RESET lock_timeout DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing ALTER DATABASE regression RESET lock_timeout DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +set citus.enable_create_database_propagation=on; +SET citus.next_operation_id TO 3000; +create database "regression!'2"; +NOTICE: issuing ALTER DATABASE citus_temp_database_3000_0 RENAME TO "regression!'2" +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing ALTER DATABASE citus_temp_database_3000_0 RENAME TO "regression!'2" +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +alter database "regression!'2" with CONNECTION LIMIT 100; +NOTICE: issuing ALTER DATABASE "regression!'2" WITH CONNECTION LIMIT 100; +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing ALTER DATABASE "regression!'2" WITH CONNECTION LIMIT 100; +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +alter database "regression!'2" with IS_TEMPLATE true CONNECTION LIMIT 50; +NOTICE: issuing ALTER DATABASE "regression!'2" WITH IS_TEMPLATE true CONNECTION LIMIT 50; +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing ALTER DATABASE "regression!'2" WITH IS_TEMPLATE true CONNECTION LIMIT 50; +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +alter database "regression!'2" with IS_TEMPLATE false; +NOTICE: issuing ALTER DATABASE "regression!'2" WITH IS_TEMPLATE false; +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing ALTER DATABASE "regression!'2" WITH IS_TEMPLATE false; +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +\set alter_db_tablespace :abs_srcdir '/tmp_check/ts3' +CREATE TABLESPACE alter_db_tablespace LOCATION :'alter_db_tablespace'; +\c - - - :worker_1_port +\set alter_db_tablespace :abs_srcdir '/tmp_check/ts4' +CREATE TABLESPACE alter_db_tablespace LOCATION :'alter_db_tablespace'; +\c - - - :worker_2_port +\set alter_db_tablespace :abs_srcdir '/tmp_check/ts5' +CREATE TABLESPACE alter_db_tablespace LOCATION :'alter_db_tablespace'; +\c - - - :master_port +set citus.log_remote_commands = true; +set citus.grep_remote_commands = '%ALTER DATABASE%'; +alter database "regression!'2" set TABLESPACE alter_db_tablespace; +NOTICE: issuing ALTER DATABASE "regression!'2" SET TABLESPACE alter_db_tablespace +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing ALTER DATABASE "regression!'2" SET TABLESPACE alter_db_tablespace +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +set citus.enable_create_database_propagation=on; +alter database "regression!'2" rename to regression3; +NOTICE: issuing ALTER DATABASE "regression!'2" RENAME TO regression3 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing ALTER DATABASE "regression!'2" RENAME TO regression3 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +-- check that the local database rename and alter comnmand is not propagated +set citus.enable_create_database_propagation=off; +CREATE database local_regression; +NOTICE: Citus partially supports CREATE DATABASE for distributed databases +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes 
+HINT: You can manually create a database and its extensions on other nodes. +alter DATABASE local_regression with CONNECTION LIMIT 100; +alter DATABASE local_regression rename to local_regression2; +drop database local_regression2; +set citus.enable_create_database_propagation=on; +drop database regression3; +SET citus.next_operation_id TO 3100; +create database "regression!'4"; +NOTICE: issuing ALTER DATABASE citus_temp_database_3100_0 RENAME TO "regression!'4" +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing ALTER DATABASE citus_temp_database_3100_0 RENAME TO "regression!'4" +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +SELECT result FROM run_command_on_all_nodes( + $$ + ALTER TABLESPACE alter_db_tablespace RENAME TO "ts-needs\!escape" + $$ +); + result +--------------------------------------------------------------------- + ALTER TABLESPACE + ALTER TABLESPACE + ALTER TABLESPACE +(3 rows) + +alter database "regression!'4" set TABLESPACE "ts-needs\!escape"; +NOTICE: issuing ALTER DATABASE "regression!'4" SET TABLESPACE "ts-needs\!escape" +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing ALTER DATABASE "regression!'4" SET TABLESPACE "ts-needs\!escape" +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +drop database "regression!'4"; set citus.log_remote_commands = false; +set citus.enable_create_database_propagation=off; +SELECT result FROM run_command_on_all_nodes( + $$ + drop tablespace "ts-needs\!escape" + $$ +); + result +--------------------------------------------------------------------- + DROP TABLESPACE + DROP TABLESPACE + DROP TABLESPACE +(3 rows) + diff --git a/src/test/regress/expected/alter_role_propagation.out b/src/test/regress/expected/alter_role_propagation.out index 82310f477a5..4beea2a51de 100644 --- a/src/test/regress/expected/alter_role_propagation.out +++ b/src/test/regress/expected/alter_role_propagation.out @@ -254,8 +254,8 @@ SELECT run_command_on_workers('SHOW enable_hashagg'); -- also test case sensitivity CREATE DATABASE "REGRESSION"; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. 
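The NOTICE/DETAIL pairs in alter_database_propagation.out come from `citus.log_remote_commands` combined with `citus.grep_remote_commands`; since `notice_handler` in common.py now also prints DETAIL, HINT and CONTEXT, the same output is visible from the Python framework. A small sketch, assuming the `cluster` fixture and the `cur()` helper shown elsewhere in this patch:

```python
def show_alter_database_propagation(cluster):
    # All three statements run on one connection so the SETs stay in effect;
    # the propagated ALTER DATABASE commands are reported back as NOTICEs.
    with cluster.coordinator.cur() as cur:
        cur.execute("SET citus.log_remote_commands = true")
        cur.execute("SET citus.grep_remote_commands = '%ALTER DATABASE%'")
        cur.execute("ALTER DATABASE regression WITH CONNECTION LIMIT 100")
```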
ALTER ROLE CURRENT_USER IN DATABASE "REGRESSION" SET public.myguc TO "Hello from coordinator only"; SELECT d.datname, r.setconfig FROM pg_db_role_setting r LEFT JOIN pg_database d ON r.setdatabase=d.oid WHERE r.setconfig::text LIKE '%Hello from coordinator only%'; datname | setconfig diff --git a/src/test/regress/expected/alter_table_add_column.out b/src/test/regress/expected/alter_table_add_column.out index 61e7319d961..0408aeeab97 100644 --- a/src/test/regress/expected/alter_table_add_column.out +++ b/src/test/regress/expected/alter_table_add_column.out @@ -44,6 +44,15 @@ ERROR: cannot execute ADD COLUMN command with PRIMARY KEY, UNIQUE, FOREIGN and DETAIL: Adding a column with a constraint in one command is not supported because all constraints in Citus must have explicit names HINT: You can issue each command separately such as ALTER TABLE referencing ADD COLUMN test_8 data_type; ALTER TABLE referencing ADD CONSTRAINT constraint_name CHECK (check_expression); ALTER TABLE referencing ADD COLUMN test_8 integer CONSTRAINT check_test_8 CHECK (test_8 > 0); +-- error out properly even if the REFERENCES does not include the column list of the referenced table +ALTER TABLE referencing ADD COLUMN test_9 bool, ADD COLUMN test_10 int REFERENCES referenced; +ERROR: cannot execute ADD COLUMN command with PRIMARY KEY, UNIQUE, FOREIGN and CHECK constraints +DETAIL: Adding a column with a constraint in one command is not supported because all constraints in Citus must have explicit names +HINT: You can issue each command separately such as ALTER TABLE referencing ADD COLUMN test_10 data_type; ALTER TABLE referencing ADD CONSTRAINT constraint_name FOREIGN KEY (test_10) REFERENCES referenced; +ALTER TABLE referencing ADD COLUMN test_9 bool, ADD COLUMN test_10 int REFERENCES referenced(int_col); +ERROR: cannot execute ADD COLUMN command with PRIMARY KEY, UNIQUE, FOREIGN and CHECK constraints +DETAIL: Adding a column with a constraint in one command is not supported because all constraints in Citus must have explicit names +HINT: You can issue each command separately such as ALTER TABLE referencing ADD COLUMN test_10 data_type; ALTER TABLE referencing ADD CONSTRAINT constraint_name FOREIGN KEY (test_10) REFERENCES referenced (int_col ); -- try to add test_6 again, but with IF NOT EXISTS ALTER TABLE referencing ADD COLUMN IF NOT EXISTS test_6 text; NOTICE: column "test_6" of relation "referencing" already exists, skipping diff --git a/src/test/regress/expected/citus_internal_access.out b/src/test/regress/expected/citus_internal_access.out new file mode 100644 index 00000000000..21464b38f81 --- /dev/null +++ b/src/test/regress/expected/citus_internal_access.out @@ -0,0 +1,10 @@ +--- Create a non-superuser role and check if it can access citus_internal schema functions +CREATE USER nonsuperuser CREATEROLE; +SET ROLE nonsuperuser; +--- The non-superuser role should not be able to access citus_internal functions +SELECT citus_internal.commit_management_command_2pc(); +ERROR: permission denied for function commit_management_command_2pc +SELECT citus_internal.replace_isolation_tester_func(); +ERROR: permission denied for function replace_isolation_tester_func +RESET ROLE; +DROP USER nonsuperuser; \ No newline at end of file diff --git a/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out b/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out index e2685c2d792..a559ec4428f 100644 --- a/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out +++ 
b/src/test/regress/expected/citus_non_blocking_split_shard_cleanup.out @@ -107,6 +107,12 @@ SELECT pg_catalog.citus_split_shard_by_split_points( (1 row) +SELECT public.wait_for_resource_cleanup(); + wait_for_resource_cleanup +--------------------------------------------------------------------- + +(1 row) + \c - - - :worker_2_port SET search_path TO "citus_split_test_schema"; -- Replication slots should be cleaned up diff --git a/src/test/regress/expected/citus_schema_distribute_undistribute.out b/src/test/regress/expected/citus_schema_distribute_undistribute.out index ae08b6c6a90..352fc776b7f 100644 --- a/src/test/regress/expected/citus_schema_distribute_undistribute.out +++ b/src/test/regress/expected/citus_schema_distribute_undistribute.out @@ -285,14 +285,7 @@ SELECT citus_schema_undistribute('tenant1'); ERROR: must be owner of schema tenant1 -- assign all tables to dummyregular except table5 SET role tenantuser; -SELECT result FROM run_command_on_all_nodes($$ REASSIGN OWNED BY tenantuser TO dummyregular; $$); - result ---------------------------------------------------------------------- - REASSIGN OWNED - REASSIGN OWNED - REASSIGN OWNED -(3 rows) - +REASSIGN OWNED BY tenantuser TO dummyregular; CREATE TABLE tenant1.table5(id int); -- table owner check fails the distribution SET role dummyregular; @@ -366,14 +359,7 @@ SELECT result FROM run_command_on_all_nodes($$ SELECT array_agg(logicalrelid ORD (3 rows) RESET role; -SELECT result FROM run_command_on_all_nodes($$ REASSIGN OWNED BY dummyregular TO tenantuser; $$); - result ---------------------------------------------------------------------- - REASSIGN OWNED - REASSIGN OWNED - REASSIGN OWNED -(3 rows) - +REASSIGN OWNED BY dummyregular TO tenantuser; DROP USER dummyregular; CREATE USER dummysuper superuser; SET role dummysuper; diff --git a/src/test/regress/expected/citus_schema_move.out b/src/test/regress/expected/citus_schema_move.out index 160d2062b3d..9c25919d61a 100644 --- a/src/test/regress/expected/citus_schema_move.out +++ b/src/test/regress/expected/citus_schema_move.out @@ -189,14 +189,7 @@ SELECT citus_schema_move('s2', 'dummy_node', 1234); ERROR: must be owner of schema s2 -- assign all tables to regularuser RESET ROLE; -SELECT result FROM run_command_on_all_nodes($$ REASSIGN OWNED BY tenantuser TO regularuser; $$); - result ---------------------------------------------------------------------- - REASSIGN OWNED - REASSIGN OWNED - REASSIGN OWNED -(3 rows) - +REASSIGN OWNED BY tenantuser TO regularuser; GRANT USAGE ON SCHEMA citus_schema_move TO regularuser; SET ROLE regularuser; SELECT nodeid AS s2_new_nodeid, quote_literal(nodename) AS s2_new_nodename, nodeport AS s2_new_nodeport diff --git a/src/test/regress/expected/citus_shards.out b/src/test/regress/expected/citus_shards.out new file mode 100644 index 00000000000..b434a984b70 --- /dev/null +++ b/src/test/regress/expected/citus_shards.out @@ -0,0 +1,37 @@ +CREATE SCHEMA citus_shards; +SET search_path TO citus_shards; +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; +SET citus.next_shard_id TO 99456900; +ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 456900; +CREATE TABLE t1 (i int); +SELECT create_distributed_table('t1', 'i'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE "t with space" (i int); +SELECT create_distributed_table('"t with space"', 'i'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) 
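The new citus_shards.out test pins shard and colocation ids so the view output stays stable. From the Python framework the same view can be checked without fixed ids; a hedged sketch (table name and assertion are illustrative, not part of the diff):

```python
def test_citus_shards_view(cluster):
    c = cluster.coordinator
    c.sql("CREATE TABLE t1 (i int)")
    c.sql("SELECT create_distributed_table('t1', 'i')")
    c.sql("INSERT INTO t1 SELECT generate_series(1, 100)")
    # Every shard of the distributed table should appear in the view,
    # each placed on some node and carrying a shard size.
    shard_count = c.sql_value(
        "SELECT count(*) FROM citus_shards WHERE table_name = 't1'::regclass"
    )
    assert shard_count > 0
```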
+ +INSERT INTO t1 SELECT generate_series(1, 100); +INSERT INTO "t with space" SELECT generate_series(1, 1000); +SELECT * FROM citus_shards; + table_name | shardid | shard_name | citus_table_type | colocation_id | nodename | nodeport | shard_size +--------------------------------------------------------------------- + "t with space" | 99456904 | citus_shards."t with space_99456904" | distributed | 456900 | localhost | 57637 | 40960 + "t with space" | 99456905 | citus_shards."t with space_99456905" | distributed | 456900 | localhost | 57638 | 40960 + "t with space" | 99456906 | citus_shards."t with space_99456906" | distributed | 456900 | localhost | 57637 | 40960 + "t with space" | 99456907 | citus_shards."t with space_99456907" | distributed | 456900 | localhost | 57638 | 40960 + t1 | 99456900 | citus_shards.t1_99456900 | distributed | 456900 | localhost | 57637 | 8192 + t1 | 99456901 | citus_shards.t1_99456901 | distributed | 456900 | localhost | 57638 | 8192 + t1 | 99456902 | citus_shards.t1_99456902 | distributed | 456900 | localhost | 57637 | 8192 + t1 | 99456903 | citus_shards.t1_99456903 | distributed | 456900 | localhost | 57638 | 8192 +(8 rows) + +SET client_min_messages TO WARNING; +DROP SCHEMA citus_shards CASCADE; diff --git a/src/test/regress/expected/citus_split_shard_by_split_points_negative.out b/src/test/regress/expected/citus_split_shard_by_split_points_negative.out index 85b1fc3eed7..6a4265f8182 100644 --- a/src/test/regress/expected/citus_split_shard_by_split_points_negative.out +++ b/src/test/regress/expected/citus_split_shard_by_split_points_negative.out @@ -135,4 +135,10 @@ NOTICE: drop cascades to 3 other objects DETAIL: drop cascades to table citus_split_shard_by_split_points_negative.range_paritioned_table_to_split drop cascades to table citus_split_shard_by_split_points_negative.table_to_split drop cascades to table citus_split_shard_by_split_points_negative.table_to_split_replication_factor_2 +SELECT public.wait_for_resource_cleanup(); + wait_for_resource_cleanup +--------------------------------------------------------------------- + +(1 row) + --END : Cleanup diff --git a/src/test/regress/expected/columnar_create.out b/src/test/regress/expected/columnar_create.out index 73b89117723..a134fd0633a 100644 --- a/src/test/regress/expected/columnar_create.out +++ b/src/test/regress/expected/columnar_create.out @@ -178,32 +178,31 @@ SELECT columnar_test_helpers.columnar_metadata_has_storage_id(:columnar_table_1_ CREATE TEMPORARY TABLE columnar_temp(i int) USING columnar; -- reserve some chunks and a stripe INSERT INTO columnar_temp SELECT i FROM generate_series(1,5) i; -SELECT columnar.get_storage_id(oid) AS columnar_temp_storage_id -FROM pg_class WHERE relname='columnar_temp' \gset -SELECT pg_backend_pid() AS val INTO old_backend_pid; +SELECT columnar.get_storage_id(oid) as oid INTO columnar_temp_storage_id +FROM pg_class WHERE relname='columnar_temp'; \c - - - :master_port SET search_path TO columnar_create; --- wait until old backend to expire to make sure that temp table cleanup is complete -SELECT columnar_test_helpers.pg_waitpid(val) FROM old_backend_pid; - pg_waitpid ---------------------------------------------------------------------- - -(1 row) - -DROP TABLE old_backend_pid; --- show that temporary table itself and its metadata is removed -SELECT COUNT(*)=0 FROM pg_class WHERE relname='columnar_temp'; - ?column? 
---------------------------------------------------------------------- - t -(1 row) +-- wait until temporary table and its metadata is removed +DO $$ +DECLARE + loop_wait_count integer := 0; +BEGIN + WHILE ( + (SELECT COUNT(*) > 0 FROM pg_class WHERE relname='columnar_temp') OR + (SELECT columnar_test_helpers.columnar_metadata_has_storage_id(oid) FROM columnar_temp_storage_id) + ) + LOOP + IF loop_wait_count > 1000 THEN + RAISE EXCEPTION 'Timeout while waiting for temporary table to be dropped'; + END IF; -SELECT columnar_test_helpers.columnar_metadata_has_storage_id(:columnar_temp_storage_id); - columnar_metadata_has_storage_id ---------------------------------------------------------------------- - f -(1 row) + PERFORM pg_sleep(0.001); + loop_wait_count := loop_wait_count + 1; + END LOOP; +END; +$$ language plpgsql; +DROP TABLE columnar_temp_storage_id; -- connect to another session and create a temp table with same name CREATE TEMPORARY TABLE columnar_temp(i int) USING columnar; -- reserve some chunks and a stripe diff --git a/src/test/regress/expected/columnar_drop.out b/src/test/regress/expected/columnar_drop.out index 75333c1e8f0..2e7998b6956 100644 --- a/src/test/regress/expected/columnar_drop.out +++ b/src/test/regress/expected/columnar_drop.out @@ -39,8 +39,8 @@ SELECT :columnar_stripes_before_drop - count(distinct storage_id) FROM columnar. SELECT current_database() datname \gset CREATE DATABASE db_to_drop; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. 
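columnar_create.out above replaces the old `pg_waitpid()`-based wait with a plpgsql polling loop. The snippet below restates that loop (simplified to the pg_class check) so it can also be issued from the Python fixtures; the wrapper name is illustrative:

```python
WAIT_FOR_TEMP_TABLE_DROP = """
DO $$
DECLARE
    loop_wait_count integer := 0;
BEGIN
    WHILE (SELECT count(*) > 0 FROM pg_class WHERE relname = 'columnar_temp') LOOP
        IF loop_wait_count > 1000 THEN
            RAISE EXCEPTION 'Timeout while waiting for temporary table to be dropped';
        END IF;
        PERFORM pg_sleep(0.001);
        loop_wait_count := loop_wait_count + 1;
    END LOOP;
END;
$$ LANGUAGE plpgsql;
"""


def wait_for_temp_table_drop(node):
    # `node` is any fixture object exposing the sql() helper from common.py.
    node.sql(WAIT_FOR_TEMP_TABLE_DROP)
```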
\c db_to_drop CREATE EXTENSION citus_columnar; SELECT oid::text databaseoid FROM pg_database WHERE datname = current_database() \gset diff --git a/src/test/regress/expected/comment_on_database.out b/src/test/regress/expected/comment_on_database.out new file mode 100644 index 00000000000..a56fe8e0306 --- /dev/null +++ b/src/test/regress/expected/comment_on_database.out @@ -0,0 +1,101 @@ +set citus.log_remote_commands to on; +set citus.enable_create_database_propagation to on; +set citus.grep_remote_commands to 'COMMENT ON DATABASE'; +create database "test1-\!escape"; +comment on DATABASE "test1-\!escape" is 'test-comment'; +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT ds.description AS database_comment + FROM pg_database d + LEFT JOIN pg_shdescription ds ON d.oid = ds.objoid + WHERE d.datname = 'test1-\!escape'; + $$ +); + result +--------------------------------------------------------------------- + test-comment + test-comment + test-comment +(3 rows) + +comment on DATABASE "test1-\!escape" is 'comment-needs\!escape'; +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT ds.description AS database_comment + FROM pg_database d + LEFT JOIN pg_shdescription ds ON d.oid = ds.objoid + WHERE d.datname = 'test1-\!escape'; + $$ +); + result +--------------------------------------------------------------------- + comment-needs\!escape + comment-needs\!escape + comment-needs\!escape +(3 rows) + +comment on DATABASE "test1-\!escape" is null; +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT ds.description AS database_comment + FROM pg_database d + LEFT JOIN pg_shdescription ds ON d.oid = ds.objoid + WHERE d.datname = 'test1-\!escape'; + $$ +); + result +--------------------------------------------------------------------- + + + +(3 rows) + +drop DATABASE "test1-\!escape"; +--test metadata sync +select 1 from citus_remove_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +create database "test1-\!escape"; +comment on DATABASE "test1-\!escape" is 'test-comment'; +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT ds.description AS database_comment + FROM pg_database d + LEFT JOIN pg_shdescription ds ON d.oid = ds.objoid + WHERE d.datname = 'test1-\!escape'; + $$ +); + result +--------------------------------------------------------------------- + test-comment + test-comment +(2 rows) + +select 1 from citus_add_node('localhost', :worker_2_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT ds.description AS database_comment + FROM pg_database d + LEFT JOIN pg_shdescription ds ON d.oid = ds.objoid + WHERE d.datname = 'test1-\!escape'; + $$ +); + result +--------------------------------------------------------------------- + test-comment + test-comment + test-comment +(3 rows) + +drop DATABASE "test1-\!escape"; +reset citus.enable_create_database_propagation; +reset citus.grep_remote_commands; +reset citus.log_remote_commands; diff --git a/src/test/regress/expected/comment_on_role.out b/src/test/regress/expected/comment_on_role.out new file mode 100644 index 00000000000..2981195f1bb --- /dev/null +++ b/src/test/regress/expected/comment_on_role.out @@ -0,0 +1,99 @@ +set citus.log_remote_commands to on; +set citus.grep_remote_commands to 'COMMENT ON ROLE'; +create role "role1-\!escape"; +comment on ROLE "role1-\!escape" is 'test-comment'; +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT ds.description AS role_comment + FROM pg_roles r + LEFT JOIN pg_shdescription ds ON r.oid = ds.objoid + WHERE r.rolname = 'role1-\!escape'; + $$ +); + result +--------------------------------------------------------------------- + test-comment + test-comment + test-comment +(3 rows) + +comment on role "role1-\!escape" is 'comment-needs\!escape'; +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT ds.description AS role_comment + FROM pg_roles r + LEFT JOIN pg_shdescription ds ON r.oid = ds.objoid + WHERE r.rolname = 'role1-\!escape'; + $$ +); + result +--------------------------------------------------------------------- + comment-needs\!escape + comment-needs\!escape + comment-needs\!escape +(3 rows) + +comment on role "role1-\!escape" is NULL; +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT ds.description AS role_comment + FROM pg_roles r + LEFT JOIN pg_shdescription ds ON r.oid = ds.objoid + WHERE r.rolname = 'role1-\!escape'; + $$ +); + result +--------------------------------------------------------------------- + + + +(3 rows) + +drop role "role1-\!escape"; +--test metadata sync +select 1 from citus_remove_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +create role "role1-\!escape"; +comment on ROLE "role1-\!escape" is 'test-comment'; +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT ds.description AS role_comment + FROM pg_roles r + LEFT JOIN pg_shdescription ds ON r.oid = ds.objoid + WHERE r.rolname = 'role1-\!escape'; + $$ +); + result +--------------------------------------------------------------------- + test-comment + test-comment +(2 rows) + +select 1 from citus_add_node('localhost', :worker_2_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT ds.description AS role_comment + FROM pg_roles r + LEFT JOIN pg_shdescription ds ON r.oid = ds.objoid + WHERE r.rolname = 'role1-\!escape'; + $$ +); + result +--------------------------------------------------------------------- + test-comment + test-comment + test-comment +(3 rows) + +drop role "role1-\!escape"; +reset citus.grep_remote_commands; +reset citus.log_remote_commands; diff --git a/src/test/regress/expected/create_drop_database_propagation.out b/src/test/regress/expected/create_drop_database_propagation.out new file mode 100644 index 00000000000..4ddbaae3fe3 --- /dev/null +++ b/src/test/regress/expected/create_drop_database_propagation.out @@ -0,0 +1,1377 @@ +-- Test for create/drop database propagation. +-- This test is only executes for Postgres versions < 15. +-- For versions >= 15, pg15_create_drop_database_propagation.sql is used. +-- For versions >= 16, pg16_create_drop_database_propagation.sql is used. +-- Test the UDF that we use to issue database command during metadata sync. +SELECT citus_internal.database_command(null); +ERROR: This is an internal Citus function can only be used in a distributed transaction +CREATE ROLE test_db_commands WITH LOGIN; +ALTER SYSTEM SET citus.enable_manual_metadata_changes_for_user TO 'test_db_commands'; +SELECT pg_reload_conf(); + pg_reload_conf +--------------------------------------------------------------------- + t +(1 row) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +SET ROLE test_db_commands; +-- fails on null input +SELECT citus_internal.database_command(null); +ERROR: command cannot be NULL +-- fails on non create / drop db command +SELECT citus_internal.database_command('CREATE TABLE foo_bar(a int)'); +ERROR: citus_internal.database_command() can only be used for CREATE DATABASE command by Citus. +SELECT citus_internal.database_command('SELECT 1'); +ERROR: citus_internal.database_command() can only be used for CREATE DATABASE command by Citus. 
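create_drop_database_propagation.out starts by probing the guards on `citus_internal.database_command()`: every direct call fails, either because the caller is not in a distributed transaction or because the statement is not a CREATE DATABASE. A short pytest-style sketch of the same expectation (illustrative, not part of the diff):

```python
import psycopg
import pytest


def test_database_command_is_guarded(cluster):
    # A direct call from a regular session is always rejected, matching the
    # ERROR lines in the expected output above.
    with pytest.raises(psycopg.Error):
        cluster.coordinator.sql("SELECT citus_internal.database_command('SELECT 1')")
```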
+SELECT citus_internal.database_command('asfsfdsg'); +ERROR: syntax error at or near "asfsfdsg" +SELECT citus_internal.database_command(''); +ERROR: cannot execute multiple utility events +RESET ROLE; +ALTER ROLE test_db_commands nocreatedb; +SET ROLE test_db_commands; +-- make sure that citus_internal.database_command doesn't cause privilege escalation +SELECT citus_internal.database_command('CREATE DATABASE no_permissions'); +ERROR: permission denied to create database +RESET ROLE; +DROP USER test_db_commands; +ALTER SYSTEM RESET citus.enable_manual_metadata_changes_for_user; +SELECT pg_reload_conf(); + pg_reload_conf +--------------------------------------------------------------------- + t +(1 row) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +\set create_drop_db_tablespace :abs_srcdir '/tmp_check/ts3' +CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace'; +\c - - - :worker_1_port +\set create_drop_db_tablespace :abs_srcdir '/tmp_check/ts4' +CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace'; +\c - - - :worker_2_port +\set create_drop_db_tablespace :abs_srcdir '/tmp_check/ts5' +CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace'; +\c - - - :master_port +CREATE DATABASE local_database; +NOTICE: Citus partially supports CREATE DATABASE for distributed databases +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. +-- check that it's only created for coordinator +SELECT * FROM public.check_database_on_all_nodes('local_database') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "local_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +DROP DATABASE local_database; +-- and is dropped +SELECT * FROM public.check_database_on_all_nodes('local_database') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +\c - - - :worker_1_port +CREATE DATABASE local_database; +NOTICE: Citus partially supports CREATE DATABASE for distributed databases +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually 
create a database and its extensions on other nodes. +-- check that it's only created for coordinator +SELECT * FROM public.check_database_on_all_nodes('local_database') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (local) | {"database_properties": {"datacl": null, "datname": "local_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +DROP DATABASE local_database; +-- and is dropped +SELECT * FROM public.check_database_on_all_nodes('local_database') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +\c - - - :master_port +create user create_drop_db_test_user; +set citus.enable_create_database_propagation=on; +-- Tests for create database propagation with template0 which should fail +CREATE DATABASE mydatabase + WITH OWNER = create_drop_db_test_user + TEMPLATE = 'template0' + ENCODING = 'UTF8' + CONNECTION LIMIT = 10 + TABLESPACE = create_drop_db_tablespace + ALLOW_CONNECTIONS = true + IS_TEMPLATE = false; +ERROR: Only template1 is supported as template parameter for CREATE DATABASE +CREATE DATABASE mydatabase_1 + WITH template=template1 + OWNER = create_drop_db_test_user + ENCODING = 'UTF8' + CONNECTION LIMIT = 10 + TABLESPACE = create_drop_db_tablespace + ALLOW_CONNECTIONS = true + IS_TEMPLATE = false; +SELECT * FROM public.check_database_on_all_nodes('mydatabase_1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "mydatabase_1", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "daticurules": null, "datallowconn": true, "datconnlimit": 10, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "mydatabase_1", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "daticurules": null, "datallowconn": true, "datconnlimit": 10, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": 
"c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "mydatabase_1", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "create_drop_db_tablespace", "daticurules": null, "datallowconn": true, "datconnlimit": 10, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +-- Test LC / LOCALE settings that don't match the ones provided in template db. +-- All should throw an error on the coordinator. +CREATE DATABASE lc_collate_test LC_COLLATE = 'C.UTF-8'; +ERROR: new collation (C.UTF-8) is incompatible with the collation of the template database (C) +HINT: Use the same collation as in the template database, or use template0 as template. +CREATE DATABASE lc_ctype_test LC_CTYPE = 'C.UTF-8'; +ERROR: new LC_CTYPE (C.UTF-8) is incompatible with the LC_CTYPE of the template database (C) +HINT: Use the same LC_CTYPE as in the template database, or use template0 as template. +CREATE DATABASE locale_test LOCALE = 'C.UTF-8'; +ERROR: new collation (C.UTF-8) is incompatible with the collation of the template database (C) +HINT: Use the same collation as in the template database, or use template0 as template. +CREATE DATABASE lc_collate_lc_ctype_test LC_COLLATE = 'C.UTF-8' LC_CTYPE = 'C.UTF-8'; +ERROR: new collation (C.UTF-8) is incompatible with the collation of the template database (C) +HINT: Use the same collation as in the template database, or use template0 as template. +-- Test LC / LOCALE settings that match the ones provided in template db. 
+CREATE DATABASE lc_collate_test LC_COLLATE = 'C'; +CREATE DATABASE lc_ctype_test LC_CTYPE = 'C'; +CREATE DATABASE locale_test LOCALE = 'C'; +CREATE DATABASE lc_collate_lc_ctype_test LC_COLLATE = 'C' LC_CTYPE = 'C'; +SELECT * FROM public.check_database_on_all_nodes('lc_collate_test') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "lc_collate_test", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "lc_collate_test", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "lc_collate_test", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT * FROM public.check_database_on_all_nodes('lc_ctype_test') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "lc_ctype_test", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "lc_ctype_test", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "lc_ctype_test", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT * FROM public.check_database_on_all_nodes('locale_test') ORDER BY node_type; + node_type | result 
+--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "locale_test", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "locale_test", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "locale_test", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT * FROM public.check_database_on_all_nodes('lc_collate_lc_ctype_test') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "lc_collate_lc_ctype_test", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "lc_collate_lc_ctype_test", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "lc_collate_lc_ctype_test", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +DROP DATABASE lc_collate_test; +DROP DATABASE lc_ctype_test; +DROP DATABASE locale_test; +DROP DATABASE lc_collate_lc_ctype_test; +-- ALTER TABLESPACE .. RENAME TO .. is not supported, so we need to rename it manually. 
+SELECT result FROM run_command_on_all_nodes( + $$ + ALTER TABLESPACE create_drop_db_tablespace RENAME TO "ts-needs\!escape" + $$ +); + result +--------------------------------------------------------------------- + ALTER TABLESPACE + ALTER TABLESPACE + ALTER TABLESPACE +(3 rows) + +CREATE USER "role-needs\!escape"; +CREATE DATABASE "db-needs\!escape" owner "role-needs\!escape" tablespace "ts-needs\!escape"; +-- Rename it to make check_database_on_all_nodes happy. +ALTER DATABASE "db-needs\!escape" RENAME TO db_needs_escape; +SELECT * FROM public.check_database_on_all_nodes('db_needs_escape') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "db_needs_escape", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "role-needs\\!escape", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "db_needs_escape", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "role-needs\\!escape", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "db_needs_escape", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "role-needs\\!escape", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +-- test database syncing after node addition +select 1 from citus_remove_node('localhost', :worker_2_port); + ?column? 
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+--test with is_template false and allow connections false
+CREATE DATABASE mydatabase
+ OWNER = create_drop_db_test_user
+ CONNECTION LIMIT = 10
+ ENCODING = 'UTF8'
+ TABLESPACE = "ts-needs\!escape"
+ ALLOW_CONNECTIONS = false
+ IS_TEMPLATE = false;
+SELECT * FROM public.check_database_on_all_nodes('mydatabase') ORDER BY node_type;
+ node_type | result
+---------------------------------------------------------------------
+ coordinator (local) | {"database_properties": {"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": false, "datconnlimit": 10, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false}
+ worker node (remote) | {"database_properties": {"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": false, "datconnlimit": 10, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false}
+(2 rows)
+
+SET citus.metadata_sync_mode to 'transactional';
+select 1 from citus_add_node('localhost', :worker_2_port);
+ ?column?
+---------------------------------------------------------------------
+ 1
+(1 row)
+
+SELECT * FROM public.check_database_on_all_nodes('mydatabase') ORDER BY node_type;
+ node_type | result
+---------------------------------------------------------------------
+ coordinator (local) | {"database_properties": {"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": false, "datconnlimit": 10, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false}
+ worker node (remote) | {"database_properties": {"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": false, "datconnlimit": 10, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false}
+ worker node (remote) | {"database_properties": {"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": false, "datconnlimit": 10, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false}
+(3 rows)
+
+SELECT * FROM public.check_database_on_all_nodes('mydatabase_1') ORDER BY node_type;
+ node_type | result
+---------------------------------------------------------------------
+ coordinator (local) |
{"database_properties": {"datacl": null, "datname": "mydatabase_1", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": true, "datconnlimit": 10, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "mydatabase_1", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": true, "datconnlimit": 10, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "mydatabase_1", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": true, "datconnlimit": 10, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT * FROM public.check_database_on_all_nodes('db_needs_escape') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "db_needs_escape", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "role-needs\\!escape", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "db_needs_escape", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "role-needs\\!escape", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "db_needs_escape", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "role-needs\\!escape", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +select 1 from citus_remove_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +SET citus.metadata_sync_mode to 'nontransactional'; +select 1 from citus_add_node('localhost', :worker_2_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +RESET citus.metadata_sync_mode; +SELECT * FROM public.check_database_on_all_nodes('mydatabase') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": false, "datconnlimit": 10, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": false, "datconnlimit": 10, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": false, "datconnlimit": 10, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT * FROM public.check_database_on_all_nodes('mydatabase_1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "mydatabase_1", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": true, "datconnlimit": 10, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "mydatabase_1", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": true, "datconnlimit": 10, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "mydatabase_1", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": true, "datconnlimit": 10, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT * FROM public.check_database_on_all_nodes('db_needs_escape') ORDER BY node_type; + node_type | result 
+--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "db_needs_escape", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "role-needs\\!escape", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "db_needs_escape", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "role-needs\\!escape", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "db_needs_escape", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "role-needs\\!escape", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT citus_disable_node_and_wait('localhost', :worker_1_port, true); + citus_disable_node_and_wait +--------------------------------------------------------------------- + +(1 row) + +CREATE DATABASE test_node_activation; +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +SELECT * FROM public.check_database_on_all_nodes('mydatabase') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": false, "datconnlimit": 10, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": false, "datconnlimit": 10, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "mydatabase", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": false, "datconnlimit": 10, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT * FROM public.check_database_on_all_nodes('mydatabase_1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "mydatabase_1", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": true, "datconnlimit": 10, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "mydatabase_1", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": true, "datconnlimit": 10, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "mydatabase_1", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": true, "datconnlimit": 10, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT * FROM public.check_database_on_all_nodes('db_needs_escape') ORDER BY node_type; + node_type | result 
+--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "db_needs_escape", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "role-needs\\!escape", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "db_needs_escape", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "role-needs\\!escape", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "db_needs_escape", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "role-needs\\!escape", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT * FROM public.check_database_on_all_nodes('test_node_activation') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "test_node_activation", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "test_node_activation", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "test_node_activation", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%DROP DATABASE%'; +drop database mydatabase; +NOTICE: issuing DROP DATABASE mydatabase +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing DROP DATABASE mydatabase +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +SET citus.log_remote_commands = false; +-- check that we actually drop the 
database
+drop database mydatabase_1;
+SELECT * FROM public.check_database_on_all_nodes('mydatabase_1') ORDER BY node_type;
+ node_type | result
+---------------------------------------------------------------------
+ coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false}
+ worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false}
+ worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false}
+(3 rows)
+
+SELECT * FROM public.check_database_on_all_nodes('mydatabase') ORDER BY node_type;
+ node_type | result
+---------------------------------------------------------------------
+ coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false}
+ worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false}
+ worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false}
+(3 rows)
+
+-- create a template database with all options set and allow connections false
+CREATE DATABASE my_template_database
+ WITH OWNER = create_drop_db_test_user
+ ENCODING = 'UTF8'
+ TABLESPACE = "ts-needs\!escape"
+ ALLOW_CONNECTIONS = false
+ IS_TEMPLATE = true;
+SELECT * FROM public.check_database_on_all_nodes('my_template_database') ORDER BY node_type;
+ node_type | result
+---------------------------------------------------------------------
+ coordinator (local) | {"database_properties": {"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": false, "datconnlimit": -1, "daticulocale": null, "datistemplate": true, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false}
+ worker node (remote) | {"database_properties": {"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": false, "datconnlimit": -1, "daticulocale": null, "datistemplate": true, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false}
+ worker node (remote) | {"database_properties": {"datacl": null, "datname": "my_template_database", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": false, "datconnlimit": -1, "daticulocale": null, "datistemplate": true, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false}
+(3 rows)
+
+--template databases cannot be dropped, so we need to change the template flag first
+SELECT result from run_command_on_all_nodes(
+ $$
+ UPDATE pg_database SET datistemplate = false WHERE datname = 'my_template_database'
+ $$
+) ORDER BY result;
+ result
+--------------------------------------------------------------------- + UPDATE 1 + UPDATE 1 + UPDATE 1 +(3 rows) + +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%DROP DATABASE%'; +drop database my_template_database; +NOTICE: issuing DROP DATABASE my_template_database +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing DROP DATABASE my_template_database +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +SET citus.log_remote_commands = false; +SELECT * FROM public.check_database_on_all_nodes('my_template_database') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +--tests for special characters in database name +set citus.enable_create_database_propagation=on; +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%DATABASE%'; +SET citus.next_operation_id TO 2000; +create database "mydatabase#1'2"; +NOTICE: issuing CREATE DATABASE citus_temp_database_2000_0 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing CREATE DATABASE citus_temp_database_2000_0 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing ALTER DATABASE citus_temp_database_2000_0 RENAME TO "mydatabase#1'2" +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing ALTER DATABASE citus_temp_database_2000_0 RENAME TO "mydatabase#1'2" +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +set citus.grep_remote_commands = '%DROP DATABASE%'; +drop database if exists "mydatabase#1'2"; +NOTICE: issuing DROP DATABASE IF EXISTS "mydatabase#1'2" +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing DROP DATABASE IF EXISTS "mydatabase#1'2" +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +reset citus.grep_remote_commands; +reset citus.log_remote_commands; +-- it doesn't fail thanks to "if exists" +drop database if exists "mydatabase#1'2"; +NOTICE: database "mydatabase#1'2" does not exist, skipping +-- recreate it to verify that it's actually dropped +create database "mydatabase#1'2"; +drop database "mydatabase#1'2"; +-- second time we try to drop it, it fails due to lack of "if exists" +drop database "mydatabase#1'2"; +ERROR: database "mydatabase#1'2" does not exist +\c - - - :worker_1_port +SET citus.enable_create_database_propagation TO ON; +-- show that dropping the database from workers is allowed when citus.enable_create_database_propagation is on +DROP DATABASE db_needs_escape; +-- and the same applies to create database too +create database error_test; +drop database error_test; +\c - - - :master_port +SET citus.enable_create_database_propagation TO ON; +DROP DATABASE test_node_activation; +DROP USER "role-needs\!escape"; +-- drop database with force options test +create database db_force_test; +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%DROP DATABASE%'; +drop database db_force_test with (force); +NOTICE: issuing DROP DATABASE 
db_force_test WITH ( FORCE ) +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing DROP DATABASE db_force_test WITH ( FORCE ) +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +reset citus.log_remote_commands; +reset citus.grep_remote_commands; +SELECT * FROM public.check_database_on_all_nodes('db_force_test') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +-- test that we won't propagate non-distributed databases in citus_add_node +select 1 from citus_remove_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +SET citus.enable_create_database_propagation TO off; +CREATE DATABASE non_distributed_db; +NOTICE: Citus partially supports CREATE DATABASE for distributed databases +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. +SET citus.enable_create_database_propagation TO on; +create database distributed_db; +select 1 from citus_add_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +--non_distributed_db should not be propagated to worker_2 +SELECT * FROM public.check_database_on_all_nodes('non_distributed_db') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "non_distributed_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +--distributed_db should be propagated to worker_2 +SELECT * FROM public.check_database_on_all_nodes('distributed_db') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "distributed_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": 
"distributed_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "distributed_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +--clean up resources created by this test +drop database distributed_db; +set citus.enable_create_database_propagation TO off; +drop database non_distributed_db; +-- test role grants on DATABASE in metadata sync +SELECT result from run_command_on_all_nodes( + $$ + create database db_role_grants_test_non_distributed + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + CREATE DATABASE + CREATE DATABASE + CREATE DATABASE +(3 rows) + +SELECT result from run_command_on_all_nodes( + $$ + revoke connect,temp,temporary,create on database db_role_grants_test_non_distributed from public + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + REVOKE + REVOKE + REVOKE +(3 rows) + +SET citus.enable_create_database_propagation TO on; +CREATE ROLE db_role_grants_test_role_exists_on_node_2; +select 1 from citus_remove_node('localhost', :worker_2_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +CREATE DATABASE db_role_grants_test; +revoke connect,temp,temporary,create on database db_role_grants_test from public; +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%CREATE ROLE%'; +CREATE ROLE db_role_grants_test_role_missing_on_node_2; +NOTICE: issuing SELECT worker_create_or_alter_role('db_role_grants_test_role_missing_on_node_2', 'CREATE ROLE db_role_grants_test_role_missing_on_node_2', 'ALTER ROLE db_role_grants_test_role_missing_on_node_2') +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +RESET citus.log_remote_commands ; +RESET citus.grep_remote_commands; +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%GRANT%'; +grant CONNECT,TEMPORARY,CREATE on DATABASE db_role_grants_test to db_role_grants_test_role_exists_on_node_2; +NOTICE: issuing GRANT connect, temporary, create ON DATABASE db_role_grants_test TO db_role_grants_test_role_exists_on_node_2; +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +grant CONNECT,TEMPORARY,CREATE on DATABASE db_role_grants_test to db_role_grants_test_role_missing_on_node_2; +NOTICE: issuing GRANT connect, temporary, create ON DATABASE db_role_grants_test TO db_role_grants_test_role_missing_on_node_2; +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +grant CONNECT,TEMPORARY,CREATE on DATABASE db_role_grants_test_non_distributed to db_role_grants_test_role_exists_on_node_2; +grant CONNECT,TEMPORARY,CREATE on DATABASE db_role_grants_test_non_distributed to db_role_grants_test_role_missing_on_node_2; +-- check the privileges before add_node for database db_role_grants_test, +-- role db_role_grants_test_role_exists_on_node_2 +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test', 'CREATE') + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + t + t +(2 rows) + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test', 'TEMPORARY') + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + t + t +(2 rows) + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test', 'CONNECT') + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + t + t +(2 rows) + +-- check the privileges before add_node for database db_role_grants_test, +-- role db_role_grants_test_role_missing_on_node_2 +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test', 'CREATE') + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + t + t +(2 rows) + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test', 'TEMPORARY') + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + t + t +(2 rows) + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test', 'CONNECT') + $$ +) ORDER BY result; + result 
+--------------------------------------------------------------------- + t + t +(2 rows) + +-- check the privileges before add_node for database db_role_grants_test_non_distributed, +-- role db_role_grants_test_role_exists_on_node_2 +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test_non_distributed', 'CREATE') + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + f + t +(2 rows) + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test_non_distributed', 'TEMPORARY') + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + f + t +(2 rows) + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test_non_distributed', 'CONNECT') + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + f + t +(2 rows) + +-- check the privileges before add_node for database db_role_grants_test_non_distributed, +-- role db_role_grants_test_role_missing_on_node_2 +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test_non_distributed', 'CREATE') + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + f + t +(2 rows) + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test_non_distributed', 'TEMPORARY') + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + f + t +(2 rows) + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test_non_distributed', 'CONNECT') + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + f + t +(2 rows) + +RESET citus.log_remote_commands; +RESET citus.grep_remote_commands; +select 1 from citus_add_node('localhost', :worker_2_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +-- check the privileges after add_node for database db_role_grants_test, +-- role db_role_grants_test_role_exists_on_node_2 +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test', 'CREATE') + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test', 'TEMPORARY') + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test', 'CONNECT') + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +-- check the privileges after add_node for database db_role_grants_test, +-- role db_role_grants_test_role_missing_on_node_2 +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test', 'CREATE') + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test', 'TEMPORARY') + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test', 'CONNECT') + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +-- check the privileges after add_node for database db_role_grants_test_non_distributed, +-- role db_role_grants_test_role_exists_on_node_2 +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test_non_distributed', 'CREATE') + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + f + f + t +(3 rows) + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test_non_distributed', 'TEMPORARY') + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + f + f + t +(3 rows) + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test_non_distributed', 'CONNECT') + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + f + f + t +(3 rows) + +-- check the privileges after add_node for database db_role_grants_test_non_distributed, +-- role db_role_grants_test_role_missing_on_node_2 +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test_non_distributed', 'CREATE') + $$ +) ORDER BY result; + result 
+--------------------------------------------------------------------- + f + f + t +(3 rows) + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test_non_distributed', 'TEMPORARY') + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + f + f + t +(3 rows) + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test_non_distributed', 'CONNECT') + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + f + f + t +(3 rows) + +grant connect,temp,temporary,create on database db_role_grants_test to public; +DROP DATABASE db_role_grants_test; +SELECT result from run_command_on_all_nodes( + $$ + drop database db_role_grants_test_non_distributed + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + DROP DATABASE + DROP DATABASE + DROP DATABASE +(3 rows) + +DROP ROLE db_role_grants_test_role_exists_on_node_2; +DROP ROLE db_role_grants_test_role_missing_on_node_2; +select 1 from citus_remove_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +set citus.enable_create_role_propagation TO off; +create role non_propagated_role; +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. +set citus.enable_create_role_propagation TO on; +set citus.enable_create_database_propagation TO on; +-- Make sure that we propagate non_propagated_role because it's a dependency of test_db. +-- And hence it becomes a distributed object. +create database test_db OWNER non_propagated_role; +create role propagated_role; +grant connect on database test_db to propagated_role; +SELECT 1 FROM citus_add_node('localhost', :worker_2_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +SELECT * FROM public.check_database_on_all_nodes('test_db') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": ["=Tc/non_propagated_role", "non_propagated_role=CTc/non_propagated_role", "propagated_role=c/non_propagated_role"], "datname": "test_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "non_propagated_role", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": ["=Tc/non_propagated_role", "non_propagated_role=CTc/non_propagated_role", "propagated_role=c/non_propagated_role"], "datname": "test_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "non_propagated_role", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": ["=Tc/non_propagated_role", "non_propagated_role=CTc/non_propagated_role", "propagated_role=c/non_propagated_role"], "datname": "test_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "non_propagated_role", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +REVOKE CONNECT ON DATABASE test_db FROM propagated_role; +DROP DATABASE test_db; +DROP ROLE propagated_role, non_propagated_role; +-- show that we don't try to propagate commands on non-distributed databases +SET citus.enable_create_database_propagation TO OFF; +CREATE DATABASE local_database_1; +NOTICE: Citus partially supports CREATE DATABASE for distributed databases +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. +SET citus.enable_create_database_propagation TO ON; +CREATE ROLE local_role_1; +GRANT CONNECT, TEMPORARY, CREATE ON DATABASE local_database_1 TO local_role_1; +ALTER DATABASE local_database_1 SET default_transaction_read_only = 'true'; +REVOKE CONNECT, TEMPORARY, CREATE ON DATABASE local_database_1 FROM local_role_1; +DROP ROLE local_role_1; +DROP DATABASE local_database_1; +-- test create / drop database commands from workers +-- remove one of the workers to test node activation too +SELECT 1 from citus_remove_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +\c - - - :worker_1_port +CREATE DATABASE local_worker_db; +NOTICE: Citus partially supports CREATE DATABASE for distributed databases +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. 
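The HINT above points at the manual fallback: with citus.enable_create_database_propagation off, the new database exists only on the node where CREATE DATABASE ran, and it would have to be created on the remaining nodes by hand. A minimal sketch of that fallback, reusing the run_command_on_all_nodes() helper this test already relies on; the database name below is purely illustrative and not part of the test:

    -- illustrative only: create the same database on every node while propagation is disabled
    SELECT result FROM run_command_on_all_nodes($$ CREATE DATABASE manually_created_db $$);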
+SET citus.enable_create_database_propagation TO ON; +CREATE DATABASE db_created_from_worker + WITH template=template1 + OWNER = create_drop_db_test_user + ENCODING = 'UTF8' + CONNECTION LIMIT = 42 + TABLESPACE = "ts-needs\!escape" + ALLOW_CONNECTIONS = false; +\c - - - :master_port +SET citus.enable_create_database_propagation TO ON; +SELECT 1 FROM citus_add_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +\c - - - :worker_1_port +SET citus.enable_create_database_propagation TO ON; +SELECT * FROM public.check_database_on_all_nodes('local_worker_db') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (local) | {"database_properties": {"datacl": null, "datname": "local_worker_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT * FROM public.check_database_on_all_nodes('db_created_from_worker') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"database_properties": {"datacl": null, "datname": "db_created_from_worker", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": false, "datconnlimit": 42, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (local) | {"database_properties": {"datacl": null, "datname": "db_created_from_worker", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": false, "datconnlimit": 42, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "db_created_from_worker", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "ts-needs\\!escape", "daticurules": null, "datallowconn": false, "datconnlimit": 42, "daticulocale": null, "datistemplate": false, "database_owner": "create_drop_db_test_user", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +DROP DATABASE db_created_from_worker; +SELECT * FROM public.check_database_on_all_nodes('db_created_from_worker') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": 
false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +-- drop the local database while the GUC is on +DROP DATABASE local_worker_db; +SELECT * FROM public.check_database_on_all_nodes('local_worker_db') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SET citus.enable_create_database_propagation TO OFF; +CREATE DATABASE local_worker_db; +NOTICE: Citus partially supports CREATE DATABASE for distributed databases +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. +-- drop the local database while the GUC is off +DROP DATABASE local_worker_db; +SELECT * FROM public.check_database_on_all_nodes('local_worker_db') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SET citus.enable_create_database_propagation TO ON; +CREATE DATABASE another_db_created_from_worker; +\c - - - :master_port +SELECT 1 FROM citus_remove_node('localhost', :master_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +\c - - - :worker_1_port +SET citus.enable_create_database_propagation TO ON; +-- fails because coordinator is not added into metadata +DROP DATABASE another_db_created_from_worker; +ERROR: coordinator is not added to the metadata +HINT: Use SELECT citus_set_coordinator_host('') on coordinator to configure the coordinator hostname +-- fails because coordinator is not added into metadata +CREATE DATABASE new_db; +ERROR: coordinator is not added to the metadata +HINT: Use SELECT citus_set_coordinator_host('') on coordinator to configure the coordinator hostname +\c - - - :master_port +SET client_min_messages TO WARNING; +SELECT 1 FROM citus_add_node('localhost', :master_port, 0); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +RESET client_min_messages; +SET citus.enable_create_database_propagation TO ON; +-- dropping a database that was created from a worker via a different node works fine +DROP DATABASE another_db_created_from_worker; +SELECT * FROM public.check_database_on_all_nodes('another_db_created_from_worker') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +-- Show that we automatically propagate the dependencies (only roles atm) when +-- creating a database from workers too. +SELECT 1 from citus_remove_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +\c - - - :worker_1_port +set citus.enable_create_role_propagation TO off; +create role non_propagated_role; +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. +set citus.enable_create_role_propagation TO on; +set citus.enable_create_database_propagation TO on; +create database test_db OWNER non_propagated_role; +create role propagated_role; +\c - - - :master_port +-- not supported from workers, so need to execute this via coordinator +grant connect on database test_db to propagated_role; +SET citus.enable_create_database_propagation TO ON; +SELECT 1 FROM citus_add_node('localhost', :worker_2_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +SELECT * FROM public.check_database_on_all_nodes('test_db') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": ["=Tc/non_propagated_role", "non_propagated_role=CTc/non_propagated_role", "propagated_role=c/non_propagated_role"], "datname": "test_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "non_propagated_role", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": ["=Tc/non_propagated_role", "non_propagated_role=CTc/non_propagated_role", "propagated_role=c/non_propagated_role"], "datname": "test_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "non_propagated_role", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": ["=Tc/non_propagated_role", "non_propagated_role=CTc/non_propagated_role", "propagated_role=c/non_propagated_role"], "datname": "test_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "non_propagated_role", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +REVOKE CONNECT ON DATABASE test_db FROM propagated_role; +DROP DATABASE test_db; +DROP ROLE propagated_role, non_propagated_role; +-- test citus_internal.acquire_citus_advisory_object_class_lock with null input +SELECT citus_internal.acquire_citus_advisory_object_class_lock(null, 'regression'); +ERROR: object_class cannot be NULL +SELECT citus_internal.acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), null); + acquire_citus_advisory_object_class_lock +--------------------------------------------------------------------- + +(1 row) + +-- OCLASS_DATABASE +SELECT citus_internal.acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), NULL); + acquire_citus_advisory_object_class_lock +--------------------------------------------------------------------- + +(1 row) + +SELECT citus_internal.acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), 'regression'); + acquire_citus_advisory_object_class_lock +--------------------------------------------------------------------- + +(1 row) + +SELECT citus_internal.acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), ''); +ERROR: database "" does not exist +SELECT citus_internal.acquire_citus_advisory_object_class_lock((SELECT CASE WHEN 
substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), 'no_such_db'); +ERROR: database "no_such_db" does not exist +-- invalid OCLASS +SELECT citus_internal.acquire_citus_advisory_object_class_lock(-1, NULL); +ERROR: unsupported object class: -1 +SELECT citus_internal.acquire_citus_advisory_object_class_lock(-1, 'regression'); +ERROR: unsupported object class: -1 +-- invalid OCLASS +SELECT citus_internal.acquire_citus_advisory_object_class_lock(100, NULL); +ERROR: unsupported object class: 100 +SELECT citus_internal.acquire_citus_advisory_object_class_lock(100, 'regression'); +ERROR: unsupported object class: 100 +-- another valid OCLASS, but not implemented yet +SELECT citus_internal.acquire_citus_advisory_object_class_lock(10, NULL); +ERROR: unsupported object class: 10 +SELECT citus_internal.acquire_citus_advisory_object_class_lock(10, 'regression'); +ERROR: unsupported object class: 10 +SELECT 1 FROM run_command_on_all_nodes('ALTER SYSTEM SET citus.enable_create_database_propagation TO ON'); + ?column? +--------------------------------------------------------------------- + 1 + 1 + 1 +(3 rows) + +SELECT 1 FROM run_command_on_all_nodes('SELECT pg_reload_conf()'); + ?column? +--------------------------------------------------------------------- + 1 + 1 + 1 +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +-- only one of them succeeds and we don't run into a distributed deadlock +SELECT COUNT(*) FROM run_command_on_all_nodes('CREATE DATABASE concurrent_create_db') WHERE success; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT * FROM public.check_database_on_all_nodes('concurrent_create_db') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "concurrent_create_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "concurrent_create_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "concurrent_create_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT COUNT(*) FROM run_command_on_all_nodes('DROP DATABASE concurrent_create_db') WHERE success; + count +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT * FROM 
public.check_database_on_all_nodes('concurrent_create_db') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +-- revert the system wide change that enables citus.enable_create_database_propagation on all nodes +SELECT 1 FROM run_command_on_all_nodes('ALTER SYSTEM SET citus.enable_create_database_propagation TO OFF'); + ?column? +--------------------------------------------------------------------- + 1 + 1 + 1 +(3 rows) + +SELECT 1 FROM run_command_on_all_nodes('SELECT pg_reload_conf()'); + ?column? +--------------------------------------------------------------------- + 1 + 1 + 1 +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +-- but keep it enabled for coordinator for the rest of the tests +SET citus.enable_create_database_propagation TO ON; +CREATE DATABASE distributed_db; +CREATE USER no_createdb; +SET ROLE no_createdb; +SET citus.enable_create_database_propagation TO ON; +CREATE DATABASE no_createdb; +ERROR: permission denied to create / rename database +ALTER DATABASE distributed_db RENAME TO rename_test; +ERROR: permission denied to create / rename database +DROP DATABASE distributed_db; +ERROR: must be owner of database distributed_db +ALTER DATABASE distributed_db SET TABLESPACE pg_default; +ERROR: must be owner of database distributed_db +ALTER DATABASE distributed_db SET timezone TO 'UTC'; +ERROR: must be owner of database distributed_db +ALTER DATABASE distributed_db RESET timezone; +ERROR: must be owner of database distributed_db +GRANT ALL ON DATABASE distributed_db TO postgres; +WARNING: no privileges were granted for "distributed_db" +RESET ROLE; +ALTER ROLE no_createdb createdb; +SET ROLE no_createdb; +CREATE DATABASE no_createdb; +ALTER DATABASE distributed_db RENAME TO rename_test; +ERROR: must be owner of database distributed_db +RESET ROLE; +SELECT 1 FROM run_command_on_all_nodes($$GRANT ALL ON TABLESPACE pg_default TO no_createdb$$); + ?column? +--------------------------------------------------------------------- + 1 + 1 + 1 +(3 rows) + +ALTER DATABASE distributed_db OWNER TO no_createdb; +SET ROLE no_createdb; +ALTER DATABASE distributed_db SET TABLESPACE pg_default; +ALTER DATABASE distributed_db SET timezone TO 'UTC'; +ALTER DATABASE distributed_db RESET timezone; +GRANT ALL ON DATABASE distributed_db TO postgres; +ALTER DATABASE distributed_db RENAME TO rename_test; +DROP DATABASE rename_test; +RESET ROLE; +SELECT 1 FROM run_command_on_all_nodes($$REVOKE ALL ON TABLESPACE pg_default FROM no_createdb$$); + ?column? +--------------------------------------------------------------------- + 1 + 1 + 1 +(3 rows) + +DROP DATABASE no_createdb; +DROP USER no_createdb; +-- Test a failure scenario by trying to create a distributed database that +-- already exists on one of the nodes. 
+\c - - - :worker_1_port +CREATE DATABASE "test_\!failure"; +NOTICE: Citus partially supports CREATE DATABASE for distributed databases +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. +\c - - - :master_port +SET citus.enable_create_database_propagation TO ON; +CREATE DATABASE "test_\!failure"; +ERROR: database "test_\!failure" already exists +CONTEXT: while executing command on localhost:xxxxx +SET client_min_messages TO WARNING; +CALL citus_cleanup_orphaned_resources(); +RESET client_min_messages; +SELECT result AS database_cleanedup_on_node FROM run_command_on_all_nodes($$SELECT COUNT(*)=0 FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$); + database_cleanedup_on_node +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT * FROM public.check_database_on_all_nodes($$test_\!failure$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "test_\\!failure", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SET citus.enable_create_database_propagation TO OFF; +CREATE DATABASE "test_\!failure1"; +NOTICE: Citus partially supports CREATE DATABASE for distributed databases +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. 
+\c - - - :worker_1_port +DROP DATABASE "test_\!failure"; +SET citus.enable_create_database_propagation TO ON; +CREATE DATABASE "test_\!failure1"; +ERROR: database "test_\!failure1" already exists +CONTEXT: while executing command on localhost:xxxxx +SET client_min_messages TO WARNING; +CALL citus_cleanup_orphaned_resources(); +RESET client_min_messages; +SELECT result AS database_cleanedup_on_node FROM run_command_on_all_nodes($$SELECT COUNT(*)=0 FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$); + database_cleanedup_on_node +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT * FROM public.check_database_on_all_nodes($$test_\!failure1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"database_properties": {"datacl": null, "datname": "test_\\!failure1", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +\c - - - :master_port +-- Before dropping local "test_\!failure1" database, test a failure scenario +-- by trying to create a distributed database that already exists "on local +-- node" this time. 
+SET citus.enable_create_database_propagation TO ON; +CREATE DATABASE "test_\!failure1"; +ERROR: database "test_\!failure1" already exists +SET client_min_messages TO WARNING; +CALL citus_cleanup_orphaned_resources(); +RESET client_min_messages; +SELECT result AS database_cleanedup_on_node FROM run_command_on_all_nodes($$SELECT COUNT(*)=0 FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$); + database_cleanedup_on_node +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT * FROM public.check_database_on_all_nodes($$test_\!failure1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "test_\\!failure1", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SET citus.enable_create_database_propagation TO OFF; +DROP DATABASE "test_\!failure1"; +SET citus.enable_create_database_propagation TO ON; +--clean up resources created by this test +-- DROP TABLESPACE is not supported, so we need to drop it manually. +SELECT result FROM run_command_on_all_nodes( + $$ + drop tablespace "ts-needs\!escape" + $$ +); + result +--------------------------------------------------------------------- + DROP TABLESPACE + DROP TABLESPACE + DROP TABLESPACE +(3 rows) + +drop user create_drop_db_test_user; +reset citus.enable_create_database_propagation; diff --git a/src/test/regress/expected/create_drop_database_propagation_pg15.out b/src/test/regress/expected/create_drop_database_propagation_pg15.out new file mode 100644 index 00000000000..7e76d87f34d --- /dev/null +++ b/src/test/regress/expected/create_drop_database_propagation_pg15.out @@ -0,0 +1,88 @@ +-- +-- PG15 +-- +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 +\gset +\if :server_version_ge_15 +\else +\q +\endif +-- create/drop database for pg >= 15 +set citus.enable_create_database_propagation=on; +CREATE DATABASE mydatabase + WITH OID = 966345; +ERROR: CREATE DATABASE option "oid" is not supported +CREATE DATABASE mydatabase + WITH strategy file_copy; +ERROR: Only wal_log is supported as strategy parameter for CREATE DATABASE +CREATE DATABASE st_wal_log + WITH strategy WaL_LoG; +SELECT * FROM public.check_database_on_all_nodes('st_wal_log') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "st_wal_log", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, 
"stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "st_wal_log", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "st_wal_log", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +drop database st_wal_log; +select 1 from citus_remove_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +-- test COLLATION_VERSION +CREATE DATABASE test_collation_version + WITH ENCODING = 'UTF8' + COLLATION_VERSION = '1.0' + ALLOW_CONNECTIONS = false; +select 1 from citus_add_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT * FROM public.check_database_on_all_nodes('test_collation_version') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "test_collation_version", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": false, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": "1.0", "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "test_collation_version", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": false, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": "1.0", "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "test_collation_version", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": false, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": "1.0", "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +drop database test_collation_version; +SET client_min_messages TO WARNING; +-- test LOCALE_PROVIDER & ICU_LOCALE +CREATE DATABASE test_locale_provider + WITH ENCODING = 'UTF8' + LOCALE_PROVIDER = 'icu' + ICU_LOCALE = 'en_US'; +ERROR: new locale provider (icu) does not match locale provider of the template database (libc) +HINT: Use the same locale provider as in the template database, or use template0 as template. 
+RESET client_min_messages; +CREATE DATABASE test_locale_provider + WITH ENCODING = 'UTF8' + LOCALE_PROVIDER = 'libc' + ICU_LOCALE = 'en_US'; +ERROR: ICU locale cannot be specified unless locale provider is ICU +CREATE DATABASE test_locale_provider + WITH ENCODING = 'UTF8' + LOCALE_PROVIDER = 'libc'; +SELECT * FROM public.check_database_on_all_nodes('test_locale_provider') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "test_locale_provider", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "test_locale_provider", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "test_locale_provider", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +\c test_locale_provider - - :worker_2_port +set citus.enable_create_database_propagation to on; +create database unsupported_option_from_non_main_db with oid = 12345; +ERROR: CREATE DATABASE option "oid" is not supported +\c regression - - :master_port +set citus.enable_create_database_propagation to on; +drop database test_locale_provider; +\c - - - :master_port diff --git a/src/test/regress/expected/create_drop_database_propagation_pg15_0.out b/src/test/regress/expected/create_drop_database_propagation_pg15_0.out new file mode 100644 index 00000000000..b1ed9cc5b6c --- /dev/null +++ b/src/test/regress/expected/create_drop_database_propagation_pg15_0.out @@ -0,0 +1,9 @@ +-- +-- PG15 +-- +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 +\gset +\if :server_version_ge_15 +\else +\q diff --git a/src/test/regress/expected/create_drop_database_propagation_pg16.out b/src/test/regress/expected/create_drop_database_propagation_pg16.out new file mode 100644 index 00000000000..75cd99e616c --- /dev/null +++ b/src/test/regress/expected/create_drop_database_propagation_pg16.out @@ -0,0 +1,23 @@ +-- +-- PG16 +-- +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 +\gset +\if :server_version_ge_16 +\else +\q +\endif +-- create/drop database for pg >= 16 +set citus.enable_create_database_propagation=on; +-- test icu_rules +-- +-- practically we don't support it but better to test +CREATE DATABASE citus_icu_rules_test WITH icu_rules='de_DE@collation=phonebook'; +ERROR: ICU rules cannot be specified unless locale provider 
is ICU +CREATE DATABASE citus_icu_rules_test WITH icu_rules='de_DE@collation=phonebook' locale_provider='icu'; +ERROR: LOCALE or ICU_LOCALE must be specified +CREATE DATABASE citus_icu_rules_test WITH icu_rules='de_DE@collation=phonebook' locale_provider='icu' icu_locale = 'de_DE'; +NOTICE: using standard form "de-DE" for ICU locale "de_DE" +ERROR: new locale provider (icu) does not match locale provider of the template database (libc) +HINT: Use the same locale provider as in the template database, or use template0 as template. diff --git a/src/test/regress/expected/create_drop_database_propagation_pg16_0.out b/src/test/regress/expected/create_drop_database_propagation_pg16_0.out new file mode 100644 index 00000000000..730c916cadc --- /dev/null +++ b/src/test/regress/expected/create_drop_database_propagation_pg16_0.out @@ -0,0 +1,9 @@ +-- +-- PG16 +-- +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 +\gset +\if :server_version_ge_16 +\else +\q diff --git a/src/test/regress/expected/create_ref_dist_from_citus_local.out b/src/test/regress/expected/create_ref_dist_from_citus_local.out index dc67400e0ba..cce7081b079 100644 --- a/src/test/regress/expected/create_ref_dist_from_citus_local.out +++ b/src/test/regress/expected/create_ref_dist_from_citus_local.out @@ -369,9 +369,9 @@ ROLLBACK; \set VERBOSITY DEFAULT -- Test the UDFs that we use to convert Citus local tables to single-shard tables and -- reference tables. -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', 1, true); +SELECT citus_internal.update_none_dist_table_metadata(1, 't', 1, true); ERROR: This is an internal Citus function can only be used in a distributed transaction -SELECT pg_catalog.citus_internal_delete_placement_metadata(1); +SELECT citus_internal.delete_placement_metadata(1); ERROR: This is an internal Citus function can only be used in a distributed transaction CREATE ROLE test_user_create_ref_dist WITH LOGIN; GRANT ALL ON SCHEMA create_ref_dist_from_citus_local TO test_user_create_ref_dist; @@ -393,15 +393,15 @@ SET citus.next_shard_id TO 1850000; SET citus.next_placement_id TO 8510000; SET citus.shard_replication_factor TO 1; SET search_path TO create_ref_dist_from_citus_local; -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(null, 't', 1, true); +SELECT citus_internal.update_none_dist_table_metadata(null, 't', 1, true); ERROR: relation_id cannot be NULL -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, null, 1, true); +SELECT citus_internal.update_none_dist_table_metadata(1, null, 1, true); ERROR: replication_model cannot be NULL -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', null, true); +SELECT citus_internal.update_none_dist_table_metadata(1, 't', null, true); ERROR: colocation_id cannot be NULL -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', 1, null); +SELECT citus_internal.update_none_dist_table_metadata(1, 't', 1, null); ERROR: auto_converted cannot be NULL -SELECT pg_catalog.citus_internal_delete_placement_metadata(null); +SELECT citus_internal.delete_placement_metadata(null); ERROR: placement_id cannot be NULL CREATE TABLE udf_test (col_1 int); SELECT citus_add_local_table_to_metadata('udf_test'); @@ -411,8 +411,8 @@ SELECT citus_add_local_table_to_metadata('udf_test'); (1 row) BEGIN; - SELECT pg_catalog.citus_internal_update_none_dist_table_metadata('create_ref_dist_from_citus_local.udf_test'::regclass, 'k', 99999, true); - 
citus_internal_update_none_dist_table_metadata + SELECT citus_internal.update_none_dist_table_metadata('create_ref_dist_from_citus_local.udf_test'::regclass, 'k', 99999, true); + update_none_dist_table_metadata --------------------------------------------------------------------- (1 row) @@ -426,8 +426,8 @@ BEGIN; SELECT placementid AS udf_test_placementid FROM pg_dist_shard_placement WHERE shardid = get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.udf_test') \gset - SELECT pg_catalog.citus_internal_delete_placement_metadata(:udf_test_placementid); - citus_internal_delete_placement_metadata + SELECT citus_internal.delete_placement_metadata(:udf_test_placementid); + delete_placement_metadata --------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/create_role_propagation.out b/src/test/regress/expected/create_role_propagation.out index 59f7948a115..4d594ddab6c 100644 --- a/src/test/regress/expected/create_role_propagation.out +++ b/src/test/regress/expected/create_role_propagation.out @@ -40,18 +40,10 @@ SELECT master_remove_node('localhost', :worker_2_port); CREATE ROLE create_role_with_everything SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 105 PASSWORD 'strong_password123^' VALID UNTIL '2045-05-05 00:00:00.00+00' IN ROLE create_role, create_group ROLE create_user, create_group_2 ADMIN create_role_2, create_user_2; CREATE ROLE create_role_with_nothing NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT 3 PASSWORD 'weakpassword' VALID UNTIL '2015-05-05 00:00:00.00+00'; --- show that creating role from worker node is only allowed when create role --- propagation is off +-- show that creating role from worker node is allowed \c - - - :worker_1_port CREATE ROLE role_on_worker; -ERROR: operation is not allowed on this node -HINT: Connect to the coordinator and run it again. -BEGIN; -SET citus.enable_create_role_propagation TO off; -CREATE ROLE role_on_worker; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. 
-ROLLBACK; +DROP ROLE role_on_worker; \c - - - :master_port -- edge case role names CREATE ROLE "create_role'edge"; @@ -129,17 +121,17 @@ SELECT 1 FROM master_add_node('localhost', :worker_2_port); SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, (rolpassword != '') as pass_not_empty, rolvaliduntil FROM pg_authid WHERE rolname LIKE 'create\_%' ORDER BY rolname; rolname | rolsuper | rolinherit | rolcreaterole | rolcreatedb | rolcanlogin | rolreplication | rolbypassrls | rolconnlimit | pass_not_empty | rolvaliduntil --------------------------------------------------------------------- - create_group | f | t | f | f | f | f | f | -1 | | infinity - create_group_2 | f | t | f | f | f | f | f | -1 | | infinity - create_role | f | t | f | f | f | f | f | -1 | | infinity - create_role"edge | f | t | f | f | f | f | f | -1 | | infinity - create_role'edge | f | t | f | f | f | f | f | -1 | | infinity - create_role_2 | f | t | f | f | f | f | f | -1 | | infinity - create_role_sysid | f | t | f | f | f | f | f | -1 | | infinity + create_group | f | t | f | f | f | f | f | -1 | | + create_group_2 | f | t | f | f | f | f | f | -1 | | + create_role | f | t | f | f | f | f | f | -1 | | + create_role"edge | f | t | f | f | f | f | f | -1 | | + create_role'edge | f | t | f | f | f | f | f | -1 | | + create_role_2 | f | t | f | f | f | f | f | -1 | | + create_role_sysid | f | t | f | f | f | f | f | -1 | | create_role_with_everything | t | t | t | t | t | t | t | 105 | t | Thu May 04 17:00:00 2045 PDT create_role_with_nothing | f | f | f | f | f | f | f | 3 | t | Mon May 04 17:00:00 2015 PDT - create_user | f | t | f | f | t | f | f | -1 | | infinity - create_user_2 | f | t | f | f | t | f | f | -1 | | infinity + create_user | f | t | f | f | t | f | f | -1 | | + create_user_2 | f | t | f | f | t | f | f | -1 | | (11 rows) SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE 'create\_%' ORDER BY 1, 2; @@ -204,6 +196,7 @@ SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::t (1 row) \c - - - :master_port +create role test_admin_role; -- test grants with distributed and non-distributed roles SELECT master_remove_node('localhost', :worker_2_port); master_remove_node @@ -217,41 +210,84 @@ CREATE ROLE dist_role_3; CREATE ROLE dist_role_4; SET citus.enable_create_role_propagation TO OFF; CREATE ROLE non_dist_role_1 SUPERUSER; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. CREATE ROLE non_dist_role_2; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. CREATE ROLE non_dist_role_3; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. 
CREATE ROLE non_dist_role_4; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. SET citus.enable_create_role_propagation TO ON; +grant dist_role_3,dist_role_1 to test_admin_role with admin option; SET ROLE dist_role_1; GRANT non_dist_role_1 TO non_dist_role_2; SET citus.enable_create_role_propagation TO OFF; +grant dist_role_1 to non_dist_role_1 with admin option; SET ROLE non_dist_role_1; -GRANT dist_role_1 TO dist_role_2; +GRANT dist_role_1 TO dist_role_2 granted by non_dist_role_1; RESET ROLE; SET citus.enable_create_role_propagation TO ON; -GRANT dist_role_3 TO non_dist_role_3; +GRANT dist_role_3 TO non_dist_role_3 granted by test_admin_role; GRANT non_dist_role_4 TO dist_role_4; +GRANT dist_role_3 TO dist_role_4 granted by test_admin_role; SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? --------------------------------------------------------------------- 1 (1 row) -SELECT roleid::regrole::text AS role, member::regrole::text, (grantor::regrole::text IN ('postgres', 'non_dist_role_1', 'dist_role_1')) AS grantor, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT json_agg(q.* ORDER BY member) FROM ( + SELECT member::regrole::text, roleid::regrole::text AS role, grantor::regrole::text, admin_option + FROM pg_auth_members WHERE roleid::regrole::text = 'dist_role_3' + ) q; + $$ +); + result +--------------------------------------------------------------------- + [{"member":"dist_role_4","role":"dist_role_3","grantor":"test_admin_role","admin_option":false}, + + {"member":"non_dist_role_3","role":"dist_role_3","grantor":"test_admin_role","admin_option":false}, + + {"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}] + [{"member":"dist_role_4","role":"dist_role_3","grantor":"test_admin_role","admin_option":false}, + + {"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}] + [{"member":"dist_role_4","role":"dist_role_3","grantor":"test_admin_role","admin_option":false}, + + {"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}] +(3 rows) + +REVOKE dist_role_3 from dist_role_4 granted by test_admin_role cascade; +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT json_agg(q.* ORDER BY member) FROM ( + SELECT member::regrole::text, roleid::regrole::text AS role, grantor::regrole::text, admin_option + FROM pg_auth_members WHERE roleid::regrole::text = 'dist_role_3' + order by member::regrole::text + ) q; + $$ +); + result +--------------------------------------------------------------------- + [{"member":"non_dist_role_3","role":"dist_role_3","grantor":"test_admin_role","admin_option":false}, + + {"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}] + [{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}] + [{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}] +(3 rows) + +SELECT roleid::regrole::text AS role, member::regrole::text, (grantor::regrole::text IN ('postgres', 'non_dist_role_1', 'dist_role_1','test_admin_role')) AS grantor, admin_option FROM pg_auth_members WHERE 
roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; role | member | grantor | admin_option --------------------------------------------------------------------- dist_role_1 | dist_role_2 | t | f + dist_role_1 | non_dist_role_1 | t | t + dist_role_1 | test_admin_role | t | t dist_role_3 | non_dist_role_3 | t | f + dist_role_3 | test_admin_role | t | t non_dist_role_1 | non_dist_role_2 | t | f non_dist_role_4 | dist_role_4 | t | f -(4 rows) +(7 rows) SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%dist\_%' ORDER BY 1; objid @@ -263,6 +299,25 @@ SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid':: non_dist_role_4 (5 rows) +REVOKE dist_role_3 from non_dist_role_3 granted by test_admin_role cascade; +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT json_agg(q.* ORDER BY member) FROM ( + SELECT member::regrole::text, roleid::regrole::text AS role, grantor::regrole::text, admin_option + FROM pg_auth_members WHERE roleid::regrole::text = 'dist_role_3' + order by member::regrole::text + ) q; + $$ +); + result +--------------------------------------------------------------------- + [{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}] + [{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}] + [{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}] +(3 rows) + +revoke dist_role_3,dist_role_1 from test_admin_role cascade; +drop role test_admin_role; \c - - - :worker_1_port SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; role | member | grantor | admin_option @@ -284,9 +339,8 @@ SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist\_%' ORDER BY 1; SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; role | member | grantor | admin_option --------------------------------------------------------------------- - dist_role_1 | dist_role_2 | postgres | f non_dist_role_4 | dist_role_4 | postgres | f -(2 rows) +(1 row) SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist\_%' ORDER BY 1; rolname @@ -307,11 +361,11 @@ CREATE ROLE dist_mixed_3; CREATE ROLE dist_mixed_4; SET citus.enable_create_role_propagation TO OFF; CREATE ROLE nondist_mixed_1; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. CREATE ROLE nondist_mixed_2; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. 
SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_mixed%' ORDER BY 1, 2; role | member | grantor | admin_option --------------------------------------------------------------------- @@ -506,14 +560,14 @@ SELECT rolname, rolcanlogin FROM pg_authid WHERE rolname = 'create_role' OR roln -- test cascading grants SET citus.enable_create_role_propagation TO OFF; CREATE ROLE nondist_cascade_1; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. CREATE ROLE nondist_cascade_2; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. CREATE ROLE nondist_cascade_3; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. SET citus.enable_create_role_propagation TO ON; CREATE ROLE dist_cascade; GRANT nondist_cascade_1 TO nondist_cascade_2; @@ -696,3 +750,4 @@ SELECT rolname FROM pg_authid WHERE rolname LIKE '%existing%' ORDER BY 1; (0 rows) \c - - - :master_port +DROP ROLE nondist_cascade_1, nondist_cascade_2, nondist_cascade_3, dist_cascade; diff --git a/src/test/regress/expected/detect_conn_close.out b/src/test/regress/expected/detect_conn_close.out index ad758f32e83..60973de76c0 100644 --- a/src/test/regress/expected/detect_conn_close.out +++ b/src/test/regress/expected/detect_conn_close.out @@ -128,7 +128,7 @@ BEGIN; (1 row) SELECT count(*) FROM socket_test_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open ROLLBACK; -- repartition joins also can recover SET citus.enable_repartition_joins TO on; diff --git a/src/test/regress/expected/distributed_domain.out b/src/test/regress/expected/distributed_domain.out index 30e388803f6..6fdb348ebd6 100644 --- a/src/test/regress/expected/distributed_domain.out +++ b/src/test/regress/expected/distributed_domain.out @@ -680,16 +680,7 @@ SELECT * FROM use_age_invalid ORDER BY 1; -- verify we can validate a constraint that is already validated, can happen when we add a node while a domain constraint was not validated ALTER DOMAIN age_invalid VALIDATE CONSTRAINT check_age_positive; -- test changing the owner of a domain -SET client_min_messages TO error; -SELECT 1 FROM run_command_on_workers($$ CREATE ROLE domain_owner; $$); - ?column? 
---------------------------------------------------------------------- - 1 - 1 -(2 rows) - CREATE ROLE domain_owner; -RESET client_min_messages; CREATE DOMAIN alter_domain_owner AS int; ALTER DOMAIN alter_domain_owner OWNER TO domain_owner; SELECT u.rolname diff --git a/src/test/regress/expected/drop_database.out b/src/test/regress/expected/drop_database.out index d150cc8d3ac..4d68f4c0d35 100644 --- a/src/test/regress/expected/drop_database.out +++ b/src/test/regress/expected/drop_database.out @@ -6,14 +6,14 @@ SET citus.shard_replication_factor TO 1; SET citus.next_shard_id TO 35137400; CREATE DATABASE citus_created; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. \c citus_created CREATE EXTENSION citus; CREATE DATABASE citus_not_created; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. \c citus_not_created DROP DATABASE citus_created; \c regression @@ -26,14 +26,14 @@ SET citus.shard_replication_factor TO 1; SET citus.next_shard_id TO 35137400; CREATE DATABASE citus_created; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. \c citus_created CREATE EXTENSION citus; CREATE DATABASE citus_not_created; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. 
\c citus_not_created DROP DATABASE citus_created; \c regression diff --git a/src/test/regress/expected/drop_partitioned_table.out b/src/test/regress/expected/drop_partitioned_table.out index 660adb89cd8..a92dee71112 100644 --- a/src/test/regress/expected/drop_partitioned_table.out +++ b/src/test/regress/expected/drop_partitioned_table.out @@ -354,8 +354,8 @@ NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.pa NOTICE: issuing DROP TABLE IF EXISTS drop_partitioned_table.parent_xxxxx CASCADE NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1') NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1') -NOTICE: issuing SELECT pg_catalog.citus_internal_delete_colocation_metadata(1344400) -NOTICE: issuing SELECT pg_catalog.citus_internal_delete_colocation_metadata(1344400) +NOTICE: issuing SELECT citus_internal.delete_colocation_metadata(1344400) +NOTICE: issuing SELECT citus_internal.delete_colocation_metadata(1344400) ROLLBACK; NOTICE: issuing ROLLBACK NOTICE: issuing ROLLBACK @@ -377,8 +377,8 @@ NOTICE: issuing DROP TABLE IF EXISTS drop_partitioned_table.parent_xxxxx CASCAD NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1') NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1') NOTICE: issuing DROP TABLE IF EXISTS drop_partitioned_table.child1_xxxxx CASCADE -NOTICE: issuing SELECT pg_catalog.citus_internal_delete_colocation_metadata(1344400) -NOTICE: issuing SELECT pg_catalog.citus_internal_delete_colocation_metadata(1344400) +NOTICE: issuing SELECT citus_internal.delete_colocation_metadata(1344400) +NOTICE: issuing SELECT citus_internal.delete_colocation_metadata(1344400) ROLLBACK; NOTICE: issuing ROLLBACK NOTICE: issuing ROLLBACK diff --git a/src/test/regress/expected/failure_connection_establishment.out b/src/test/regress/expected/failure_connection_establishment.out index d032755ddcf..f23f11d2b1d 100644 --- a/src/test/regress/expected/failure_connection_establishment.out +++ b/src/test/regress/expected/failure_connection_establishment.out @@ -84,7 +84,7 @@ SELECT citus.mitmproxy('conn.connect_delay(1400)'); ALTER TABLE products ADD CONSTRAINT p_key PRIMARY KEY(product_no); WARNING: could not establish connection after 900 ms -ERROR: connection to the remote node localhost:xxxxx failed +ERROR: connection to the remote node postgres@localhost:xxxxx failed RESET citus.node_connection_timeout; SELECT citus.mitmproxy('conn.allow()'); mitmproxy diff --git a/src/test/regress/expected/failure_copy_on_hash.out b/src/test/regress/expected/failure_copy_on_hash.out index 24350f7076a..424ab0da85d 100644 --- a/src/test/regress/expected/failure_copy_on_hash.out +++ b/src/test/regress/expected/failure_copy_on_hash.out @@ -36,7 +36,7 @@ SELECT citus.mitmproxy('conn.kill()'); (1 row) \COPY test_table FROM stdin delimiter ','; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open CONTEXT: COPY test_table, line 1: "1,2" SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -271,7 +271,7 @@ SELECT citus.mitmproxy('conn.kill()'); (1 row) \COPY test_table_2 FROM stdin delimiter ','; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection 
not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- diff --git a/src/test/regress/expected/failure_create_database.out b/src/test/regress/expected/failure_create_database.out new file mode 100644 index 00000000000..81fcd451965 --- /dev/null +++ b/src/test/regress/expected/failure_create_database.out @@ -0,0 +1,386 @@ +SET citus.enable_create_database_propagation TO ON; +SET client_min_messages TO WARNING; +SELECT 1 FROM citus_add_node('localhost', :master_port, 0); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +CREATE FUNCTION get_temp_databases_on_nodes() +RETURNS TEXT AS $func$ + SELECT array_agg(DISTINCT result ORDER BY result) AS temp_databases_on_nodes FROM run_command_on_all_nodes($$SELECT datname FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$) WHERE result != ''; +$func$ +LANGUAGE sql; +CREATE FUNCTION count_db_cleanup_records() +RETURNS TABLE(object_name TEXT, count INTEGER) AS $func$ + SELECT object_name, COUNT(*) FROM pg_dist_cleanup WHERE object_name LIKE 'citus_temp_database_%' GROUP BY object_name; +$func$ +LANGUAGE sql; +CREATE FUNCTION ensure_no_temp_databases_on_any_nodes() +RETURNS BOOLEAN AS $func$ + SELECT bool_and(result::boolean) AS no_temp_databases_on_any_nodes FROM run_command_on_all_nodes($$SELECT COUNT(*)=0 FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$); +$func$ +LANGUAGE sql; +-- cleanup any orphaned resources from previous runs +CALL citus_cleanup_orphaned_resources(); +SET citus.next_operation_id TO 4000; +ALTER SYSTEM SET citus.defer_shard_delete_interval TO -1; +SELECT pg_reload_conf(); + pg_reload_conf +--------------------------------------------------------------------- + t +(1 row) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +SELECT citus.mitmproxy('conn.kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE DATABASE db1; +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT get_temp_databases_on_nodes(); + get_temp_databases_on_nodes +--------------------------------------------------------------------- + +(1 row) + +SELECT * FROM count_db_cleanup_records(); + object_name | count +--------------------------------------------------------------------- +(0 rows) + +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); + ensure_no_temp_databases_on_any_nodes +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT 
citus.mitmproxy('conn.onQuery(query="^CREATE DATABASE").cancel(' || pg_backend_pid() || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE DATABASE db1; +ERROR: canceling statement due to user request +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT get_temp_databases_on_nodes(); + get_temp_databases_on_nodes +--------------------------------------------------------------------- + {citus_temp_database_4000_0} +(1 row) + +SELECT * FROM count_db_cleanup_records(); + object_name | count +--------------------------------------------------------------------- + citus_temp_database_4000_0 | 3 +(1 row) + +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); + ensure_no_temp_databases_on_any_nodes +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT citus.mitmproxy('conn.onQuery(query="^ALTER DATABASE").cancel(' || pg_backend_pid() || ')'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE DATABASE db1; +ERROR: canceling statement due to user request +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT get_temp_databases_on_nodes(); + get_temp_databases_on_nodes +--------------------------------------------------------------------- + {citus_temp_database_4001_0} +(1 row) + +SELECT * FROM count_db_cleanup_records(); + object_name | count +--------------------------------------------------------------------- + citus_temp_database_4001_0 | 3 +(1 row) + +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); + ensure_no_temp_databases_on_any_nodes +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE DATABASE db1; +ERROR: connection to the remote node 
postgres@localhost:xxxxx failed with the following error: connection not open +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT get_temp_databases_on_nodes(); + get_temp_databases_on_nodes +--------------------------------------------------------------------- + +(1 row) + +SELECT * FROM count_db_cleanup_records(); + object_name | count +--------------------------------------------------------------------- +(0 rows) + +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); + ensure_no_temp_databases_on_any_nodes +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE DATABASE db1; +ERROR: connection not open +CONTEXT: while executing command on localhost:xxxxx +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT get_temp_databases_on_nodes(); + get_temp_databases_on_nodes +--------------------------------------------------------------------- + {citus_temp_database_4002_0} +(1 row) + +SELECT * FROM count_db_cleanup_records(); + object_name | count +--------------------------------------------------------------------- + citus_temp_database_4002_0 | 3 +(1 row) + +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); + ensure_no_temp_databases_on_any_nodes +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT PREPARED").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE DATABASE db1; +WARNING: connection not open +CONTEXT: while executing command on localhost:xxxxx +WARNING: failed to commit transaction on localhost:xxxxx +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +-- not call citus_cleanup_orphaned_resources() 
but recover the prepared transactions this time +SELECT 1 FROM recover_prepared_transactions(); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT ensure_no_temp_databases_on_any_nodes(); + ensure_no_temp_databases_on_any_nodes +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "db1", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "db1", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "db1", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +DROP DATABASE db1; +-- after recovering the prepared transactions, cleanup records should also be removed +SELECT * FROM count_db_cleanup_records(); + object_name | count +--------------------------------------------------------------------- +(0 rows) + +SELECT citus.mitmproxy('conn.onQuery(query="^SELECT citus_internal.acquire_citus_advisory_object_class_lock").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE DATABASE db1; +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT get_temp_databases_on_nodes(); + get_temp_databases_on_nodes +--------------------------------------------------------------------- + +(1 row) + +SELECT * FROM count_db_cleanup_records(); + object_name | count +--------------------------------------------------------------------- +(0 rows) + +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); + ensure_no_temp_databases_on_any_nodes +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | 
{"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +SELECT citus.mitmproxy('conn.onParse(query="^WITH distributed_object_data").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE DATABASE db1; +ERROR: connection not open +CONTEXT: while executing command on localhost:xxxxx +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT get_temp_databases_on_nodes(); + get_temp_databases_on_nodes +--------------------------------------------------------------------- + {citus_temp_database_4004_0} +(1 row) + +SELECT * FROM count_db_cleanup_records(); + object_name | count +--------------------------------------------------------------------- + citus_temp_database_4004_0 | 3 +(1 row) + +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); + ensure_no_temp_databases_on_any_nodes +--------------------------------------------------------------------- + t +(1 row) + +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +CREATE DATABASE db1; +-- show that a successful database creation doesn't leave any pg_dist_cleanup records behind +SELECT * FROM count_db_cleanup_records(); + object_name | count +--------------------------------------------------------------------- +(0 rows) + +DROP DATABASE db1; +DROP FUNCTION get_temp_databases_on_nodes(); +DROP FUNCTION ensure_no_temp_databases_on_any_nodes(); +DROP FUNCTION count_db_cleanup_records(); +SELECT 1 FROM citus_remove_node('localhost', :master_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + diff --git a/src/test/regress/expected/failure_create_distributed_table_non_empty.out b/src/test/regress/expected/failure_create_distributed_table_non_empty.out index 0e4b857013e..109d3686f58 100644 --- a/src/test/regress/expected/failure_create_distributed_table_non_empty.out +++ b/src/test/regress/expected/failure_create_distributed_table_non_empty.out @@ -28,7 +28,7 @@ SELECT citus.mitmproxy('conn.kill()'); (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count --------------------------------------------------------------------- @@ -164,7 +164,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count --------------------------------------------------------------------- @@ -436,7 +436,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count --------------------------------------------------------------------- @@ -519,7 +519,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^SELECT worker_apply_shard_ddl_comma (1 row) SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count --------------------------------------------------------------------- @@ -680,7 +680,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count --------------------------------------------------------------------- @@ -901,7 +901,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL R (1 row) SELECT create_distributed_table('test_table', 'id', colocate_with => 'colocated_table'); 
-ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT count(*) FROM pg_dist_shard WHERE logicalrelid='create_distributed_table_non_empty_failure.test_table'::regclass; count --------------------------------------------------------------------- diff --git a/src/test/regress/expected/failure_create_index_concurrently.out b/src/test/regress/expected/failure_create_index_concurrently.out index a198ddc7087..947d5711e3e 100644 --- a/src/test/regress/expected/failure_create_index_concurrently.out +++ b/src/test/regress/expected/failure_create_index_concurrently.out @@ -26,9 +26,10 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE").kill()'); (1 row) CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1); -WARNING: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. -Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index. -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. +If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, +if applicable, and then re-attempt the original command. +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -59,9 +60,10 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE").kill()'); (1 row) CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1); -WARNING: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. -Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index. -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. +If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, +if applicable, and then re-attempt the original command. +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -86,8 +88,9 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE").cancel(' || pg_backend_pid( (1 row) CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1); -WARNING: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. -Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index. +WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. +If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, +if applicable, and then re-attempt the original command. 
ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -111,8 +114,9 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE").cancel(' || pg_backend_pid( (1 row) CREATE INDEX CONCURRENTLY idx_index_test ON index_test(id, value_1); -WARNING: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. -Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index. +WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. +If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, +if applicable, and then re-attempt the original command. ERROR: canceling statement due to user request SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -137,9 +141,10 @@ SELECT citus.mitmproxy('conn.onQuery(query="DROP INDEX CONCURRENTLY").kill()'); (1 row) DROP INDEX CONCURRENTLY IF EXISTS idx_index_test; -WARNING: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. -Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index. -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. +If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, +if applicable, and then re-attempt the original command. +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -164,8 +169,9 @@ SELECT create_distributed_table('index_test_2', 'a'); INSERT INTO index_test_2 VALUES (1, 1), (1, 2); CREATE UNIQUE INDEX CONCURRENTLY index_test_2_a_idx ON index_test_2(a); -WARNING: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. -Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index. +WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. +If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, +if applicable, and then re-attempt the original command. ERROR: could not create unique index "index_test_2_a_idx_1880019" DETAIL: Key (a)=(1) is duplicated. 
CONTEXT: while executing command on localhost:xxxxx diff --git a/src/test/regress/expected/failure_create_table.out b/src/test/regress/expected/failure_create_table.out index 5d022d6784d..956bdb2b2f8 100644 --- a/src/test/regress/expected/failure_create_table.out +++ b/src/test/regress/expected/failure_create_table.out @@ -22,7 +22,7 @@ SELECT citus.mitmproxy('conn.kill()'); (1 row) SELECT create_distributed_table('test_table','id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -116,7 +116,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_apply_shard_ddl_comman (1 row) SELECT create_distributed_table('test_table','id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -147,7 +147,7 @@ BEGIN; (1 row) SELECT create_distributed_table('test_table', 'id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open COMMIT; SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -215,7 +215,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); (1 row) SELECT create_distributed_table('test_table','id',colocate_with=>'temp_table'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -484,7 +484,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE").kill()'); BEGIN; SELECT create_distributed_table('test_table','id'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open ROLLBACK; SELECT citus.mitmproxy('conn.allow()'); mitmproxy diff --git a/src/test/regress/expected/failure_cte_subquery.out b/src/test/regress/expected/failure_cte_subquery.out index 19fb11f379f..89e3e1489c5 100644 --- a/src/test/regress/expected/failure_cte_subquery.out +++ b/src/test/regress/expected/failure_cte_subquery.out @@ -86,7 +86,7 @@ FROM ORDER BY 1 DESC LIMIT 5 ) as foo WHERE foo.user_id = cte.user_id; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- kill at the third copy (pull) SELECT citus.mitmproxy('conn.onQuery(query="SELECT DISTINCT users_table.user").kill()'); mitmproxy @@ -117,7 +117,7 @@ FROM ORDER BY 1 DESC LIMIT 5 ) as foo WHERE foo.user_id = cte.user_id; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node 
postgres@localhost:xxxxx failed with the following error: connection not open -- cancel at the first copy (push) SELECT citus.mitmproxy('conn.onQuery(query="^COPY").cancel(' || :pid || ')'); mitmproxy @@ -254,7 +254,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^DELETE FROM").kill()'); WITH cte_delete AS MATERIALIZED (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *) INSERT INTO users_table SELECT * FROM cte_delete; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify contents are the same SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -365,7 +365,7 @@ BEGIN; SET LOCAL citus.multi_shard_modify_mode = 'sequential'; WITH cte_delete AS MATERIALIZED (DELETE FROM users_table WHERE user_name in ('A', 'D') RETURNING *) INSERT INTO users_table SELECT * FROM cte_delete; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open END; RESET SEARCH_PATH; SELECT citus.mitmproxy('conn.allow()'); diff --git a/src/test/regress/expected/failure_ddl.out b/src/test/regress/expected/failure_ddl.out index 77b134a728b..2f55663a0fd 100644 --- a/src/test/regress/expected/failure_ddl.out +++ b/src/test/regress/expected/failure_ddl.out @@ -36,7 +36,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg --------------------------------------------------------------------- @@ -99,7 +99,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kil (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- show that we've never commited the changes SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg @@ -300,7 +300,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); (1 row) ALTER TABLE test_table DROP COLUMN new_column; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg --------------------------------------------------------------------- @@ -361,7 +361,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kil (1 row) ALTER TABLE test_table DROP COLUMN new_column; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT array_agg(name::text 
ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg --------------------------------------------------------------------- @@ -661,7 +661,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg --------------------------------------------------------------------- @@ -722,7 +722,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kil (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT array_agg(name::text ORDER BY name::text) FROM public.table_attrs where relid = 'test_table'::regclass; array_agg --------------------------------------------------------------------- @@ -1010,7 +1010,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").kil (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- kill as soon as the coordinator after it sends worker_apply_shard_ddl_command 2nd time SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").after(2).kill()'); mitmproxy @@ -1019,7 +1019,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").aft (1 row) ALTER TABLE test_table ADD COLUMN new_column INT; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- cancel as soon as the coordinator after it sends worker_apply_shard_ddl_command 2nd time SELECT citus.mitmproxy('conn.onQuery(query="worker_apply_shard_ddl_command").after(2).cancel(' || pg_backend_pid() || ')'); mitmproxy diff --git a/src/test/regress/expected/failure_distributed_results.out b/src/test/regress/expected/failure_distributed_results.out index fc97c9af609..5a246105785 100644 --- a/src/test/regress/expected/failure_distributed_results.out +++ b/src/test/regress/expected/failure_distributed_results.out @@ -14,6 +14,8 @@ SELECT citus.mitmproxy('conn.allow()'); (1 row) SET citus.next_shard_id TO 100800; +-- Needed because of issue #7306 +SET citus.force_max_query_parallelization TO true; -- always try the 1st replica before the 2nd replica. 
SET citus.task_assignment_policy TO 'first-replica'; -- @@ -86,7 +88,7 @@ CREATE TABLE distributed_result_info AS SELECT resultId, nodeport, rowcount, targetShardId, targetShardIndex FROM partition_task_list_results('test', $$ SELECT * FROM source_table $$, 'target_table') NATURAL JOIN pg_dist_node; -WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open +WARNING: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT * FROM distributed_result_info ORDER BY resultId; resultid | nodeport | rowcount | targetshardid | targetshardindex --------------------------------------------------------------------- diff --git a/src/test/regress/expected/failure_failover_to_local_execution.out b/src/test/regress/expected/failure_failover_to_local_execution.out index 56518141ac6..20ad2a6df7e 100644 --- a/src/test/regress/expected/failure_failover_to_local_execution.out +++ b/src/test/regress/expected/failure_failover_to_local_execution.out @@ -101,7 +101,7 @@ NOTICE: issuing SELECT count(*) AS count FROM failure_failover_to_local_executi DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SELECT count(*) AS count FROM failure_failover_to_local_execution.failover_to_local_1980003 failover_to_local WHERE true DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open +WARNING: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open NOTICE: executing the command locally: SELECT count(*) AS count FROM failure_failover_to_local_execution.failover_to_local_1980000 failover_to_local WHERE true NOTICE: executing the command locally: SELECT count(*) AS count FROM failure_failover_to_local_execution.failover_to_local_1980002 failover_to_local WHERE true count diff --git a/src/test/regress/expected/failure_insert_select_pushdown.out b/src/test/regress/expected/failure_insert_select_pushdown.out index ed461d0407f..570bf22f9bb 100644 --- a/src/test/regress/expected/failure_insert_select_pushdown.out +++ b/src/test/regress/expected/failure_insert_select_pushdown.out @@ -44,7 +44,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown" (1 row) INSERT INTO events_summary SELECT user_id, event_id, count(*) FROM events_table GROUP BY 1,2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open --verify nothing is modified SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -95,7 +95,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT INTO insert_select_pushdown" (1 row) INSERT INTO events_table SELECT * FROM events_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open --verify nothing is modified SELECT citus.mitmproxy('conn.allow()'); mitmproxy diff --git a/src/test/regress/expected/failure_insert_select_repartition.out b/src/test/regress/expected/failure_insert_select_repartition.out index d453182081d..ca36f7e882e 100644 --- a/src/test/regress/expected/failure_insert_select_repartition.out +++ 
b/src/test/regress/expected/failure_insert_select_repartition.out @@ -55,7 +55,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_partition_query_result").kill (1 row) INSERT INTO target_table SELECT * FROM source_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT * FROM target_table ORDER BY a; a | b --------------------------------------------------------------------- @@ -68,7 +68,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_partition_query_result").kill (1 row) INSERT INTO target_table SELECT * FROM replicated_source_table; -WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open +WARNING: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT * FROM target_table ORDER BY a; a | b --------------------------------------------------------------------- @@ -138,7 +138,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="read_intermediate_results").kill()') (1 row) INSERT INTO target_table SELECT * FROM source_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT * FROM target_table ORDER BY a; a | b --------------------------------------------------------------------- @@ -151,7 +151,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="read_intermediate_results").kill()') (1 row) INSERT INTO target_table SELECT * FROM replicated_source_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT * FROM target_table ORDER BY a; a | b --------------------------------------------------------------------- @@ -168,7 +168,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="read_intermediate_results").kill()') (1 row) INSERT INTO replicated_target_table SELECT * FROM source_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT * FROM replicated_target_table; a | b --------------------------------------------------------------------- diff --git a/src/test/regress/expected/failure_multi_dml.out b/src/test/regress/expected/failure_multi_dml.out index bbea2c99959..7757f574c93 100644 --- a/src/test/regress/expected/failure_multi_dml.out +++ b/src/test/regress/expected/failure_multi_dml.out @@ -33,7 +33,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE").kill()'); BEGIN; DELETE FROM dml_test WHERE id = 1; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open DELETE FROM dml_test WHERE id = 2; ERROR: current transaction is aborted, commands ignored until end of transaction block INSERT INTO dml_test VALUES (5, 'Epsilon'); @@ -93,7 +93,7 @@ BEGIN; DELETE FROM dml_test WHERE id = 1; DELETE FROM dml_test WHERE id = 2; INSERT INTO dml_test VALUES (5, 'Epsilon'); -ERROR: connection to the remote node 
localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open UPDATE dml_test SET name = 'alpha' WHERE id = 1; ERROR: current transaction is aborted, commands ignored until end of transaction block UPDATE dml_test SET name = 'gamma' WHERE id = 3; @@ -148,7 +148,7 @@ DELETE FROM dml_test WHERE id = 1; DELETE FROM dml_test WHERE id = 2; INSERT INTO dml_test VALUES (5, 'Epsilon'); UPDATE dml_test SET name = 'alpha' WHERE id = 1; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open UPDATE dml_test SET name = 'gamma' WHERE id = 3; ERROR: current transaction is aborted, commands ignored until end of transaction block COMMIT; diff --git a/src/test/regress/expected/failure_multi_row_insert.out b/src/test/regress/expected/failure_multi_row_insert.out index f3cd4919af2..8feffbaeb4b 100644 --- a/src/test/regress/expected/failure_multi_row_insert.out +++ b/src/test/regress/expected/failure_multi_row_insert.out @@ -43,7 +43,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="INSERT").kill()'); (1 row) INSERT INTO distributed_table VALUES (1,1), (1,2), (1,3); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- this test is broken, see https://github.com/citusdata/citus/issues/2460 -- SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')'); -- INSERT INTO distributed_table VALUES (1,4), (1,5), (1,6); @@ -55,7 +55,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); (1 row) INSERT INTO distributed_table VALUES (1,7), (5,8); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- this test is broken, see https://github.com/citusdata/citus/issues/2460 -- SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')'); -- INSERT INTO distributed_table VALUES (1,9), (5,10); @@ -67,7 +67,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); (1 row) INSERT INTO distributed_table VALUES (1,11), (6,12); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')'); mitmproxy --------------------------------------------------------------------- @@ -84,7 +84,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").after(1).kill()'); (1 row) INSERT INTO distributed_table VALUES (1,15), (6,16); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").after(1).cancel(' || :pid || ')'); mitmproxy --------------------------------------------------------------------- @@ -101,7 +101,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").kill()'); (1 row) INSERT INTO 
distributed_table VALUES (2,19),(1,20); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.onQuery(query="^INSERT").cancel(' || :pid || ')'); mitmproxy --------------------------------------------------------------------- diff --git a/src/test/regress/expected/failure_multi_shard_update_delete.out b/src/test/regress/expected/failure_multi_shard_update_delete.out index 24cb895ea6d..27284ec38f3 100644 --- a/src/test/regress/expected/failure_multi_shard_update_delete.out +++ b/src/test/regress/expected/failure_multi_shard_update_delete.out @@ -58,7 +58,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()'); -- issue a multi shard delete DELETE FROM t2 WHERE b = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify nothing is deleted SELECT count(*) FROM t2; count @@ -74,7 +74,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005"). (1 row) DELETE FROM t2 WHERE b = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify nothing is deleted SELECT count(*) FROM t2; count @@ -134,7 +134,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); -- issue a multi shard update UPDATE t2 SET c = 4 WHERE b = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; b2 | c4 @@ -150,7 +150,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").kill( (1 row) UPDATE t2 SET c = 4 WHERE b = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; b2 | c4 @@ -202,7 +202,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()'); -- issue a multi shard delete DELETE FROM t2 WHERE b = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify nothing is deleted SELECT count(*) FROM t2; count @@ -218,7 +218,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM multi_shard.t2_201005"). 
(1 row) DELETE FROM t2 WHERE b = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify nothing is deleted SELECT count(*) FROM t2; count @@ -278,7 +278,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^UPDATE").kill()'); -- issue a multi shard update UPDATE t2 SET c = 4 WHERE b = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; b2 | c4 @@ -294,7 +294,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t2_201005").kill( (1 row) UPDATE t2 SET c = 4 WHERE b = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 2) AS b2, count(*) FILTER (WHERE c = 4) AS c4 FROM t2; b2 | c4 @@ -364,7 +364,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()'); (1 row) DELETE FROM r1 WHERE a = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify nothing is deleted SELECT count(*) FILTER (WHERE b = 2) AS b2 FROM t2; b2 @@ -379,7 +379,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM").kill()'); (1 row) DELETE FROM t2 WHERE b = 2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify nothing is deleted SELECT count(*) FILTER (WHERE b = 2) AS b2 FROM t2; b2 @@ -459,7 +459,7 @@ UPDATE t3 SET c = q.c FROM ( SELECT b, max(c) as c FROM t2 GROUP BY b) q WHERE t3.b = q.b RETURNING *; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open --- verify nothing is updated SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -515,7 +515,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="UPDATE multi_shard.t3_201013").kill( (1 row) UPDATE t3 SET b = 2 WHERE b = 1; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; b1 | b2 @@ -547,7 +547,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO -- following will fail UPDATE t3 SET b = 2 WHERE b = 1; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open END; -- verify everything is rolled back SELECT count(*) FILTER (WHERE b = 1) b1, count(*) 
FILTER (WHERE b = 2) AS b2 FROM t2; @@ -563,7 +563,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO (1 row) UPDATE t3 SET b = 1 WHERE b = 2 RETURNING *; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; b1 | b2 @@ -578,7 +578,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO (1 row) UPDATE t3 SET b = 2 WHERE b = 1; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- verify nothing is updated SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t3; b1 | b2 @@ -610,7 +610,7 @@ SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FRO -- following will fail UPDATE t3 SET b = 2 WHERE b = 1; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open END; -- verify everything is rolled back SELECT count(*) FILTER (WHERE b = 1) b1, count(*) FILTER (WHERE b = 2) AS b2 FROM t2; diff --git a/src/test/regress/expected/failure_mx_metadata_sync.out b/src/test/regress/expected/failure_mx_metadata_sync.out index 7b4c04ff8c7..c2418e9ab22 100644 --- a/src/test/regress/expected/failure_mx_metadata_sync.out +++ b/src/test/regress/expected/failure_mx_metadata_sync.out @@ -132,7 +132,7 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port; -- Check failures on DDL command propagation CREATE TABLE t2 (id int PRIMARY KEY); -SELECT citus.mitmproxy('conn.onParse(query="citus_internal_add_placement_metadata").kill()'); +SELECT citus.mitmproxy('conn.onParse(query="citus_internal.add_placement_metadata").kill()'); mitmproxy --------------------------------------------------------------------- @@ -140,7 +140,7 @@ SELECT citus.mitmproxy('conn.onParse(query="citus_internal_add_placement_metadat SELECT create_distributed_table('t2', 'id'); ERROR: connection not open -SELECT citus.mitmproxy('conn.onParse(query="citus_internal_add_shard_metadata").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onParse(query="citus_internal.add_shard_metadata").cancel(' || :pid || ')'); mitmproxy --------------------------------------------------------------------- diff --git a/src/test/regress/expected/failure_mx_metadata_sync_multi_trans.out b/src/test/regress/expected/failure_mx_metadata_sync_multi_trans.out index 541bce5c511..e71f092c345 100644 --- a/src/test/regress/expected/failure_mx_metadata_sync_multi_trans.out +++ b/src/test/regress/expected/failure_mx_metadata_sync_multi_trans.out @@ -155,7 +155,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="UPDATE pg_dist_local_group SET group (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to drop node metadata SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM 
pg_dist_node").cancel(' || :pid || ')'); mitmproxy @@ -172,7 +172,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_node").kill()'); (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to send node metadata SELECT citus.mitmproxy('conn.onQuery(query="INSERT INTO pg_dist_node").cancel(' || :pid || ')'); mitmproxy @@ -189,7 +189,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="INSERT INTO pg_dist_node").kill()'); (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to drop sequence dependency for all tables SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_drop_sequence_dependency.*FROM pg_dist_partition").cancel(' || :pid || ')'); mitmproxy @@ -206,7 +206,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_drop_sequen (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to drop shell table SELECT citus.mitmproxy('conn.onQuery(query="CALL pg_catalog.worker_drop_all_shell_tables").cancel(' || :pid || ')'); mitmproxy @@ -223,7 +223,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CALL pg_catalog.worker_drop_all_shel (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to delete all pg_dist_partition metadata SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_partition").cancel(' || :pid || ')'); mitmproxy @@ -240,7 +240,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_partition").kill (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to delete all pg_dist_shard metadata SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_shard").cancel(' || :pid || ')'); mitmproxy @@ -257,7 +257,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_shard").kill()') (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to delete all pg_dist_placement metadata SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_placement").cancel(' || :pid || ')'); mitmproxy @@ -274,7 +274,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_dist_placement").kill (1 row) SELECT 
citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to delete all pg_dist_object metadata SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_catalog.pg_dist_object").cancel(' || :pid || ')'); mitmproxy @@ -291,7 +291,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_catalog.pg_dist_objec (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to delete all pg_dist_colocation metadata SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_catalog.pg_dist_colocation").cancel(' || :pid || ')'); mitmproxy @@ -308,7 +308,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DELETE FROM pg_catalog.pg_dist_coloc (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to alter or create role SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_alter_role").cancel(' || :pid || ')'); mitmproxy @@ -325,7 +325,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_alter_role") (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to set database owner SELECT citus.mitmproxy('conn.onQuery(query="ALTER DATABASE.*OWNER TO").cancel(' || :pid || ')'); mitmproxy @@ -342,7 +342,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="ALTER DATABASE.*OWNER TO").kill()'); (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create schema SELECT citus.mitmproxy('conn.onQuery(query="CREATE SCHEMA IF NOT EXISTS mx_metadata_sync_multi_trans AUTHORIZATION").cancel(' || :pid || ')'); mitmproxy @@ -359,7 +359,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE SCHEMA IF NOT EXISTS mx_metad (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create collation SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_object.*CREATE COLLATION mx_metadata_sync_multi_trans.german_phonebook").cancel(' || :pid || ')'); mitmproxy @@ -376,7 +376,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_obje (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: 
connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create function SELECT citus.mitmproxy('conn.onQuery(query="CREATE OR REPLACE FUNCTION mx_metadata_sync_multi_trans.one_as_result").cancel(' || :pid || ')'); mitmproxy @@ -393,7 +393,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE OR REPLACE FUNCTION mx_metada (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create text search dictionary SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_object.*my_german_dict").cancel(' || :pid || ')'); mitmproxy @@ -410,7 +410,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_obje (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create text search config SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_object.*my_ts_config").cancel(' || :pid || ')'); mitmproxy @@ -427,7 +427,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_obje (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create type SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_object.*pair_type").cancel(' || :pid || ')'); mitmproxy @@ -444,7 +444,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_create_or_replace_obje (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create publication SELECT citus.mitmproxy('conn.onQuery(query="CREATE PUBLICATION.*pub_all").cancel(' || :pid || ')'); mitmproxy @@ -461,7 +461,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE PUBLICATION.*pub_all").kill() (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create sequence SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_apply_sequence_command").cancel(' || :pid || ')'); mitmproxy @@ -478,7 +478,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT worker_apply_sequence_command (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to drop sequence dependency for distributed table 
SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_drop_sequence_dependency.*mx_metadata_sync_multi_trans.dist1").cancel(' || :pid || ')'); mitmproxy @@ -495,7 +495,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_drop_sequen (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to drop distributed table if exists SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS mx_metadata_sync_multi_trans.dist1").cancel(' || :pid || ')'); mitmproxy @@ -512,7 +512,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="DROP TABLE IF EXISTS mx_metadata_syn (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create distributed table SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.dist1").cancel(' || :pid || ')'); mitmproxy @@ -529,7 +529,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_ (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to record sequence dependency for table SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_record_sequence_dependency").cancel(' || :pid || ')'); mitmproxy @@ -546,7 +546,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.worker_record_sequ (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create index for table SELECT citus.mitmproxy('conn.onQuery(query="CREATE INDEX dist1_search_phone_idx ON mx_metadata_sync_multi_trans.dist1 USING gin").cancel(' || :pid || ')'); mitmproxy @@ -563,7 +563,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE INDEX dist1_search_phone_idx (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create reference table SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.ref").cancel(' || :pid || ')'); mitmproxy @@ -580,7 +580,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_ (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create local table SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE 
mx_metadata_sync_multi_trans.loc1").cancel(' || :pid || ')'); mitmproxy @@ -597,7 +597,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_ (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create distributed partitioned table SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.orders").cancel(' || :pid || ')'); mitmproxy @@ -614,7 +614,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_ (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to create distributed partition table SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_trans.orders_p2020_01_05").cancel(' || :pid || ')'); mitmproxy @@ -631,7 +631,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="CREATE TABLE mx_metadata_sync_multi_ (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to attach partition SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE mx_metadata_sync_multi_trans.orders ATTACH PARTITION mx_metadata_sync_multi_trans.orders_p2020_01_05").cancel(' || :pid || ')'); mitmproxy @@ -648,9 +648,9 @@ SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE mx_metadata_sync_multi_t (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to add partition metadata -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_partition_metadata").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_partition_metadata").cancel(' || :pid || ')'); mitmproxy --------------------------------------------------------------------- @@ -658,16 +658,16 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_partition_ SELECT citus_activate_node('localhost', :worker_2_proxy_port); ERROR: canceling statement due to user request -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_partition_metadata").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_partition_metadata").kill()'); mitmproxy --------------------------------------------------------------------- (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to add shard metadata -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_shard_metadata").cancel(' || :pid || ')'); +SELECT 
citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_shard_metadata").cancel(' || :pid || ')'); mitmproxy --------------------------------------------------------------------- @@ -675,16 +675,16 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_shard_meta SELECT citus_activate_node('localhost', :worker_2_proxy_port); ERROR: canceling statement due to user request -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_shard_metadata").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_shard_metadata").kill()'); mitmproxy --------------------------------------------------------------------- (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to add placement metadata -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_placement_metadata").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_placement_metadata").cancel(' || :pid || ')'); mitmproxy --------------------------------------------------------------------- @@ -692,16 +692,16 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_placement_ SELECT citus_activate_node('localhost', :worker_2_proxy_port); ERROR: canceling statement due to user request -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_placement_metadata").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_placement_metadata").kill()'); mitmproxy --------------------------------------------------------------------- (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to add colocation metadata -SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.citus_internal_add_colocation_metadata").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_colocation_metadata").cancel(' || :pid || ')'); mitmproxy --------------------------------------------------------------------- @@ -709,16 +709,16 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.citus_internal_add SELECT citus_activate_node('localhost', :worker_2_proxy_port); ERROR: canceling statement due to user request -SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.citus_internal_add_colocation_metadata").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_colocation_metadata").kill()'); mitmproxy --------------------------------------------------------------------- (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to add distributed object metadata -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_object_metadata").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_object_metadata").cancel(' || :pid || 
')'); mitmproxy --------------------------------------------------------------------- @@ -726,14 +726,14 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_object_met SELECT citus_activate_node('localhost', :worker_2_proxy_port); ERROR: canceling statement due to user request -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_object_metadata").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_object_metadata").kill()'); mitmproxy --------------------------------------------------------------------- (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to mark function as distributed SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*one_as_result").cancel(' || :pid || ')'); mitmproxy @@ -750,7 +750,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*one_as (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to mark collation as distributed SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*german_phonebook").cancel(' || :pid || ')'); mitmproxy @@ -767,7 +767,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*german (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to mark text search dictionary as distributed SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*my_german_dict").cancel(' || :pid || ')'); mitmproxy @@ -784,7 +784,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*my_ger (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to mark text search configuration as distributed SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*my_ts_config").cancel(' || :pid || ')'); mitmproxy @@ -801,7 +801,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*my_ts_ (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to mark type as distributed SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*pair_type").cancel(' || :pid || ')'); mitmproxy @@ -818,7 +818,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*pair_t (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx 
failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to mark sequence as distributed SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*seq_owned").cancel(' || :pid || ')'); mitmproxy @@ -835,7 +835,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*seq_ow (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to mark publication as distributed SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*pub_all").cancel(' || :pid || ')'); mitmproxy @@ -852,7 +852,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="WITH distributed_object_data.*pub_al (1 row) SELECT citus_activate_node('localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Failure to set isactive to true SELECT citus.mitmproxy('conn.onQuery(query="UPDATE pg_dist_node SET isactive = TRUE").cancel(' || :pid || ')'); mitmproxy diff --git a/src/test/regress/expected/failure_non_main_db_2pc.out b/src/test/regress/expected/failure_non_main_db_2pc.out new file mode 100644 index 00000000000..673ce45a5ec --- /dev/null +++ b/src/test/regress/expected/failure_non_main_db_2pc.out @@ -0,0 +1,154 @@ +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +CREATE SCHEMA failure_non_main_db_2pc; +SET SEARCH_PATH TO 'failure_non_main_db_2pc'; +CREATE DATABASE other_db1; +NOTICE: Citus partially supports CREATE DATABASE for distributed databases +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. 
+SELECT citus.mitmproxy('conn.onQuery(query="COMMIT PREPARED").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +\c other_db1 +CREATE USER user_1; +\c regression +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT nodeid, result FROM run_command_on_all_nodes($$SELECT rolname FROM pg_roles WHERE rolname::TEXT = 'user_1'$$) ORDER BY 1; + nodeid | result +--------------------------------------------------------------------- + 0 | user_1 + 1 | user_1 + 2 | +(3 rows) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT nodeid, result FROM run_command_on_all_nodes($$SELECT rolname FROM pg_roles WHERE rolname::TEXT = 'user_1'$$) ORDER BY 1; + nodeid | result +--------------------------------------------------------------------- + 0 | user_1 + 1 | user_1 + 2 | user_1 +(3 rows) + +SELECT citus.mitmproxy('conn.onQuery(query="CREATE USER user_2").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +\c other_db1 +CREATE USER user_2; +ERROR: connection not open +CONTEXT: while executing command on localhost:xxxxx +while executing command on localhost:xxxxx +\c regression +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT nodeid, result FROM run_command_on_all_nodes($$SELECT rolname FROM pg_roles WHERE rolname::TEXT = 'user_2'$$) ORDER BY 1; + nodeid | result +--------------------------------------------------------------------- + 0 | + 1 | + 2 | +(3 rows) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT nodeid, result FROM run_command_on_all_nodes($$SELECT rolname FROM pg_roles WHERE rolname::TEXT = 'user_2'$$) ORDER BY 1; + nodeid | result +--------------------------------------------------------------------- + 0 | + 1 | + 2 | +(3 rows) + +DROP DATABASE other_db1; +-- user_2 should not exist because the query to create it will fail +-- but let's make sure we try to drop it just in case +DROP USER IF EXISTS user_1, user_2; +NOTICE: role "user_2" does not exist, skipping +SELECT citus_set_coordinator_host('localhost'); + citus_set_coordinator_host +--------------------------------------------------------------------- + +(1 row) + +\c - - - :worker_1_port +CREATE DATABASE other_db2; +NOTICE: Citus partially supports CREATE DATABASE for distributed databases +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. 
+SELECT citus.mitmproxy('conn.onQuery(query="COMMIT PREPARED").kill()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +\c other_db2 +CREATE USER user_3; +\c regression +SELECT citus.mitmproxy('conn.allow()'); + mitmproxy +--------------------------------------------------------------------- + +(1 row) + +SELECT result FROM run_command_on_all_nodes($$SELECT rolname FROM pg_roles WHERE rolname::TEXT = 'user_3'$$) ORDER BY 1; + result +--------------------------------------------------------------------- + + user_3 + user_3 +(3 rows) + +SELECT recover_prepared_transactions(); + recover_prepared_transactions +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT result FROM run_command_on_all_nodes($$SELECT rolname FROM pg_roles WHERE rolname::TEXT = 'user_3'$$) ORDER BY 1; + result +--------------------------------------------------------------------- + user_3 + user_3 + user_3 +(3 rows) + +DROP DATABASE other_db2; +DROP USER user_3; +\c - - - :master_port +SELECT result FROM run_command_on_all_nodes($$DELETE FROM pg_dist_node WHERE groupid = 0$$); + result +--------------------------------------------------------------------- + DELETE 1 + DELETE 1 + DELETE 1 +(3 rows) + +DROP SCHEMA failure_non_main_db_2pc; diff --git a/src/test/regress/expected/failure_on_create_subscription.out b/src/test/regress/expected/failure_on_create_subscription.out index 19df82d3eb8..a42df24d232 100644 --- a/src/test/regress/expected/failure_on_create_subscription.out +++ b/src/test/regress/expected/failure_on_create_subscription.out @@ -43,9 +43,9 @@ SELECT * FROM shards_in_workers; -- Failure on creating the subscription -- Failing exactly on CREATE SUBSCRIPTION is causing flaky test where we fail with either: --- 1) ERROR: connection to the remote node localhost:xxxxx failed with the following error: ERROR: subscription "citus_shard_move_subscription_xxxxxxx" does not exist +-- 1) ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: ERROR: subscription "citus_shard_move_subscription_xxxxxxx" does not exist -- another command is already in progress --- 2) ERROR: connection to the remote node localhost:xxxxx failed with the following error: another command is already in progress +-- 2) ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: another command is already in progress -- Instead fail on the next step (ALTER SUBSCRIPTION) instead which is also required logically as part of uber CREATE SUBSCRIPTION operation. 
SELECT citus.mitmproxy('conn.onQuery(query="ALTER SUBSCRIPTION").kill()'); mitmproxy diff --git a/src/test/regress/expected/failure_online_move_shard_placement.out b/src/test/regress/expected/failure_online_move_shard_placement.out index cf5890f35af..0a881fe42b7 100644 --- a/src/test/regress/expected/failure_online_move_shard_placement.out +++ b/src/test/regress/expected/failure_online_move_shard_placement.out @@ -407,7 +407,7 @@ SELECT citus.mitmproxy('conn.matches(b"CREATE INDEX").killall()'); (1 row) SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- cleanup leftovers SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -442,7 +442,7 @@ SELECT citus.mitmproxy('conn.matches(b"CREATE INDEX").killall()'); (1 row) SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- failure on parallel create index ALTER SYSTEM RESET citus.max_adaptive_executor_pool_size; SELECT pg_reload_conf(); @@ -458,7 +458,7 @@ SELECT citus.mitmproxy('conn.matches(b"CREATE INDEX").killall()'); (1 row) SELECT master_move_shard_placement(101, 'localhost', :worker_1_port, 'localhost', :worker_2_proxy_port); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- Verify that the shard is not moved and the number of rows are still 100k SELECT citus.mitmproxy('conn.allow()'); mitmproxy diff --git a/src/test/regress/expected/failure_ref_tables.out b/src/test/regress/expected/failure_ref_tables.out index 4984cc1bf18..e9a7e457173 100644 --- a/src/test/regress/expected/failure_ref_tables.out +++ b/src/test/regress/expected/failure_ref_tables.out @@ -33,7 +33,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="INSERT").kill()'); (1 row) INSERT INTO ref_table VALUES (5, 6); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT COUNT(*) FROM ref_table WHERE key=5; count --------------------------------------------------------------------- @@ -48,7 +48,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="UPDATE").kill()'); (1 row) UPDATE ref_table SET key=7 RETURNING value; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT COUNT(*) FROM ref_table WHERE key=7; count --------------------------------------------------------------------- @@ -65,7 +65,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="UPDATE").kill()'); BEGIN; DELETE FROM ref_table WHERE key=5; UPDATE ref_table SET key=value; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the 
following error: connection not open COMMIT; SELECT COUNT(*) FROM ref_table WHERE key=value; count diff --git a/src/test/regress/expected/failure_replicated_partitions.out b/src/test/regress/expected/failure_replicated_partitions.out index 7294df98b2d..67dda269ca6 100644 --- a/src/test/regress/expected/failure_replicated_partitions.out +++ b/src/test/regress/expected/failure_replicated_partitions.out @@ -28,7 +28,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="INSERT").kill()'); (1 row) INSERT INTO partitioned_table VALUES (0, 0); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- use both placements SET citus.task_assignment_policy TO "round-robin"; -- the results should be the same diff --git a/src/test/regress/expected/failure_savepoints.out b/src/test/regress/expected/failure_savepoints.out index 9b155e90e59..ca5cb91f62a 100644 --- a/src/test/regress/expected/failure_savepoints.out +++ b/src/test/regress/expected/failure_savepoints.out @@ -312,7 +312,7 @@ SELECT * FROM ref; ROLLBACK TO SAVEPOINT start; SELECT * FROM ref; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open END; -- clean up RESET client_min_messages; diff --git a/src/test/regress/expected/failure_single_mod.out b/src/test/regress/expected/failure_single_mod.out index 2a6ed2d776a..aa6c10e6663 100644 --- a/src/test/regress/expected/failure_single_mod.out +++ b/src/test/regress/expected/failure_single_mod.out @@ -27,7 +27,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="INSERT").kill()'); (1 row) INSERT INTO mod_test VALUES (2, 6); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT COUNT(*) FROM mod_test WHERE key=2; count --------------------------------------------------------------------- @@ -59,7 +59,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="UPDATE").kill()'); (1 row) UPDATE mod_test SET value='ok' WHERE key=2 RETURNING key; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT COUNT(*) FROM mod_test WHERE value='ok'; count --------------------------------------------------------------------- @@ -89,7 +89,7 @@ INSERT INTO mod_test VALUES (2, 6); INSERT INTO mod_test VALUES (2, 7); DELETE FROM mod_test WHERE key=2 AND value = '7'; UPDATE mod_test SET value='ok' WHERE key=2; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open COMMIT; SELECT COUNT(*) FROM mod_test WHERE key=2; count diff --git a/src/test/regress/expected/failure_single_select.out b/src/test/regress/expected/failure_single_select.out index 1b60f31251b..586dd47569f 100644 --- a/src/test/regress/expected/failure_single_select.out +++ b/src/test/regress/expected/failure_single_select.out @@ -30,14 +30,14 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT.*select_test").kill()'); (1 row) SELECT 
* FROM select_test WHERE key = 3; -WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open +WARNING: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open key | value --------------------------------------------------------------------- 3 | test data (1 row) SELECT * FROM select_test WHERE key = 3; -WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open +WARNING: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open key | value --------------------------------------------------------------------- 3 | test data @@ -54,7 +54,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT.*select_test").kill()'); BEGIN; INSERT INTO select_test VALUES (3, 'more data'); SELECT * FROM select_test WHERE key = 3; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open COMMIT; SELECT citus.mitmproxy('conn.allow()'); mitmproxy @@ -142,7 +142,7 @@ SELECT * FROM select_test WHERE key = 3; INSERT INTO select_test VALUES (3, 'even more data'); SELECT * FROM select_test WHERE key = 3; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open COMMIT; SELECT citus.mitmproxy('conn.onQuery(query="SELECT.*pg_prepared_xacts").after(2).kill()'); mitmproxy @@ -186,7 +186,7 @@ SELECT * FROM select_test WHERE key = 1; (1 row) SELECT * FROM select_test WHERE key = 1; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- now the same test with query cancellation SELECT citus.mitmproxy('conn.onQuery(query="SELECT.*select_test").after(1).cancel(' || pg_backend_pid() || ')'); mitmproxy diff --git a/src/test/regress/expected/failure_split_cleanup.out b/src/test/regress/expected/failure_split_cleanup.out index fe646587c83..ba862472531 100644 --- a/src/test/regress/expected/failure_split_cleanup.out +++ b/src/test/regress/expected/failure_split_cleanup.out @@ -54,7 +54,7 @@ SELECT create_distributed_table('table_to_split', 'id'); ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; operation_id | object_type | object_name | node_group_id | policy_type --------------------------------------------------------------------- 777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981000 | 1 | 0 @@ -109,7 +109,7 @@ CONTEXT: while executing command on localhost:xxxxx (1 row) SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; operation_id | object_type | object_name | node_group_id | policy_type --------------------------------------------------------------------- (0 rows) @@ -163,7 +163,7 @@ CONTEXT: 
while executing command on localhost:xxxxx ERROR: Failed to run worker_split_shard_replication_setup UDF. It should successfully execute for splitting a shard in a non-blocking way. Please retry. RESET client_min_messages; SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; operation_id | object_type | object_name | node_group_id | policy_type --------------------------------------------------------------------- 777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981000 | 1 | 0 @@ -221,7 +221,7 @@ ERROR: Failed to run worker_split_shard_replication_setup UDF. It should succes (1 row) SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; operation_id | object_type | object_name | node_group_id | policy_type --------------------------------------------------------------------- (0 rows) @@ -277,12 +277,12 @@ CONTEXT: while executing command on localhost:xxxxx ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; operation_id | object_type | object_name | node_group_id | policy_type --------------------------------------------------------------------- 777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981000 | 1 | 0 - 777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981002 | 2 | 0 777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981002 | 1 | 1 + 777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981002 | 2 | 0 777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981003 | 2 | 1 777 | 4 | citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx | 2 | 0 777 | 4 | citus_shard_split_publication_xxxxxxx_xxxxxxx_xxxxxxx | 2 | 0 @@ -336,7 +336,7 @@ CONTEXT: while executing command on localhost:xxxxx (1 row) SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; operation_id | object_type | object_name | node_group_id | policy_type --------------------------------------------------------------------- (0 rows) @@ -388,7 +388,7 @@ CONTEXT: while executing command on localhost:xxxxx ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; operation_id | object_type | object_name | node_group_id | policy_type --------------------------------------------------------------------- 777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981000 | 1 | 0 @@ -455,7 +455,7 @@ CONTEXT: while executing command on localhost:xxxxx (1 row) SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY 
object_name, node_group_id; operation_id | object_type | object_name | node_group_id | policy_type --------------------------------------------------------------------- (0 rows) @@ -507,7 +507,7 @@ CONTEXT: while executing command on localhost:xxxxx ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; operation_id | object_type | object_name | node_group_id | policy_type --------------------------------------------------------------------- 777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981000 | 1 | 0 @@ -574,7 +574,7 @@ CONTEXT: while executing command on localhost:xxxxx (1 row) SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; operation_id | object_type | object_name | node_group_id | policy_type --------------------------------------------------------------------- (0 rows) @@ -627,14 +627,14 @@ WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx WARNING: connection not open CONTEXT: while executing command on localhost:xxxxx -WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open -WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open -WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open -WARNING: connection to the remote node localhost:xxxxx failed with the following error: connection not open +WARNING: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open +WARNING: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open +WARNING: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open +WARNING: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open ERROR: connection not open CONTEXT: while executing command on localhost:xxxxx SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; operation_id | object_type | object_name | node_group_id | policy_type --------------------------------------------------------------------- 777 | 1 | citus_failure_split_cleanup_schema.table_to_split_8981002 | 1 | 1 @@ -701,7 +701,7 @@ CONTEXT: while executing command on localhost:xxxxx (1 row) SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; operation_id | object_type | object_name | node_group_id | policy_type --------------------------------------------------------------------- (0 rows) diff --git a/src/test/regress/expected/failure_tenant_isolation.out b/src/test/regress/expected/failure_tenant_isolation.out index 6be4580bee4..b406aa2a3f4 100644 --- a/src/test/regress/expected/failure_tenant_isolation.out +++ 
b/src/test/regress/expected/failure_tenant_isolation.out @@ -76,7 +76,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(302").kill()'); (1 row) SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- cancellation on colocated table population SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(302").cancel(' || :pid || ')'); mitmproxy @@ -94,7 +94,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_2 (1 row) SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- cancellation on colocated table constraints SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_2 ADD CONSTRAINT").after(2).cancel(' || :pid || ')'); mitmproxy @@ -131,7 +131,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(300").kill()'); (1 row) SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- cancellation on table population SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(300").cancel(' || :pid || ')'); mitmproxy @@ -149,7 +149,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_1 (1 row) SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode => 'block_writes'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- cancellation on table constraints SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE tenant_isolation.table_1 ADD CONSTRAINT").after(2).cancel(' || :pid || ')'); mitmproxy diff --git a/src/test/regress/expected/failure_tenant_isolation_nonblocking.out b/src/test/regress/expected/failure_tenant_isolation_nonblocking.out index e40842e2a2c..aecde71c0ba 100644 --- a/src/test/regress/expected/failure_tenant_isolation_nonblocking.out +++ b/src/test/regress/expected/failure_tenant_isolation_nonblocking.out @@ -159,7 +159,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SET TRANSACTION SNAPSHOT").kill()'); (1 row) SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- cancellation on setting snapshot SELECT citus.mitmproxy('conn.onQuery(query="SET TRANSACTION SNAPSHOT").cancel(' || :pid || ')'); mitmproxy @@ -177,7 +177,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(300").kill()'); (1 row) SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 
'force_logical'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- cancellation on table population SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(300").cancel(' || :pid || ')'); mitmproxy @@ -195,7 +195,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(302").kill()'); (1 row) SELECT isolate_tenant_to_new_shard('table_1', 5, 'CASCADE', shard_transfer_mode := 'force_logical'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open -- cancellation on colocated table population SELECT citus.mitmproxy('conn.onQuery(query="worker_split_copy\(302").cancel(' || :pid || ')'); mitmproxy diff --git a/src/test/regress/expected/failure_truncate.out b/src/test/regress/expected/failure_truncate.out index 4e332252ee1..253314ee915 100644 --- a/src/test/regress/expected/failure_truncate.out +++ b/src/test/regress/expected/failure_truncate.out @@ -43,7 +43,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); (1 row) TRUNCATE test_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -152,7 +152,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="TRUNCATE TABLE truncate_failure.test (1 row) TRUNCATE test_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -414,7 +414,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE TABLE").after(2).kill()'); (1 row) TRUNCATE reference_table CASCADE; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -553,7 +553,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); (1 row) TRUNCATE test_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -662,7 +662,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^TRUNCATE TABLE truncate_failure.tes (1 row) TRUNCATE test_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ 
-922,7 +922,7 @@ SELECT citus.mitmproxy('conn.onAuthenticationOk().kill()'); (1 row) TRUNCATE test_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- @@ -1031,7 +1031,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="TRUNCATE TABLE truncate_failure.test (1 row) TRUNCATE test_table; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.allow()'); mitmproxy --------------------------------------------------------------------- diff --git a/src/test/regress/expected/failure_vacuum.out b/src/test/regress/expected/failure_vacuum.out index 617d40d3a97..b438f413bc8 100644 --- a/src/test/regress/expected/failure_vacuum.out +++ b/src/test/regress/expected/failure_vacuum.out @@ -30,7 +30,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM").kill()'); (1 row) VACUUM vacuum_test; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").kill()'); mitmproxy --------------------------------------------------------------------- @@ -38,7 +38,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^ANALYZE").kill()'); (1 row) ANALYZE vacuum_test; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SET client_min_messages TO ERROR; SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT").kill()'); mitmproxy @@ -113,7 +113,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").kill()'); (1 row) VACUUM vacuum_test, other_vacuum_test; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: connection not open +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open SELECT citus.mitmproxy('conn.onQuery(query="^VACUUM.*other").cancel(' || pg_backend_pid() || ')'); mitmproxy --------------------------------------------------------------------- diff --git a/src/test/regress/expected/foreign_key_to_reference_shard_rebalance.out b/src/test/regress/expected/foreign_key_to_reference_shard_rebalance.out index 2dc32915300..38b82ed6809 100644 --- a/src/test/regress/expected/foreign_key_to_reference_shard_rebalance.out +++ b/src/test/regress/expected/foreign_key_to_reference_shard_rebalance.out @@ -210,6 +210,7 @@ select create_distributed_table('partitioned_tbl_with_fkey','x'); create table partition_1_with_fkey partition of partitioned_tbl_with_fkey for values from ('2022-01-01') to ('2022-12-31'); create table partition_2_with_fkey partition of partitioned_tbl_with_fkey for values from ('2023-01-01') to ('2023-12-31'); +create table partition_3_with_fkey partition of partitioned_tbl_with_fkey DEFAULT; insert into partitioned_tbl_with_fkey (x,y) select s,s%10 from generate_series(1,100) s; ALTER TABLE partitioned_tbl_with_fkey ADD CONSTRAINT fkey_to_ref_tbl 
FOREIGN KEY (y) REFERENCES ref_table_with_fkey(id);
 WITH shardid AS (SELECT shardid FROM pg_dist_shard where logicalrelid = 'partitioned_tbl_with_fkey'::regclass ORDER BY shardid LIMIT 1)
diff --git a/src/test/regress/expected/function_with_case_when.out b/src/test/regress/expected/function_with_case_when.out
new file mode 100644
index 00000000000..18df5be0a94
--- /dev/null
+++ b/src/test/regress/expected/function_with_case_when.out
@@ -0,0 +1,32 @@
+CREATE SCHEMA function_with_case;
+SET search_path TO function_with_case;
+-- create function
+CREATE OR REPLACE FUNCTION test_err(v1 text)
+ RETURNS text
+ LANGUAGE plpgsql
+ SECURITY DEFINER
+AS $function$
+
+begin
+ return v1 || ' - ok';
+END;
+$function$;
+do $$ declare
+ lNewValues text;
+ val text;
+begin
+ val = 'test';
+ lNewValues = test_err(v1 => case when val::text = 'test'::text then 'yes' else 'no' end);
+ raise notice 'lNewValues= %', lNewValues;
+end;$$ ;
+NOTICE: lNewValues= yes - ok
+CONTEXT: PL/pgSQL function inline_code_block line XX at RAISE
+-- call function
+SELECT test_err('test');
+ test_err
+---------------------------------------------------------------------
+ test - ok
+(1 row)
+
+DROP SCHEMA function_with_case CASCADE;
+NOTICE: drop cascades to function test_err(text)
diff --git a/src/test/regress/expected/global_cancel.out b/src/test/regress/expected/global_cancel.out
index 5adeef3c8d2..e5ce4fbc620 100644
--- a/src/test/regress/expected/global_cancel.out
+++ b/src/test/regress/expected/global_cancel.out
@@ -9,9 +9,14 @@ SELECT 1 FROM master_add_node('localhost', :master_port, groupid => 0);
 RESET client_min_messages;
 -- Kill maintenance daemon so it gets restarted and gets a gpid containing our
 -- nodeid
-SELECT pg_terminate_backend(pid)
+SELECT COUNT(pg_terminate_backend(pid)) >= 0
 FROM pg_stat_activity
-WHERE application_name = 'Citus Maintenance Daemon' \gset
+WHERE application_name = 'Citus Maintenance Daemon';
+ ?column?
+---------------------------------------------------------------------
+ t
+(1 row)
+
 -- reconnect to make sure we get a session with the gpid containing our nodeid
 \c - - - -
 CREATE SCHEMA global_cancel;
@@ -77,6 +82,7 @@ ERROR: must be a superuser to terminate superuser process
 SELECT pg_cancel_backend(citus_backend_gpid());
 ERROR: canceling statement due to user request
 \c - postgres - :master_port
+DROP USER global_cancel_user;
 SET client_min_messages TO DEBUG;
 -- 10000000000 is the node id multiplier for global pid
 SELECT pg_cancel_backend(10000000000 * citus_coordinator_nodeid() + 0);
diff --git a/src/test/regress/expected/grant_on_database_propagation.out b/src/test/regress/expected/grant_on_database_propagation.out
index 2fd135314bf..6fb363cfa1a 100644
--- a/src/test/regress/expected/grant_on_database_propagation.out
+++ b/src/test/regress/expected/grant_on_database_propagation.out
@@ -542,8 +542,8 @@ create user myuser;
 create user myuser_1;
 create database test_db;
 NOTICE: Citus partially supports CREATE DATABASE for distributed databases
-DETAIL: Citus does not propagate CREATE DATABASE command to workers
-HINT: You can manually create a database and its extensions on workers.
+DETAIL: Citus does not propagate CREATE DATABASE command to other nodes
+HINT: You can manually create a database and its extensions on other nodes.
SELECT result FROM run_command_on_workers($$create database test_db$$); result --------------------------------------------------------------------- @@ -677,7 +677,7 @@ select has_database_privilege('myuser','regression', 'TEMPORARY'); select has_database_privilege('myuser','test_db', 'CREATE'); has_database_privilege --------------------------------------------------------------------- - t + f (1 row) select has_database_privilege('myuser','test_db', 'CONNECT'); @@ -725,7 +725,7 @@ select has_database_privilege('myuser_1','regression', 'TEMPORARY'); select has_database_privilege('myuser_1','test_db', 'CREATE'); has_database_privilege --------------------------------------------------------------------- - t + f (1 row) select has_database_privilege('myuser_1','test_db', 'CONNECT'); @@ -884,19 +884,19 @@ select has_database_privilege('myuser','test_db', 'CREATE'); select has_database_privilege('myuser','test_db', 'CONNECT'); has_database_privilege --------------------------------------------------------------------- - f + t (1 row) select has_database_privilege('myuser','test_db', 'TEMP'); has_database_privilege --------------------------------------------------------------------- - f + t (1 row) select has_database_privilege('myuser','test_db', 'TEMPORARY'); has_database_privilege --------------------------------------------------------------------- - f + t (1 row) select has_database_privilege('myuser_1','regression', 'CREATE'); @@ -932,19 +932,19 @@ select has_database_privilege('myuser_1','test_db', 'CREATE'); select has_database_privilege('myuser_1','test_db', 'CONNECT'); has_database_privilege --------------------------------------------------------------------- - f + t (1 row) select has_database_privilege('myuser_1','test_db', 'TEMP'); has_database_privilege --------------------------------------------------------------------- - f + t (1 row) select has_database_privilege('myuser_1','test_db', 'TEMPORARY'); has_database_privilege --------------------------------------------------------------------- - f + t (1 row) \c - - - :master_port diff --git a/src/test/regress/expected/grant_on_database_propagation_from_non_maindb.out b/src/test/regress/expected/grant_on_database_propagation_from_non_maindb.out new file mode 100644 index 00000000000..594e3b74eb4 --- /dev/null +++ b/src/test/regress/expected/grant_on_database_propagation_from_non_maindb.out @@ -0,0 +1,471 @@ +-- Public role has connect,temp,temporary privileges on database +-- To test these scenarios, we need to revoke these privileges from public role +-- since public role privileges are inherited by new roles/users +set citus.enable_create_database_propagation to on; +create database test_2pc_db; +show citus.main_db; + citus.main_db +--------------------------------------------------------------------- + regression +(1 row) + +revoke connect,temp,temporary on database test_2pc_db from public; +CREATE SCHEMA grant_on_database_propagation_non_maindb; +SET search_path TO grant_on_database_propagation_non_maindb; +-- test grant/revoke CREATE privilege propagation on database +create user "myuser'_test"; +\c test_2pc_db - - :master_port +grant create on database test_2pc_db to "myuser'_test"; +\c regression - - :master_port; +select check_database_privileges('myuser''_test','test_2pc_db',ARRAY['CREATE']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) +(3 rows) + +\c test_2pc_db - - :master_port +revoke create on database test_2pc_db from 
"myuser'_test"; +\c regression - - :master_port; +select check_database_privileges('myuser''_test','test_2pc_db',ARRAY['CREATE']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) +(3 rows) + +drop user "myuser'_test"; +--------------------------------------------------------------------- +-- test grant/revoke CONNECT privilege propagation on database +\c regression - - :master_port +create user myuser2; +\c test_2pc_db - - :master_port +grant CONNECT on database test_2pc_db to myuser2; +\c regression - - :master_port; +select check_database_privileges('myuser2','test_2pc_db',ARRAY['CONNECT']); + check_database_privileges +--------------------------------------------------------------------- + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) +(3 rows) + +\c test_2pc_db - - :master_port +revoke connect on database test_2pc_db from myuser2; +\c regression - - :master_port +select check_database_privileges('myuser2','test_2pc_db',ARRAY['CONNECT']); + check_database_privileges +--------------------------------------------------------------------- + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) +(3 rows) + +drop user myuser2; +--------------------------------------------------------------------- +-- test grant/revoke TEMP privilege propagation on database +\c regression - - :master_port +create user myuser3; +-- test grant/revoke temp on database +\c test_2pc_db - - :master_port +grant TEMP on database test_2pc_db to myuser3; +\c regression - - :master_port; +select check_database_privileges('myuser3','test_2pc_db',ARRAY['TEMP']); + check_database_privileges +--------------------------------------------------------------------- + (TEMP,t) + (TEMP,t) + (TEMP,t) +(3 rows) + +\c test_2pc_db - - :worker_1_port +revoke TEMP on database test_2pc_db from myuser3; +\c regression - - :master_port; +select check_database_privileges('myuser3','test_2pc_db',ARRAY['TEMP']); + check_database_privileges +--------------------------------------------------------------------- + (TEMP,f) + (TEMP,f) + (TEMP,f) +(3 rows) + +drop user myuser3; +--------------------------------------------------------------------- +\c regression - - :master_port +-- test temporary privilege on database +create user myuser4; +-- test grant/revoke temporary on database +\c test_2pc_db - - :worker_1_port +grant TEMPORARY on database test_2pc_db to myuser4; +\c regression - - :master_port +select check_database_privileges('myuser4','test_2pc_db',ARRAY['TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(3 rows) + +\c test_2pc_db - - :master_port +revoke TEMPORARY on database test_2pc_db from myuser4; +\c regression - - :master_port; +select check_database_privileges('myuser4','test_2pc_db',ARRAY['TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(3 rows) + +drop user myuser4; +--------------------------------------------------------------------- +-- test ALL privileges with ALL statement on database +create user myuser5; +grant ALL on database test_2pc_db to myuser5; +\c regression - - :master_port +select check_database_privileges('myuser5','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + 
(CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +\c test_2pc_db - - :master_port +revoke ALL on database test_2pc_db from myuser5; +\c regression - - :master_port +select check_database_privileges('myuser5','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) + (TEMP,f) + (TEMP,f) + (TEMP,f) + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(12 rows) + +drop user myuser5; +--------------------------------------------------------------------- +-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database +create user myuser6; +\c test_2pc_db - - :master_port +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser6; +\c regression - - :master_port +select check_database_privileges('myuser6','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +\c test_2pc_db - - :master_port +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser6; +\c regression - - :master_port +select check_database_privileges('myuser6','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) + (TEMP,f) + (TEMP,f) + (TEMP,f) + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(12 rows) + +drop user myuser6; +--------------------------------------------------------------------- +-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database with grant option +create user myuser7; +create user myuser_1; +\c test_2pc_db - - :master_port +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser7; +set role myuser7; +--here since myuser7 does not have grant option, it should fail +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser_1; +WARNING: no privileges were granted for "test_2pc_db" +\c regression - - :master_port +select check_database_privileges('myuser_1','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) + (TEMP,f) + (TEMP,f) + (TEMP,f) + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(12 rows) + +\c test_2pc_db - - :master_port +RESET ROLE; +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser7 with grant option; +set role myuser7; +--here since myuser have grant option, it should succeed +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser_1 granted by myuser7; +\c regression - - :master_port +select check_database_privileges('myuser_1','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +\c 
test_2pc_db - - :master_port +RESET ROLE; +--below test should fail and should throw an error since myuser_1 still have the dependent privileges +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 restrict; +ERROR: dependent privileges exist +HINT: Use CASCADE to revoke them too. +--below test should fail and should throw an error since myuser_1 still have the dependent privileges +revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 restrict ; +ERROR: dependent privileges exist +HINT: Use CASCADE to revoke them too. +--below test should succeed and should not throw any error since myuser_1 privileges are revoked with cascade +revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 cascade ; +--here we test if myuser7 still have the privileges after revoke grant option for +\c regression - - :master_port +select check_database_privileges('myuser7','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +\c test_2pc_db - - :master_port +reset role; +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7; +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser_1; +\c regression - - :master_port +drop user myuser_1; +drop user myuser7; +--------------------------------------------------------------------- +-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database multi database +-- and multi user +\c regression - - :master_port +create user myuser8; +create user myuser_2; +set citus.enable_create_database_propagation to on; +create database test_db; +revoke connect,temp,temporary on database test_db from public; +\c test_2pc_db - - :master_port +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db to myuser8,myuser_2; +\c regression - - :master_port +select check_database_privileges('myuser8','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +select check_database_privileges('myuser8','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +select check_database_privileges('myuser_2','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +select check_database_privileges('myuser_2','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + 
(TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +\c test_2pc_db - - :master_port +RESET ROLE; +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser8 ; +--below test should succeed and should not throw any error +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser_2; +--below test should succeed and should not throw any error +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser8 cascade; +\c regression - - :master_port +select check_database_privileges('myuser8','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) + (TEMP,f) + (TEMP,f) + (TEMP,f) + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(12 rows) + +select check_database_privileges('myuser8','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) + (TEMP,f) + (TEMP,f) + (TEMP,f) + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(12 rows) + +select check_database_privileges('myuser_2','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) + (TEMP,f) + (TEMP,f) + (TEMP,f) + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(12 rows) + +select check_database_privileges('myuser_2','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) + (TEMP,f) + (TEMP,f) + (TEMP,f) + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(12 rows) + +\c test_2pc_db - - :master_port +reset role; +\c regression - - :master_port +drop user myuser_2; +drop user myuser8; +set citus.enable_create_database_propagation to on; +drop database test_db; +--------------------------------------------------------------------- +-- rollbacks public role database privileges to original state +grant connect,temp,temporary on database test_2pc_db to public; +drop database test_2pc_db; +set citus.enable_create_database_propagation to off; +DROP SCHEMA grant_on_database_propagation_non_maindb CASCADE; +reset citus.enable_create_database_propagation; +reset search_path; +--------------------------------------------------------------------- diff --git a/src/test/regress/expected/grant_role_from_non_maindb.out b/src/test/regress/expected/grant_role_from_non_maindb.out new file mode 100644 index 00000000000..6dc0b6c60ff --- /dev/null +++ b/src/test/regress/expected/grant_role_from_non_maindb.out @@ -0,0 +1,160 @@ +CREATE SCHEMA grant_role2pc; +SET search_path TO grant_role2pc; +set citus.enable_create_database_propagation to on; +CREATE DATABASE grant_role2pc_db; +\c grant_role2pc_db +SHOW citus.main_db; + citus.main_db +--------------------------------------------------------------------- + regression +(1 row) + +SET citus.superuser TO 'postgres'; +CREATE USER grant_role2pc_user1; +CREATE USER grant_role2pc_user2; +CREATE USER grant_role2pc_user3; +CREATE USER grant_role2pc_user4; +CREATE USER grant_role2pc_user5; +CREATE 
USER grant_role2pc_user6; +CREATE USER grant_role2pc_user7; +\c grant_role2pc_db +--test with empty superuser +SET citus.superuser TO ''; +grant grant_role2pc_user1 to grant_role2pc_user2; +ERROR: No superuser role is given for Citus main database connection +HINT: Set citus.superuser to a superuser role name +SET citus.superuser TO 'postgres'; +grant grant_role2pc_user1 to grant_role2pc_user2 with admin option granted by CURRENT_USER; +\c regression +select result FROM run_command_on_all_nodes( + $$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text = 'grant_role2pc_user2' + order by member::regrole::text, roleid::regrole::text + ) t + $$ +); + result +--------------------------------------------------------------------- + [{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true}] + [{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true}] + [{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true}] +(3 rows) + +\c grant_role2pc_db +--test grant under transactional context with multiple operations +BEGIN; +grant grant_role2pc_user1,grant_role2pc_user2 to grant_role2pc_user3 WITH ADMIN OPTION; +grant grant_role2pc_user1 to grant_role2pc_user4 granted by grant_role2pc_user3 ; +COMMIT; +BEGIN; +grant grant_role2pc_user1 to grant_role2pc_user5 WITH ADMIN OPTION granted by grant_role2pc_user3; +grant grant_role2pc_user1 to grant_role2pc_user6; +ROLLBACK; +BEGIN; +grant grant_role2pc_user1 to grant_role2pc_user7; +SELECT 1/0; +ERROR: division by zero +commit; +\c regression +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user3','grant_role2pc_user4','grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7') + order by member::regrole::text, roleid::regrole::text +) t +$$); + result +--------------------------------------------------------------------- + [{"member":"grant_role2pc_user3","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false}] + [{"member":"grant_role2pc_user3","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false}] + [{"member":"grant_role2pc_user3","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false}] +(3 rows) + +\c grant_role2pc_db +grant grant_role2pc_user1,grant_role2pc_user2 to grant_role2pc_user5,grant_role2pc_user6,grant_role2pc_user7 granted by grant_role2pc_user3; +\c regression +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + 
SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7') + order by member::regrole::text, roleid::regrole::text +) t +$$); + result +--------------------------------------------------------------------- + [{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] + [{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] + [{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] +(3 rows) + +\c grant_role2pc_db +revoke admin option for grant_role2pc_user1 from grant_role2pc_user5 granted by grant_role2pc_user3; +--test revoke under transactional context with multiple operations +BEGIN; +revoke grant_role2pc_user1 from grant_role2pc_user5 granted by grant_role2pc_user3 ; +revoke grant_role2pc_user1 from grant_role2pc_user4 granted by grant_role2pc_user3; +COMMIT; +\c grant_role2pc_db - - :worker_1_port +BEGIN; +revoke grant_role2pc_user1 from grant_role2pc_user6,grant_role2pc_user7 granted by grant_role2pc_user3; +revoke grant_role2pc_user1 from grant_role2pc_user3 cascade; +COMMIT; +\c regression +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user2','grant_role2pc_user3','grant_role2pc_user4','grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7') + order by member::regrole::text, roleid::regrole::text +) t +$$); + result 
+--------------------------------------------------------------------- + [{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] + [{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] + [{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] +(3 rows) + +\c grant_role2pc_db - - :worker_1_port +BEGIN; +grant grant_role2pc_user1 to grant_role2pc_user5 WITH ADMIN OPTION; +grant grant_role2pc_user1 to grant_role2pc_user6; +COMMIT; +\c regression - - :master_port +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user5','grant_role2pc_user6') + order by member::regrole::text, roleid::regrole::text +) t +$$); + result +--------------------------------------------------------------------- + [{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] + [{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] + 
[{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] +(3 rows) + +revoke grant_role2pc_user1 from grant_role2pc_user5,grant_role2pc_user6; +--clean resources +DROP SCHEMA grant_role2pc; +set citus.enable_create_database_propagation to on; +DROP DATABASE grant_role2pc_db; +drop user grant_role2pc_user2,grant_role2pc_user3,grant_role2pc_user4,grant_role2pc_user5,grant_role2pc_user6,grant_role2pc_user7; +drop user grant_role2pc_user1; +reset citus.enable_create_database_propagation; diff --git a/src/test/regress/expected/insert_select_connection_leak.out b/src/test/regress/expected/insert_select_connection_leak.out index 8a983acd536..b342ecde1b4 100644 --- a/src/test/regress/expected/insert_select_connection_leak.out +++ b/src/test/regress/expected/insert_select_connection_leak.out @@ -47,16 +47,16 @@ INSERT INTO target_table SELECT * FROM source_table; INSERT INTO target_table SELECT * FROM source_table; INSERT INTO target_table SELECT * FROM source_table; INSERT INTO target_table SELECT * FROM source_table; -SELECT worker_connection_count(:worker_1_port) - :worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :worker_2_connections) AS leaked_worker_2_connections; leaked_worker_1_connections | leaked_worker_2_connections --------------------------------------------------------------------- 0 | 0 (1 row) END; -SELECT worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections) AS leaked_worker_2_connections; leaked_worker_1_connections | leaked_worker_2_connections --------------------------------------------------------------------- 0 | 0 @@ -67,8 +67,8 @@ BEGIN; INSERT INTO target_table SELECT * FROM source_table; INSERT INTO target_table SELECT * FROM source_table; ROLLBACK; -SELECT worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections) AS leaked_worker_2_connections; leaked_worker_1_connections | leaked_worker_2_connections --------------------------------------------------------------------- 0 | 0 @@ -84,16 +84,16 @@ SAVEPOINT s1; INSERT INTO target_table SELECT a, CASE WHEN a < 50 THEN b ELSE null END FROM source_table; ERROR: null value in column "b" violates not-null constraint ROLLBACK TO SAVEPOINT s1; -SELECT 
worker_connection_count(:worker_1_port) - :worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :worker_2_connections) AS leaked_worker_2_connections; leaked_worker_1_connections | leaked_worker_2_connections --------------------------------------------------------------------- 0 | 0 (1 row) END; -SELECT worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections) AS leaked_worker_2_connections; leaked_worker_1_connections | leaked_worker_2_connections --------------------------------------------------------------------- 0 | 0 diff --git a/src/test/regress/expected/isolation_database_cmd_from_any_node.out b/src/test/regress/expected/isolation_database_cmd_from_any_node.out new file mode 100644 index 00000000000..e952bb45728 --- /dev/null +++ b/src/test/regress/expected/isolation_database_cmd_from_any_node.out @@ -0,0 +1,371 @@ +Parsed test spec with 2 sessions + +starting permutation: s1-begin s2-begin s1-acquire-citus-adv-oclass-lock s2-acquire-citus-adv-oclass-lock s1-commit s2-commit +?column? +--------------------------------------------------------------------- + 1 +(1 row) + +step s1-begin: BEGIN; +step s2-begin: BEGIN; +step s1-acquire-citus-adv-oclass-lock: SELECT citus_internal.acquire_citus_advisory_object_class_lock(value, NULL) FROM oclass_database; +acquire_citus_advisory_object_class_lock +--------------------------------------------------------------------- + +(1 row) + +step s2-acquire-citus-adv-oclass-lock: SELECT citus_internal.acquire_citus_advisory_object_class_lock(value, NULL) FROM oclass_database; +step s1-commit: COMMIT; +step s2-acquire-citus-adv-oclass-lock: <... completed> +acquire_citus_advisory_object_class_lock +--------------------------------------------------------------------- + +(1 row) + +step s2-commit: COMMIT; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + + +starting permutation: s1-create-testdb1 s1-begin s2-begin s1-acquire-citus-adv-oclass-lock-with-oid-testdb1 s2-acquire-citus-adv-oclass-lock-with-oid-testdb1 s1-commit s2-commit s1-drop-testdb1 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + +step s1-create-testdb1: CREATE DATABASE testdb1; +step s1-begin: BEGIN; +step s2-begin: BEGIN; +step s1-acquire-citus-adv-oclass-lock-with-oid-testdb1: SELECT citus_internal.acquire_citus_advisory_object_class_lock(value, 'testdb1') FROM oclass_database; +acquire_citus_advisory_object_class_lock +--------------------------------------------------------------------- + +(1 row) + +step s2-acquire-citus-adv-oclass-lock-with-oid-testdb1: SELECT citus_internal.acquire_citus_advisory_object_class_lock(value, 'testdb1') FROM oclass_database; +step s1-commit: COMMIT; +step s2-acquire-citus-adv-oclass-lock-with-oid-testdb1: <... 
completed> +acquire_citus_advisory_object_class_lock +--------------------------------------------------------------------- + +(1 row) + +step s2-commit: COMMIT; +step s1-drop-testdb1: DROP DATABASE testdb1; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + + +starting permutation: s1-create-testdb1 s2-create-testdb2 s1-begin s2-begin s1-acquire-citus-adv-oclass-lock-with-oid-testdb1 s2-acquire-citus-adv-oclass-lock-with-oid-testdb2 s1-commit s2-commit s1-drop-testdb1 s2-drop-testdb2 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + +step s1-create-testdb1: CREATE DATABASE testdb1; +step s2-create-testdb2: CREATE DATABASE testdb2; +step s1-begin: BEGIN; +step s2-begin: BEGIN; +step s1-acquire-citus-adv-oclass-lock-with-oid-testdb1: SELECT citus_internal.acquire_citus_advisory_object_class_lock(value, 'testdb1') FROM oclass_database; +acquire_citus_advisory_object_class_lock +--------------------------------------------------------------------- + +(1 row) + +step s2-acquire-citus-adv-oclass-lock-with-oid-testdb2: SELECT citus_internal.acquire_citus_advisory_object_class_lock(value, 'testdb2') FROM oclass_database; +acquire_citus_advisory_object_class_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-commit: COMMIT; +step s2-commit: COMMIT; +step s1-drop-testdb1: DROP DATABASE testdb1; +step s2-drop-testdb2: DROP DATABASE testdb2; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + + +starting permutation: s2-create-testdb2 s1-begin s2-begin s1-acquire-citus-adv-oclass-lock s2-acquire-citus-adv-oclass-lock-with-oid-testdb2 s1-commit s2-commit s2-drop-testdb2 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + +step s2-create-testdb2: CREATE DATABASE testdb2; +step s1-begin: BEGIN; +step s2-begin: BEGIN; +step s1-acquire-citus-adv-oclass-lock: SELECT citus_internal.acquire_citus_advisory_object_class_lock(value, NULL) FROM oclass_database; +acquire_citus_advisory_object_class_lock +--------------------------------------------------------------------- + +(1 row) + +step s2-acquire-citus-adv-oclass-lock-with-oid-testdb2: SELECT citus_internal.acquire_citus_advisory_object_class_lock(value, 'testdb2') FROM oclass_database; +acquire_citus_advisory_object_class_lock +--------------------------------------------------------------------- + +(1 row) + +step s1-commit: COMMIT; +step s2-commit: COMMIT; +step s2-drop-testdb2: DROP DATABASE testdb2; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + + +starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-set-lc_monetary s1-create-db1 s2-rollback s2-drop-testdb2 s1-drop-db1 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + +step s2-create-testdb2: CREATE DATABASE testdb2; +step s2-begin: BEGIN; +step s2-alter-testdb2-set-lc_monetary: ALTER DATABASE testdb2 SET lc_monetary TO 'C'; +step s1-create-db1: CREATE DATABASE db1; +step s2-rollback: ROLLBACK; +step s2-drop-testdb2: DROP DATABASE testdb2; +step s1-drop-db1: DROP DATABASE db1; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + + +starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-set-lc_monetary s1-create-user-dbuser s1-grant-on-testdb2-to-dbuser s2-rollback s2-drop-testdb2 s1-drop-user-dbuser +?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +step s2-create-testdb2: CREATE DATABASE testdb2; +step s2-begin: BEGIN; +step s2-alter-testdb2-set-lc_monetary: ALTER DATABASE testdb2 SET lc_monetary TO 'C'; +step s1-create-user-dbuser: CREATE USER dbuser; +step s1-grant-on-testdb2-to-dbuser: GRANT ALL ON DATABASE testdb2 TO dbuser; +step s2-rollback: ROLLBACK; +step s2-drop-testdb2: DROP DATABASE testdb2; +step s1-drop-user-dbuser: DROP USER dbuser; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + + +starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-set-lc_monetary s1-create-testdb1 s1-create-user-dbuser s1-grant-on-testdb1-to-dbuser s2-rollback s2-drop-testdb2 s1-drop-testdb1 s1-drop-user-dbuser +?column? +--------------------------------------------------------------------- + 1 +(1 row) + +step s2-create-testdb2: CREATE DATABASE testdb2; +step s2-begin: BEGIN; +step s2-alter-testdb2-set-lc_monetary: ALTER DATABASE testdb2 SET lc_monetary TO 'C'; +step s1-create-testdb1: CREATE DATABASE testdb1; +step s1-create-user-dbuser: CREATE USER dbuser; +step s1-grant-on-testdb1-to-dbuser: GRANT ALL ON DATABASE testdb1 TO dbuser; +step s2-rollback: ROLLBACK; +step s2-drop-testdb2: DROP DATABASE testdb2; +step s1-drop-testdb1: DROP DATABASE testdb1; +step s1-drop-user-dbuser: DROP USER dbuser; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + + +starting permutation: s1-create-testdb1 s2-create-testdb2 s1-begin s2-begin s1-alter-testdb1-rename-to-db1 s2-alter-testdb2-rename-to-db1 s1-commit s2-rollback s1-drop-db1 s2-drop-testdb2 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + +step s1-create-testdb1: CREATE DATABASE testdb1; +step s2-create-testdb2: CREATE DATABASE testdb2; +step s1-begin: BEGIN; +step s2-begin: BEGIN; +step s1-alter-testdb1-rename-to-db1: ALTER DATABASE testdb1 RENAME TO db1; +step s2-alter-testdb2-rename-to-db1: ALTER DATABASE testdb2 RENAME TO db1; +step s1-commit: COMMIT; +step s2-alter-testdb2-rename-to-db1: <... completed> +ERROR: database "db1" already exists +step s2-rollback: ROLLBACK; +step s1-drop-db1: DROP DATABASE db1; +step s2-drop-testdb2: DROP DATABASE testdb2; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + + +starting permutation: s1-create-testdb1 s2-create-testdb2 s1-begin s2-begin s1-alter-testdb1-rename-to-db1 s2-alter-testdb2-rename-to-db1 s1-rollback s2-commit s1-drop-testdb1 s2-drop-db1 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + +step s1-create-testdb1: CREATE DATABASE testdb1; +step s2-create-testdb2: CREATE DATABASE testdb2; +step s1-begin: BEGIN; +step s2-begin: BEGIN; +step s1-alter-testdb1-rename-to-db1: ALTER DATABASE testdb1 RENAME TO db1; +step s2-alter-testdb2-rename-to-db1: ALTER DATABASE testdb2 RENAME TO db1; +step s1-rollback: ROLLBACK; +step s2-alter-testdb2-rename-to-db1: <... completed> +step s2-commit: COMMIT; +step s1-drop-testdb1: DROP DATABASE testdb1; +step s2-drop-db1: DROP DATABASE db1; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + + +starting permutation: s1-create-testdb1 s1-begin s2-begin s1-alter-testdb1-rename-to-db1 s2-alter-testdb1-rename-to-db1 s1-commit s2-rollback s1-drop-db1 +?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +step s1-create-testdb1: CREATE DATABASE testdb1; +step s1-begin: BEGIN; +step s2-begin: BEGIN; +step s1-alter-testdb1-rename-to-db1: ALTER DATABASE testdb1 RENAME TO db1; +step s2-alter-testdb1-rename-to-db1: ALTER DATABASE testdb1 RENAME TO db1; +step s1-commit: COMMIT; +step s2-alter-testdb1-rename-to-db1: <... completed> +ERROR: database "testdb1" does not exist +step s2-rollback: ROLLBACK; +step s1-drop-db1: DROP DATABASE db1; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + + +starting permutation: s1-create-testdb1 s1-begin s2-begin s1-alter-testdb1-rename-to-db1 s2-alter-testdb1-rename-to-db1 s1-rollback s2-commit s2-drop-db1 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + +step s1-create-testdb1: CREATE DATABASE testdb1; +step s1-begin: BEGIN; +step s2-begin: BEGIN; +step s1-alter-testdb1-rename-to-db1: ALTER DATABASE testdb1 RENAME TO db1; +step s2-alter-testdb1-rename-to-db1: ALTER DATABASE testdb1 RENAME TO db1; +step s1-rollback: ROLLBACK; +step s2-alter-testdb1-rename-to-db1: <... completed> +step s2-commit: COMMIT; +step s2-drop-db1: DROP DATABASE db1; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + + +starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-rename-to-db1 s1-create-db1 s2-rollback s2-drop-testdb2 s1-drop-db1 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + +step s2-create-testdb2: CREATE DATABASE testdb2; +step s2-begin: BEGIN; +step s2-alter-testdb2-rename-to-db1: ALTER DATABASE testdb2 RENAME TO db1; +step s1-create-db1: CREATE DATABASE db1; +step s2-rollback: ROLLBACK; +step s1-create-db1: <... completed> +step s2-drop-testdb2: DROP DATABASE testdb2; +step s1-drop-db1: DROP DATABASE db1; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + + +starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-rename-to-db1 s1-create-db1 s2-commit s2-drop-db1 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + +step s2-create-testdb2: CREATE DATABASE testdb2; +step s2-begin: BEGIN; +step s2-alter-testdb2-rename-to-db1: ALTER DATABASE testdb2 RENAME TO db1; +step s1-create-db1: CREATE DATABASE db1; +step s2-commit: COMMIT; +step s1-create-db1: <... completed> +ERROR: database "db1" already exists +step s2-drop-db1: DROP DATABASE db1; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + + +starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-rename-to-db2 s1-create-db1 s2-commit s2-drop-db2 s1-drop-db1 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + +step s2-create-testdb2: CREATE DATABASE testdb2; +step s2-begin: BEGIN; +step s2-alter-testdb2-rename-to-db2: ALTER DATABASE testdb2 RENAME TO db2; +step s1-create-db1: CREATE DATABASE db1; +step s2-commit: COMMIT; +step s1-create-db1: <... completed> +step s2-drop-db2: DROP DATABASE db2; +step s1-drop-db1: DROP DATABASE db1; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + + +starting permutation: s2-create-testdb2 s2-begin s2-alter-testdb2-rename-to-db1 s1-drop-testdb2 s2-rollback +?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +step s2-create-testdb2: CREATE DATABASE testdb2; +step s2-begin: BEGIN; +step s2-alter-testdb2-rename-to-db1: ALTER DATABASE testdb2 RENAME TO db1; +step s1-drop-testdb2: DROP DATABASE testdb2; +step s2-rollback: ROLLBACK; +step s1-drop-testdb2: <... completed> +?column? +--------------------------------------------------------------------- + 1 +(1 row) + + +starting permutation: s2-create-testdb2 s1-create-db1 s2-begin s2-alter-testdb2-rename-to-db2 s1-drop-db1 s2-commit s2-drop-db2 +?column? +--------------------------------------------------------------------- + 1 +(1 row) + +step s2-create-testdb2: CREATE DATABASE testdb2; +step s1-create-db1: CREATE DATABASE db1; +step s2-begin: BEGIN; +step s2-alter-testdb2-rename-to-db2: ALTER DATABASE testdb2 RENAME TO db2; +step s1-drop-db1: DROP DATABASE db1; +step s2-commit: COMMIT; +step s2-drop-db2: DROP DATABASE db2; +?column? +--------------------------------------------------------------------- + 1 +(1 row) + diff --git a/src/test/regress/expected/isolation_drop_vs_all.out b/src/test/regress/expected/isolation_drop_vs_all.out index 7dab136153c..2c8912c21c7 100644 --- a/src/test/regress/expected/isolation_drop_vs_all.out +++ b/src/test/regress/expected/isolation_drop_vs_all.out @@ -226,7 +226,7 @@ step s1-drop: DROP TABLE drop_hash; step s2-table-size: SELECT citus_total_relation_size('drop_hash'); step s1-commit: COMMIT; step s2-table-size: <... completed> -ERROR: could not compute table size: relation does not exist +ERROR: could not compute relation size: relation does not exist step s2-commit: COMMIT; step s1-select-count: SELECT COUNT(*) FROM drop_hash; ERROR: relation "drop_hash" does not exist diff --git a/src/test/regress/expected/isolation_get_all_active_transactions.out b/src/test/regress/expected/isolation_get_all_active_transactions.out index a9739a826b2..73610a455ee 100644 --- a/src/test/regress/expected/isolation_get_all_active_transactions.out +++ b/src/test/regress/expected/isolation_get_all_active_transactions.out @@ -94,7 +94,7 @@ step s2-commit: COMMIT; -starting permutation: s4-record-pid s3-show-activity s5-kill s3-show-activity +starting permutation: s4-record-pid s3-show-activity s5-kill s3-wait-backend-termination step s4-record-pid: SELECT pg_backend_pid() INTO selected_pid; @@ -115,12 +115,22 @@ pg_terminate_backend t (1 row) -step s3-show-activity: +step s3-wait-backend-termination: SET ROLE postgres; - select count(*) from get_all_active_transactions() where process_id IN (SELECT * FROM selected_pid); - -count ---------------------------------------------------------------------- - 0 -(1 row) + DO $$ + DECLARE + i int; + BEGIN + i := 0; + -- try for 5 sec then timeout + WHILE (select count(*) > 0 from get_all_active_transactions() where process_id IN (SELECT * FROM selected_pid)) + LOOP + PERFORM pg_sleep(0.1); + i := i + 1; + IF i > 50 THEN + RAISE EXCEPTION 'Timeout while waiting for backend to terminate'; + END IF; + END LOOP; + END; + $$; diff --git a/src/test/regress/expected/isolation_master_update_node_1.out b/src/test/regress/expected/isolation_master_update_node_1.out new file mode 100644 index 00000000000..4749566299c --- /dev/null +++ b/src/test/regress/expected/isolation_master_update_node_1.out @@ -0,0 +1,68 @@ +Parsed test spec with 2 sessions + +starting permutation: s1-begin s1-insert s2-begin s2-update-node-1 s1-abort s2-abort +create_distributed_table 
+--------------------------------------------------------------------- + +(1 row) + +step s1-begin: BEGIN; +step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100); +step s2-begin: BEGIN; +step s2-update-node-1: + -- update a specific node by address + SELECT master_update_node(nodeid, 'localhost', nodeport + 10) + FROM pg_dist_node + WHERE nodename = 'localhost' + AND nodeport = 57637; + +step s1-abort: ABORT; +step s2-update-node-1: <... completed> +master_update_node +--------------------------------------------------------------------- + +(1 row) + +step s2-abort: ABORT; +master_remove_node +--------------------------------------------------------------------- + + +(2 rows) + + +starting permutation: s1-begin s1-insert s2-begin s2-update-node-1-force s2-abort s1-abort +create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +step s1-begin: BEGIN; +step s1-insert: INSERT INTO t1 SELECT generate_series(1, 100); +step s2-begin: BEGIN; +step s2-update-node-1-force: + -- update a specific node by address (force) + SELECT master_update_node(nodeid, 'localhost', nodeport + 10, force => true, lock_cooldown => 100) + FROM pg_dist_node + WHERE nodename = 'localhost' + AND nodeport = 57637; + +step s2-update-node-1-force: <... completed> +master_update_node +--------------------------------------------------------------------- + +(1 row) + +step s2-abort: ABORT; +step s1-abort: ABORT; +FATAL: terminating connection due to administrator command +FATAL: terminating connection due to administrator command +SSL connection has been closed unexpectedly +server closed the connection unexpectedly + +master_remove_node +--------------------------------------------------------------------- + + +(2 rows) + diff --git a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out index e37724e4b8b..1aa7cbcc1e1 100644 --- a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out +++ b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out @@ -138,7 +138,7 @@ step s2-view-worker: ('%pg_prepared_xacts%'), ('%COMMIT%'), ('%dump_local_%'), - ('%citus_internal_local_blocked_processes%'), + ('%citus_internal.local_blocked_processes%'), ('%add_node%'), ('%csa_from_one_node%'), ('%pg_locks%')) diff --git a/src/test/regress/expected/isolation_update_node.out b/src/test/regress/expected/isolation_update_node.out index 1a1c65ec86f..703fcc4274f 100644 --- a/src/test/regress/expected/isolation_update_node.out +++ b/src/test/regress/expected/isolation_update_node.out @@ -93,8 +93,8 @@ nodeid|nodename|nodeport starting permutation: s1-begin s1-update-node-1 s2-begin s2-update-node-1 s1-commit s2-abort s1-show-nodes s3-update-node-1-back s3-manually-fix-metadata nodeid|nodename |nodeport --------------------------------------------------------------------- - 25|localhost| 57638 - 24|localhost| 57637 + 23|localhost| 57638 + 22|localhost| 57637 (2 rows) step s1-begin: @@ -139,8 +139,8 @@ step s1-show-nodes: nodeid|nodename |nodeport|isactive --------------------------------------------------------------------- - 25|localhost| 57638|t - 24|localhost| 58637|t + 23|localhost| 57638|t + 22|localhost| 58637|t (2 rows) step s3-update-node-1-back: @@ -178,8 +178,8 @@ nodeid|nodename|nodeport starting permutation: s2-create-table s1-begin s1-update-node-nonexistent s1-prepare-transaction s2-cache-prepared-statement 
s1-commit-prepared s2-execute-prepared s1-update-node-existent s3-manually-fix-metadata nodeid|nodename |nodeport --------------------------------------------------------------------- - 27|localhost| 57638 - 26|localhost| 57637 + 23|localhost| 57638 + 22|localhost| 57637 (2 rows) step s2-create-table: @@ -250,7 +250,7 @@ count step s1-commit-prepared: COMMIT prepared 'label'; -s2: WARNING: connection to the remote node non-existent:57637 failed with the following error: could not translate host name "non-existent" to address: Name or service not known +s2: WARNING: connection to the remote node postgres@non-existent:57637 failed with the following error: could not translate host name "non-existent" to address: step s2-execute-prepared: EXECUTE foo; diff --git a/src/test/regress/expected/issue_5763.out b/src/test/regress/expected/issue_5763.out index aa6c4f35b43..86429739717 100644 --- a/src/test/regress/expected/issue_5763.out +++ b/src/test/regress/expected/issue_5763.out @@ -28,8 +28,8 @@ DROP USER issue_5763_3; -- test non-distributed role SET citus.enable_create_role_propagation TO off; CREATE USER issue_5763_4 WITH SUPERUSER; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. \c - issue_5763_4 - :master_port set citus.enable_ddl_propagation = off; CREATE SCHEMA issue_5763_sc_4; diff --git a/src/test/regress/expected/issue_7477.out b/src/test/regress/expected/issue_7477.out new file mode 100644 index 00000000000..224d85c6ea4 --- /dev/null +++ b/src/test/regress/expected/issue_7477.out @@ -0,0 +1,62 @@ +--- Test for updating a table that has a foreign key reference to another reference table. +--- Issue #7477: Distributed deadlock after issuing a simple UPDATE statement +--- https://github.com/citusdata/citus/issues/7477 +CREATE TABLE table1 (id INT PRIMARY KEY); +SELECT create_reference_table('table1'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO table1 VALUES (1); +CREATE TABLE table2 ( + id INT, + info TEXT, + CONSTRAINT table1_id_fk FOREIGN KEY (id) REFERENCES table1 (id) + ); +SELECT create_reference_table('table2'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO table2 VALUES (1, 'test'); +--- Runs the update command in parallel on workers. 
+--- Due to bug #7477, before the fix, the result is non-deterministic +--- and have several rows of the form: +--- localhost | 57638 | f | ERROR: deadlock detected +--- localhost | 57637 | f | ERROR: deadlock detected +--- localhost | 57637 | f | ERROR: canceling the transaction since it was involved in a distributed deadlock +SELECT * FROM master_run_on_worker( + ARRAY['localhost', 'localhost','localhost', 'localhost','localhost', + 'localhost','localhost', 'localhost','localhost', 'localhost']::text[], + ARRAY[57638, 57637, 57637, 57638, 57637, 57638, 57637, 57638, 57638, 57637]::int[], + ARRAY['UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1' + ]::text[], + true); + node_name | node_port | success | result +--------------------------------------------------------------------- + localhost | 57638 | t | UPDATE 1 + localhost | 57637 | t | UPDATE 1 + localhost | 57637 | t | UPDATE 1 + localhost | 57638 | t | UPDATE 1 + localhost | 57637 | t | UPDATE 1 + localhost | 57638 | t | UPDATE 1 + localhost | 57637 | t | UPDATE 1 + localhost | 57638 | t | UPDATE 1 + localhost | 57638 | t | UPDATE 1 + localhost | 57637 | t | UPDATE 1 +(10 rows) + +--- cleanup +DROP TABLE table2; +DROP TABLE table1; diff --git a/src/test/regress/expected/limit_intermediate_size.out b/src/test/regress/expected/limit_intermediate_size.out index e6fd0e79897..996c6536bff 100644 --- a/src/test/regress/expected/limit_intermediate_size.out +++ b/src/test/regress/expected/limit_intermediate_size.out @@ -16,7 +16,8 @@ SELECT cte.user_id, cte.value_2 FROM cte,cte2 ORDER BY 1,2 LIMIT 10; ERROR: the intermediate result size exceeds citus.max_intermediate_result_size (currently 2 kB) DETAIL: Citus restricts the size of intermediate results of complex subqueries and CTEs to avoid accidentally pulling large result sets into once place. HINT: To run the current query, set citus.max_intermediate_result_size to a higher value or -1 to disable. -SET citus.max_intermediate_result_size TO 17; +SET citus.max_intermediate_result_size TO 9; +-- regular adaptive executor CTE should fail WITH cte AS MATERIALIZED ( SELECT @@ -38,20 +39,9 @@ FROM ORDER BY 1,2 LIMIT 10; - user_id | value_2 ---------------------------------------------------------------------- - 1 | 0 - 1 | 0 - 1 | 0 - 1 | 0 - 1 | 0 - 1 | 0 - 1 | 0 - 1 | 0 - 1 | 0 - 1 | 0 -(10 rows) - +ERROR: the intermediate result size exceeds citus.max_intermediate_result_size (currently 9 kB) +DETAIL: Citus restricts the size of intermediate results of complex subqueries and CTEs to avoid accidentally pulling large result sets into once place. +HINT: To run the current query, set citus.max_intermediate_result_size to a higher value or -1 to disable. 
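
The limit_intermediate_size expectations above and below revolve around a single GUC, citus.max_intermediate_result_size, measured in kB, which caps how much data a materialized CTE or complex-subquery subplan may pull into one place on the coordinator. A minimal illustration of adjusting it per session follows; it is not part of the patch, and the values are arbitrary except where they mirror the updated test.

SET citus.max_intermediate_result_size TO 9;   -- 9 kB, the value the updated test uses
SET citus.max_intermediate_result_size TO -1;  -- disable the check, as the error HINT suggests
RESET citus.max_intermediate_result_size;      -- return to the configured default
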
-- router queries should be able to get limitted too SET citus.max_intermediate_result_size TO 2; -- this should pass, since we fetch small portions in each subplan @@ -117,11 +107,9 @@ WITH cte AS MATERIALIZED ( AND EXISTS (select * from cte2, cte3) ) SELECT count(*) FROM cte WHERE EXISTS (select * from cte); - count ---------------------------------------------------------------------- - 105 -(1 row) - +ERROR: the intermediate result size exceeds citus.max_intermediate_result_size (currently 4 kB) +DETAIL: Citus restricts the size of intermediate results of complex subqueries and CTEs to avoid accidentally pulling large result sets into once place. +HINT: To run the current query, set citus.max_intermediate_result_size to a higher value or -1 to disable. SET citus.max_intermediate_result_size TO 3; -- this should fail since the cte-subplan exceeds the limit even if the -- cte2 and cte3 does not diff --git a/src/test/regress/expected/local_shard_execution.out b/src/test/regress/expected/local_shard_execution.out index f6e4db7ee3c..58293a2d69b 100644 --- a/src/test/regress/expected/local_shard_execution.out +++ b/src/test/regress/expected/local_shard_execution.out @@ -3281,9 +3281,9 @@ SELECT pg_sleep(0.1); -- wait to make sure the config has changed before running SET citus.enable_local_execution TO false; -- force a connection to the dummy placements -- run queries that use dummy placements for local execution SELECT * FROM event_responses WHERE FALSE; -ERROR: connection to the remote node foobar:57636 failed with the following error: could not translate host name "foobar" to address: +ERROR: connection to the remote node postgres@foobar:57636 failed with the following error: could not translate host name "foobar" to address: WITH cte_1 AS (SELECT * FROM event_responses LIMIT 1) SELECT count(*) FROM cte_1; -ERROR: connection to the remote node foobar:57636 failed with the following error: could not translate host name "foobar" to address: +ERROR: connection to the remote node postgres@foobar:57636 failed with the following error: could not translate host name "foobar" to address: ALTER SYSTEM RESET citus.local_hostname; SELECT pg_reload_conf(); pg_reload_conf diff --git a/src/test/regress/expected/local_shard_execution_0.out b/src/test/regress/expected/local_shard_execution_0.out index 8c4fbfd7443..948941aad65 100644 --- a/src/test/regress/expected/local_shard_execution_0.out +++ b/src/test/regress/expected/local_shard_execution_0.out @@ -3281,9 +3281,9 @@ SELECT pg_sleep(0.1); -- wait to make sure the config has changed before running SET citus.enable_local_execution TO false; -- force a connection to the dummy placements -- run queries that use dummy placements for local execution SELECT * FROM event_responses WHERE FALSE; -ERROR: connection to the remote node foobar:57636 failed with the following error: could not translate host name "foobar" to address: +ERROR: connection to the remote node postgres@foobar:57636 failed with the following error: could not translate host name "foobar" to address: WITH cte_1 AS (SELECT * FROM event_responses LIMIT 1) SELECT count(*) FROM cte_1; -ERROR: connection to the remote node foobar:57636 failed with the following error: could not translate host name "foobar" to address: +ERROR: connection to the remote node postgres@foobar:57636 failed with the following error: could not translate host name "foobar" to address: ALTER SYSTEM RESET citus.local_hostname; SELECT pg_reload_conf(); pg_reload_conf diff --git 
a/src/test/regress/expected/logical_replication.out b/src/test/regress/expected/logical_replication.out index 8a3e96da913..b5a36125a76 100644 --- a/src/test/regress/expected/logical_replication.out +++ b/src/test/regress/expected/logical_replication.out @@ -32,23 +32,21 @@ CREATE SUBSCRIPTION citus_shard_move_subscription_:postgres_oid PUBLICATION citus_shard_move_publication_:postgres_oid WITH (enabled=false, slot_name=citus_shard_move_slot_:postgres_oid); NOTICE: created replication slot "citus_shard_move_slot_10" on publisher -SELECT count(*) from pg_subscription; - count +SELECT subname from pg_subscription; + subname --------------------------------------------------------------------- - 1 + citus_shard_move_subscription_10 (1 row) -SELECT count(*) from pg_publication; - count +SELECT pubname from pg_publication; + pubname --------------------------------------------------------------------- - 0 -(1 row) +(0 rows) -SELECT count(*) from pg_replication_slots; - count +SELECT slot_name from pg_replication_slots; + slot_name --------------------------------------------------------------------- - 0 -(1 row) +(0 rows) SELECT count(*) FROM dist; count @@ -58,22 +56,21 @@ SELECT count(*) FROM dist; \c - - - :worker_1_port SET search_path TO logical_replication; -SELECT count(*) from pg_subscription; - count +SELECT subname from pg_subscription; + subname --------------------------------------------------------------------- - 0 -(1 row) +(0 rows) -SELECT count(*) from pg_publication; - count +SELECT pubname from pg_publication; + pubname --------------------------------------------------------------------- - 1 + citus_shard_move_publication_10 (1 row) -SELECT count(*) from pg_replication_slots; - count +SELECT slot_name from pg_replication_slots; + slot_name --------------------------------------------------------------------- - 1 + citus_shard_move_slot_10 (1 row) SELECT count(*) FROM dist; @@ -90,25 +87,29 @@ select citus_move_shard_placement(6830002, 'localhost', :worker_1_port, 'localho (1 row) +SELECT public.wait_for_resource_cleanup(); + wait_for_resource_cleanup +--------------------------------------------------------------------- + +(1 row) + -- the subscription is still there, as there is no cleanup record for it -- we have created it manually -SELECT count(*) from pg_subscription; - count +SELECT subname from pg_subscription; + subname --------------------------------------------------------------------- - 1 + citus_shard_move_subscription_10 (1 row) -SELECT count(*) from pg_publication; - count +SELECT pubname from pg_publication; + pubname --------------------------------------------------------------------- - 0 -(1 row) +(0 rows) -SELECT count(*) from pg_replication_slots; - count +SELECT slot_name from pg_replication_slots; + slot_name --------------------------------------------------------------------- - 0 -(1 row) +(0 rows) SELECT count(*) from dist; count @@ -120,22 +121,21 @@ SELECT count(*) from dist; SET search_path TO logical_replication; -- the publication and repslot are still there, as there are no cleanup records for them -- we have created them manually -SELECT count(*) from pg_subscription; - count +SELECT subname from pg_subscription; + subname --------------------------------------------------------------------- - 0 -(1 row) +(0 rows) -SELECT count(*) from pg_publication; - count +SELECT pubname from pg_publication; + pubname --------------------------------------------------------------------- - 1 + citus_shard_move_publication_10 (1 row) -SELECT count(*) from 
pg_replication_slots; - count +SELECT slot_name from pg_replication_slots; + slot_name --------------------------------------------------------------------- - 1 + citus_shard_move_slot_10 (1 row) SELECT count(*) from dist; @@ -153,23 +153,20 @@ SELECT pg_drop_replication_slot('citus_shard_move_slot_' || :postgres_oid); \c - - - :worker_2_port SET search_path TO logical_replication; -SELECT count(*) from pg_subscription; - count +SELECT subname from pg_subscription; + subname --------------------------------------------------------------------- - 0 -(1 row) +(0 rows) -SELECT count(*) from pg_publication; - count +SELECT pubname from pg_publication; + pubname --------------------------------------------------------------------- - 0 -(1 row) +(0 rows) -SELECT count(*) from pg_replication_slots; - count +SELECT slot_name from pg_replication_slots; + slot_name --------------------------------------------------------------------- - 0 -(1 row) +(0 rows) SELECT count(*) from dist; count diff --git a/src/test/regress/expected/merge.out b/src/test/regress/expected/merge.out index a73467e81a8..5056ba5432e 100644 --- a/src/test/regress/expected/merge.out +++ b/src/test/regress/expected/merge.out @@ -1128,7 +1128,7 @@ DO NOTHING WHEN NOT MATCHED THEN INSERT VALUES(rs_source.id); DEBUG: Creating MERGE router plan -DEBUG: +DEBUG: RESET client_min_messages; SELECT * INTO rs_local FROM rs_target ORDER BY 1 ; -- Should be equal @@ -1259,7 +1259,7 @@ DO NOTHING WHEN NOT MATCHED THEN INSERT VALUES(fn_source.id, fn_source.source); DEBUG: Creating MERGE router plan -DEBUG: +DEBUG: RESET client_min_messages; SELECT * INTO fn_local FROM fn_target ORDER BY 1 ; -- Should be equal @@ -1552,7 +1552,7 @@ BEGIN; SET citus.log_remote_commands to true; SET client_min_messages TO DEBUG1; EXECUTE merge_prepare(2); -DEBUG: +DEBUG: DEBUG: Creating MERGE router plan NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx @@ -1782,13 +1782,13 @@ NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing MERGE INTO merge_schema.citus_target_xxxxxxx t USING merge_schema.citus_source_xxxxxxx s ON (t.id OPERATOR(pg_catalog.=) s.id) WHEN MATCHED THEN DO NOTHING WHEN NOT MATCHED AND (s.id OPERATOR(pg_catalog.<) 100) THEN INSERT (id, val) VALUES (s.id, s.val) +NOTICE: issuing MERGE INTO merge_schema.citus_target_xxxxxxx t USING merge_schema.citus_source_xxxxxxx s ON (t.id OPERATOR(pg_catalog.=) s.id) WHEN MATCHED THEN DO NOTHING WHEN NOT MATCHED AND (s.id OPERATOR(pg_catalog.<) 100) THEN INSERT (id, val) VALUES (s.id, s.val) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing MERGE INTO merge_schema.citus_target_xxxxxxx t USING merge_schema.citus_source_xxxxxxx s ON (t.id OPERATOR(pg_catalog.=) s.id) WHEN MATCHED THEN DO NOTHING WHEN NOT MATCHED AND (s.id OPERATOR(pg_catalog.<) 100) THEN INSERT (id, val) VALUES (s.id, s.val) +NOTICE: issuing MERGE INTO merge_schema.citus_target_xxxxxxx t USING merge_schema.citus_source_xxxxxxx s ON (t.id OPERATOR(pg_catalog.=) s.id) WHEN MATCHED THEN DO NOTHING WHEN NOT MATCHED AND (s.id OPERATOR(pg_catalog.<) 100) THEN INSERT (id, val) VALUES (s.id, 
s.val) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing MERGE INTO merge_schema.citus_target_xxxxxxx t USING merge_schema.citus_source_xxxxxxx s ON (t.id OPERATOR(pg_catalog.=) s.id) WHEN MATCHED THEN DO NOTHING WHEN NOT MATCHED AND (s.id OPERATOR(pg_catalog.<) 100) THEN INSERT (id, val) VALUES (s.id, s.val) +NOTICE: issuing MERGE INTO merge_schema.citus_target_xxxxxxx t USING merge_schema.citus_source_xxxxxxx s ON (t.id OPERATOR(pg_catalog.=) s.id) WHEN MATCHED THEN DO NOTHING WHEN NOT MATCHED AND (s.id OPERATOR(pg_catalog.<) 100) THEN INSERT (id, val) VALUES (s.id, s.val) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing MERGE INTO merge_schema.citus_target_xxxxxxx t USING merge_schema.citus_source_xxxxxxx s ON (t.id OPERATOR(pg_catalog.=) s.id) WHEN MATCHED THEN DO NOTHING WHEN NOT MATCHED AND (s.id OPERATOR(pg_catalog.<) 100) THEN INSERT (id, val) VALUES (s.id, s.val) +NOTICE: issuing MERGE INTO merge_schema.citus_target_xxxxxxx t USING merge_schema.citus_source_xxxxxxx s ON (t.id OPERATOR(pg_catalog.=) s.id) WHEN MATCHED THEN DO NOTHING WHEN NOT MATCHED AND (s.id OPERATOR(pg_catalog.<) 100) THEN INSERT (id, val) VALUES (s.id, s.val) DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx SET citus.log_remote_commands to false; SELECT compare_tables(); @@ -1842,6 +1842,297 @@ SELECT compare_tables(); (1 row) ROLLBACK; +-- let's create source and target table +ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 13000; +CREATE TABLE source_pushdowntest (id integer); +CREATE TABLE target_pushdowntest (id integer ); +-- let's distribute both table on id field +SELECT create_distributed_table('source_pushdowntest', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('target_pushdowntest', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- we are doing this operation on single node setup let's figure out colocation id of both tables +-- both has same colocation id so both are colocated. +WITH colocations AS ( + SELECT colocationid + FROM pg_dist_partition + WHERE logicalrelid = 'source_pushdowntest'::regclass + OR logicalrelid = 'target_pushdowntest'::regclass +) +SELECT + CASE + WHEN COUNT(DISTINCT colocationid) = 1 THEN 'Same' + ELSE 'Different' + END AS colocation_status +FROM colocations; + colocation_status +--------------------------------------------------------------------- + Same +(1 row) + +SET client_min_messages TO DEBUG1; +-- Test 1 : tables are colocated AND query is multisharded AND Join On distributed column : should push down to workers. 
+EXPLAIN (costs off, timing off, summary off) +MERGE INTO target_pushdowntest t +USING source_pushdowntest s +ON t.id = s.id +WHEN NOT MATCHED THEN + INSERT (id) + VALUES (s.id); +DEBUG: +DEBUG: +DEBUG: +DEBUG: +DEBUG: Creating MERGE router plan + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (Citus Adaptive) + Task Count: 4 + Tasks Shown: All + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Merge on target_pushdowntest_4000068 t + -> Merge Left Join + Merge Cond: (s.id = t.id) + -> Sort + Sort Key: s.id + -> Seq Scan on source_pushdowntest_4000064 s + -> Sort + Sort Key: t.id + -> Seq Scan on target_pushdowntest_4000068 t + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Merge on target_pushdowntest_4000069 t + -> Merge Left Join + Merge Cond: (s.id = t.id) + -> Sort + Sort Key: s.id + -> Seq Scan on source_pushdowntest_4000065 s + -> Sort + Sort Key: t.id + -> Seq Scan on target_pushdowntest_4000069 t + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Merge on target_pushdowntest_4000070 t + -> Merge Left Join + Merge Cond: (s.id = t.id) + -> Sort + Sort Key: s.id + -> Seq Scan on source_pushdowntest_4000066 s + -> Sort + Sort Key: t.id + -> Seq Scan on target_pushdowntest_4000070 t + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Merge on target_pushdowntest_4000071 t + -> Merge Left Join + Merge Cond: (s.id = t.id) + -> Sort + Sort Key: s.id + -> Seq Scan on source_pushdowntest_4000067 s + -> Sort + Sort Key: t.id + -> Seq Scan on target_pushdowntest_4000071 t +(47 rows) + +-- Test 2 : tables are colocated AND source query is not multisharded : should push down to worker. +-- DEBUG LOGS show that query is getting pushed down +MERGE INTO target_pushdowntest t +USING (SELECT * from source_pushdowntest where id = 1) s +on t.id = s.id +WHEN NOT MATCHED THEN + INSERT (id) + VALUES (s.id); +DEBUG: +DEBUG: Creating MERGE router plan +-- Test 3 : tables are colocated source query is single sharded but not using source distributed column in insertion. let's not pushdown. +INSERT INTO source_pushdowntest (id) VALUES (3); +EXPLAIN (costs off, timing off, summary off) +MERGE INTO target_pushdowntest t +USING (SELECT 1 as somekey, id from source_pushdowntest where id = 1) s +on t.id = s.somekey +WHEN NOT MATCHED THEN + INSERT (id) + VALUES (s.somekey); +DEBUG: MERGE INSERT must use the source table distribution column value for push down to workers. Otherwise, repartitioning will be applied +DEBUG: MERGE INSERT must use the source table distribution column value for push down to workers. Otherwise, repartitioning will be applied +DEBUG: Creating MERGE repartition plan +DEBUG: Using column - index:0 from the source list to redistribute + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (Citus MERGE INTO ...) + MERGE INTO target_pushdowntest method: pull to coordinator + -> Custom Scan (Citus Adaptive) + Task Count: 1 + Tasks Shown: All + -> Task + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on source_pushdowntest_4000064 source_pushdowntest + Filter: (id = 1) +(9 rows) + +-- let's verify if we use some other column from source for value of distributed column in target. +-- it should be inserted to correct shard of target. 
+CREATE TABLE source_withdata (id integer, some_number integer); +CREATE TABLE target_table (id integer, name text); +SELECT create_distributed_table('source_withdata', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT create_distributed_table('target_table', 'id'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +INSERT INTO source_withdata (id, some_number) VALUES (1, 3); +-- we will use some_number column from source_withdata to insert into distributed column of target. +-- value of some_number is 3 let's verify what shard it should go to. +select worker_hash(3); + worker_hash +--------------------------------------------------------------------- + -28094569 +(1 row) + +-- it should go to second shard of target as target has 4 shard and hash "-28094569" comes in range of second shard. +MERGE INTO target_table t +USING (SELECT id, some_number from source_withdata where id = 1) s +on t.id = s.some_number +WHEN NOT MATCHED THEN + INSERT (id, name) + VALUES (s.some_number, 'parag'); +DEBUG: Sub-query is not pushable, try repartitioning +DEBUG: MERGE command is only supported when all distributed tables are co-located and joined on their distribution columns +DEBUG: Creating MERGE repartition plan +DEBUG: Using column - index:1 from the source list to redistribute +DEBUG: Collect source query results on coordinator +DEBUG: Create a MERGE task list that needs to be routed +DEBUG: +DEBUG: +DEBUG: +DEBUG: +DEBUG: Execute MERGE task list +-- let's verify if data inserted to second shard of target. +EXPLAIN (analyze on, costs off, timing off, summary off) SELECT * FROM target_table; + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (Citus Adaptive) (actual rows=1 loops=1) + Task Count: 4 + Tuple data received from nodes: 9 bytes + Tasks Shown: All + -> Task + Tuple data received from node: 0 bytes + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on target_table_4000076 target_table (actual rows=0 loops=1) + -> Task + Tuple data received from node: 9 bytes + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on target_table_4000077 target_table (actual rows=1 loops=1) + -> Task + Tuple data received from node: 0 bytes + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on target_table_4000078 target_table (actual rows=0 loops=1) + -> Task + Tuple data received from node: 0 bytes + Node: host=localhost port=xxxxx dbname=regression + -> Seq Scan on target_table_4000079 target_table (actual rows=0 loops=1) +(20 rows) + +-- let's verify target data too. 
+SELECT * FROM target_table; + id | name +--------------------------------------------------------------------- + 3 | parag +(1 row) + +-- test UPDATE : when source is single sharded and table are colocated +MERGE INTO target_table t +USING (SELECT id, some_number from source_withdata where id = 1) s +on t.id = s.some_number +WHEN MATCHED THEN + UPDATE SET name = 'parag jain'; +DEBUG: Sub-query is not pushable, try repartitioning +DEBUG: MERGE command is only supported when all distributed tables are co-located and joined on their distribution columns +DEBUG: Creating MERGE repartition plan +DEBUG: Using column - index:1 from the source list to redistribute +DEBUG: Collect source query results on coordinator +DEBUG: Create a MERGE task list that needs to be routed +DEBUG: +DEBUG: +DEBUG: +DEBUG: +DEBUG: Execute MERGE task list +-- let's verify if data updated properly. +SELECT * FROM target_table; + id | name +--------------------------------------------------------------------- + 3 | parag jain +(1 row) + +-- let's see what happend when we try to update distributed key of target table +MERGE INTO target_table t +USING (SELECT id, some_number from source_withdata where id = 1) s +on t.id = s.some_number +WHEN MATCHED THEN + UPDATE SET id = 1500; +ERROR: updating the distribution column is not allowed in MERGE actions +SELECT * FROM target_table; + id | name +--------------------------------------------------------------------- + 3 | parag jain +(1 row) + +-- test DELETE : when source is single sharded and table are colocated +MERGE INTO target_table t +USING (SELECT id, some_number from source_withdata where id = 1) s +on t.id = s.some_number +WHEN MATCHED THEN + DELETE; +DEBUG: Sub-query is not pushable, try repartitioning +DEBUG: MERGE command is only supported when all distributed tables are co-located and joined on their distribution columns +DEBUG: Creating MERGE repartition plan +DEBUG: Using column - index:1 from the source list to redistribute +DEBUG: Collect source query results on coordinator +DEBUG: Create a MERGE task list that needs to be routed +DEBUG: +DEBUG: +DEBUG: +DEBUG: +DEBUG: Execute MERGE task list +-- let's verify if data deleted properly. 
+SELECT * FROM target_table; + id | name +--------------------------------------------------------------------- +(0 rows) + +-- +DELETE FROM source_withdata; +DELETE FROM target_table; +INSERT INTO source VALUES (1,1); +merge into target_table sda +using source_withdata sdn +on sda.id = sdn.id AND sda.id = 1 +when not matched then + insert (id) + values (10000); +ERROR: MERGE INSERT is using unsupported expression type for distribution column +DETAIL: Inserting arbitrary values that don't correspond to the joined column values can lead to unpredictable outcomes where rows are incorrectly distributed among different shards +SELECT * FROM target_table WHERE id = 10000; + id | name +--------------------------------------------------------------------- +(0 rows) + +RESET client_min_messages; -- This will prune shards with restriction information as NOT MATCHED is void BEGIN; SET citus.log_remote_commands to true; @@ -2898,14 +3189,14 @@ WHEN NOT MATCHED THEN -> Limit -> Sort Sort Key: id2 - -> Seq Scan on demo_source_table_4000135 demo_source_table + -> Seq Scan on demo_source_table_4000151 demo_source_table -> Distributed Subplan XXX_2 -> Custom Scan (Citus Adaptive) Task Count: 4 Tasks Shown: One of 4 -> Task Node: host=localhost port=xxxxx dbname=regression - -> Seq Scan on demo_source_table_4000135 demo_source_table + -> Seq Scan on demo_source_table_4000151 demo_source_table Task Count: 1 Tasks Shown: All -> Task @@ -3119,10 +3410,10 @@ DEBUG: Creating MERGE repartition plan DEBUG: Using column - index:0 from the source list to redistribute DEBUG: Collect source query results on coordinator DEBUG: Create a MERGE task list that needs to be routed -DEBUG: -DEBUG: -DEBUG: -DEBUG: +DEBUG: +DEBUG: +DEBUG: +DEBUG: DEBUG: Execute MERGE task list RESET client_min_messages; SELECT * FROM target_6785 ORDER BY 1; @@ -3240,7 +3531,7 @@ USING s1 s ON t.id = s.id WHEN NOT MATCHED THEN INSERT (id) VALUES(s.val); -ERROR: MERGE INSERT must use the source table distribution column value +ERROR: MERGE INSERT must use the source's joining column for target's distribution column MERGE INTO t1 t USING s1 s ON t.id = s.id @@ -3966,7 +4257,7 @@ CONTEXT: SQL statement "SELECT citus_drop_all_shards(v_obj.objid, v_obj.schema_ PL/pgSQL function citus_drop_trigger() line XX at PERFORM DROP FUNCTION merge_when_and_write(); DROP SCHEMA merge_schema CASCADE; -NOTICE: drop cascades to 103 other objects +NOTICE: drop cascades to 107 other objects DETAIL: drop cascades to function insert_data() drop cascades to table local_local drop cascades to table target @@ -4026,6 +4317,10 @@ drop cascades to table pg_source drop cascades to table citus_target drop cascades to table citus_source drop cascades to function compare_tables() +drop cascades to table source_pushdowntest +drop cascades to table target_pushdowntest +drop cascades to table source_withdata +drop cascades to table target_table drop cascades to view pg_source_view drop cascades to view citus_source_view drop cascades to table pg_pa_target @@ -4042,7 +4337,7 @@ drop cascades to table target_set drop cascades to table source_set drop cascades to table refsource_ref drop cascades to table pg_result -drop cascades to table refsource_ref_4000112 +drop cascades to table refsource_ref_4000128 drop cascades to table pg_ref drop cascades to table local_ref drop cascades to table reftarget_local @@ -4060,11 +4355,7 @@ drop cascades to table source_6785 drop cascades to table target_6785 drop cascades to function add_s(integer,integer) drop cascades to table pg -drop 
cascades to table t1_4000174 -drop cascades to table s1_4000175 +drop cascades to table t1_4000190 +drop cascades to table s1_4000191 drop cascades to table t1 -drop cascades to table s1 -drop cascades to table dist_target -drop cascades to table dist_source -drop cascades to view show_tables -and 3 other objects (see server log for list) +and 7 other objects (see server log for list) diff --git a/src/test/regress/expected/metadata_sync_from_non_maindb.out b/src/test/regress/expected/metadata_sync_from_non_maindb.out new file mode 100644 index 00000000000..2aac507bda2 --- /dev/null +++ b/src/test/regress/expected/metadata_sync_from_non_maindb.out @@ -0,0 +1,311 @@ +CREATE SCHEMA metadata_sync_2pc_schema; +SET search_path TO metadata_sync_2pc_schema; +set citus.enable_create_database_propagation to on; +CREATE DATABASE metadata_sync_2pc_db; +revoke connect,temp,temporary on database metadata_sync_2pc_db from public; +\c metadata_sync_2pc_db +SHOW citus.main_db; + citus.main_db +--------------------------------------------------------------------- + regression +(1 row) + +CREATE USER "grant_role2pc'_user1"; +CREATE USER "grant_role2pc'_user2"; +CREATE USER "grant_role2pc'_user3"; +CREATE USER grant_role2pc_user4; +CREATE USER grant_role2pc_user5; +\c regression +select 1 from citus_remove_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +\c metadata_sync_2pc_db +grant "grant_role2pc'_user1","grant_role2pc'_user2" to "grant_role2pc'_user3" WITH ADMIN OPTION; +-- This section was originally testing a scenario where a user with the 'admin option' grants the same role to another user, also with the 'admin option'. +-- However, we encountered inconsistent errors because the 'admin option' grant is executed after the grant below. +-- Once we establish the correct order of granting, we will reintroduce the 'granted by' clause. +-- For now, we are commenting out the grant below that includes 'granted by', and instead, we are adding a grant without the 'granted by' clause. 
+-- grant "grant_role2pc'_user1","grant_role2pc'_user2" to grant_role2pc_user4,grant_role2pc_user5 granted by "grant_role2pc'_user3"; +grant "grant_role2pc'_user1","grant_role2pc'_user2" to grant_role2pc_user4,grant_role2pc_user5; +--test for grant on database +\c metadata_sync_2pc_db - - :master_port +grant create on database metadata_sync_2pc_db to "grant_role2pc'_user1"; +grant connect on database metadata_sync_2pc_db to "grant_role2pc'_user2"; +grant ALL on database metadata_sync_2pc_db to "grant_role2pc'_user3"; +\c regression +select check_database_privileges('grant_role2pc''_user1','metadata_sync_2pc_db',ARRAY['CREATE']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) +(2 rows) + +select check_database_privileges('grant_role2pc''_user2','metadata_sync_2pc_db',ARRAY['CONNECT']); + check_database_privileges +--------------------------------------------------------------------- + (CONNECT,t) + (CONNECT,t) +(2 rows) + +select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db',ARRAY['CREATE','CONNECT','TEMP','TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) +(8 rows) + +-- test for security label on role +\c metadata_sync_2pc_db - - :master_port +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE grant_role2pc_user4 IS 'citus_unclassified'; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "grant_role2pc'_user1" IS 'citus_classified'; +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('grant_role2pc_user4') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(2 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels($$"grant_role2pc''_user1"$$) ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(2 rows) + +set citus.enable_create_database_propagation to on; +select 1 from citus_add_node('localhost', :worker_2_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('"grant_role2pc''_user2"','"grant_role2pc''_user3"','grant_role2pc_user4','grant_role2pc_user5') + order by member::regrole::text +) t +$$); + result +--------------------------------------------------------------------- + [{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":true},{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":false}] + [{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":true},{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":false}] + [{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":true},{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":false}] +(3 rows) + +select check_database_privileges('grant_role2pc''_user1','metadata_sync_2pc_db',ARRAY['CREATE']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) +(3 rows) + +select check_database_privileges('grant_role2pc''_user2','metadata_sync_2pc_db',ARRAY['CONNECT']); + check_database_privileges +--------------------------------------------------------------------- + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) +(3 rows) + +select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db',ARRAY['CREATE','CONNECT','TEMP','TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +SELECT node_type, result FROM 
get_citus_tests_label_provider_labels('grant_role2pc_user4') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels($$"grant_role2pc''_user1"$$) ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +\c metadata_sync_2pc_db +revoke "grant_role2pc'_user1","grant_role2pc'_user2" from grant_role2pc_user4,grant_role2pc_user5 ; +revoke admin option for "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3"; +revoke "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3"; +revoke ALL on database metadata_sync_2pc_db from "grant_role2pc'_user3"; +revoke CONNECT on database metadata_sync_2pc_db from "grant_role2pc'_user2"; +revoke CREATE on database metadata_sync_2pc_db from "grant_role2pc'_user1"; +\c regression +drop user "grant_role2pc'_user1","grant_role2pc'_user2","grant_role2pc'_user3",grant_role2pc_user4,grant_role2pc_user5; +--test for user operations +--test for create user +\c regression - - :master_port +select 1 from citus_remove_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +\c metadata_sync_2pc_db - - :master_port +CREATE ROLE test_role1 WITH LOGIN PASSWORD 'password1'; +\c metadata_sync_2pc_db - - :worker_1_port +CREATE USER "test_role2-needs\!escape" +WITH + SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION +LIMIT 10 VALID UNTIL '2023-01-01' IN ROLE test_role1; +create role test_role3; +\c regression - - :master_port +select 1 from citus_add_node('localhost', :worker_2_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','test_role3') + ORDER BY rolname + ) t +$$); + result +--------------------------------------------------------------------- + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":true,"rolinherit":true,"rolcreaterole":true,"rolcreatedb":true,"rolcanlogin":true,"rolreplication":true,"rolbypassrls":true,"rolconnlimit":10,"pass_not_empty":null,"date":"2023-01-01"},{"rolname":"test_role3","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":null,"date":null}] + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":true,"rolinherit":true,"rolcreaterole":true,"rolcreatedb":true,"rolcanlogin":true,"rolreplication":true,"rolbypassrls":true,"rolconnlimit":10,"pass_not_empty":null,"date":"2023-01-01"},{"rolname":"test_role3","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":null,"date":null}] + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":true,"rolinherit":true,"rolcreaterole":true,"rolcreatedb":true,"rolcanlogin":true,"rolreplication":true,"rolbypassrls":true,"rolconnlimit":10,"pass_not_empty":null,"date":"2023-01-01"},{"rolname":"test_role3","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":null,"date":null}] +(3 rows) + +--test for alter user +select 1 from citus_remove_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +\c metadata_sync_2pc_db - - :master_port +-- Test ALTER ROLE with various options +ALTER ROLE test_role1 WITH PASSWORD 'new_password1'; +\c metadata_sync_2pc_db - - :worker_1_port +ALTER USER "test_role2-needs\!escape" +WITH + NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION +LIMIT 5 VALID UNTIL '2024-01-01'; +\c regression - - :master_port +select 1 from citus_add_node('localhost', :worker_2_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','test_role3') + ORDER BY rolname + ) t +$$); + result +--------------------------------------------------------------------- + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":false,"rolinherit":false,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":5,"pass_not_empty":null,"date":"2024-01-01"},{"rolname":"test_role3","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":null,"date":null}] + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":false,"rolinherit":false,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":5,"pass_not_empty":null,"date":"2024-01-01"},{"rolname":"test_role3","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":null,"date":null}] + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":false,"rolinherit":false,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":5,"pass_not_empty":null,"date":"2024-01-01"},{"rolname":"test_role3","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":null,"date":null}] +(3 rows) + +--test for drop user +select 1 from citus_remove_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +\c metadata_sync_2pc_db - - :worker_1_port +DROP ROLE test_role1, "test_role2-needs\!escape"; +\c metadata_sync_2pc_db - - :master_port +DROP ROLE test_role3; +\c regression - - :master_port +select 1 from citus_add_node('localhost', :worker_2_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','test_role3') + ORDER BY rolname + ) t +$$); + result +--------------------------------------------------------------------- + + + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":false,"rolinherit":false,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":5,"pass_not_empty":null,"date":"2024-01-01"},{"rolname":"test_role3","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":null,"date":null}] +(3 rows) + +-- Clean up: drop the database on worker node 2 +\c regression - - :worker_2_port +DROP ROLE if exists test_role1, "test_role2-needs\!escape", test_role3; +\c regression - - :master_port +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','test_role3') + ORDER BY rolname + ) t +$$); + result +--------------------------------------------------------------------- + + + +(3 rows) + +set citus.enable_create_database_propagation to on; +drop database metadata_sync_2pc_db; +drop schema metadata_sync_2pc_schema; +reset citus.enable_create_database_propagation; +reset search_path; diff --git a/src/test/regress/expected/metadata_sync_helpers.out b/src/test/regress/expected/metadata_sync_helpers.out index 29d62c46aa7..9db68eaf569 100644 --- a/src/test/regress/expected/metadata_sync_helpers.out +++ b/src/test/regress/expected/metadata_sync_helpers.out @@ -12,9 +12,9 @@ RESET client_min_messages; SET search_path TO metadata_sync_helpers; CREATE TABLE test(col_1 int); -- not in a distributed transaction -SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); +SELECT citus_internal.add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ERROR: This is an internal Citus function can only be used in a distributed transaction -SELECT citus_internal_update_relation_colocation ('test'::regclass, 1); +SELECT citus_internal.update_relation_colocation ('test'::regclass, 1); ERROR: This is an internal Citus function can only be used in a distributed transaction -- in a distributed transaction, but the application name is not Citus BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -24,11 +24,10 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ERROR: This is an internal Citus function can only be used in a 
distributed transaction ROLLBACK; --- in a distributed transaction and the application name is Citus --- but we are on the coordinator, so still not allowed +-- in a distributed transaction and the application name is Citus, allowed. BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); assign_distributed_transaction_id @@ -37,8 +36,12 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); -ERROR: This is an internal Citus function can only be used in a distributed transaction + SELECT citus_internal.add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); + add_partition_metadata +--------------------------------------------------------------------- + +(1 row) + ROLLBACK; \c - postgres - \c - - - :worker_1_port @@ -58,7 +61,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ERROR: must be owner of table test ROLLBACK; -- we do not own the relation @@ -70,7 +73,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation ('test'::regclass, 10); + SELECT citus_internal.update_relation_colocation ('test'::regclass, 10); ERROR: must be owner of table test ROLLBACK; -- finally, a user can only add its own tables to the metadata @@ -84,8 +87,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); - citus_internal_add_partition_metadata + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + add_partition_metadata --------------------------------------------------------------------- (1 row) @@ -106,8 +109,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_rebalancer gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); - citus_internal_add_partition_metadata + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + add_partition_metadata --------------------------------------------------------------------- (1 row) @@ -122,7 +125,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=not a correct gpid'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); ERROR: This is an internal Citus function can only be used in a distributed transaction ROLLBACK; -- also faills if done by the rebalancer @@ -134,7 +137,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_rebalancer gpid=not a correct gpid'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); ERROR: This is an internal Citus function can only be used in a distributed transaction 
ROLLBACK; -- application_name with suffix is ok (e.g. pgbouncer might add this) @@ -146,8 +149,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001 - from 10.12.14.16:10370'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); - citus_internal_add_partition_metadata + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + add_partition_metadata --------------------------------------------------------------------- (1 row) @@ -162,7 +165,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid='; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); ERROR: This is an internal Citus function can only be used in a distributed transaction ROLLBACK; -- empty application_name @@ -174,7 +177,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to ''; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); ERROR: This is an internal Citus function can only be used in a distributed transaction ROLLBACK; -- application_name with incorrect prefix @@ -186,7 +189,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); ERROR: This is an internal Citus function can only be used in a distributed transaction ROLLBACK; -- fails because there is no X distribution method @@ -198,7 +201,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); ERROR: Metadata syncing is only allowed for hash, reference and local tables:X ROLLBACK; -- fails because there is the column does not exist @@ -210,7 +213,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'non_existing_col', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'non_existing_col', 0, 's'); ERROR: column "non_existing_col" of relation "test_2" does not exist ROLLBACK; --- fails because we do not allow NULL parameters @@ -222,7 +225,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata (NULL, 'h', 'non_existing_col', 0, 's'); + SELECT citus_internal.add_partition_metadata (NULL, 'h', 'non_existing_col', 0, 's'); ERROR: relation cannot be NULL ROLLBACK; -- fails because colocationId cannot be negative @@ -234,7 +237,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', -1, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', -1, 's'); ERROR: 
Metadata syncing is only allowed for valid colocation id values. ROLLBACK; -- fails because there is no X replication model @@ -246,7 +249,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 'X'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 'X'); ERROR: Metadata syncing is only allowed for hash, reference and local tables:X ROLLBACK; -- the same table cannot be added twice, that is enforced by a primary key @@ -259,13 +262,13 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); - citus_internal_add_partition_metadata + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + add_partition_metadata --------------------------------------------------------------------- (1 row) - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); ERROR: duplicate key value violates unique constraint "pg_dist_partition_logical_relid_index" ROLLBACK; -- the same table cannot be added twice, that is enforced by a primary key even if distribution key changes @@ -278,13 +281,13 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); - citus_internal_add_partition_metadata + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + add_partition_metadata --------------------------------------------------------------------- (1 row) - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_2', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_2', 0, 's'); ERROR: duplicate key value violates unique constraint "pg_dist_partition_logical_relid_index" ROLLBACK; -- hash distributed table cannot have NULL distribution key @@ -297,7 +300,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', NULL, 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', NULL, 0, 's'); ERROR: Distribution column cannot be NULL for relation "test_2" ROLLBACK; -- even if metadata_sync_helper_role is not owner of the table test @@ -329,8 +332,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); - citus_internal_add_partition_metadata + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); + add_partition_metadata --------------------------------------------------------------------- (1 row) @@ -346,7 +349,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420007, 10000, 11111); + SELECT citus_internal.update_placement_metadata(1420007, 10000, 11111); ERROR: could not find 
valid entry for shard xxxxx ROLLBACK; -- non-existing users should fail to pass the checks @@ -375,7 +378,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); ERROR: role "non_existing_user" does not exist ROLLBACK; \c - postgres - :worker_1_port @@ -406,7 +409,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_ref'::regclass, 'n', 'col_1', 0, 's'); ERROR: Reference or local tables cannot have distribution columns ROLLBACK; -- non-valid replication model @@ -418,7 +421,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'A'); + SELECT citus_internal.add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'A'); ERROR: Metadata syncing is only allowed for known replication models. ROLLBACK; -- not-matching replication model for reference table @@ -430,7 +433,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'c'); + SELECT citus_internal.add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'c'); ERROR: Local or references tables can only have 's' or 't' as the replication model. 
ROLLBACK; -- add entry for super user table @@ -445,8 +448,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('super_user_table'::regclass, 'h', 'col_1', 0, 's'); - citus_internal_add_partition_metadata + SELECT citus_internal.add_partition_metadata ('super_user_table'::regclass, 'h', 'col_1', 0, 's'); + add_partition_metadata --------------------------------------------------------------------- (1 row) @@ -467,7 +470,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('super_user_table'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: must be owner of table super_user_table ROLLBACK; -- the user is only allowed to add a shard for add a table which is in pg_dist_partition @@ -482,7 +485,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: The relation "test_2" does not have a valid entry in pg_dist_partition. 
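The hunks that follow repair this by registering the relations in the order the helper functions require: partition metadata first, then shard metadata, then placements. A minimal sketch of that happy path under the same session setup (assign_distributed_transaction_id plus the citus_internal application_name), with illustrative values rather than the exact shard schedule used by the test:

BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus_internal gpid=10000000001';
-- 1. the relation needs a pg_dist_partition entry before anything else
SELECT citus_internal.add_partition_metadata('test_2'::regclass, 'h', 'col_1', 250, 's');
-- 2. only then can shard metadata reference it
SELECT citus_internal.add_shard_metadata('test_2'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text);
-- 3. placements come last and must point at an existing node group
SELECT citus_internal.add_placement_metadata(1420000, 0::bigint, 1::int, 1500000::bigint);
ROLLBACK;

The argument lists above follow the citus_internal.* signatures recorded in the multi_extension.out hunk further down in this diff.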
ROLLBACK; -- ok, now add the table to the pg_dist_partition @@ -494,20 +497,20 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 250, 's'); - citus_internal_add_partition_metadata + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 250, 's'); + add_partition_metadata --------------------------------------------------------------------- (1 row) - SELECT citus_internal_add_partition_metadata ('test_3'::regclass, 'h', 'col_1', 251, 's'); - citus_internal_add_partition_metadata + SELECT citus_internal.add_partition_metadata ('test_3'::regclass, 'h', 'col_1', 251, 's'); + add_partition_metadata --------------------------------------------------------------------- (1 row) - SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 't'); - citus_internal_add_partition_metadata + SELECT citus_internal.add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 't'); + add_partition_metadata --------------------------------------------------------------------- (1 row) @@ -522,8 +525,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation ('test_2'::regclass, 1231231232); - citus_internal_update_relation_colocation + SELECT citus_internal.update_relation_colocation ('test_2'::regclass, 1231231232); + update_relation_colocation --------------------------------------------------------------------- (1 row) @@ -541,7 +544,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, -1, 't'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: Invalid shard id: -1 ROLLBACK; -- invalid storage types are not allowed @@ -556,7 +559,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000, 'X'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: Invalid shard storage type: X ROLLBACK; -- NULL shard ranges are not allowed for hash distributed tables @@ -571,7 +574,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000, 't'::"char", NULL, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: Shards of has distributed table "test_2" cannot have NULL shard ranges ROLLBACK; -- non-integer shard ranges are not allowed @@ -586,7 +589,7 @@ BEGIN TRANSACTION ISOLATION 
LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", 'non-int'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: invalid input syntax for type integer: "non-int" ROLLBACK; -- shardMinValue should be smaller than shardMaxValue @@ -601,7 +604,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-1610612737'::text, '-2147483648'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: shardMinValue=-1610612737 is greater than shardMaxValue=-2147483648 for table "test_2", which is not allowed ROLLBACK; -- we do not allow overlapping shards for the same table @@ -618,7 +621,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text), ('test_2'::regclass, 1420001::bigint, 't'::"char", '20'::text, '30'::text), ('test_2'::regclass, 1420002::bigint, 't'::"char", '10'::text, '50'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: Shard intervals overlap for table "test_2": 1420001 and 1420000 ROLLBACK; -- Now let's check valid pg_dist_object updates @@ -634,7 +637,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('non_existing_type', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false)) - SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; + SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; ERROR: unrecognized object type "non_existing_type" ROLLBACK; -- check the sanity of distributionArgumentIndex and colocationId @@ -649,7 +652,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -100, 0, false)) - SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; + SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; ERROR: distribution_argument_index must be between 0 and 100 ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -663,7 +666,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY 
terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -1, -1, false)) - SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; + SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; ERROR: colocationId must be a positive number ROLLBACK; -- check with non-existing object @@ -678,10 +681,10 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false)) - SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; + SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; ERROR: role "non_existing_user" does not exist ROLLBACK; --- since citus_internal_add_object_metadata is strict function returns NULL +-- since citus_internal.add_object_metadata is strict function returns NULL -- if any parameter is NULL BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); @@ -694,15 +697,15 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], 0, NULL::int, false)) - SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; - citus_internal_add_object_metadata + SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; + add_object_metadata --------------------------------------------------------------------- (1 row) ROLLBACK; \c - postgres - :worker_1_port --- Show that citus_internal_add_object_metadata only works for object types +-- Show that citus_internal.add_object_metadata only works for object types -- which is known how to distribute BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); @@ -721,10 +724,10 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET ROLE metadata_sync_helper_role; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('operator', ARRAY['===']::text[], ARRAY['int','int']::text[], -1, 0, false)) - SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; + SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; ERROR: operator object can not be distributed by Citus ROLLBACK; --- Show that citus_internal_add_object_metadata checks the priviliges +-- Show that citus_internal.add_object_metadata checks the priviliges BEGIN TRANSACTION ISOLATION LEVEL READ 
COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); assign_distributed_transaction_id @@ -741,7 +744,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET ROLE metadata_sync_helper_role; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('function', ARRAY['distribution_test_function']::text[], ARRAY['integer']::text[], -1, 0, false)) - SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; + SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; ERROR: must be owner of function distribution_test_function ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -758,7 +761,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET ROLE metadata_sync_helper_role; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('type', ARRAY['distributed_test_type']::text[], ARRAY[]::text[], -1, 0, false)) - SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; + SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; ERROR: must be owner of type distributed_test_type ROLLBACK; -- we do not allow wrong partmethod @@ -777,7 +780,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text), ('test_2'::regclass, 1420001::bigint, 't'::"char", '20'::text, '30'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: Metadata syncing is only allowed for hash, reference and local tables: X ROLLBACK; -- we do not allow NULL shardMinMax values @@ -794,8 +797,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - citus_internal_add_shard_metadata + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + add_shard_metadata --------------------------------------------------------------------- (1 row) @@ -804,7 +807,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 1420000; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420001::bigint, 't'::"char", '20'::text, '30'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: Shards of has distributed table "test_2" cannot 
have NULL shard ranges ROLLBACK; \c - metadata_sync_helper_role - :worker_1_port @@ -827,8 +830,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; ('test_2'::regclass, 1420004::bigint, 't'::"char", '51'::text, '60'::text), ('test_2'::regclass, 1420005::bigint, 't'::"char", '61'::text, '70'::text), ('test_3'::regclass, 1420008::bigint, 't'::"char", '11'::text, '20'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - citus_internal_add_shard_metadata + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + add_shard_metadata --------------------------------------------------------------------- @@ -849,7 +852,7 @@ BEGIN; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ERROR: cannot colocate tables test_2 and test_3 ROLLBACK; -- now, add few more shards for test_3 to make it colocated with test_2 @@ -868,8 +871,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; ('test_3'::regclass, 1420011::bigint, 't'::"char", '41'::text, '50'::text), ('test_3'::regclass, 1420012::bigint, 't'::"char", '51'::text, '60'::text), ('test_3'::regclass, 1420013::bigint, 't'::"char", '61'::text, '70'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - citus_internal_add_shard_metadata + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + add_shard_metadata --------------------------------------------------------------------- @@ -891,7 +894,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420003::bigint, 't'::"char", '-1610612737'::text, NULL)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: Shards of reference or local table "test_ref" should have NULL shard ranges ROLLBACK; -- reference tables cannot have multiple shards @@ -907,7 +910,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL), ('test_ref'::regclass, 1420007::bigint, 't'::"char", NULL, NULL)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: relation "test_ref" has already at least one shard, adding more is not allowed ROLLBACK; -- finally, add a shard for reference tables @@ -922,8 +925,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - 
citus_internal_add_shard_metadata + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + add_shard_metadata --------------------------------------------------------------------- (1 row) @@ -943,8 +946,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('super_user_table'::regclass, 1420007::bigint, 't'::"char", '11'::text, '20'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - citus_internal_add_shard_metadata + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + add_shard_metadata --------------------------------------------------------------------- (1 row) @@ -963,9 +966,9 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS - (VALUES (-10, 1, 0::bigint, 1::int, 1500000::bigint)) - SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS + (VALUES (-10, 0::bigint, 1::int, 1500000::bigint)) + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ERROR: could not find valid entry for shard xxxxx ROLLBACK; -- invalid placementid @@ -980,7 +983,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1420000, 0::bigint, 1::int, -10)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ERROR: Shard placement has invalid placement id (-10) for shard(1420000) ROLLBACK; -- non-existing shard @@ -995,7 +998,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1430100, 0::bigint, 1::int, 10)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ERROR: could not find valid entry for shard xxxxx ROLLBACK; -- non-existing node with non-existing node-id 123123123 @@ -1010,7 +1013,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES ( 1420000, 0::bigint, 123123123::int, 1500000)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ERROR: Node with group id 123123123 for shard placement xxxxx does not exist ROLLBACK; -- create a volatile function that returns the local node id @@ -1041,7 +1044,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1420000, 0::bigint, get_node_id(), 1500000), (1420000, 0::bigint, get_node_id(), 1500001)) - SELECT 
citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ERROR: duplicate key value violates unique constraint "placement_shardid_groupid_unique_index" ROLLBACK; -- shard is not owned by us @@ -1056,7 +1059,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1420007, 0::bigint, get_node_id(), 1500000)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ERROR: must be owner of table super_user_table ROLLBACK; -- sucessfully add placements @@ -1082,8 +1085,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1420011, 0::bigint, get_node_id(), 1500009), (1420012, 0::bigint, get_node_id(), 1500010), (1420013, 0::bigint, get_node_id(), 1500011)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - citus_internal_add_placement_metadata + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + add_placement_metadata --------------------------------------------------------------------- @@ -1109,8 +1112,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); - citus_internal_update_relation_colocation + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); + update_relation_colocation --------------------------------------------------------------------- (1 row) @@ -1127,7 +1130,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420000, get_node_id(), get_node_id()+1000); + SELECT citus_internal.update_placement_metadata(1420000, get_node_id(), get_node_id()+1000); ERROR: Node with group id 1014 for shard placement xxxxx does not exist COMMIT; -- fails because the source node doesn't contain the shard @@ -1140,7 +1143,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420000, get_node_id()+10000, get_node_id()); + SELECT citus_internal.update_placement_metadata(1420000, get_node_id()+10000, get_node_id()); ERROR: Active placement for shard xxxxx is not found on group:14 COMMIT; -- fails because shard does not exist @@ -1153,7 +1156,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(0, get_node_id(), get_node_id()+1); + SELECT citus_internal.update_placement_metadata(0, get_node_id(), get_node_id()+1); ERROR: Shard id does not exists: 0 COMMIT; -- fails because none-existing shard @@ -1166,7 +1169,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(213123123123, get_node_id(), get_node_id()+1); + SELECT citus_internal.update_placement_metadata(213123123123, get_node_id(), 
get_node_id()+1); ERROR: Shard id does not exists: 213123123123 COMMIT; -- fails because we do not own the shard @@ -1179,7 +1182,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420007, get_node_id(), get_node_id()+1); + SELECT citus_internal.update_placement_metadata(1420007, get_node_id(), get_node_id()+1); ERROR: must be owner of table super_user_table COMMIT; -- the user only allowed to delete their own shards @@ -1194,7 +1197,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420007)) - SELECT citus_internal_delete_shard_metadata(shardid) FROM shard_data; + SELECT citus_internal.delete_shard_metadata(shardid) FROM shard_data; ERROR: must be owner of table super_user_table ROLLBACK; -- the user cannot delete non-existing shards @@ -1209,7 +1212,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420100)) - SELECT citus_internal_delete_shard_metadata(shardid) FROM shard_data; + SELECT citus_internal.delete_shard_metadata(shardid) FROM shard_data; ERROR: Shard id does not exists: 1420100 ROLLBACK; -- sucessfully delete shards @@ -1236,8 +1239,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420000)) - SELECT citus_internal_delete_shard_metadata(shardid) FROM shard_data; - citus_internal_delete_shard_metadata + SELECT citus_internal.delete_shard_metadata(shardid) FROM shard_data; + delete_shard_metadata --------------------------------------------------------------------- (1 row) @@ -1271,7 +1274,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; -- so that making two tables colocated fails UPDATE pg_dist_partition SET repmodel = 't' WHERE logicalrelid = 'test_2'::regclass; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ERROR: cannot colocate tables test_2 and test_3 ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -1295,7 +1298,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 1 :varnoold 1 :varoattno 1 :location -1}' WHERE logicalrelid = 'test_2'::regclass; \endif - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ERROR: cannot colocate tables test_2 and test_3 ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -1310,7 +1313,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; -- so that making two tables colocated fails UPDATE pg_dist_partition SET partmethod = '' WHERE logicalrelid = 'test_2'::regclass; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ERROR: The relation "test_2" does not have a valid entry in pg_dist_partition. 
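Each of these negative cases corrupts pg_dist_partition directly before calling citus_internal.update_relation_colocation. A hypothetical inspection query, not part of the test files, that shows the columns the surrounding hunks tamper with:

-- logicalrelid, partmethod, colocationid and repmodel are the fields the UPDATEs above and below touch
SELECT logicalrelid, partmethod, colocationid, repmodel
FROM pg_dist_partition
WHERE logicalrelid IN ('test_2'::regclass, 'test_3'::regclass);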
ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -1325,7 +1328,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; -- so that making two tables colocated fails UPDATE pg_dist_partition SET partmethod = 'a' WHERE logicalrelid = 'test_2'::regclass; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ERROR: Updating colocation ids are only allowed for hash and single shard distributed tables: a ROLLBACK; -- colocated hash distributed table should have the same dist key columns @@ -1340,13 +1343,13 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_add_partition_metadata ('test_5'::regclass, 'h', 'int_col', 500, 's'); - citus_internal_add_partition_metadata + SELECT citus_internal.add_partition_metadata ('test_5'::regclass, 'h', 'int_col', 500, 's'); + add_partition_metadata --------------------------------------------------------------------- (1 row) - SELECT citus_internal_add_partition_metadata ('test_6'::regclass, 'h', 'text_col', 500, 's'); + SELECT citus_internal.add_partition_metadata ('test_6'::regclass, 'h', 'text_col', 500, 's'); ERROR: cannot colocate tables test_6 and test_5 ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -1364,13 +1367,13 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_add_partition_metadata ('test_7'::regclass, 'h', 'text_col', 500, 's'); - citus_internal_add_partition_metadata + SELECT citus_internal.add_partition_metadata ('test_7'::regclass, 'h', 'text_col', 500, 's'); + add_partition_metadata --------------------------------------------------------------------- (1 row) - SELECT citus_internal_add_partition_metadata ('test_8'::regclass, 'h', 'text_col', 500, 's'); + SELECT citus_internal.add_partition_metadata ('test_8'::regclass, 'h', 'text_col', 500, 's'); ERROR: cannot colocate tables test_8 and test_7 ROLLBACK; -- we don't need the table/schema anymore diff --git a/src/test/regress/expected/multi_alter_table_statements.out b/src/test/regress/expected/multi_alter_table_statements.out index c24927504b6..398fa8f7f87 100644 --- a/src/test/regress/expected/multi_alter_table_statements.out +++ b/src/test/regress/expected/multi_alter_table_statements.out @@ -1349,6 +1349,77 @@ SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.p (schema,{test_schema_for_sequence_propagation},{}) (1 row) +-- Bug: https://github.com/citusdata/citus/issues/7378 +-- Create a reference table +CREATE TABLE tbl_ref(row_id integer primary key); +INSERT INTO tbl_ref VALUES (1), (2); +SELECT create_reference_table('tbl_ref'); +NOTICE: Copying data from local table... +NOTICE: copying the data has completed +DETAIL: The local data in the table is no longer visible, but is still on disk. +HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$multi_alter_table_statements.tbl_ref$$) + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +-- Create a distributed table +CREATE TABLE tbl_dist(series_id integer); +INSERT INTO tbl_dist VALUES (1), (1), (2), (2); +SELECT create_distributed_table('tbl_dist', 'series_id'); +NOTICE: Copying data from local table... 
+NOTICE: copying the data has completed +DETAIL: The local data in the table is no longer visible, but is still on disk. +HINT: To remove the local data, run: SELECT truncate_local_data_after_distributing_table($$multi_alter_table_statements.tbl_dist$$) + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- Create a view that joins the distributed table with the reference table on the distribution key. +CREATE VIEW vw_citus_views as +SELECT d.series_id FROM tbl_dist d JOIN tbl_ref r ON d.series_id = r.row_id; +-- The view initially works fine +SELECT * FROM vw_citus_views ORDER BY 1; + series_id +--------------------------------------------------------------------- + 1 + 1 + 2 + 2 +(4 rows) + +-- Now, alter the table +ALTER TABLE tbl_ref ADD COLUMN category1 varchar(50); +SELECT * FROM vw_citus_views ORDER BY 1; + series_id +--------------------------------------------------------------------- + 1 + 1 + 2 + 2 +(4 rows) + +ALTER TABLE tbl_ref ADD COLUMN category2 varchar(50); +SELECT * FROM vw_citus_views ORDER BY 1; + series_id +--------------------------------------------------------------------- + 1 + 1 + 2 + 2 +(4 rows) + +ALTER TABLE tbl_ref DROP COLUMN category1; +SELECT * FROM vw_citus_views ORDER BY 1; + series_id +--------------------------------------------------------------------- + 1 + 1 + 2 + 2 +(4 rows) + SET client_min_messages TO WARNING; DROP SCHEMA test_schema_for_sequence_propagation CASCADE; DROP TABLE table_without_sequence; diff --git a/src/test/regress/expected/multi_citus_tools.out b/src/test/regress/expected/multi_citus_tools.out index eef7a98ca6b..b477636862b 100644 --- a/src/test/regress/expected/multi_citus_tools.out +++ b/src/test/regress/expected/multi_citus_tools.out @@ -587,7 +587,7 @@ SET client_min_messages TO DEBUG; -- verify that we can create connections only with users with login privileges. 
SET ROLE role_without_login; SELECT citus_check_connection_to_node('localhost', :worker_1_port); -WARNING: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "role_without_login" is not permitted to log in +WARNING: connection to the remote node role_without_login@localhost:xxxxx failed with the following error: FATAL: role "role_without_login" is not permitted to log in citus_check_connection_to_node --------------------------------------------------------------------- f diff --git a/src/test/regress/expected/multi_cluster_management.out b/src/test/regress/expected/multi_cluster_management.out index e58b029373f..3eb549ab574 100644 --- a/src/test/regress/expected/multi_cluster_management.out +++ b/src/test/regress/expected/multi_cluster_management.out @@ -90,7 +90,7 @@ SELECT citus_disable_node('localhost', :worker_2_port); (1 row) -SELECT public.wait_until_metadata_sync(60000); +SELECT public.wait_until_metadata_sync(20000); wait_until_metadata_sync --------------------------------------------------------------------- @@ -812,7 +812,7 @@ SELECT citus_disable_node('localhost', 9999); (1 row) -SELECT public.wait_until_metadata_sync(60000); +SELECT public.wait_until_metadata_sync(20000); wait_until_metadata_sync --------------------------------------------------------------------- @@ -1258,3 +1258,9 @@ SELECT bool_and(hasmetadata) AND bool_and(metadatasynced) FROM pg_dist_node WHER t (1 row) +-- Grant all on public schema to public +-- +-- That's the default on Postgres versions < 15 and we want to +-- keep permissions compatible accross versions, in regression +-- tests. +GRANT ALL ON SCHEMA public TO PUBLIC; diff --git a/src/test/regress/expected/multi_copy.out b/src/test/regress/expected/multi_copy.out index abd58eb1d7e..ff4cbdd2c11 100644 --- a/src/test/regress/expected/multi_copy.out +++ b/src/test/regress/expected/multi_copy.out @@ -730,7 +730,7 @@ ALTER USER test_user WITH nologin; \c - test_user - :master_port -- reissue copy, and it should fail COPY numbers_hash FROM STDIN WITH (FORMAT 'csv'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" is not permitted to log in +ERROR: connection to the remote node test_user@localhost:xxxxx failed with the following error: FATAL: role "test_user" is not permitted to log in -- verify shards in the none of the workers as marked invalid SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) @@ -749,7 +749,7 @@ SELECT shardid, shardstate, nodename, nodeport -- try to insert into a reference table copy should fail COPY numbers_reference FROM STDIN WITH (FORMAT 'csv'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" is not permitted to log in +ERROR: connection to the remote node test_user@localhost:xxxxx failed with the following error: FATAL: role "test_user" is not permitted to log in -- verify shards for reference table are still valid SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement join pg_dist_shard using(shardid) @@ -765,7 +765,7 @@ SELECT shardid, shardstate, nodename, nodeport -- since it can not insert into either copies of a shard. shards are expected to -- stay valid since the operation is rolled back. 
COPY numbers_hash_other FROM STDIN WITH (FORMAT 'csv'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" is not permitted to log in +ERROR: connection to the remote node test_user@localhost:xxxxx failed with the following error: FATAL: role "test_user" is not permitted to log in -- verify shards for numbers_hash_other are still valid -- since copy has failed altogether SELECT shardid, shardstate, nodename, nodeport diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index 295b10c76e0..aaafce71509 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -1420,9 +1420,39 @@ SELECT * FROM multi_extension.print_extension_changes(); -- Snapshot of state at 12.2-1 ALTER EXTENSION citus UPDATE TO '12.2-1'; SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- -(0 rows) + previous_object | current_object +--------------------------------------------------------------------- + function citus_unmark_object_distributed(oid,oid,integer) void | + | function citus_internal.acquire_citus_advisory_object_class_lock(integer,cstring) void + | function citus_internal.add_colocation_metadata(integer,integer,integer,regtype,oid) void + | function citus_internal.add_object_metadata(text,text[],text[],integer,integer,boolean) void + | function citus_internal.add_partition_metadata(regclass,"char",text,integer,"char") void + | function citus_internal.add_placement_metadata(bigint,bigint,integer,bigint) void + | function citus_internal.add_shard_metadata(regclass,bigint,"char",text,text) void + | function citus_internal.add_tenant_schema(oid,integer) void + | function citus_internal.adjust_local_clock_to_remote(cluster_clock) void + | function citus_internal.commit_management_command_2pc() void + | function citus_internal.database_command(text) void + | function citus_internal.delete_colocation_metadata(integer) void + | function citus_internal.delete_partition_metadata(regclass) void + | function citus_internal.delete_placement_metadata(bigint) void + | function citus_internal.delete_shard_metadata(bigint) void + | function citus_internal.delete_tenant_schema(oid) void + | function citus_internal.execute_command_on_remote_nodes_as_user(text,text) void + | function citus_internal.global_blocked_processes() SETOF record + | function citus_internal.is_replication_origin_tracking_active() boolean + | function citus_internal.local_blocked_processes() SETOF record + | function citus_internal.mark_node_not_synced(integer,integer) void + | function citus_internal.mark_object_distributed(oid,text,oid,text) void + | function citus_internal.start_management_transaction(xid8) void + | function citus_internal.start_replication_origin_tracking() void + | function citus_internal.stop_replication_origin_tracking() void + | function citus_internal.unregister_tenant_schema_globally(oid,text) void + | function citus_internal.update_none_dist_table_metadata(oid,"char",bigint,boolean) void + | function citus_internal.update_placement_metadata(bigint,integer,integer) void + | function citus_internal.update_relation_colocation(oid,integer) void + | function citus_unmark_object_distributed(oid,oid,integer,boolean) void +(30 rows) DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff; -- show running version @@ -1618,8 +1648,8 @@ CREATE 
EXTENSION citus; -- Check that maintenance daemon can also be started in another database CREATE DATABASE another; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. \c another CREATE EXTENSION citus; CREATE SCHEMA test; @@ -1677,13 +1707,13 @@ NOTICE: drop cascades to function test_daemon.maintenance_daemon_died(text) -- create a test database, configure citus with single node CREATE DATABASE another; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. \c - - - :worker_1_port CREATE DATABASE another; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. \c - - - :master_port \c another CREATE EXTENSION citus; diff --git a/src/test/regress/expected/multi_fix_partition_shard_index_names.out b/src/test/regress/expected/multi_fix_partition_shard_index_names.out index e6795317c85..975a4935149 100644 --- a/src/test/regress/expected/multi_fix_partition_shard_index_names.out +++ b/src/test/regress/expected/multi_fix_partition_shard_index_names.out @@ -658,9 +658,9 @@ NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('extension', ARRAY['citus_columnar']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; +NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('extension', ARRAY['citus_columnar']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('extension', ARRAY['citus_columnar']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; +NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, 
distargumentindex, colocationid, force_delegation) AS (VALUES ('extension', ARRAY['citus_columnar']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SELECT worker_apply_shard_ddl_command (915002, 'fix_idx_names', 'CREATE TABLE fix_idx_names.p2 (dist_col integer NOT NULL, another_col integer, partition_col timestamp without time zone NOT NULL, name text) USING columnar') DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx @@ -692,25 +692,25 @@ NOTICE: issuing SET citus.enable_ddl_propagation TO 'off' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SET citus.enable_ddl_propagation TO 'off' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing SELECT citus_internal_add_partition_metadata ('fix_idx_names.p2'::regclass, 'h', 'dist_col', 1370001, 's') +NOTICE: issuing SELECT citus_internal.add_partition_metadata ('fix_idx_names.p2'::regclass, 'h', 'dist_col', 1370001, 's') DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing SELECT citus_internal_add_partition_metadata ('fix_idx_names.p2'::regclass, 'h', 'dist_col', 1370001, 's') +NOTICE: issuing SELECT citus_internal.add_partition_metadata ('fix_idx_names.p2'::regclass, 'h', 'dist_col', 1370001, 's') DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('fix_idx_names.p2'::regclass, 915002, 't'::"char", '-2147483648', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; +NOTICE: issuing WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('fix_idx_names.p2'::regclass, 915002, 't'::"char", '-2147483648', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('fix_idx_names.p2'::regclass, 915002, 't'::"char", '-2147483648', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; +NOTICE: issuing WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('fix_idx_names.p2'::regclass, 915002, 't'::"char", '-2147483648', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (xxxxxx, xxxxxx, xxxxxx, xxxxxx)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; +NOTICE: issuing WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (xxxxxx, xxxxxx, xxxxxx, xxxxxx)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing WITH placement_data(shardid, 
shardlength, groupid, placementid) AS (VALUES (xxxxxx, xxxxxx, xxxxxx, xxxxxx)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; +NOTICE: issuing WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (xxxxxx, xxxxxx, xxxxxx, xxxxxx)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SET citus.enable_ddl_propagation TO 'off' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SET citus.enable_ddl_propagation TO 'off' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['fix_idx_names', 'p2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; +NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['fix_idx_names', 'p2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['fix_idx_names', 'p2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; +NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['fix_idx_names', 'p2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SET citus.enable_ddl_propagation TO 'off' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx diff --git a/src/test/regress/expected/multi_join_order_additional.out b/src/test/regress/expected/multi_join_order_additional.out index 405962dbc4b..6fde7da13b0 100644 --- a/src/test/regress/expected/multi_join_order_additional.out +++ b/src/test/regress/expected/multi_join_order_additional.out @@ -77,10 +77,10 @@ DEBUG: Router planner cannot handle multi-shard select queries LOG: join order: [ "lineitem" ][ local partition join "lineitem" ] DEBUG: join prunable for intervals [-2147483648,-1] and [0,2147483647] DEBUG: join prunable for intervals [0,2147483647] and [-2147483648,-1] - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (2 rows) SET client_min_messages TO LOG; @@ -92,11 +92,11 @@ SELECT count(*) FROM lineitem, orders WHERE (l_orderkey = o_orderkey AND l_quantity > 5) OR (l_orderkey = o_orderkey AND l_quantity < 10); LOG: 
join order: [ "lineitem" ][ local partition join "orders" ] - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (3 rows) EXPLAIN (COSTS OFF) @@ -107,11 +107,11 @@ EXPLAIN (COSTS OFF) SELECT count(*) FROM orders, lineitem_hash WHERE o_orderkey = l_orderkey; LOG: join order: [ "orders" ][ dual partition join "lineitem_hash" ] - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (3 rows) -- Verify we handle local joins between two hash-partitioned tables. @@ -119,11 +119,11 @@ EXPLAIN (COSTS OFF) SELECT count(*) FROM orders_hash, lineitem_hash WHERE o_orderkey = l_orderkey; LOG: join order: [ "orders_hash" ][ local partition join "lineitem_hash" ] - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (3 rows) -- Validate that we can handle broadcast joins with hash-partitioned tables. @@ -131,11 +131,11 @@ EXPLAIN (COSTS OFF) SELECT count(*) FROM customer_hash, nation WHERE c_nationkey = n_nationkey; LOG: join order: [ "customer_hash" ][ reference join "nation" ] - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (3 rows) -- Validate that we don't use a single-partition join method for a hash @@ -144,11 +144,11 @@ EXPLAIN (COSTS OFF) SELECT count(*) FROM orders, lineitem, customer_append WHERE o_custkey = l_partkey AND o_custkey = c_nationkey; LOG: join order: [ "orders" ][ dual partition join "lineitem" ][ dual partition join "customer_append" ] - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (3 rows) -- Validate that we don't chose a single-partition join method with a @@ -157,11 +157,11 @@ EXPLAIN (COSTS OFF) SELECT count(*) FROM orders, customer_hash WHERE c_custkey = o_custkey; LOG: join order: [ "orders" ][ dual partition join "customer_hash" ] - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (3 rows) -- Validate that we can re-partition a hash partitioned table to join with a @@ -170,11 +170,11 @@ EXPLAIN (COSTS OFF) SELECT count(*) FROM orders_hash, customer_append WHERE c_custkey = o_custkey; LOG: join order: [ "orders_hash" ][ dual partition join "customer_append" ] - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (3 rows) -- Validate a 4 way join that could be done locally is planned as such by the logical @@ -195,11 +195,11 @@ JOIN ( WHERE event_type = 5 ) AS 
some_users ON (some_users.user_id = bar.user_id); LOG: join order: [ "users_table" ][ local partition join "events_table" ][ local partition join "users_table" ][ local partition join "events_table" ] - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (3 rows) -- Reset client logging level to its previous value diff --git a/src/test/regress/expected/multi_join_order_tpch_repartition.out b/src/test/regress/expected/multi_join_order_tpch_repartition.out index e26a4bfecb3..736bfa2b47c 100644 --- a/src/test/regress/expected/multi_join_order_tpch_repartition.out +++ b/src/test/regress/expected/multi_join_order_tpch_repartition.out @@ -22,11 +22,11 @@ WHERE and l_discount between 0.06 - 0.01 and 0.06 + 0.01 and l_quantity < 24; LOG: join order: [ "lineitem" ] - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (3 rows) -- Query #3 from the TPC-H decision support benchmark @@ -61,7 +61,7 @@ LOG: join order: [ "orders" ][ local partition join "lineitem" ][ dual partitio -> HashAggregate Group Key: remote_scan.l_orderkey, remote_scan.o_orderdate, remote_scan.o_shippriority -> Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (6 rows) -- Query #10 from the TPC-H decision support benchmark @@ -98,12 +98,12 @@ GROUP BY ORDER BY revenue DESC; LOG: join order: [ "orders" ][ local partition join "lineitem" ][ dual partition join "customer_append" ][ reference join "nation" ] - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Sort Sort Key: remote_scan.revenue DESC -> Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (4 rows) -- Query #19 from the TPC-H decision support benchmark (modified) @@ -138,11 +138,11 @@ WHERE AND l_shipinstruct = 'DELIVER IN PERSON' ); LOG: join order: [ "lineitem" ][ dual partition join "part_append" ] - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (3 rows) -- Query to test multiple re-partition jobs in a single query @@ -158,12 +158,12 @@ WHERE GROUP BY l_partkey; LOG: join order: [ "lineitem" ][ local partition join "orders" ][ dual partition join "part_append" ][ dual partition join "customer_append" ] - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- HashAggregate Group Key: remote_scan.l_partkey -> Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (4 rows) -- Reset client logging level to its previous value diff --git a/src/test/regress/expected/multi_join_order_tpch_small.out b/src/test/regress/expected/multi_join_order_tpch_small.out index b0b32bb1d02..a2e86ce23e5 100644 --- a/src/test/regress/expected/multi_join_order_tpch_small.out +++ b/src/test/regress/expected/multi_join_order_tpch_small.out @@ -17,11 +17,11 @@ WHERE and l_discount between 0.06 - 0.01 and 0.06 + 0.01 
and l_quantity < 24; LOG: join order: [ "lineitem" ] - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (3 rows) -- Query #3 from the TPC-H decision support benchmark @@ -49,12 +49,12 @@ ORDER BY revenue DESC, o_orderdate; LOG: join order: [ "orders" ][ reference join "customer" ][ local partition join "lineitem" ] - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Sort Sort Key: remote_scan.revenue DESC, remote_scan.o_orderdate -> Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (4 rows) -- Query #10 from the TPC-H decision support benchmark @@ -98,7 +98,7 @@ LOG: join order: [ "orders" ][ reference join "customer" ][ reference join "nat -> HashAggregate Group Key: remote_scan.c_custkey, remote_scan.c_name, remote_scan.c_acctbal, remote_scan.c_phone, remote_scan.n_name, remote_scan.c_address, remote_scan.c_comment -> Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (6 rows) -- Query #19 from the TPC-H decision support benchmark (modified) @@ -133,11 +133,11 @@ WHERE AND l_shipinstruct = 'DELIVER IN PERSON' ); LOG: join order: [ "lineitem" ][ reference join "part" ] - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (3 rows) -- Reset client logging level to its previous value diff --git a/src/test/regress/expected/multi_join_pruning.out b/src/test/regress/expected/multi_join_pruning.out index 27fdc398004..59c12de0a25 100644 --- a/src/test/regress/expected/multi_join_pruning.out +++ b/src/test/regress/expected/multi_join_pruning.out @@ -104,11 +104,11 @@ SELECT count(*) DEBUG: Router planner cannot handle multi-shard select queries DEBUG: join prunable for intervals [{},{AZZXSP27F21T6,AZZXSP27F21T6}] and [{BA1000U2AMO4ZGX,BZZXSP27F21T6},{CA1000U2AMO4ZGX,CZZXSP27F21T6}] DEBUG: join prunable for intervals [{BA1000U2AMO4ZGX,BZZXSP27F21T6},{CA1000U2AMO4ZGX,CZZXSP27F21T6}] and [{},{AZZXSP27F21T6,AZZXSP27F21T6}] - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (3 rows) EXPLAIN (COSTS OFF) @@ -118,11 +118,11 @@ SELECT count(*) DEBUG: Router planner cannot handle multi-shard select queries DEBUG: join prunable for intervals [(a,3,b),(b,4,c)] and [(c,5,d),(d,6,e)] DEBUG: join prunable for intervals [(c,5,d),(d,6,e)] and [(a,3,b),(b,4,c)] - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (3 rows) -- Test that large table joins on partition varchar columns work @@ -133,14 +133,14 @@ SELECT count(*) DEBUG: Router planner cannot handle multi-shard select queries DEBUG: join prunable for intervals [AA1000U2AMO4ZGX,AZZXSP27F21T6] and [BA1000U2AMO4ZGX,BZZXSP27F21T6] DEBUG: join prunable for intervals [BA1000U2AMO4ZGX,BZZXSP27F21T6] and 
[AA1000U2AMO4ZGX,AZZXSP27F21T6] - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (3 rows) -SET client_min_messages TO WARNING; +SET client_min_messages TO WARNING; DROP TABLE varchar_partitioned_table; DROP TABLE array_partitioned_table; DROP TABLE composite_partitioned_table; diff --git a/src/test/regress/expected/multi_metadata_sync.out b/src/test/regress/expected/multi_metadata_sync.out index cb8f0c0e140..d15e7516c21 100644 --- a/src/test/regress/expected/multi_metadata_sync.out +++ b/src/test/regress/expected/multi_metadata_sync.out @@ -101,9 +101,9 @@ SELECT unnest(activate_node_snapshot()) order by 1; UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; (33 rows) -- Create a test table with constraints and SERIAL and default from user defined sequence @@ -162,8 +162,8 @@ SELECT unnest(activate_node_snapshot()) order by 1; RESET ROLE RESET ROLE SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT citus_internal_add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's') - SELECT 
citus_internal_add_partition_metadata ('public.single_shard_tbl'::regclass, 'n', NULL, 3, 's') + SELECT citus_internal.add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's') + SELECT citus_internal.add_partition_metadata ('public.single_shard_tbl'::regclass, 'n', NULL, 3, 's') SELECT pg_catalog.worker_drop_sequence_dependency('public.mx_test_table'); SELECT pg_catalog.worker_drop_sequence_dependency('public.single_shard_tbl'); SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition @@ -183,19 +183,19 @@ SELECT unnest(activate_node_snapshot()) order by 1; UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 - WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (3, 1, 1, 0, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 
0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'single_shard_tbl']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310008, 0, 2, 100008)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.single_shard_tbl'::regclass, 1310008, 't'::"char", NULL, NULL)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) + WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (3, 1, 1, 0, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, 
distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'single_shard_tbl']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310008, 0, 2, 100008)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 
1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.single_shard_tbl'::regclass, 1310008, 't'::"char", NULL, NULL)) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (61 rows) -- Drop single shard table @@ -230,7 +230,7 @@ SELECT unnest(activate_node_snapshot()) order by 1; RESET ROLE RESET ROLE SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT citus_internal_add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's') + SELECT citus_internal.add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's') SELECT pg_catalog.worker_drop_sequence_dependency('public.mx_test_table'); SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition SELECT pg_catalog.worker_record_sequence_dependency('public.mx_test_table_col_3_seq'::regclass,'public.mx_test_table'::regclass,'col_3') @@ -248,15 +248,15 @@ SELECT unnest(activate_node_snapshot()) order by 1; UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 - WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT 
citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, 
distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (52 rows) -- Show that schema changes are included in the activate node snapshot @@ -291,7 +291,7 @@ SELECT unnest(activate_node_snapshot()) order by 1; RESET ROLE RESET ROLE SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') + SELECT citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 
'col_1', 2, 's') SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table'); SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3') @@ -309,16 +309,16 @@ SELECT unnest(activate_node_snapshot()) order by 1; UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 - WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 
'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM 
distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (54 rows) -- Show that append distributed tables are not included in the activate node snapshot @@ -359,7 +359,7 @@ SELECT unnest(activate_node_snapshot()) order by 1; RESET ROLE RESET ROLE SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') + SELECT citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table'); SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3') @@ -377,16 +377,16 @@ SELECT unnest(activate_node_snapshot()) order by 1; UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2 UPDATE 
pg_dist_node SET isactive = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 - WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT 
citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, 
colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (54 rows) -- Show that range distributed tables are not included in the activate node snapshot @@ -420,7 +420,7 @@ SELECT unnest(activate_node_snapshot()) order by 1; RESET ROLE RESET ROLE SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') + SELECT citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table'); SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3') @@ -438,16 +438,16 @@ SELECT unnest(activate_node_snapshot()) order by 1; UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 - WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM 
colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', 
'-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], 
ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (54 rows) -- Test start_metadata_sync_to_node and citus_activate_node UDFs @@ -1996,12 +1996,12 @@ SELECT unnest(activate_node_snapshot()) order by 1; RESET ROLE RESET ROLE SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT citus_internal_add_partition_metadata ('mx_test_schema_1.mx_table_1'::regclass, 'h', 'col1', 7, 's') - SELECT citus_internal_add_partition_metadata ('mx_test_schema_2.mx_table_2'::regclass, 'h', 'col1', 7, 's') - SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') - SELECT citus_internal_add_partition_metadata ('public.dist_table_1'::regclass, 'h', 'a', 10010, 's') - SELECT citus_internal_add_partition_metadata ('public.mx_ref'::regclass, 'n', NULL, 10009, 't') - SELECT citus_internal_add_partition_metadata ('public.test_table'::regclass, 'h', 'id', 10010, 's') + SELECT citus_internal.add_partition_metadata ('mx_test_schema_1.mx_table_1'::regclass, 'h', 'col1', 7, 's') + SELECT citus_internal.add_partition_metadata ('mx_test_schema_2.mx_table_2'::regclass, 'h', 'col1', 7, 's') + SELECT citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') + SELECT citus_internal.add_partition_metadata ('public.dist_table_1'::regclass, 'h', 'a', 10010, 's') + SELECT citus_internal.add_partition_metadata ('public.mx_ref'::regclass, 'n', NULL, 10009, 't') + SELECT citus_internal.add_partition_metadata ('public.test_table'::regclass, 'h', 'id', 10010, 's') SELECT pg_catalog.worker_drop_sequence_dependency('mx_test_schema_1.mx_table_1'); SELECT pg_catalog.worker_drop_sequence_dependency('mx_test_schema_2.mx_table_2'); SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table'); @@ -2031,37 +2031,37 @@ SELECT unnest(activate_node_snapshot()) order by 1; UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 UPDATE 
pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 - WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10009, 1, -1, 0, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10010, 4, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT 
citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_sequence_0']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_sequence_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_test_schema_1', 'mx_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_test_schema_2', 'mx_table_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'dist_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_ref']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, 
force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 5, 100001), (1310002, 0, 1, 100002), (1310003, 0, 5, 100003), (1310004, 0, 1, 100004), (1310005, 0, 5, 100005), (1310006, 0, 1, 100006), (1310007, 0, 5, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310022, 0, 1, 100022), (1310023, 0, 5, 100023), (1310024, 0, 1, 100024), (1310025, 0, 5, 100025), (1310026, 0, 1, 100026)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310027, 0, 1, 100027), (1310028, 0, 5, 100028), (1310029, 0, 1, 100029), (1310030, 0, 5, 100030), (1310031, 0, 1, 100031)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310075, 0, 0, 100077), (1310075, 0, 1, 100078), (1310075, 0, 5, 100079)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310076, 0, 1, 100080), (1310077, 0, 5, 100081), (1310078, 0, 1, 100082), (1310079, 0, 5, 100083)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310085, 0, 1, 100091), (1310086, 0, 5, 100092), (1310087, 0, 1, 100093), (1310088, 0, 5, 100094)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_1.mx_table_1'::regclass, 1310022, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_1.mx_table_1'::regclass, 1310023, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_1.mx_table_1'::regclass, 1310024, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_1.mx_table_1'::regclass, 1310025, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_1.mx_table_1'::regclass, 1310026, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_2.mx_table_2'::regclass, 1310027, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_2.mx_table_2'::regclass, 1310028, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_2.mx_table_2'::regclass, 1310029, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_2.mx_table_2'::regclass, 1310030, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_2.mx_table_2'::regclass, 1310031, 't'::"char", '1288490188', '2147483647')) SELECT 
citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.dist_table_1'::regclass, 1310076, 't'::"char", '-2147483648', '-1073741825'), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '-1073741824', '-1'), ('public.dist_table_1'::regclass, 1310078, 't'::"char", '0', '1073741823'), ('public.dist_table_1'::regclass, 1310079, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_ref'::regclass, 1310075, 't'::"char", NULL, NULL)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.test_table'::regclass, 1310085, 't'::"char", '-2147483648', '-1073741825'), ('public.test_table'::regclass, 1310086, 't'::"char", '-1073741824', '-1'), ('public.test_table'::regclass, 1310087, 't'::"char", '0', '1073741823'), ('public.test_table'::regclass, 1310088, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10009, 1, -1, 0, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) + WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10010, 4, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) + WITH 
distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_sequence_0']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_sequence_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT 
citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_test_schema_1', 'mx_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_test_schema_2', 'mx_table_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'dist_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_ref']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 5, 100001), (1310002, 0, 1, 100002), (1310003, 0, 5, 100003), (1310004, 0, 1, 100004), (1310005, 0, 5, 100005), (1310006, 0, 1, 100006), (1310007, 0, 5, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310022, 0, 1, 100022), (1310023, 0, 5, 100023), (1310024, 0, 1, 100024), (1310025, 0, 5, 100025), (1310026, 0, 1, 100026)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, 
placementid) AS (VALUES (1310027, 0, 1, 100027), (1310028, 0, 5, 100028), (1310029, 0, 1, 100029), (1310030, 0, 5, 100030), (1310031, 0, 1, 100031)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310075, 0, 0, 100077), (1310075, 0, 1, 100078), (1310075, 0, 5, 100079)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310076, 0, 1, 100080), (1310077, 0, 5, 100081), (1310078, 0, 1, 100082), (1310079, 0, 5, 100083)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310085, 0, 1, 100091), (1310086, 0, 5, 100092), (1310087, 0, 1, 100093), (1310088, 0, 5, 100094)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_1.mx_table_1'::regclass, 1310022, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_1.mx_table_1'::regclass, 1310023, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_1.mx_table_1'::regclass, 1310024, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_1.mx_table_1'::regclass, 1310025, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_1.mx_table_1'::regclass, 1310026, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_2.mx_table_2'::regclass, 1310027, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_2.mx_table_2'::regclass, 1310028, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_2.mx_table_2'::regclass, 1310029, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_2.mx_table_2'::regclass, 1310030, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_2.mx_table_2'::regclass, 1310031, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES 
('public.dist_table_1'::regclass, 1310076, 't'::"char", '-2147483648', '-1073741825'), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '-1073741824', '-1'), ('public.dist_table_1'::regclass, 1310078, 't'::"char", '0', '1073741823'), ('public.dist_table_1'::regclass, 1310079, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_ref'::regclass, 1310075, 't'::"char", NULL, NULL)) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.test_table'::regclass, 1310085, 't'::"char", '-2147483648', '-1073741825'), ('public.test_table'::regclass, 1310086, 't'::"char", '-1073741824', '-1'), ('public.test_table'::regclass, 1310087, 't'::"char", '0', '1073741823'), ('public.test_table'::regclass, 1310088, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (118 rows) -- shouldn't work since test_table is MX diff --git a/src/test/regress/expected/multi_metadata_sync_0.out b/src/test/regress/expected/multi_metadata_sync_0.out index c81462e6f91..bc1775ada37 100644 --- a/src/test/regress/expected/multi_metadata_sync_0.out +++ b/src/test/regress/expected/multi_metadata_sync_0.out @@ -101,9 +101,9 @@ SELECT unnest(activate_node_snapshot()) order by 1; UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, 
distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; (33 rows) -- Create a test table with constraints and SERIAL and default from user defined sequence @@ -162,8 +162,8 @@ SELECT unnest(activate_node_snapshot()) order by 1; RESET ROLE RESET ROLE SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT citus_internal_add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's') - SELECT citus_internal_add_partition_metadata ('public.single_shard_tbl'::regclass, 'n', NULL, 3, 's') + SELECT citus_internal.add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's') + SELECT citus_internal.add_partition_metadata ('public.single_shard_tbl'::regclass, 'n', NULL, 3, 's') SELECT pg_catalog.worker_drop_sequence_dependency('public.mx_test_table'); SELECT pg_catalog.worker_drop_sequence_dependency('public.single_shard_tbl'); SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition @@ -183,19 +183,19 @@ SELECT unnest(activate_node_snapshot()) order by 1; UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 - WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (3, 1, 1, 0, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, 
force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'single_shard_tbl']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310008, 0, 2, 100008)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.single_shard_tbl'::regclass, 1310008, 't'::"char", NULL, NULL)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM 
shard_data; + WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) + WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (3, 1, 1, 0, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'single_shard_tbl']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, 
objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310008, 0, 2, 100008)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.single_shard_tbl'::regclass, 1310008, 't'::"char", NULL, NULL)) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (61 rows) -- Drop single shard table @@ -230,7 +230,7 @@ SELECT unnest(activate_node_snapshot()) order by 1; RESET ROLE RESET ROLE SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT citus_internal_add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's') + SELECT citus_internal.add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's') SELECT pg_catalog.worker_drop_sequence_dependency('public.mx_test_table'); SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition SELECT pg_catalog.worker_record_sequence_dependency('public.mx_test_table_col_3_seq'::regclass,'public.mx_test_table'::regclass,'col_3') @@ -248,15 +248,15 @@ SELECT unnest(activate_node_snapshot()) order by 1; UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 - WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES 
('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS 
(VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', 
'1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (52 rows) -- Show that schema changes are included in the activate node snapshot @@ -291,7 +291,7 @@ SELECT unnest(activate_node_snapshot()) order by 1; RESET ROLE RESET ROLE SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') + SELECT citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table'); SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3') @@ -309,16 +309,16 @@ SELECT unnest(activate_node_snapshot()) order by 1; UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 - WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', 
ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, 
distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (54 rows) -- Show that append distributed tables are not included in the activate node snapshot @@ -359,7 +359,7 @@ SELECT unnest(activate_node_snapshot()) order by 1; RESET ROLE RESET ROLE SELECT 
alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') + SELECT citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table'); SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3') @@ -377,16 +377,16 @@ SELECT unnest(activate_node_snapshot()) order by 1; UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 - WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT 
citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH 
distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (54 rows) -- Show that range distributed tables are not included in the activate node snapshot @@ -420,7 +420,7 @@ SELECT unnest(activate_node_snapshot()) order by 1; RESET ROLE RESET ROLE SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') + SELECT citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table'); SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) 
FROM pg_dist_partition SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3') @@ -438,16 +438,16 @@ SELECT unnest(activate_node_snapshot()) order by 1; UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 - WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, 
force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', 
ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (54 rows) -- Test start_metadata_sync_to_node and citus_activate_node UDFs @@ -1996,12 +1996,12 @@ SELECT unnest(activate_node_snapshot()) order by 1; RESET ROLE RESET ROLE SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT citus_internal_add_partition_metadata ('mx_test_schema_1.mx_table_1'::regclass, 'h', 'col1', 7, 's') - SELECT citus_internal_add_partition_metadata ('mx_test_schema_2.mx_table_2'::regclass, 'h', 'col1', 7, 's') - SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') - SELECT citus_internal_add_partition_metadata ('public.dist_table_1'::regclass, 'h', 'a', 10010, 's') - SELECT citus_internal_add_partition_metadata ('public.mx_ref'::regclass, 'n', NULL, 10009, 't') - SELECT citus_internal_add_partition_metadata ('public.test_table'::regclass, 'h', 'id', 10010, 's') + SELECT citus_internal.add_partition_metadata ('mx_test_schema_1.mx_table_1'::regclass, 'h', 'col1', 7, 's') + SELECT citus_internal.add_partition_metadata ('mx_test_schema_2.mx_table_2'::regclass, 'h', 'col1', 7, 's') + SELECT 
citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') + SELECT citus_internal.add_partition_metadata ('public.dist_table_1'::regclass, 'h', 'a', 10010, 's') + SELECT citus_internal.add_partition_metadata ('public.mx_ref'::regclass, 'n', NULL, 10009, 't') + SELECT citus_internal.add_partition_metadata ('public.test_table'::regclass, 'h', 'id', 10010, 's') SELECT pg_catalog.worker_drop_sequence_dependency('mx_test_schema_1.mx_table_1'); SELECT pg_catalog.worker_drop_sequence_dependency('mx_test_schema_2.mx_table_2'); SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table'); @@ -2031,37 +2031,37 @@ SELECT unnest(activate_node_snapshot()) order by 1; UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 - WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10009, 1, -1, 0, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10010, 4, 1, 'integer'::regtype, NULL, NULL)) SELECT pg_catalog.citus_internal_add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, 
force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_sequence_0']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_sequence_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_test_schema_1', 'mx_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_test_schema_2', 'mx_table_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT 
citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'dist_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_ref']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 5, 100001), (1310002, 0, 1, 100002), (1310003, 0, 5, 100003), (1310004, 0, 1, 100004), (1310005, 0, 5, 100005), (1310006, 0, 1, 100006), (1310007, 0, 5, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310022, 0, 1, 100022), (1310023, 0, 5, 100023), (1310024, 0, 1, 100024), (1310025, 0, 5, 100025), (1310026, 0, 1, 100026)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310027, 0, 1, 100027), (1310028, 0, 5, 100028), (1310029, 0, 1, 100029), (1310030, 0, 5, 100030), (1310031, 0, 1, 100031)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310075, 0, 0, 100077), (1310075, 0, 1, 100078), (1310075, 0, 5, 100079)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310076, 0, 1, 100080), (1310077, 0, 5, 100081), (1310078, 0, 1, 100082), (1310079, 0, 5, 100083)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310085, 0, 1, 100091), (1310086, 0, 5, 100092), (1310087, 0, 1, 100093), (1310088, 0, 5, 100094)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_1.mx_table_1'::regclass, 1310022, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_1.mx_table_1'::regclass, 1310023, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_1.mx_table_1'::regclass, 1310024, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_1.mx_table_1'::regclass, 1310025, 
't'::"char", '429496729', '1288490187'), ('mx_test_schema_1.mx_table_1'::regclass, 1310026, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_2.mx_table_2'::regclass, 1310027, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_2.mx_table_2'::regclass, 1310028, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_2.mx_table_2'::regclass, 1310029, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_2.mx_table_2'::regclass, 1310030, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_2.mx_table_2'::regclass, 1310031, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.dist_table_1'::regclass, 1310076, 't'::"char", '-2147483648', '-1073741825'), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '-1073741824', '-1'), ('public.dist_table_1'::regclass, 1310078, 't'::"char", '0', '1073741823'), ('public.dist_table_1'::regclass, 1310079, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_ref'::regclass, 1310075, 't'::"char", NULL, NULL)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.test_table'::regclass, 1310085, 't'::"char", '-2147483648', '-1073741825'), ('public.test_table'::regclass, 1310086, 't'::"char", '-1073741824', '-1'), ('public.test_table'::regclass, 1310087, 't'::"char", '0', '1073741823'), ('public.test_table'::regclass, 1310088, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10009, 1, -1, 0, NULL, NULL)) SELECT 
citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) + WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10010, 4, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', 
ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_sequence_0']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_sequence_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_test_schema_1', 'mx_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_test_schema_2', 'mx_table_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'dist_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_ref']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, 
objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 5, 100001), (1310002, 0, 1, 100002), (1310003, 0, 5, 100003), (1310004, 0, 1, 100004), (1310005, 0, 5, 100005), (1310006, 0, 1, 100006), (1310007, 0, 5, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310022, 0, 1, 100022), (1310023, 0, 5, 100023), (1310024, 0, 1, 100024), (1310025, 0, 5, 100025), (1310026, 0, 1, 100026)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310027, 0, 1, 100027), (1310028, 0, 5, 100028), (1310029, 0, 1, 100029), (1310030, 0, 5, 100030), (1310031, 0, 1, 100031)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310075, 0, 0, 100077), (1310075, 0, 1, 100078), (1310075, 0, 5, 100079)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310076, 0, 1, 100080), (1310077, 0, 5, 100081), (1310078, 0, 1, 100082), (1310079, 0, 5, 100083)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310085, 0, 1, 100091), (1310086, 0, 5, 100092), (1310087, 0, 1, 100093), (1310088, 0, 5, 100094)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_1.mx_table_1'::regclass, 1310022, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_1.mx_table_1'::regclass, 1310023, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_1.mx_table_1'::regclass, 1310024, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_1.mx_table_1'::regclass, 1310025, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_1.mx_table_1'::regclass, 1310026, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_2.mx_table_2'::regclass, 1310027, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_2.mx_table_2'::regclass, 1310028, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_2.mx_table_2'::regclass, 1310029, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_2.mx_table_2'::regclass, 1310030, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_2.mx_table_2'::regclass, 1310031, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), 
('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.dist_table_1'::regclass, 1310076, 't'::"char", '-2147483648', '-1073741825'), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '-1073741824', '-1'), ('public.dist_table_1'::regclass, 1310078, 't'::"char", '0', '1073741823'), ('public.dist_table_1'::regclass, 1310079, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_ref'::regclass, 1310075, 't'::"char", NULL, NULL)) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.test_table'::regclass, 1310085, 't'::"char", '-2147483648', '-1073741825'), ('public.test_table'::regclass, 1310086, 't'::"char", '-1073741824', '-1'), ('public.test_table'::regclass, 1310087, 't'::"char", '0', '1073741823'), ('public.test_table'::regclass, 1310088, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (118 rows) -- shouldn't work since test_table is MX diff --git a/src/test/regress/expected/multi_modifying_xacts.out b/src/test/regress/expected/multi_modifying_xacts.out index 7c3344ee57c..849a28c7370 100644 --- a/src/test/regress/expected/multi_modifying_xacts.out +++ b/src/test/regress/expected/multi_modifying_xacts.out @@ -1208,15 +1208,15 @@ set citus.enable_alter_role_propagation=true; SET search_path TO multi_modifying_xacts; -- should fail since the worker doesn't have test_user anymore INSERT INTO reference_failure_test VALUES (1, '1'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist +ERROR: connection to the remote node test_user@localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist -- the same as the above, but wrapped within a transaction BEGIN; INSERT INTO reference_failure_test VALUES (1, '1'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist +ERROR: connection to the remote node test_user@localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist COMMIT; BEGIN; COPY reference_failure_test FROM STDIN WITH (FORMAT 'csv'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist +ERROR: 
connection to the remote node test_user@localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist COMMIT; -- show that no data go through the table and shard states are good SET client_min_messages to 'ERROR'; @@ -1242,7 +1242,7 @@ ORDER BY s.logicalrelid, sp.shardstate; -- any failure rollbacks the transaction BEGIN; COPY numbers_hash_failure_test FROM STDIN WITH (FORMAT 'csv'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist +ERROR: connection to the remote node test_user@localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist ABORT; -- none of placements are invalid after abort SELECT shardid, shardstate, nodename, nodeport @@ -1263,8 +1263,8 @@ ORDER BY shardid, nodeport; -- verify nothing is inserted SELECT count(*) FROM numbers_hash_failure_test; -WARNING: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist -WARNING: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist +WARNING: connection to the remote node test_user@localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist +WARNING: connection to the remote node test_user@localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist count --------------------------------------------------------------------- 0 @@ -1290,7 +1290,7 @@ ORDER BY shardid, nodeport; -- all failures roll back the transaction BEGIN; COPY numbers_hash_failure_test FROM STDIN WITH (FORMAT 'csv'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist +ERROR: connection to the remote node test_user@localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist COMMIT; -- expect none of the placements to be market invalid after commit SELECT shardid, shardstate, nodename, nodeport @@ -1311,8 +1311,8 @@ ORDER BY shardid, nodeport; -- verify no data is inserted SELECT count(*) FROM numbers_hash_failure_test; -WARNING: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist -WARNING: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist +WARNING: connection to the remote node test_user@localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist +WARNING: connection to the remote node test_user@localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist count --------------------------------------------------------------------- 0 @@ -1328,7 +1328,7 @@ set citus.enable_alter_role_propagation=true; SET search_path TO multi_modifying_xacts; -- fails on all shard placements INSERT INTO numbers_hash_failure_test VALUES (2,2); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist +ERROR: connection to the remote node test_user@localhost:xxxxx failed with the following error: FATAL: role "test_user" does not exist -- connect back to the master with the proper user to continue the tests \c - :default_user - :master_port SET search_path TO multi_modifying_xacts; diff --git a/src/test/regress/expected/multi_multiuser_auth.out b/src/test/regress/expected/multi_multiuser_auth.out index 8dd9b8ba720..6b0e85b6701 
100644 --- a/src/test/regress/expected/multi_multiuser_auth.out +++ b/src/test/regress/expected/multi_multiuser_auth.out @@ -12,19 +12,9 @@ \set bob_worker_1_pw triplex-royalty-warranty-stand-cheek \set bob_worker_2_pw omnibus-plectrum-comet-sneezy-ensile \set bob_fallback_pw :bob_worker_1_pw -SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port; - worker_1_id ---------------------------------------------------------------------- - 17 -(1 row) - +SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port \gset -SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port; - worker_2_id ---------------------------------------------------------------------- - 35 -(1 row) - +SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port \gset -- alice is a superuser so she can update own password CREATE USER alice PASSWORD :'alice_master_pw' SUPERUSER; @@ -82,7 +72,7 @@ GRANT ALL ON TABLE lineitem, orders, lineitem, customer, nation, part, supplier \c :alice_conninfo -- router query (should break because of bad password) INSERT INTO customer VALUES (12345, 'name', NULL, 5, 'phone', 123.45, 'segment', 'comment'); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: password authentication failed for user "alice" +ERROR: connection to the remote node alice@localhost:xxxxx failed with the following error: FATAL: password authentication failed for user "alice" -- fix alice's worker1 password ... UPDATE pg_dist_authinfo SET authinfo = ('password=' || :'alice_worker_1_pw') diff --git a/src/test/regress/expected/multi_mx_add_coordinator.out b/src/test/regress/expected/multi_mx_add_coordinator.out index e810b715e02..42bcd664778 100644 --- a/src/test/regress/expected/multi_mx_add_coordinator.out +++ b/src/test/regress/expected/multi_mx_add_coordinator.out @@ -70,38 +70,43 @@ SELECT create_reference_table('ref'); (1 row) \c - - - :worker_1_port --- alter role from mx worker isn't allowed when alter role propagation is on -SET citus.enable_alter_role_propagation TO ON; -ALTER ROLE reprefuser WITH CREATEROLE; -ERROR: operation is not allowed on this node -HINT: Connect to the coordinator and run it again. 
--- to alter role locally disable alter role propagation first +-- to alter role locally, disable alter role propagation first SET citus.enable_alter_role_propagation TO OFF; ALTER ROLE reprefuser WITH CREATEROLE; -SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser'; - rolcreatedb | rolcreaterole ---------------------------------------------------------------------- - t | t -(1 row) - -RESET citus.enable_alter_role_propagation; -\c - - - :worker_2_port --- show that altering role locally on worker doesn't propagated to other worker -SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser'; - rolcreatedb | rolcreaterole ---------------------------------------------------------------------- - t | f -(1 row) +SELECT result from run_command_on_all_nodes( + $$ + SELECT to_jsonb(q2.*) FROM ( + SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser' + ) q2 + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + {"rolcreatedb": true, "rolcreaterole": false} + {"rolcreatedb": true, "rolcreaterole": false} + {"rolcreatedb": true, "rolcreaterole": true} +(3 rows) + +-- alter role from mx worker is allowed +SET citus.enable_alter_role_propagation TO ON; +ALTER ROLE reprefuser WITH CREATEROLE; +-- show that altering role locally on worker is propagated to coordinator and to other workers too +SELECT result from run_command_on_all_nodes( + $$ + SELECT to_jsonb(q2.*) FROM ( + SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser' + ) q2 + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + {"rolcreatedb": true, "rolcreaterole": true} + {"rolcreatedb": true, "rolcreaterole": true} + {"rolcreatedb": true, "rolcreaterole": true} +(3 rows) \c - - - :master_port SET search_path TO mx_add_coordinator,public; --- show that altering role locally on worker doesn't propagated to coordinator -SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser'; - rolcreatedb | rolcreaterole ---------------------------------------------------------------------- - t | f -(1 row) - SET citus.log_local_commands TO ON; SET client_min_messages TO DEBUG; -- if the placement policy is not round-robin, SELECTs on the reference @@ -124,7 +129,7 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM mx_add_coo 0 (1 row) --- test that distributed functions also use local execution +-- test that distributed functions also use sequential execution CREATE OR REPLACE FUNCTION my_group_id() RETURNS void LANGUAGE plpgsql @@ -365,5 +370,6 @@ SELECT verify_metadata('localhost', :worker_1_port), SET client_min_messages TO error; DROP SCHEMA mx_add_coordinator CASCADE; +DROP USER reprefuser; SET search_path TO DEFAULT; RESET client_min_messages; diff --git a/src/test/regress/expected/multi_mx_create_table.out b/src/test/regress/expected/multi_mx_create_table.out index ac7f9082602..b9d3f7faaab 100644 --- a/src/test/regress/expected/multi_mx_create_table.out +++ b/src/test/regress/expected/multi_mx_create_table.out @@ -3,6 +3,7 @@ -- ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1220000; +SET client_min_messages TO WARNING; SELECT start_metadata_sync_to_node('localhost', :worker_1_port); start_metadata_sync_to_node --------------------------------------------------------------------- @@ -15,6 +16,9 @@ SELECT start_metadata_sync_to_node('localhost', 
:worker_2_port); (1 row) +-- cannot drop them at the end of the test file as other tests depend on them +DROP SCHEMA IF EXISTS citus_mx_test_schema, citus_mx_test_schema_join_1, citus_mx_test_schema_join_2 CASCADE; +DROP TABLE IF EXISTS nation_hash, lineitem_mx, orders_mx, customer_mx, nation_mx, part_mx, supplier_mx, mx_ddl_table, limit_orders_mx, multiple_hash_mx, app_analytics_events_mx, researchers_mx, labs_mx, objects_mx, articles_hash_mx, articles_single_shard_hash_mx, company_employees_mx; -- create schema to test schema support CREATE SCHEMA citus_mx_test_schema; CREATE SCHEMA citus_mx_test_schema_join_1; @@ -42,7 +46,7 @@ BEGIN END; $$ LANGUAGE 'plpgsql' IMMUTABLE; -CREATE FUNCTION public.immutable_append_mx(old_values int[], new_value int) +CREATE OR REPLACE FUNCTION public.immutable_append_mx(old_values int[], new_value int) RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; CREATE OPERATOR citus_mx_test_schema.=== ( LEFTARG = int, @@ -65,14 +69,16 @@ SELECT quote_ident(current_setting('lc_collate')) as current_locale \gset \endif CREATE COLLATION citus_mx_test_schema.english (LOCALE=:current_locale); CREATE TYPE citus_mx_test_schema.new_composite_type as (key1 text, key2 text); -CREATE TYPE order_side_mx AS ENUM ('buy', 'sell'); +CREATE TYPE citus_mx_test_schema.order_side_mx AS ENUM ('buy', 'sell'); -- now create required stuff in the worker 1 \c - - - :worker_1_port +SET client_min_messages TO WARNING; -- show that we do not support creating citus local tables from mx workers for now CREATE TABLE citus_local_table(a int); SELECT citus_add_local_table_to_metadata('citus_local_table'); ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. +DROP TABLE citus_local_table; SET search_path TO citus_mx_test_schema; -- create operator CREATE OPERATOR citus_mx_test_schema.=== ( @@ -85,6 +91,7 @@ CREATE OPERATOR citus_mx_test_schema.=== ( ); -- now create required stuff in the worker 2 \c - - - :worker_2_port +SET client_min_messages TO WARNING; SET search_path TO citus_mx_test_schema; -- create operator CREATE OPERATOR citus_mx_test_schema.=== ( @@ -97,6 +104,7 @@ CREATE OPERATOR citus_mx_test_schema.=== ( ); -- connect back to the master, and do some more tests \c - - - :master_port +SET client_min_messages TO WARNING; SET citus.shard_replication_factor TO 1; SET search_path TO public; CREATE TABLE nation_hash( @@ -315,7 +323,7 @@ CREATE TABLE limit_orders_mx ( symbol text NOT NULL, bidder_id bigint NOT NULL, placed_at timestamp NOT NULL, - kind order_side_mx NOT NULL, + kind citus_mx_test_schema.order_side_mx NOT NULL, limit_price decimal NOT NULL DEFAULT 0.00 CHECK (limit_price >= 0.00) ); SET citus.shard_count TO 2; @@ -473,6 +481,7 @@ ORDER BY table_name::text; (23 rows) \c - - - :worker_1_port +SET client_min_messages TO WARNING; SELECT table_name, citus_table_type, distribution_column, shard_count, table_owner FROM citus_tables ORDER BY table_name::text; @@ -978,6 +987,6 @@ SELECT shard_name, table_name, citus_table_type, shard_size FROM citus_shards OR (469 rows) -- Show that altering type name is not supported from worker node -ALTER TYPE order_side_mx RENAME TO temp_order_side_mx; +ALTER TYPE citus_mx_test_schema.order_side_mx RENAME TO temp_order_side_mx; ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. 
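The \gset rewrites in the multi_multiuser_auth.out hunk above (and again in multi_poolinfo_usage.out further down) follow a standard psql pattern: terminate the query with \gset instead of a semicolon so the result is captured into psql variables rather than printed, which keeps node ids that vary between cluster setups out of the expected output. A minimal standalone sketch of that pattern, assuming only the :worker_1_port variable that the regression harness already provides:

SELECT nodeid AS worker_1_id
FROM pg_dist_node
WHERE nodename = 'localhost' AND nodeport = :worker_1_port \gset
-- \gset sends the buffered query, suppresses the printed result set, and stores each
-- output column in a psql variable named after its alias (here :worker_1_id).
SELECT :worker_1_id AS captured_worker_1_id;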
diff --git a/src/test/regress/expected/multi_mx_hide_shard_names.out b/src/test/regress/expected/multi_mx_hide_shard_names.out index 116269a4e86..762c6a30b54 100644 --- a/src/test/regress/expected/multi_mx_hide_shard_names.out +++ b/src/test/regress/expected/multi_mx_hide_shard_names.out @@ -83,6 +83,52 @@ SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_name test_table (1 row) +-- Even when using subquery and having no existing quals on pg_clcass +SELECT relname FROM (SELECT relname, relnamespace FROM pg_catalog.pg_class) AS q WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname; + relname +--------------------------------------------------------------------- + test_table +(1 row) + +-- Check that inserts into pg_class don't add the filter +EXPLAIN (COSTS OFF) INSERT INTO pg_class VALUES (1); + QUERY PLAN +--------------------------------------------------------------------- + Insert on pg_class + -> Result +(2 rows) + +-- Unless it's an INSERT SELECT that queries from pg_class; +EXPLAIN (COSTS OFF) INSERT INTO pg_class SELECT * FROM pg_class; + QUERY PLAN +--------------------------------------------------------------------- + Insert on pg_class + -> Seq Scan on pg_class pg_class_1 + Filter: (relation_is_a_known_shard(oid) IS NOT TRUE) +(3 rows) + +-- Check that query that psql "\d test_table" does gets optimized to an index +-- scan +EXPLAIN (COSTS OFF) SELECT c.oid, + n.nspname, + c.relname +FROM pg_catalog.pg_class c + LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace +WHERE c.relname OPERATOR(pg_catalog.~) '^(test_table)$' COLLATE pg_catalog.default + AND pg_catalog.pg_table_is_visible(c.oid) +ORDER BY 2, 3; + QUERY PLAN +--------------------------------------------------------------------- + Sort + Sort Key: n.nspname, c.relname + -> Nested Loop Left Join + Join Filter: (n.oid = c.relnamespace) + -> Index Scan using pg_class_relname_nsp_index on pg_class c + Index Cond: (relname = 'test_table'::text) + Filter: ((relname ~ '^(test_table)$'::text) AND (relation_is_a_known_shard(oid) IS NOT TRUE) AND pg_table_is_visible(oid)) + -> Seq Scan on pg_namespace n +(8 rows) + commit prepared 'take-aggressive-lock'; -- now create an index \c - - - :master_port diff --git a/src/test/regress/expected/multi_mx_insert_select_repartition.out b/src/test/regress/expected/multi_mx_insert_select_repartition.out index 62f197c30bb..a3912ec8e90 100644 --- a/src/test/regress/expected/multi_mx_insert_select_repartition.out +++ b/src/test/regress/expected/multi_mx_insert_select_repartition.out @@ -103,10 +103,11 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM multi_mx_i 4 (1 row) + -- we omit the "SELECT bytes FROM fetch_intermediate_results..." 
line since it is flaky + SET LOCAL citus.grep_remote_commands TO '%multi_mx_insert_select_repartition%'; insert into target_table SELECT a*2 FROM source_table RETURNING a; NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213581_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213581_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213581 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0 NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213583_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213583_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213583 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartitioned_results_xxxxx_from_4213582_to_0','repartitioned_results_xxxxx_from_4213584_to_0']::text[],'localhost',57638) bytes NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213585 AS citus_table_alias (a) SELECT intermediate_result.a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_0,repartitioned_results_xxxxx_from_4213582_to_0,repartitioned_results_xxxxx_from_4213584_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213587 AS citus_table_alias (a) SELECT intermediate_result.a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a a diff --git a/src/test/regress/expected/multi_mx_insert_select_repartition_0.out b/src/test/regress/expected/multi_mx_insert_select_repartition_0.out index 15deba0c0af..62271f9a743 100644 --- a/src/test/regress/expected/multi_mx_insert_select_repartition_0.out +++ b/src/test/regress/expected/multi_mx_insert_select_repartition_0.out @@ -103,10 +103,11 @@ NOTICE: executing the command locally: SELECT count(*) AS count FROM multi_mx_i 4 (1 row) + -- we omit the "SELECT bytes FROM fetch_intermediate_results..." 
line since it is flaky + SET LOCAL citus.grep_remote_commands TO '%multi_mx_insert_select_repartition%'; insert into target_table SELECT a*2 FROM source_table RETURNING a; NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213581_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213581_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213581 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0 NOTICE: executing the command locally: SELECT partition_index, 'repartitioned_results_xxxxx_from_4213583_to' || '_' || partition_index::text , rows_written FROM worker_partition_query_result('repartitioned_results_xxxxx_from_4213583_to','SELECT (a OPERATOR(pg_catalog.*) 2) AS a FROM multi_mx_insert_select_repartition.source_table_4213583 source_table WHERE true',0,'hash','{-2147483648,-715827883,715827882}'::text[],'{-715827884,715827881,2147483647}'::text[],true) WHERE rows_written > 0 -NOTICE: executing the command locally: SELECT bytes FROM fetch_intermediate_results(ARRAY['repartitioned_results_xxxxx_from_4213582_to_0','repartitioned_results_xxxxx_from_4213584_to_0']::text[],'localhost',57638) bytes NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213585 AS citus_table_alias (a) SELECT a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_0,repartitioned_results_xxxxx_from_4213582_to_0,repartitioned_results_xxxxx_from_4213584_to_0}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a NOTICE: executing the command locally: INSERT INTO multi_mx_insert_select_repartition.target_table_4213587 AS citus_table_alias (a) SELECT a FROM read_intermediate_results('{repartitioned_results_xxxxx_from_4213581_to_2}'::text[], 'binary'::citus_copy_format) intermediate_result(a integer) RETURNING citus_table_alias.a a diff --git a/src/test/regress/expected/multi_mx_node_metadata.out b/src/test/regress/expected/multi_mx_node_metadata.out index 707dcc472fe..6a152b515ae 100644 --- a/src/test/regress/expected/multi_mx_node_metadata.out +++ b/src/test/regress/expected/multi_mx_node_metadata.out @@ -9,7 +9,7 @@ SET citus.shard_count TO 8; SET citus.shard_replication_factor TO 1; \set VERBOSITY terse -- Simulates a readonly node by setting default_transaction_read_only. 
-CREATE FUNCTION mark_node_readonly(hostname TEXT, port INTEGER, isreadonly BOOLEAN) +CREATE OR REPLACE FUNCTION mark_node_readonly(hostname TEXT, port INTEGER, isreadonly BOOLEAN) RETURNS TEXT LANGUAGE sql AS $$ @@ -27,7 +27,7 @@ CREATE OR REPLACE FUNCTION raise_error_in_metadata_sync() RETURNS void LANGUAGE C STRICT AS 'citus'; -CREATE PROCEDURE wait_until_process_count(appname text, target_count int) AS $$ +CREATE OR REPLACE PROCEDURE wait_until_process_count(appname text, target_count int) AS $$ declare counter integer := -1; begin @@ -846,7 +846,22 @@ SELECT datname FROM pg_stat_activity WHERE application_name LIKE 'Citus Met%'; db_to_drop (1 row) -DROP DATABASE db_to_drop; +DO $$ +DECLARE + i int := 0; +BEGIN + WHILE NOT (SELECT bool_and(success) from run_command_on_all_nodes('DROP DATABASE IF EXISTS db_to_drop')) + LOOP + BEGIN + i := i + 1; + IF i > 5 THEN + RAISE EXCEPTION 'DROP DATABASE timed out'; + END IF; + PERFORM pg_sleep(1); + END; + END LOOP; +END; +$$; SELECT datname FROM pg_stat_activity WHERE application_name LIKE 'Citus Met%'; datname --------------------------------------------------------------------- diff --git a/src/test/regress/expected/multi_mx_router_planner.out b/src/test/regress/expected/multi_mx_router_planner.out index e7855a898f9..5ac6093cb99 100644 --- a/src/test/regress/expected/multi_mx_router_planner.out +++ b/src/test/regress/expected/multi_mx_router_planner.out @@ -370,7 +370,7 @@ WITH RECURSIVE hierarchy as ( h.company_id = ce.company_id)) SELECT * FROM hierarchy WHERE LEVEL <= 2; DEBUG: Router planner cannot handle multi-shard select queries -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column -- logically wrong query, query involves different shards -- from the same table, but still router plannable due to -- shard being placed on the same worker. 
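Both the multi_mx_add_coordinator.out hunk and the DROP DATABASE retry block above lean on run_command_on_all_nodes(), a Citus UDF that executes one command on every node and returns a (nodeid, success, result) row per node. A minimal sketch of the two assertion styles built on it; reprefuser and db_to_drop are simply the names used in the tests above, and wrapping the per-node row in to_jsonb() plus ORDER BY result is what makes the expected output independent of which node answers first:

-- per-node result rows, made order-stable by sorting on the jsonb text
SELECT result
FROM run_command_on_all_nodes($$
  SELECT to_jsonb(q.*) FROM (
    SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser'
  ) q
$$)
ORDER BY result;

-- single boolean across all nodes, the idiom the DROP DATABASE retry loop polls on
SELECT bool_and(success)
FROM run_command_on_all_nodes('DROP DATABASE IF EXISTS db_to_drop');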
@@ -386,7 +386,7 @@ WITH RECURSIVE hierarchy as ( ce.company_id = 2)) SELECT * FROM hierarchy WHERE LEVEL <= 2; DEBUG: router planner does not support queries that reference non-colocated distributed tables -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column -- grouping sets are supported on single shard SELECT id, substring(title, 2, 1) AS subtitle, count(*) diff --git a/src/test/regress/expected/multi_mx_transaction_recovery.out b/src/test/regress/expected/multi_mx_transaction_recovery.out index 20cec75783a..0a29a22af65 100644 --- a/src/test/regress/expected/multi_mx_transaction_recovery.out +++ b/src/test/regress/expected/multi_mx_transaction_recovery.out @@ -64,7 +64,7 @@ SELECT recover_prepared_transactions(); (1 row) -- delete the citus_122_should_do_nothing transaction -DELETE FROM pg_dist_transaction WHERE gid = 'citus_122_should_do_nothing' RETURNING *; +DELETE FROM pg_dist_transaction WHERE gid = 'citus_122_should_do_nothing' RETURNING groupid, gid; groupid | gid --------------------------------------------------------------------- 122 | citus_122_should_do_nothing diff --git a/src/test/regress/expected/multi_poolinfo_usage.out b/src/test/regress/expected/multi_poolinfo_usage.out index ee98f0df79c..53dfca24ef8 100644 --- a/src/test/regress/expected/multi_poolinfo_usage.out +++ b/src/test/regress/expected/multi_poolinfo_usage.out @@ -6,19 +6,9 @@ -- Test of ability to override host/port for a node SET citus.shard_replication_factor TO 1; SET citus.next_shard_id TO 20000000; -SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port; - worker_1_id ---------------------------------------------------------------------- - 17 -(1 row) - +SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port \gset -SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port; - worker_2_id ---------------------------------------------------------------------- - 35 -(1 row) - +SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port \gset CREATE TABLE lotsa_connections (id integer, name text); SELECT create_distributed_table('lotsa_connections', 'id'); diff --git a/src/test/regress/expected/multi_prepare_plsql.out b/src/test/regress/expected/multi_prepare_plsql.out index 74c9835ffad..a87b47a34d3 100644 --- a/src/test/regress/expected/multi_prepare_plsql.out +++ b/src/test/regress/expected/multi_prepare_plsql.out @@ -1317,11 +1317,11 @@ SELECT type_ddl_plpgsql(); (1 row) -- find all renamed types to verify the schema name didn't leak, nor a crash happened -SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup'; +SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup' ORDER BY 1; nspname | typname --------------------------------------------------------------------- - public | prepare_ddl_type_backup otherschema | prepare_ddl_type_backup + public | prepare_ddl_type_backup (2 rows) DROP TYPE prepare_ddl_type_backup; @@ -1332,6 +1332,7 @@ DROP FUNCTION ddl_in_plpgsql(); DROP FUNCTION copy_in_plpgsql(); DROP TABLE prepare_ddl; DROP TABLE local_ddl; +DROP TABLE plpgsql_table; DROP SCHEMA otherschema; -- clean-up functions DROP FUNCTION 
plpgsql_test_1(); diff --git a/src/test/regress/expected/multi_router_planner.out b/src/test/regress/expected/multi_router_planner.out index c6d46ccc925..fee821a7d24 100644 --- a/src/test/regress/expected/multi_router_planner.out +++ b/src/test/regress/expected/multi_router_planner.out @@ -436,7 +436,7 @@ WITH RECURSIVE hierarchy as MATERIALIZED ( h.company_id = ce.company_id)) SELECT * FROM hierarchy WHERE LEVEL <= 2; DEBUG: Router planner cannot handle multi-shard select queries -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column -- logically wrong query, query involves different shards -- from the same table WITH RECURSIVE hierarchy as MATERIALIZED ( @@ -451,7 +451,7 @@ WITH RECURSIVE hierarchy as MATERIALIZED ( ce.company_id = 2)) SELECT * FROM hierarchy WHERE LEVEL <= 2; DEBUG: router planner does not support queries that reference non-colocated distributed tables -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column -- Test router modifying CTEs WITH new_article AS MATERIALIZED( INSERT INTO articles_hash VALUES (1, 1, 'arsenous', 9) RETURNING * @@ -2703,10 +2703,10 @@ SET search_path TO multi_router_planner; -- still, we never mark placements inactive. Instead, fail the transaction BEGIN; INSERT INTO failure_test VALUES (1, 1); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "router_user" does not exist +ERROR: connection to the remote node router_user@localhost:xxxxx failed with the following error: FATAL: role "router_user" does not exist ROLLBACK; INSERT INTO failure_test VALUES (2, 1); -ERROR: connection to the remote node localhost:xxxxx failed with the following error: FATAL: role "router_user" does not exist +ERROR: connection to the remote node router_user@localhost:xxxxx failed with the following error: FATAL: role "router_user" does not exist SELECT shardid, shardstate, nodename, nodeport FROM pg_dist_shard_placement WHERE shardid IN ( SELECT shardid FROM pg_dist_shard diff --git a/src/test/regress/expected/multi_size_queries.out b/src/test/regress/expected/multi_size_queries.out index 2ff8d9c4b79..eb1981e64d8 100644 --- a/src/test/regress/expected/multi_size_queries.out +++ b/src/test/regress/expected/multi_size_queries.out @@ -7,19 +7,25 @@ SET citus.next_shard_id TO 1390000; -- Tests with invalid relation IDs SELECT citus_table_size(1); -ERROR: could not compute table size: relation does not exist +ERROR: could not compute relation size: relation does not exist SELECT citus_relation_size(1); -ERROR: could not compute table size: relation does not exist +ERROR: could not compute relation size: relation does not exist SELECT citus_total_relation_size(1); -ERROR: could not compute table size: relation does not exist +ERROR: could not compute relation size: relation does not exist -- Tests with non-distributed table -CREATE TABLE non_distributed_table (x int); +CREATE TABLE non_distributed_table (x int primary key); SELECT citus_table_size('non_distributed_table'); ERROR: cannot calculate the size because relation 'non_distributed_table' is not distributed SELECT citus_relation_size('non_distributed_table'); ERROR: cannot calculate the size because relation 'non_distributed_table' is not distributed SELECT citus_total_relation_size('non_distributed_table'); ERROR: cannot calculate the size because relation 
'non_distributed_table' is not distributed +SELECT citus_table_size('non_distributed_table_pkey'); +ERROR: cannot calculate the size because table 'non_distributed_table' for index 'non_distributed_table_pkey' is not distributed +SELECT citus_relation_size('non_distributed_table_pkey'); +ERROR: cannot calculate the size because table 'non_distributed_table' for index 'non_distributed_table_pkey' is not distributed +SELECT citus_total_relation_size('non_distributed_table_pkey'); +ERROR: cannot calculate the size because table 'non_distributed_table' for index 'non_distributed_table_pkey' is not distributed DROP TABLE non_distributed_table; -- fix broken placements via disabling the node SET client_min_messages TO ERROR; @@ -31,24 +37,70 @@ SELECT replicate_table_shards('lineitem_hash_part', shard_replication_factor:=2, -- Tests on distributed table with replication factor > 1 VACUUM (FULL) lineitem_hash_part; -SELECT citus_table_size('lineitem_hash_part'); - citus_table_size +SELECT citus_relation_size('lineitem_hash_part') <= citus_table_size('lineitem_hash_part'); + ?column? --------------------------------------------------------------------- - 3801088 + t (1 row) -SELECT citus_relation_size('lineitem_hash_part'); - citus_relation_size +SELECT citus_table_size('lineitem_hash_part') <= citus_total_relation_size('lineitem_hash_part'); + ?column? --------------------------------------------------------------------- - 3801088 + t (1 row) -SELECT citus_total_relation_size('lineitem_hash_part'); - citus_total_relation_size +SELECT citus_relation_size('lineitem_hash_part') > 0; + ?column? +--------------------------------------------------------------------- + t +(1 row) + +CREATE INDEX lineitem_hash_part_idx ON lineitem_hash_part(l_orderkey); +VACUUM (FULL) lineitem_hash_part; +SELECT citus_relation_size('lineitem_hash_part') <= citus_table_size('lineitem_hash_part'); + ?column? +--------------------------------------------------------------------- + t +(1 row) + +SELECT citus_table_size('lineitem_hash_part') <= citus_total_relation_size('lineitem_hash_part'); + ?column? +--------------------------------------------------------------------- + t +(1 row) + +SELECT citus_relation_size('lineitem_hash_part') > 0; + ?column? +--------------------------------------------------------------------- + t +(1 row) + +SELECT citus_relation_size('lineitem_hash_part_idx') <= citus_table_size('lineitem_hash_part_idx'); + ?column? +--------------------------------------------------------------------- + t +(1 row) + +SELECT citus_table_size('lineitem_hash_part_idx') <= citus_total_relation_size('lineitem_hash_part_idx'); + ?column? +--------------------------------------------------------------------- + t +(1 row) + +SELECT citus_relation_size('lineitem_hash_part_idx') > 0; + ?column? --------------------------------------------------------------------- - 3801088 + t +(1 row) + +SELECT citus_total_relation_size('lineitem_hash_part') >= + citus_table_size('lineitem_hash_part') + citus_table_size('lineitem_hash_part_idx'); + ?column? +--------------------------------------------------------------------- + t (1 row) +DROP INDEX lineitem_hash_part_idx; VACUUM (FULL) customer_copy_hash; -- Tests on distributed tables with streaming replication. 
SELECT citus_table_size('customer_copy_hash'); @@ -72,10 +124,10 @@ SELECT citus_total_relation_size('customer_copy_hash'); -- Make sure we can get multiple sizes in a single query SELECT citus_table_size('customer_copy_hash'), citus_table_size('customer_copy_hash'), - citus_table_size('supplier'); + citus_table_size('customer_copy_hash'); citus_table_size | citus_table_size | citus_table_size --------------------------------------------------------------------- - 548864 | 548864 | 655360 + 548864 | 548864 | 548864 (1 row) CREATE INDEX index_1 on customer_copy_hash(c_custkey); @@ -99,6 +151,24 @@ SELECT citus_total_relation_size('customer_copy_hash'); 2646016 (1 row) +SELECT citus_table_size('index_1'); + citus_table_size +--------------------------------------------------------------------- + 1048576 +(1 row) + +SELECT citus_relation_size('index_1'); + citus_relation_size +--------------------------------------------------------------------- + 1048576 +(1 row) + +SELECT citus_total_relation_size('index_1'); + citus_total_relation_size +--------------------------------------------------------------------- + 1048576 +(1 row) + -- Tests on reference table VACUUM (FULL) supplier; SELECT citus_table_size('supplier'); @@ -139,6 +209,74 @@ SELECT citus_total_relation_size('supplier'); 688128 (1 row) +SELECT citus_table_size('index_2'); + citus_table_size +--------------------------------------------------------------------- + 122880 +(1 row) + +SELECT citus_relation_size('index_2'); + citus_relation_size +--------------------------------------------------------------------- + 122880 +(1 row) + +SELECT citus_total_relation_size('index_2'); + citus_total_relation_size +--------------------------------------------------------------------- + 122880 +(1 row) + +-- Test on partitioned table +CREATE TABLE split_me (dist_col int, partition_col timestamp) PARTITION BY RANGE (partition_col); +CREATE INDEX ON split_me(dist_col); +-- create 2 partitions +CREATE TABLE m PARTITION OF split_me FOR VALUES FROM ('2018-01-01') TO ('2019-01-01'); +CREATE TABLE e PARTITION OF split_me FOR VALUES FROM ('2019-01-01') TO ('2020-01-01'); +INSERT INTO split_me SELECT 1, '2018-01-01'::timestamp + i * interval '1 day' FROM generate_series(1, 360) i; +INSERT INTO split_me SELECT 2, '2019-01-01'::timestamp + i * interval '1 day' FROM generate_series(1, 180) i; +-- before citus +SELECT citus_relation_size('split_me'); +ERROR: cannot calculate the size because relation 'split_me' is not distributed +SELECT citus_relation_size('split_me_dist_col_idx'); +ERROR: cannot calculate the size because table 'split_me' for index 'split_me_dist_col_idx' is not distributed +SELECT citus_relation_size('m'); +ERROR: cannot calculate the size because relation 'm' is not distributed +SELECT citus_relation_size('m_dist_col_idx'); +ERROR: cannot calculate the size because table 'm' for index 'm_dist_col_idx' is not distributed +-- distribute the table(s) +SELECT create_distributed_table('split_me', 'dist_col'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- after citus +SELECT citus_relation_size('split_me'); + citus_relation_size +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT citus_relation_size('split_me_dist_col_idx'); + citus_relation_size +--------------------------------------------------------------------- + 0 +(1 row) + +SELECT citus_relation_size('m'); + citus_relation_size 
+--------------------------------------------------------------------- + 32768 +(1 row) + +SELECT citus_relation_size('m_dist_col_idx'); + citus_relation_size +--------------------------------------------------------------------- + 81920 +(1 row) + +DROP TABLE split_me; -- Test inside the transaction BEGIN; ALTER TABLE supplier ALTER COLUMN s_suppkey SET NOT NULL; diff --git a/src/test/regress/expected/multi_task_assignment_policy.out b/src/test/regress/expected/multi_task_assignment_policy.out index 7a58103fa9b..0ac51027c91 100644 --- a/src/test/regress/expected/multi_task_assignment_policy.out +++ b/src/test/regress/expected/multi_task_assignment_policy.out @@ -75,11 +75,11 @@ DEBUG: shard count after pruning for task_assignment_test_table: 3 DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (3 rows) EXPLAIN (COSTS OFF) SELECT count(*) FROM task_assignment_test_table; @@ -89,11 +89,11 @@ DEBUG: shard count after pruning for task_assignment_test_table: 3 DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (3 rows) -- Next test the first-replica task assignment policy @@ -105,11 +105,11 @@ DEBUG: shard count after pruning for task_assignment_test_table: 3 DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (3 rows) EXPLAIN (COSTS OFF) SELECT count(*) FROM task_assignment_test_table; @@ -119,11 +119,11 @@ DEBUG: shard count after pruning for task_assignment_test_table: 3 DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx DEBUG: assigned task to node localhost:xxxxx - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Aggregate -> Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (3 rows) COMMIT; @@ -142,38 +142,38 @@ SET LOCAL citus.task_assignment_policy TO 'greedy'; EXPLAIN (COSTS FALSE) SELECT * FROM task_assignment_reference_table; DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (2 rows) EXPLAIN (COSTS FALSE) SELECT * FROM task_assignment_reference_table; DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Adaptive) - explain statements for distributed queries 
are not enabled + citus.explain_distributed_queries: false (2 rows) SET LOCAL citus.task_assignment_policy TO 'first-replica'; EXPLAIN (COSTS FALSE) SELECT * FROM task_assignment_reference_table; DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (2 rows) EXPLAIN (COSTS FALSE) SELECT * FROM task_assignment_reference_table; DEBUG: Distributed planning for a fast-path router query DEBUG: Creating router plan - QUERY PLAN + QUERY PLAN --------------------------------------------------------------------- Custom Scan (Citus Adaptive) - explain statements for distributed queries are not enabled + citus.explain_distributed_queries: false (2 rows) ROLLBACK; diff --git a/src/test/regress/expected/multi_tenant_isolation_nonblocking.out b/src/test/regress/expected/multi_tenant_isolation_nonblocking.out index 3ec16e6eeb5..3daac7dacd9 100644 --- a/src/test/regress/expected/multi_tenant_isolation_nonblocking.out +++ b/src/test/regress/expected/multi_tenant_isolation_nonblocking.out @@ -1275,8 +1275,9 @@ SELECT count(*) FROM pg_catalog.pg_dist_partition WHERE colocationid > 0; TRUNCATE TABLE pg_catalog.pg_dist_colocation; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100; ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id; -SELECT citus_set_coordinator_host('localhost'); - citus_set_coordinator_host +-- make sure we don't have any replication objects leftover on the nodes +SELECT public.wait_for_resource_cleanup(); + wait_for_resource_cleanup --------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/multi_test_helpers.out b/src/test/regress/expected/multi_test_helpers.out index b8758e561bd..0f31f2354eb 100644 --- a/src/test/regress/expected/multi_test_helpers.out +++ b/src/test/regress/expected/multi_test_helpers.out @@ -526,3 +526,116 @@ BEGIN RETURN result; END; $func$ LANGUAGE plpgsql; +-- Returns pg_seclabels entries from all nodes in the cluster for which +-- the object name is the input. +CREATE OR REPLACE FUNCTION get_citus_tests_label_provider_labels(object_name text, + master_port INTEGER DEFAULT 57636, + worker_1_port INTEGER DEFAULT 57637, + worker_2_port INTEGER DEFAULT 57638) +RETURNS TABLE ( + node_type text, + result text +) +AS $func$ +DECLARE + pg_seclabels_cmd TEXT := 'SELECT to_jsonb(q.*) FROM (' || + 'SELECT provider, objtype, label FROM pg_seclabels ' || + 'WHERE objname = ''' || object_name || ''') q'; +BEGIN + RETURN QUERY + SELECT + CASE + WHEN nodeport = master_port THEN 'coordinator' + WHEN nodeport = worker_1_port THEN 'worker_1' + WHEN nodeport = worker_2_port THEN 'worker_2' + ELSE 'unexpected_node' + END AS node_type, + a.result + FROM run_command_on_all_nodes(pg_seclabels_cmd) a + JOIN pg_dist_node USING (nodeid) + ORDER BY node_type; +END; +$func$ LANGUAGE plpgsql; +-- For all nodes, returns database properties of given database, except +-- oid, datfrozenxid and datminmxid. +-- +-- Also returns whether the node has a pg_dist_object record for the database +-- and whether there are any stale pg_dist_object records for a database. 
+CREATE OR REPLACE FUNCTION check_database_on_all_nodes(p_database_name text) +RETURNS TABLE (node_type text, result text) +AS $func$ +DECLARE + pg_ge_15_options text := ''; + pg_ge_16_options text := ''; +BEGIN + IF EXISTS (SELECT 1 FROM pg_attribute WHERE attrelid = 'pg_database'::regclass AND attname = 'datlocprovider') THEN + pg_ge_15_options := ', daticulocale, datcollversion, datlocprovider'; + ELSE + pg_ge_15_options := $$, null as daticulocale, null as datcollversion, 'c' as datlocprovider$$; + END IF; + + IF EXISTS (SELECT 1 FROM pg_attribute WHERE attrelid = 'pg_database'::regclass AND attname = 'daticurules') THEN + pg_ge_16_options := ', daticurules'; + ELSE + pg_ge_16_options := ', null as daticurules'; + END IF; + + RETURN QUERY + SELECT + CASE WHEN (groupid = 0 AND groupid = (SELECT groupid FROM pg_dist_local_group)) THEN 'coordinator (local)' + WHEN (groupid = 0) THEN 'coordinator (remote)' + WHEN (groupid = (SELECT groupid FROM pg_dist_local_group)) THEN 'worker node (local)' + ELSE 'worker node (remote)' + END AS node_type, + q2.result + FROM run_command_on_all_nodes( + format( + $$ + SELECT to_jsonb(q.*) + FROM ( + SELECT + ( + SELECT to_jsonb(database_properties.*) + FROM ( + SELECT datname, pa.rolname as database_owner, + pg_encoding_to_char(pd.encoding) as encoding, + datistemplate, datallowconn, datconnlimit, datacl, + pt.spcname AS tablespace, datcollate, datctype + %2$s -- >= pg15 options + %3$s -- >= pg16 options + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + JOIN pg_tablespace pt ON pd.dattablespace = pt.oid + WHERE datname = '%1$s' + ) database_properties + ) AS database_properties, + ( + SELECT COUNT(*)=1 + FROM pg_dist_object WHERE objid = (SELECT oid FROM pg_database WHERE datname = '%1$s') + ) AS pg_dist_object_record_for_db_exists, + ( + SELECT COUNT(*) > 0 + FROM pg_dist_object + WHERE classid = 1262 AND objid NOT IN (SELECT oid FROM pg_database) + ) AS stale_pg_dist_object_record_for_a_db_exists + ) q + $$, + p_database_name, pg_ge_15_options, pg_ge_16_options + ) + ) q2 + JOIN pg_dist_node USING (nodeid); +END; +$func$ LANGUAGE plpgsql; +CREATE OR REPLACE FUNCTION check_database_privileges(role_name text, db_name text, permissions text[]) +RETURNS TABLE(permission text, result text) +AS $func$ +DECLARE + permission text; +BEGIN + FOREACH permission IN ARRAY permissions + LOOP + RETURN QUERY EXECUTE format($inner$SELECT %s, result FROM run_command_on_all_nodes($$select has_database_privilege(%s,%s,%s); $$)$inner$, + quote_literal(permission), quote_literal(role_name), quote_literal(db_name), quote_literal(permission)); + END LOOP; +END; +$func$ LANGUAGE plpgsql; diff --git a/src/test/regress/expected/multi_transaction_recovery_multiple_databases.out b/src/test/regress/expected/multi_transaction_recovery_multiple_databases.out index 2e396da7de0..a3a374131cc 100644 --- a/src/test/regress/expected/multi_transaction_recovery_multiple_databases.out +++ b/src/test/regress/expected/multi_transaction_recovery_multiple_databases.out @@ -33,8 +33,8 @@ $definition$ create_function_test_maintenance_worker \gset CREATE DATABASE db1; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. 
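The multi_test_helpers.out hunk above adds check_database_privileges(), which fans a has_database_privilege() check out to every node through run_command_on_all_nodes() and returns one (permission, result) row per node for each requested privilege. A minimal usage sketch; the role and database names here are placeholders rather than objects created by the tests:

SELECT permission, result
FROM public.check_database_privileges('some_role', 'some_db',
                                      ARRAY['CONNECT', 'CREATE', 'TEMP']);
-- each requested permission yields one row per node, with result 't' or 'f'
-- exactly as has_database_privilege() reports it on that node.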
SELECT oid AS db1_oid FROM pg_database WHERE datname = 'db1' @@ -42,13 +42,13 @@ WHERE datname = 'db1' \c - - - :worker_1_port CREATE DATABASE db1; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. \c - - - :worker_2_port CREATE DATABASE db1; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. \c db1 - - :worker_1_port CREATE EXTENSION citus; \c db1 - - :worker_2_port @@ -94,8 +94,8 @@ FROM pg_dist_node; CREATE DATABASE db2; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. SELECT oid AS db2_oid FROM pg_database WHERE datname = 'db2' @@ -103,13 +103,13 @@ WHERE datname = 'db2' \c - - - :worker_1_port CREATE DATABASE db2; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. \c - - - :worker_2_port CREATE DATABASE db2; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. 
\c db2 - - :worker_1_port CREATE EXTENSION citus; \c db2 - - :worker_2_port diff --git a/src/test/regress/expected/multi_utilities.out b/src/test/regress/expected/multi_utilities.out index b82e54f1678..5faab87d7c5 100644 --- a/src/test/regress/expected/multi_utilities.out +++ b/src/test/regress/expected/multi_utilities.out @@ -348,6 +348,8 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; VACUUM local_vacuum_table; +VACUUM local_vacuum_table; +VACUUM local_vacuum_table; SELECT CASE WHEN s BETWEEN 20000000 AND 25000000 THEN 22500000 ELSE s END FROM pg_total_relation_size('local_vacuum_table') s ; s @@ -401,6 +403,8 @@ VACUUM (DISABLE_PAGE_SKIPPING false) local_vacuum_table; insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; VACUUM (INDEX_CLEANUP OFF, PARALLEL 1) local_vacuum_table; +VACUUM (INDEX_CLEANUP OFF, PARALLEL 1) local_vacuum_table; +VACUUM (INDEX_CLEANUP OFF, PARALLEL 1) local_vacuum_table; SELECT CASE WHEN s BETWEEN 50000000 AND 70000000 THEN 60000000 ELSE s END size FROM pg_total_relation_size('local_vacuum_table') s ; size @@ -411,6 +415,8 @@ FROM pg_total_relation_size('local_vacuum_table') s ; insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; VACUUM (INDEX_CLEANUP ON, PARALLEL 1) local_vacuum_table; +VACUUM (INDEX_CLEANUP ON, PARALLEL 1) local_vacuum_table; +VACUUM (INDEX_CLEANUP ON, PARALLEL 1) local_vacuum_table; SELECT CASE WHEN s BETWEEN 20000000 AND 49999999 THEN 35000000 ELSE s END size FROM pg_total_relation_size('local_vacuum_table') s ; size @@ -418,14 +424,46 @@ FROM pg_total_relation_size('local_vacuum_table') s ; 35000000 (1 row) +-- vacuum (process_toast true) should be vacuuming toast tables (default is true) +select reltoastrelid from pg_class where relname='local_vacuum_table' +\gset +SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass +\gset +insert into local_vacuum_table select i from generate_series(1,10000) i; +VACUUM (FREEZE, PROCESS_TOAST true) local_vacuum_table; +SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class +WHERE oid=:reltoastrelid::regclass; + frozen_performed +--------------------------------------------------------------------- + t +(1 row) + +delete from local_vacuum_table; +-- vacuum (process_toast false) should not be vacuuming toast tables (default is true) +SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass +\gset +insert into local_vacuum_table select i from generate_series(1,10000) i; +VACUUM (FREEZE, PROCESS_TOAST false) local_vacuum_table; +SELECT relfrozenxid::text::integer = :frozenxid AS frozen_not_performed FROM pg_class +WHERE oid=:reltoastrelid::regclass; + frozen_not_performed +--------------------------------------------------------------------- + t +(1 row) + +delete from local_vacuum_table; -- vacuum (truncate false) should not attempt to truncate off any empty pages at the end of the table (default is true) insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; vacuum (TRUNCATE false) local_vacuum_table; +vacuum (TRUNCATE false) local_vacuum_table; +vacuum (TRUNCATE false) local_vacuum_table; SELECT pg_total_relation_size('local_vacuum_table') as size1 \gset insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from 
local_vacuum_table; vacuum (TRUNCATE true) local_vacuum_table; +vacuum (TRUNCATE true) local_vacuum_table; +vacuum (TRUNCATE true) local_vacuum_table; SELECT pg_total_relation_size('local_vacuum_table') as size2 \gset SELECT :size1 > :size2 as truncate_less_size; truncate_less_size diff --git a/src/test/regress/expected/multi_utility_warnings.out b/src/test/regress/expected/multi_utility_warnings.out index 89899b0f18f..880614c5851 100644 --- a/src/test/regress/expected/multi_utility_warnings.out +++ b/src/test/regress/expected/multi_utility_warnings.out @@ -5,5 +5,6 @@ -- databases. CREATE DATABASE new_database; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. +DROP DATABASE new_database; diff --git a/src/test/regress/expected/node_conninfo_reload.out b/src/test/regress/expected/node_conninfo_reload.out index d2e33d95011..3b33c54b272 100644 --- a/src/test/regress/expected/node_conninfo_reload.out +++ b/src/test/regress/expected/node_conninfo_reload.out @@ -47,7 +47,7 @@ show citus.node_conninfo; -- Should give a connection error because of bad sslmode select count(*) from test where a = 0; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" -- Reset it again ALTER SYSTEM RESET citus.node_conninfo; select pg_reload_conf(); @@ -118,7 +118,7 @@ select count(*) from test where a = 0; COMMIT; -- Should fail now with connection error, when transaction is finished select count(*) from test where a = 0; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" -- Reset it again ALTER SYSTEM RESET citus.node_conninfo; select pg_reload_conf(); @@ -181,7 +181,7 @@ COMMIT; -- Should fail now, when transaction is finished SET client_min_messages TO ERROR; select count(*) from test where a = 0; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" RESET client_min_messages; -- Reset it again ALTER SYSTEM RESET citus.node_conninfo; @@ -235,11 +235,11 @@ show citus.node_conninfo; -- Should fail since a different shard is accessed and thus a new connection -- will to be created. 
select count(*) from test where a = 0; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" COMMIT; -- Should still fail now, when transaction is finished select count(*) from test where a = 0; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" -- Reset it again ALTER SYSTEM RESET citus.node_conninfo; select pg_reload_conf(); @@ -301,7 +301,7 @@ COMMIT; -- Should fail now, when transaction is finished SET client_min_messages TO ERROR; select count(*) from test; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" RESET client_min_messages; -- Reset it again ALTER SYSTEM RESET citus.node_conninfo; @@ -359,7 +359,7 @@ ROLLBACK; -- Should fail now, when transaction is finished SET client_min_messages TO ERROR; select count(*) from test; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" RESET client_min_messages; -- Reset it again ALTER SYSTEM RESET citus.node_conninfo; @@ -497,7 +497,7 @@ ALTER TABLE test ADD COLUMN c INT; COMMIT; -- Should fail now, when transaction is finished ALTER TABLE test ADD COLUMN d INT; -ERROR: connection to the remote node localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: invalid sslmode value: "doesnotexist" -- Reset it again ALTER SYSTEM RESET citus.node_conninfo; select pg_reload_conf(); @@ -520,5 +520,61 @@ show citus.node_conninfo; -- Should work again ALTER TABLE test ADD COLUMN e INT; +-- show that we allow providing "host" param via citus.node_conninfo +ALTER SYSTEM SET citus.node_conninfo = 'sslmode=require host=nosuchhost'; +SELECT pg_reload_conf(); + pg_reload_conf +--------------------------------------------------------------------- + t +(1 row) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +-- fails due to invalid host +SELECT COUNT(*)>=0 FROM test; +WARNING: connection to the remote node postgres@localhost:xxxxx failed with the following error: could not parse network address "localhost": Name or service not known +ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: could not parse network address "localhost": Name or service not known +SELECT array_agg(nodeid) as updated_nodeids from pg_dist_node WHERE nodename = 'localhost' \gset +UPDATE pg_dist_node SET nodename = '127.0.0.1' WHERE nodeid = ANY(:'updated_nodeids'::int[]); +ALTER SYSTEM SET citus.node_conninfo = 'sslmode=require host=localhost'; +SELECT pg_reload_conf(); + pg_reload_conf +--------------------------------------------------------------------- + t +(1 row) + +SELECT pg_sleep(0.1); + pg_sleep 
+--------------------------------------------------------------------- + +(1 row) + +-- works when hostaddr is specified in pg_dist_node after providing host in citus.node_conninfo +SELECT COUNT(*)>=0 FROM test; + ?column? +--------------------------------------------------------------------- + t +(1 row) + +-- restore original nodenames into pg_dist_node +UPDATE pg_dist_node SET nodename = 'localhost' WHERE nodeid = ANY(:'updated_nodeids'::int[]); +-- reset it +ALTER SYSTEM RESET citus.node_conninfo; +select pg_reload_conf(); + pg_reload_conf +--------------------------------------------------------------------- + t +(1 row) + +select pg_sleep(0.1); -- wait for config reload to apply + pg_sleep +--------------------------------------------------------------------- + +(1 row) + DROP SCHEMA node_conninfo_reload CASCADE; NOTICE: drop cascades to table test diff --git a/src/test/regress/expected/other_databases.out b/src/test/regress/expected/other_databases.out new file mode 100644 index 00000000000..c67746055a5 --- /dev/null +++ b/src/test/regress/expected/other_databases.out @@ -0,0 +1,339 @@ +CREATE SCHEMA other_databases; +SET search_path TO other_databases; +SET citus.next_shard_id TO 10231023; +CREATE DATABASE other_db1; +NOTICE: Citus partially supports CREATE DATABASE for distributed databases +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. +\c other_db1 +SHOW citus.main_db; + citus.main_db +--------------------------------------------------------------------- + regression +(1 row) + +-- check that empty citus.superuser gives error +SET citus.superuser TO ''; +CREATE USER empty_superuser; +ERROR: No superuser role is given for Citus main database connection +HINT: Set citus.superuser to a superuser role name +SET citus.superuser TO 'postgres'; +CREATE USER other_db_user1; +CREATE USER other_db_user2; +BEGIN; +CREATE USER other_db_user3; +CREATE USER other_db_user4; +COMMIT; +BEGIN; +CREATE USER other_db_user5; +CREATE USER other_db_user6; +ROLLBACK; +BEGIN; +CREATE USER other_db_user7; +SELECT 1/0; +ERROR: division by zero +COMMIT; +CREATE USER other_db_user8; +\c regression +SELECT usename FROM pg_user WHERE usename LIKE 'other\_db\_user%' ORDER BY 1; + usename +--------------------------------------------------------------------- + other_db_user1 + other_db_user2 + other_db_user3 + other_db_user4 + other_db_user8 +(5 rows) + +\c - - - :worker_1_port +SELECT usename FROM pg_user WHERE usename LIKE 'other\_db\_user%' ORDER BY 1; + usename +--------------------------------------------------------------------- + other_db_user1 + other_db_user2 + other_db_user3 + other_db_user4 + other_db_user8 +(5 rows) + +\c - - - :master_port +-- some user creation commands will fail but let's make sure we try to drop them just in case +DROP USER IF EXISTS other_db_user1, other_db_user2, other_db_user3, other_db_user4, other_db_user5, other_db_user6, other_db_user7, other_db_user8; +NOTICE: role "other_db_user5" does not exist, skipping +NOTICE: role "other_db_user6" does not exist, skipping +NOTICE: role "other_db_user7" does not exist, skipping +-- Make sure non-superuser roles cannot use internal GUCs +-- but they can still create a role +CREATE USER nonsuperuser CREATEROLE; +GRANT ALL ON SCHEMA citus_internal TO nonsuperuser; +SET ROLE nonsuperuser; +SELECT citus_internal.execute_command_on_remote_nodes_as_user($$SELECT 'dangerous query'$$, 'postgres'); +ERROR: permission denied for function 
execute_command_on_remote_nodes_as_user +\c other_db1 +SET citus.local_hostname TO '127.0.0.1'; +SET ROLE nonsuperuser; +-- Make sure that we don't try to access pg_dist_node. +-- Otherwise, we would get the following error: +-- ERROR: cache lookup failed for pg_dist_node, called too early? +CREATE USER other_db_user9; +RESET ROLE; +RESET citus.local_hostname; +RESET ROLE; +\c regression +SELECT usename FROM pg_user WHERE usename LIKE 'other\_db\_user%' ORDER BY 1; + usename +--------------------------------------------------------------------- + other_db_user9 +(1 row) + +\c - - - :worker_1_port +SELECT usename FROM pg_user WHERE usename LIKE 'other\_db\_user%' ORDER BY 1; + usename +--------------------------------------------------------------------- + other_db_user9 +(1 row) + +\c - - - :master_port +REVOKE ALL ON SCHEMA citus_internal FROM nonsuperuser; +DROP USER other_db_user9, nonsuperuser; +-- test from a worker +\c - - - :worker_1_port +CREATE DATABASE worker_other_db; +NOTICE: Citus partially supports CREATE DATABASE for distributed databases +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. +\c worker_other_db +CREATE USER worker_user1; +BEGIN; +CREATE USER worker_user2; +COMMIT; +BEGIN; +CREATE USER worker_user3; +ROLLBACK; +\c regression +SELECT usename FROM pg_user WHERE usename LIKE 'worker\_user%' ORDER BY 1; + usename +--------------------------------------------------------------------- + worker_user1 + worker_user2 +(2 rows) + +\c - - - :master_port +SELECT usename FROM pg_user WHERE usename LIKE 'worker\_user%' ORDER BY 1; + usename +--------------------------------------------------------------------- + worker_user1 + worker_user2 +(2 rows) + +-- some user creation commands will fail but let's make sure we try to drop them just in case +DROP USER IF EXISTS worker_user1, worker_user2, worker_user3; +NOTICE: role "worker_user3" does not exist, skipping +-- test creating and dropping a database from a Citus non-main database +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +\c other_db1 +CREATE DATABASE other_db3; +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db3') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "other_db3", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "other_db3", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": 
-1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "other_db3", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +\c other_db1 +DROP DATABASE other_db3; +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db3') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +\c worker_other_db - - :worker_1_port +CREATE DATABASE other_db4; +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db4') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"database_properties": {"datacl": null, "datname": "other_db4", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (local) | {"database_properties": {"datacl": null, "datname": "other_db4", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "other_db4", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +\c worker_other_db +DROP DATABASE other_db4; +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db4') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, 
"stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +DROP DATABASE worker_other_db; +CREATE DATABASE other_db5; +-- disable create database propagation for the next test +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +\c other_db5 - - :worker_2_port +-- locally create a database +CREATE DATABASE local_db; +\c regression - - - +-- re-enable create database propagation +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +\c other_db5 - - :master_port +-- Test a scenario where create database fails because the database +-- already exists on another node and we don't crash etc. +CREATE DATABASE local_db; +ERROR: database "local_db" already exists +CONTEXT: while executing command on localhost:xxxxx +while executing command on localhost:xxxxx +\c regression - - - +SELECT * FROM public.check_database_on_all_nodes('local_db') ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "local_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +\c - - - :worker_2_port +-- locally drop the database for cleanup purposes +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +DROP DATABASE 
local_db; +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +\c - - - :master_port +DROP DATABASE other_db5; +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +DROP SCHEMA other_databases; +DROP DATABASE other_db1; diff --git a/src/test/regress/expected/pg14.out b/src/test/regress/expected/pg14.out index 1b1d80df215..bbfd5dafa08 100644 --- a/src/test/regress/expected/pg14.out +++ b/src/test/regress/expected/pg14.out @@ -71,32 +71,6 @@ NOTICE: issuing VACUUM (FULL,TRUNCATE false,INDEX_CLEANUP auto) pg14.t1_980000 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing VACUUM (FULL,TRUNCATE false,INDEX_CLEANUP auto) pg14.t1_980001 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx --- vacuum (process_toast true) should be vacuuming toast tables (default is true) -CREATE TABLE local_vacuum_table(name text); -select reltoastrelid from pg_class where relname='local_vacuum_table' -\gset -SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass -\gset -VACUUM (FREEZE, PROCESS_TOAST true) local_vacuum_table; -SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class -WHERE oid=:reltoastrelid::regclass; - frozen_performed ---------------------------------------------------------------------- - t -(1 row) - --- vacuum (process_toast false) should not be vacuuming toast tables (default is true) -SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass -\gset -VACUUM (FREEZE, PROCESS_TOAST false) local_vacuum_table; -SELECT relfrozenxid::text::integer = :frozenxid AS frozen_not_performed FROM pg_class -WHERE oid=:reltoastrelid::regclass; - frozen_not_performed ---------------------------------------------------------------------- - t -(1 row) - -DROP TABLE local_vacuum_table; SET citus.log_remote_commands TO OFF; create table dist(a int, b int); select create_distributed_table('dist','a'); @@ -1168,7 +1142,7 @@ WITH RECURSIVE search_graph(f, t, label) AS ( WHERE g.f = sg.t and g.f = 1 ) SEARCH DEPTH FIRST BY f, t SET seq SELECT * FROM search_graph ORDER BY seq; -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column WITH RECURSIVE search_graph(f, t, label) AS ( SELECT * FROM graph0 g WHERE f = 1 UNION ALL @@ -1177,7 +1151,7 @@ WITH RECURSIVE search_graph(f, t, label) AS ( WHERE g.f = sg.t and g.f = 1 ) SEARCH DEPTH FIRST BY f, t SET seq DELETE FROM graph0 WHERE t IN (SELECT t FROM search_graph ORDER BY 
seq); -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column CREATE TABLE graph1(f INT, t INT, label TEXT); SELECT create_reference_table('graph1'); create_reference_table @@ -1196,7 +1170,7 @@ WITH RECURSIVE search_graph(f, t, label) AS ( WHERE g.f = sg.t and g.f = 1 ) SEARCH DEPTH FIRST BY f, t SET seq SELECT * FROM search_graph ORDER BY seq; -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column WITH RECURSIVE search_graph(f, t, label) AS ( SELECT * FROM graph1 g WHERE f = 1 UNION ALL @@ -1205,7 +1179,7 @@ WITH RECURSIVE search_graph(f, t, label) AS ( WHERE g.f = sg.t and g.f = 1 ) SEARCH DEPTH FIRST BY f, t SET seq DELETE FROM graph1 WHERE t IN (SELECT t FROM search_graph ORDER BY seq); -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column SELECT * FROM ( WITH RECURSIVE search_graph(f, t, label) AS ( SELECT * @@ -1217,7 +1191,7 @@ SELECT * FROM ( ) SEARCH DEPTH FIRST BY f, t SET seq SELECT * FROM search_graph ORDER BY seq ) as foo; -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column -- -- https://github.com/citusdata/citus/issues/5258 -- @@ -1492,4 +1466,5 @@ DROP TABLE compression_and_defaults, compression_and_generated_col; set client_min_messages to error; drop extension postgres_fdw cascade; drop schema pg14 cascade; +DROP ROLE role_1, r1; reset client_min_messages; diff --git a/src/test/regress/expected/pg15.out b/src/test/regress/expected/pg15.out index fcbb0cd1220..eff8b0ce662 100644 --- a/src/test/regress/expected/pg15.out +++ b/src/test/regress/expected/pg15.out @@ -1529,6 +1529,16 @@ alter database regression REFRESH COLLATION VERSION; NOTICE: version has not changed NOTICE: issuing ALTER DATABASE regression REFRESH COLLATION VERSION; NOTICE: issuing ALTER DATABASE regression REFRESH COLLATION VERSION; +SET citus.enable_create_database_propagation TO OFF; +CREATE DATABASE local_database_1; +NOTICE: Citus partially supports CREATE DATABASE for distributed databases +RESET citus.enable_create_database_propagation; +CREATE ROLE local_role_1; +ALTER DATABASE local_database_1 REFRESH COLLATION VERSION; +NOTICE: version has not changed +REVOKE CONNECT, TEMPORARY, CREATE ON DATABASE local_database_1 FROM local_role_1; +DROP ROLE local_role_1; +DROP DATABASE local_database_1; set citus.log_remote_commands = false; -- Clean up \set VERBOSITY terse diff --git a/src/test/regress/expected/pg16.out b/src/test/regress/expected/pg16.out index 8d47b6f1bf1..a035fcfc4a2 100644 --- a/src/test/regress/expected/pg16.out +++ b/src/test/regress/expected/pg16.out @@ -207,8 +207,8 @@ DETAIL: Only ADD|DROP COLUMN, SET|DROP NOT NULL, SET|DROP DEFAULT, ADD|DROP|VAL -- https://github.com/postgres/postgres/commit/30a53b7 CREATE DATABASE test_db WITH LOCALE_PROVIDER = 'icu' LOCALE = '' ICU_RULES = '&a < g' TEMPLATE = 'template0'; NOTICE: Citus partially supports CREATE DATABASE for distributed databases -DETAIL: Citus does not propagate CREATE DATABASE command to workers -HINT: You can manually create a database and its extensions on workers. +DETAIL: Citus does not propagate CREATE DATABASE command to other nodes +HINT: You can manually create a database and its extensions on other nodes. 
NOTICE: using standard form "und" for ICU locale "" SELECT result FROM run_command_on_workers ($$CREATE DATABASE test_db WITH LOCALE_PROVIDER = 'icu' LOCALE = '' ICU_RULES = '&a < g' TEMPLATE = 'template0'$$); @@ -1007,49 +1007,18 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing GRANT role1 TO role2 WITH ADMIN OPTION; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx REVOKE role1 FROM role2; -RESET citus.log_remote_commands; -RESET citus.grep_remote_commands; -- -- PG16 added new options to GRANT ROLE -- inherit: https://github.com/postgres/postgres/commit/e3ce2de -- set: https://github.com/postgres/postgres/commit/3d14e17 --- We don't propagate for now in Citus +-- We now propagate these options in Citus -- -GRANT role1 TO role2 WITH INHERIT FALSE; -NOTICE: not propagating GRANT/REVOKE commands with specified INHERIT/SET options to worker nodes -HINT: Connect to worker nodes directly to manually run the same GRANT/REVOKE command after disabling DDL propagation. -REVOKE role1 FROM role2; -GRANT role1 TO role2 WITH INHERIT TRUE; -NOTICE: not propagating GRANT/REVOKE commands with specified INHERIT/SET options to worker nodes -HINT: Connect to worker nodes directly to manually run the same GRANT/REVOKE command after disabling DDL propagation. -REVOKE role1 FROM role2; -GRANT role1 TO role2 WITH INHERIT OPTION; -NOTICE: not propagating GRANT/REVOKE commands with specified INHERIT/SET options to worker nodes -HINT: Connect to worker nodes directly to manually run the same GRANT/REVOKE command after disabling DDL propagation. -REVOKE role1 FROM role2; -GRANT role1 TO role2 WITH SET FALSE; -NOTICE: not propagating GRANT/REVOKE commands with specified INHERIT/SET options to worker nodes -HINT: Connect to worker nodes directly to manually run the same GRANT/REVOKE command after disabling DDL propagation. -REVOKE role1 FROM role2; -GRANT role1 TO role2 WITH SET TRUE; -NOTICE: not propagating GRANT/REVOKE commands with specified INHERIT/SET options to worker nodes -HINT: Connect to worker nodes directly to manually run the same GRANT/REVOKE command after disabling DDL propagation. -REVOKE role1 FROM role2; -GRANT role1 TO role2 WITH SET OPTION; -NOTICE: not propagating GRANT/REVOKE commands with specified INHERIT/SET options to worker nodes -HINT: Connect to worker nodes directly to manually run the same GRANT/REVOKE command after disabling DDL propagation. -REVOKE role1 FROM role2; --- connect to worker node -GRANT role1 TO role2 WITH ADMIN OPTION, INHERIT FALSE, SET FALSE; -NOTICE: not propagating GRANT/REVOKE commands with specified INHERIT/SET options to worker nodes -HINT: Connect to worker nodes directly to manually run the same GRANT/REVOKE command after disabling DDL propagation. 
SELECT roleid::regrole::text AS role, member::regrole::text, admin_option, inherit_option, set_option FROM pg_auth_members WHERE roleid::regrole::text = 'role1' ORDER BY 1, 2; - role | member | admin_option | inherit_option | set_option + role | member | admin_option | inherit_option | set_option --------------------------------------------------------------------- - role1 | role2 | t | f | f -(1 row) +(0 rows) \c - - - :worker_1_port SELECT roleid::regrole::text AS role, member::regrole::text, @@ -1059,27 +1028,22 @@ WHERE roleid::regrole::text = 'role1' ORDER BY 1, 2; --------------------------------------------------------------------- (0 rows) -SET citus.enable_ddl_propagation TO off; -GRANT role1 TO role2 WITH ADMIN OPTION, INHERIT FALSE, SET FALSE; -RESET citus.enable_ddl_propagation; -SELECT roleid::regrole::text AS role, member::regrole::text, -admin_option, inherit_option, set_option FROM pg_auth_members -WHERE roleid::regrole::text = 'role1' ORDER BY 1, 2; - role | member | admin_option | inherit_option | set_option ---------------------------------------------------------------------- - role1 | role2 | t | f | f -(1 row) - \c - - - :master_port -REVOKE role1 FROM role2; --- test REVOKES as well -GRANT role1 TO role2; -REVOKE SET OPTION FOR role1 FROM role2; -NOTICE: not propagating GRANT/REVOKE commands with specified INHERIT/SET options to worker nodes -HINT: Connect to worker nodes directly to manually run the same GRANT/REVOKE command after disabling DDL propagation. -REVOKE INHERIT OPTION FOR role1 FROM role2; -NOTICE: not propagating GRANT/REVOKE commands with specified INHERIT/SET options to worker nodes -HINT: Connect to worker nodes directly to manually run the same GRANT/REVOKE command after disabling DDL propagation. +-- Set GUCs to log remote commands and filter on REVOKE commands +SET citus.log_remote_commands TO on; +SET citus.grep_remote_commands = '%REVOKE%'; + -- test REVOKES as well + GRANT role1 TO role2; + REVOKE SET OPTION FOR role1 FROM role2; +NOTICE: issuing REVOKE SET OPTION FOR role1 FROM role2 RESTRICT; +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing REVOKE SET OPTION FOR role1 FROM role2 RESTRICT; +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx + REVOKE INHERIT OPTION FOR role1 FROM role2; +NOTICE: issuing REVOKE INHERIT OPTION FOR role1 FROM role2 RESTRICT; +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing REVOKE INHERIT OPTION FOR role1 FROM role2 RESTRICT; +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx DROP ROLE role1, role2; -- test that everything works fine for roles that are not propagated SET citus.enable_ddl_propagation TO off; @@ -1090,7 +1054,8 @@ RESET citus.enable_ddl_propagation; -- by default, admin option is false, inherit is true, set is true GRANT role3 TO role4; GRANT role3 TO role5 WITH ADMIN TRUE, INHERIT FALSE, SET FALSE; -SELECT roleid::regrole::text AS role, member::regrole::text, admin_option, inherit_option, set_option FROM pg_auth_members WHERE roleid::regrole::text = 'role3' ORDER BY 1, 2; +SELECT roleid::regrole::text AS role, member::regrole::text, admin_option, inherit_option, set_option FROM pg_auth_members +WHERE roleid::regrole::text = 'role3' ORDER BY 1, 2; role | member | admin_option | inherit_option | set_option --------------------------------------------------------------------- role3 | role4 | f | t | t @@ -1098,6 +1063,118 @@ SELECT roleid::regrole::text AS role, member::regrole::text, admin_option, inher (2 
rows) DROP ROLE role3, role4, role5; +-- Test that everything works fine for roles that are propagated +CREATE ROLE role6; +CREATE ROLE role7; +CREATE ROLE role8; +CREATE ROLE role9; +CREATE ROLE role10; +CREATE ROLE role11; +CREATE ROLE role12; +CREATE ROLE role13; +CREATE ROLE role14; +CREATE ROLE role15; +CREATE ROLE role16; +CREATE ROLE role17; +CREATE ROLE role18 NOINHERIT; +CREATE ROLE role19; +CREATE ROLE role20; +-- Grant role with admin and inherit options set to true +GRANT role6 TO role7 WITH ADMIN OPTION, INHERIT TRUE; +-- GRANT with INHERIT and SET Options +-- note that set is true by default so we don't include it in the propagation +GRANT role7 TO role8 WITH INHERIT TRUE, SET TRUE; +-- Grant role with admin option set to true and inherit option set to false +GRANT role9 TO role10 WITH ADMIN OPTION, INHERIT FALSE; +-- Grant role with admin option set to true, and inherit/set options set to false +GRANT role11 TO role12 WITH INHERIT FALSE, ADMIN TRUE, SET FALSE; +-- Grant role with inherit set to false +GRANT role13 TO role14 WITH INHERIT FALSE; +-- Grant role with set option set to false +GRANT role15 TO role16 WITH SET FALSE; +-- Handles with default inherit false +-- we created role18 with noinherit option above +GRANT role17 TO role18; +-- Run GRANT/REVOKE commands on worker nodes +\c - - - :worker_1_port +-- Run GRANT command on worker node +GRANT role19 TO role20; +\c - - - :master_port +SELECT roleid::regrole::text AS role, member::regrole::text, admin_option, inherit_option, set_option +FROM pg_auth_members +WHERE roleid::regrole::text LIKE 'role%' +ORDER BY 1, 2; + role | member | admin_option | inherit_option | set_option +--------------------------------------------------------------------- + role11 | role12 | t | f | f + role13 | role14 | f | f | t + role15 | role16 | f | t | f + role17 | role18 | f | f | t + role19 | role20 | f | t | t + role6 | role7 | t | t | t + role7 | role8 | f | t | t + role9 | role10 | t | f | t +(8 rows) + +\c - - - :worker_1_port +SELECT roleid::regrole::text AS role, member::regrole::text, admin_option, inherit_option, set_option +FROM pg_auth_members +WHERE roleid::regrole::text LIKE 'role%' +ORDER BY 1, 2; + role | member | admin_option | inherit_option | set_option +--------------------------------------------------------------------- + role11 | role12 | t | f | f + role13 | role14 | f | f | t + role15 | role16 | f | t | f + role17 | role18 | f | f | t + role19 | role20 | f | t | t + role6 | role7 | t | t | t + role7 | role8 | f | t | t + role9 | role10 | t | f | t +(8 rows) + +\c - - - :master_port +DROP ROLE role6, role7, role8, role9, role10, role11, role12, + role13, role14, role15, role16, role17, role18, role19, role20; +-- here we test that we propagate admin, set and inherit options correctly +-- when adding a new node. + -- First, we need to remove the node: +SELECT 1 FROM citus_remove_node('localhost', :worker_2_port); +?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +CREATE ROLE create_role1; +CREATE ROLE create_role2; +CREATE ROLE create_role3; +-- test grant role +GRANT create_role1 TO create_role2 WITH ADMIN OPTION, INHERIT FALSE, SET FALSE; +GRANT create_role2 TO create_role3 WITH INHERIT TRUE, ADMIN FALSE, SET FALSE; +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option, inherit_option, set_option FROM pg_auth_members WHERE roleid::regrole::text LIKE 'create\_%' ORDER BY 1, 2; + role | member | grantor | admin_option | inherit_option | set_option +--------------------------------------------------------------------- + create_role1 | create_role2 | postgres | t | f | f + create_role2 | create_role3 | postgres | f | t | f +(2 rows) + +-- Add second worker node +SELECT 1 FROM citus_add_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +\c - - - :worker_2_port +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option, inherit_option, set_option FROM pg_auth_members WHERE roleid::regrole::text LIKE 'create\_%' ORDER BY 1, 2; + role | member | grantor | admin_option | inherit_option | set_option +--------------------------------------------------------------------- + create_role1 | create_role2 | postgres | t | f | f + create_role2 | create_role3 | postgres | f | t | f +(2 rows) + +\c - - - :master_port +DROP ROLE create_role1, create_role2, create_role3; \set VERBOSITY terse SET client_min_messages TO ERROR; DROP EXTENSION postgres_fdw CASCADE; diff --git a/src/test/regress/expected/publication.out b/src/test/regress/expected/publication.out index c761efb3e95..2df4e59d30b 100644 --- a/src/test/regress/expected/publication.out +++ b/src/test/regress/expected/publication.out @@ -267,6 +267,7 @@ SET client_min_messages TO ERROR; DROP SCHEMA publication CASCADE; DROP SCHEMA "publication-1" CASCADE; DROP SCHEMA citus_schema_1 CASCADE; +SELECT public.wait_for_resource_cleanup(); \q \endif -- recreate a mixed publication @@ -544,3 +545,9 @@ DROP SCHEMA publication CASCADE; DROP SCHEMA "publication-1" CASCADE; DROP SCHEMA citus_schema_1 CASCADE; DROP SCHEMA publication2 CASCADE; +SELECT public.wait_for_resource_cleanup(); + wait_for_resource_cleanup +--------------------------------------------------------------------- + +(1 row) + diff --git a/src/test/regress/expected/publication_0.out b/src/test/regress/expected/publication_0.out index 14fa94d17d6..e768a1d412e 100644 --- a/src/test/regress/expected/publication_0.out +++ b/src/test/regress/expected/publication_0.out @@ -267,4 +267,10 @@ SET client_min_messages TO ERROR; DROP SCHEMA publication CASCADE; DROP SCHEMA "publication-1" CASCADE; DROP SCHEMA citus_schema_1 CASCADE; +SELECT public.wait_for_resource_cleanup(); + wait_for_resource_cleanup +--------------------------------------------------------------------- + +(1 row) + \q diff --git a/src/test/regress/expected/query_single_shard_table.out b/src/test/regress/expected/query_single_shard_table.out index ad6037b65b5..5f551a9881d 100644 --- a/src/test/regress/expected/query_single_shard_table.out +++ b/src/test/regress/expected/query_single_shard_table.out @@ -1529,7 +1529,7 @@ DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries DEBUG: generating subplan XXX_1 for CTE level_1: WITH RECURSIVE level_2_recursive(x) AS (VALUES (1) UNION 
ALL SELECT (nullkey_c1_t1.a OPERATOR(pg_catalog.+) 1) FROM (query_single_shard_table.nullkey_c1_t1 JOIN level_2_recursive level_2_recursive_1 ON ((nullkey_c1_t1.a OPERATOR(pg_catalog.=) level_2_recursive_1.x))) WHERE (nullkey_c1_t1.a OPERATOR(pg_catalog.<) 100)) SELECT level_2_recursive.x, distributed_table.a, distributed_table.b FROM (level_2_recursive JOIN query_single_shard_table.distributed_table ON ((level_2_recursive.x OPERATOR(pg_catalog.=) distributed_table.a))) DEBUG: Router planner cannot handle multi-shard select queries -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column -- grouping set SELECT id, substring(title, 2, 1) AS subtitle, count(*) diff --git a/src/test/regress/expected/reassign_owned.out b/src/test/regress/expected/reassign_owned.out new file mode 100644 index 00000000000..366e6d9456c --- /dev/null +++ b/src/test/regress/expected/reassign_owned.out @@ -0,0 +1,194 @@ +CREATE ROLE distributed_source_role1; +create ROLE "distributed_source_role-\!"; +CREATE ROLE "distributed_target_role1-\!"; +set citus.enable_create_role_propagation to off; +create ROLE local_target_role1; +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. +\c - - - :worker_1_port +set citus.enable_create_role_propagation to off; +CREATE ROLE local_target_role1; +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. +\c - - - :master_port +set citus.enable_create_role_propagation to off; +create role local_source_role1; +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. 
+reset citus.enable_create_role_propagation; +GRANT CREATE ON SCHEMA public TO distributed_source_role1,"distributed_source_role-\!"; +SET ROLE distributed_source_role1; +CREATE TABLE public.test_table (col1 int); +set role "distributed_source_role-\!"; +CREATE TABLE public.test_table2 (col2 int); +RESET ROLE; +select create_distributed_table('test_table', 'col1'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +select create_distributed_table('test_table2', 'col2'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT + schemaname, + tablename, + tableowner + FROM + pg_tables + WHERE + tablename in ('test_table', 'test_table2') + ORDER BY tablename + ) q2 + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + [{"tablename": "test_table", "schemaname": "public", "tableowner": "distributed_source_role1"}, {"tablename": "test_table2", "schemaname": "public", "tableowner": "distributed_source_role-\\!"}] + [{"tablename": "test_table", "schemaname": "public", "tableowner": "distributed_source_role1"}, {"tablename": "test_table2", "schemaname": "public", "tableowner": "distributed_source_role-\\!"}] + [{"tablename": "test_table", "schemaname": "public", "tableowner": "distributed_source_role1"}, {"tablename": "test_table2", "schemaname": "public", "tableowner": "distributed_source_role-\\!"}] +(3 rows) + +--tests for reassigning owned by with multiple distributed roles and a local role to a distributed role +--local role should be ignored +set citus.log_remote_commands to on; +set citus.grep_remote_commands = '%REASSIGN OWNED BY%'; +REASSIGN OWNED BY distributed_source_role1,"distributed_source_role-\!",local_source_role1 TO "distributed_target_role1-\!"; +NOTICE: issuing REASSIGN OWNED BY distributed_source_role1, "distributed_source_role-\!" TO "distributed_target_role1-\!" +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing REASSIGN OWNED BY distributed_source_role1, "distributed_source_role-\!" TO "distributed_target_role1-\!" +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +reset citus.grep_remote_commands; +reset citus.log_remote_commands; +--check if the owner changed to "distributed_target_role1-\!"
+RESET citus.log_remote_commands; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT + schemaname, + tablename, + tableowner + FROM + pg_tables + WHERE + tablename in ('test_table', 'test_table2') + ORDER BY tablename + ) q2 + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + [{"tablename": "test_table", "schemaname": "public", "tableowner": "distributed_target_role1-\\!"}, {"tablename": "test_table2", "schemaname": "public", "tableowner": "distributed_target_role1-\\!"}] + [{"tablename": "test_table", "schemaname": "public", "tableowner": "distributed_target_role1-\\!"}, {"tablename": "test_table2", "schemaname": "public", "tableowner": "distributed_target_role1-\\!"}] + [{"tablename": "test_table", "schemaname": "public", "tableowner": "distributed_target_role1-\\!"}, {"tablename": "test_table2", "schemaname": "public", "tableowner": "distributed_target_role1-\\!"}] +(3 rows) + +--tests for reassigning owned by with multiple distributed roles and a local role to a local role +--local role should be ignored +SET ROLE distributed_source_role1; +CREATE TABLE public.test_table3 (col1 int); +set role "distributed_source_role-\!"; +CREATE TABLE public.test_table4 (col2 int); +RESET ROLE; +select create_distributed_table('test_table3', 'col1'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +select create_distributed_table('test_table4', 'col2'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +set citus.log_remote_commands to on; +set citus.grep_remote_commands = '%REASSIGN OWNED BY%'; +set citus.enable_create_role_propagation to off; +set citus.enable_alter_role_propagation to off; +set citus.enable_alter_role_set_propagation to off; +REASSIGN OWNED BY distributed_source_role1,"distributed_source_role-\!",local_source_role1 TO local_target_role1; +NOTICE: issuing REASSIGN OWNED BY distributed_source_role1, "distributed_source_role-\!" TO local_target_role1 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing REASSIGN OWNED BY distributed_source_role1, "distributed_source_role-\!"
TO local_target_role1 +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +show citus.enable_create_role_propagation; + citus.enable_create_role_propagation +--------------------------------------------------------------------- + off +(1 row) + +show citus.enable_alter_role_propagation; + citus.enable_alter_role_propagation +--------------------------------------------------------------------- + off +(1 row) + +show citus.enable_alter_role_set_propagation; + citus.enable_alter_role_set_propagation +--------------------------------------------------------------------- + off +(1 row) + +reset citus.grep_remote_commands; +reset citus.log_remote_commands; +reset citus.enable_create_role_propagation; +reset citus.enable_alter_role_propagation; +reset citus.enable_alter_role_set_propagation; +--check if the owner changed to local_target_role1 +SET citus.log_remote_commands = false; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT + schemaname, + tablename, + tableowner + FROM + pg_tables + WHERE + tablename in ('test_table3', 'test_table4') + ORDER BY tablename + ) q2 + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + [{"tablename": "test_table3", "schemaname": "public", "tableowner": "local_target_role1"}, {"tablename": "test_table4", "schemaname": "public", "tableowner": "local_target_role1"}] + [{"tablename": "test_table3", "schemaname": "public", "tableowner": "local_target_role1"}, {"tablename": "test_table4", "schemaname": "public", "tableowner": "local_target_role1"}] + [{"tablename": "test_table3", "schemaname": "public", "tableowner": "local_target_role1"}, {"tablename": "test_table4", "schemaname": "public", "tableowner": "local_target_role1"}] +(3 rows) + +--clear resources +DROP OWNED BY distributed_source_role1, "distributed_source_role-\!","distributed_target_role1-\!",local_target_role1; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT + schemaname, + tablename, + tableowner +FROM + pg_tables +WHERE + tablename in ('test_table', 'test_table2', 'test_table3', 'test_table4') + ) q2 + $$ +) ORDER BY result; + result +--------------------------------------------------------------------- + + + +(3 rows) + +set client_min_messages to warning; +drop role distributed_source_role1, "distributed_source_role-\!","distributed_target_role1-\!",local_target_role1,local_source_role1; diff --git a/src/test/regress/expected/remove_non_default_nodes.out b/src/test/regress/expected/remove_non_default_nodes.out new file mode 100644 index 00000000000..7645af708db --- /dev/null +++ b/src/test/regress/expected/remove_non_default_nodes.out @@ -0,0 +1,13 @@ +-- The default nodes for the citus test suite are coordinator and 2 worker nodes +-- Which we identify with master_port, worker_1_port, worker_2_port. +-- When needed in some tests, GetLocalNodeId() does not behave correctly, +-- So we remove the non default nodes. This test expects the non default nodes +-- to not have any active placements.
+SELECT any_value(citus_remove_node('localhost', nodeport)) +FROM pg_dist_node +WHERE nodeport NOT IN (:master_port, :worker_1_port, :worker_2_port); + any_value +--------------------------------------------------------------------- + +(1 row) + diff --git a/src/test/regress/expected/role_command_from_any_node.out b/src/test/regress/expected/role_command_from_any_node.out new file mode 100644 index 00000000000..a5e22f40bd6 --- /dev/null +++ b/src/test/regress/expected/role_command_from_any_node.out @@ -0,0 +1,274 @@ +-- idempotently remove the coordinator from metadata +SELECT COUNT(citus_remove_node(nodename, nodeport)) >= 0 FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :master_port; + ?column? +--------------------------------------------------------------------- + t +(1 row) + +-- make sure that CREATE ROLE from workers is not supported when coordinator is not added to metadata +SELECT result FROM run_command_on_workers('CREATE ROLE test_role'); + result +--------------------------------------------------------------------- + ERROR: coordinator is not added to the metadata + ERROR: coordinator is not added to the metadata +(2 rows) + +\c - - - :master_port +CREATE SCHEMA role_command_from_any_node; +SET search_path TO role_command_from_any_node; +SET client_min_messages TO WARNING; +SELECT 1 FROM citus_add_node('localhost', :master_port, groupid => 0); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +CREATE OR REPLACE FUNCTION check_role_on_all_nodes(p_role_name text) +RETURNS TABLE (node_type text, result text) +AS $func$ +DECLARE + v_worker_query text; +BEGIN + v_worker_query := format( + $$ + SELECT to_jsonb(q1.*) FROM ( + SELECT + ( + SELECT COUNT(*) = 1 FROM pg_roles WHERE rolname = '%s' + ) AS role_exists, + ( + SELECT to_jsonb(q.*) FROM (SELECT * FROM pg_roles WHERE rolname = '%s') q + ) AS role_properties, + ( + SELECT COUNT(*) = 1 + FROM pg_dist_object + WHERE objid = (SELECT oid FROM pg_roles WHERE rolname = '%s') + ) AS pg_dist_object_record_for_role_exists, + ( + SELECT COUNT(*) > 0 + FROM pg_dist_object + WHERE classid = 1260 AND objid NOT IN (SELECT oid FROM pg_roles) + ) AS stale_pg_dist_object_record_for_a_role_exists + ) q1 + $$, + p_role_name, p_role_name, p_role_name + ); + + RETURN QUERY + SELECT + CASE WHEN (groupid = 0 AND groupid = (SELECT groupid FROM pg_dist_local_group)) THEN 'coordinator (local)' + WHEN (groupid = 0) THEN 'coordinator (remote)' + WHEN (groupid = (SELECT groupid FROM pg_dist_local_group)) THEN 'worker node (local)' + ELSE 'worker node (remote)' + END AS node_type, + q2.result + FROM run_command_on_all_nodes(v_worker_query) q2 + JOIN pg_dist_node USING (nodeid); +END; +$func$ LANGUAGE plpgsql; +\c - - - :worker_1_port +SET search_path TO role_command_from_any_node; +SET client_min_messages TO NOTICE; +SET citus.enable_create_role_propagation TO OFF; +CREATE ROLE test_role; +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. 
+SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (local) | {"role_exists": true, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} +(3 rows) + +DROP ROLE test_role; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (local) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} +(3 rows) + +CREATE ROLE test_role; +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (local) | {"role_exists": true, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} +(3 rows) + +SET citus.enable_create_role_propagation TO ON; +-- doesn't fail even if the role doesn't exist on other nodes +DROP ROLE test_role; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (local) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} +(3 rows) + +CREATE ROLE test_role; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"role_exists": true, "pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (local) | {"role_exists": true, 
"pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": true, "pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false} +(3 rows) + +DROP ROLE test_role; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (local) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} +(3 rows) + +CREATE ROLE test_role; +SET citus.enable_alter_role_propagation TO OFF; +ALTER ROLE test_role RENAME TO test_role_renamed; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (local) | {"role_exists": true, "pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": false, "pg_dist_object_record_for_role_exists": false, "stale_pg_dist_object_record_for_a_role_exists": false} +(3 rows) + +ALTER ROLE test_role_renamed RENAME TO test_role; +SET citus.enable_alter_role_propagation TO ON; +ALTER ROLE test_role RENAME TO test_role_renamed; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"role_exists": true, "pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (local) | {"role_exists": true, "pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": true, "pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false} +(3 rows) + +SET citus.enable_alter_role_propagation TO OFF; +ALTER ROLE test_role_renamed CREATEDB; +SET citus.enable_alter_role_propagation TO ON; +SELECT node_type, (result::jsonb)->'role_properties'->'rolcreatedb' as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | false + worker node (local) | true + worker node (remote) | false +(3 rows) + +ALTER ROLE test_role_renamed CREATEDB; +SELECT node_type, (result::jsonb)->'role_properties'->'rolcreatedb' as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | true + worker node (local) | true + worker node (remote) | true +(3 rows) + +SET citus.enable_alter_role_set_propagation TO ON; +ALTER ROLE 
current_user IN DATABASE "regression" SET enable_hashjoin TO OFF; +SELECT result FROM run_command_on_all_nodes('SHOW enable_hashjoin') ORDER BY result; + result +--------------------------------------------------------------------- + off + off + off +(3 rows) + +SET citus.enable_alter_role_set_propagation TO OFF; +ALTER ROLE current_user IN DATABASE "regression" SET enable_hashjoin TO ON; +SELECT result FROM run_command_on_all_nodes('SHOW enable_hashjoin') ORDER BY result; + result +--------------------------------------------------------------------- + off + off + on +(3 rows) + +SET citus.enable_alter_role_set_propagation TO ON; +ALTER ROLE current_user IN DATABASE "regression" RESET enable_hashjoin; +CREATE ROLE another_user; +SET citus.enable_create_role_propagation TO OFF; +GRANT another_user TO test_role_renamed; +SELECT result FROM run_command_on_all_nodes($$ + SELECT COUNT(*)=1 FROM pg_auth_members WHERE roleid = 'another_user'::regrole AND member = 'test_role_renamed'::regrole +$$) ORDER BY result; + result +--------------------------------------------------------------------- + f + f + t +(3 rows) + +SET citus.enable_create_role_propagation TO ON; +SET client_min_messages TO ERROR; +GRANT another_user TO test_role_renamed; +SET client_min_messages TO NOTICE; +SELECT result FROM run_command_on_all_nodes($$ + SELECT COUNT(*)=1 FROM pg_auth_members WHERE roleid = 'another_user'::regrole AND member = 'test_role_renamed'::regrole +$$) ORDER BY result; + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +\c - - - :master_port +SET search_path TO role_command_from_any_node; +SET client_min_messages TO NOTICE; +SELECT citus_remove_node('localhost', :worker_1_port); + citus_remove_node +--------------------------------------------------------------------- + +(1 row) + +SELECT 1 FROM citus_add_node('localhost', :worker_1_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +-- make sure that citus_add_node() propagates the roles created via a worker +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"role_exists": true, "pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": true, "pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false} + worker node (remote) | {"role_exists": true, "pg_dist_object_record_for_role_exists": true, "stale_pg_dist_object_record_for_a_role_exists": false} +(3 rows) + +SELECT citus_remove_node('localhost', :master_port); + citus_remove_node +--------------------------------------------------------------------- + +(1 row) + +\c - - - :worker_1_port +-- they fail because the coordinator is not added to metadata +DROP ROLE test_role_renamed; +ERROR: coordinator is not added to the metadata +HINT: Use SELECT citus_set_coordinator_host('') on coordinator to configure the coordinator hostname +ALTER ROLE test_role_renamed RENAME TO test_role; +ERROR: coordinator is not added to the metadata +HINT: Use SELECT citus_set_coordinator_host('') on coordinator to configure the coordinator hostname +ALTER ROLE test_role_renamed CREATEDB; +ERROR: coordinator is not added to the metadata +HINT: Use SELECT citus_set_coordinator_host('') on coordinator to configure the coordinator hostname +ALTER ROLE current_user IN DATABASE "regression" SET enable_hashjoin TO OFF; +ERROR: coordinator is not added to the metadata +HINT: Use SELECT citus_set_coordinator_host('') on coordinator to configure the coordinator hostname +GRANT another_user TO test_role_renamed; +ERROR: coordinator is not added to the metadata +HINT: Use SELECT citus_set_coordinator_host('') on coordinator to configure the coordinator hostname +\c - - - :master_port +DROP ROLE test_role_renamed, another_user; +SET client_min_messages TO WARNING; +DROP SCHEMA role_command_from_any_node CASCADE; diff --git a/src/test/regress/expected/role_operations_from_non_maindb.out b/src/test/regress/expected/role_operations_from_non_maindb.out new file mode 100644 index 00000000000..3b51c89b028 --- /dev/null +++ b/src/test/regress/expected/role_operations_from_non_maindb.out @@ -0,0 +1,138 @@ +-- Create a new database +set citus.enable_create_database_propagation to on; +CREATE DATABASE role_operations_test_db; +SET citus.superuser TO 'postgres'; +-- Connect to the new database +\c role_operations_test_db +-- Test CREATE ROLE with various options +CREATE ROLE test_role1 WITH LOGIN PASSWORD 'password1'; +\c role_operations_test_db - - :worker_1_port +CREATE USER "test_role2-needs\!escape" +WITH + SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION +LIMIT 10 VALID UNTIL '2023-01-01' IN ROLE test_role1; +\c regression - - :master_port +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape') + ORDER BY rolname + ) t +$$); + result 
+--------------------------------------------------------------------- + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":true,"rolinherit":true,"rolcreaterole":true,"rolcreatedb":true,"rolcanlogin":true,"rolreplication":true,"rolbypassrls":true,"rolconnlimit":10,"pass_not_empty":null,"date":"2023-01-01"}] + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":true,"rolinherit":true,"rolcreaterole":true,"rolcreatedb":true,"rolcanlogin":true,"rolreplication":true,"rolbypassrls":true,"rolconnlimit":10,"pass_not_empty":null,"date":"2023-01-01"}] + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":true,"rolinherit":true,"rolcreaterole":true,"rolcreatedb":true,"rolcanlogin":true,"rolreplication":true,"rolbypassrls":true,"rolconnlimit":10,"pass_not_empty":null,"date":"2023-01-01"}] +(3 rows) + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT r.rolname + FROM pg_dist_object d + JOIN pg_roles r ON d.objid = r.oid + WHERE r.rolname IN ('test_role1', 'test_role2-needs\!escape') + order by r.rolname + ) t +$$); + result +--------------------------------------------------------------------- + [{"rolname":"test_role1"},{"rolname":"test_role2-needs\\!escape"}] + [{"rolname":"test_role1"},{"rolname":"test_role2-needs\\!escape"}] + [{"rolname":"test_role1"},{"rolname":"test_role2-needs\\!escape"}] +(3 rows) + +\c role_operations_test_db - - :master_port +-- Test ALTER ROLE with various options +ALTER ROLE test_role1 WITH PASSWORD 'new_password1'; +\c role_operations_test_db - - :worker_1_port +ALTER USER "test_role2-needs\!escape" +WITH + NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION +LIMIT 5 VALID UNTIL '2024-01-01'; +\c regression - - :master_port +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape') + ORDER BY rolname + ) t +$$); + result +--------------------------------------------------------------------- + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":false,"rolinherit":false,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":5,"pass_not_empty":null,"date":"2024-01-01"}] + 
[{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":false,"rolinherit":false,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":5,"pass_not_empty":null,"date":"2024-01-01"}] + [{"rolname":"test_role1","rolsuper":false,"rolinherit":true,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":true,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":-1,"pass_not_empty":true,"date":null},{"rolname":"test_role2-needs\\!escape","rolsuper":false,"rolinherit":false,"rolcreaterole":false,"rolcreatedb":false,"rolcanlogin":false,"rolreplication":false,"rolbypassrls":false,"rolconnlimit":5,"pass_not_empty":null,"date":"2024-01-01"}] +(3 rows) + +\c role_operations_test_db - - :master_port +-- Test DROP ROLE +DROP ROLE no_such_role; -- fails nicely +ERROR: role "no_such_role" does not exist +DROP ROLE IF EXISTS no_such_role; -- doesn't fail +NOTICE: role "no_such_role" does not exist, skipping +CREATE ROLE new_role; +DROP ROLE IF EXISTS no_such_role, new_role; -- doesn't fail +NOTICE: role "no_such_role" does not exist, skipping +DROP ROLE IF EXISTS test_role1, "test_role2-needs\!escape"; +\c regression - - :master_port +--verify that roles and dist_object are dropped +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','new_role','no_such_role') + ORDER BY rolname + ) t +$$); + result +--------------------------------------------------------------------- + + + +(3 rows) + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT r.rolname + FROM pg_roles r + WHERE r.rolname IN ('test_role1', 'test_role2-needs\!escape','new_role','no_such_role') + order by r.rolname + ) t +$$); + result +--------------------------------------------------------------------- + + + +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$ + SELECT count(*) leaked_pg_dist_object_records_for_roles + FROM pg_dist_object LEFT JOIN pg_authid ON (objid = oid) + WHERE classid = 1260 AND oid IS NULL +$$); + result +--------------------------------------------------------------------- + 0 + 0 + 0 +(3 rows) + +-- Clean up: drop the database +set citus.enable_create_database_propagation to on; +DROP DATABASE role_operations_test_db; +reset citus.enable_create_database_propagation; diff --git a/src/test/regress/expected/schema_based_sharding.out b/src/test/regress/expected/schema_based_sharding.out index 28cb45688f9..711c3914137 100644 --- a/src/test/regress/expected/schema_based_sharding.out +++ b/src/test/regress/expected/schema_based_sharding.out @@ -13,19 +13,19 @@ SELECT 1 FROM citus_add_node('localhost', :master_port, groupid => 0); SET client_min_messages TO NOTICE; -- Verify that the UDFs used to sync tenant schema metadata to workers -- fail on NULL input. 
-SELECT citus_internal_add_tenant_schema(NULL, 1); +SELECT citus_internal.add_tenant_schema(NULL, 1); ERROR: schema_id cannot be NULL -SELECT citus_internal_add_tenant_schema(1, NULL); +SELECT citus_internal.add_tenant_schema(1, NULL); ERROR: colocation_id cannot be NULL -SELECT citus_internal_delete_tenant_schema(NULL); +SELECT citus_internal.delete_tenant_schema(NULL); ERROR: schema_id cannot be NULL -SELECT citus_internal_unregister_tenant_schema_globally(1, NULL); +SELECT citus_internal.unregister_tenant_schema_globally(1, NULL); ERROR: schema_name cannot be NULL -SELECT citus_internal_unregister_tenant_schema_globally(NULL, 'text'); +SELECT citus_internal.unregister_tenant_schema_globally(NULL, 'text'); ERROR: schema_id cannot be NULL --- Verify that citus_internal_unregister_tenant_schema_globally can only +-- Verify that citus_internal.unregister_tenant_schema_globally can only -- be called on schemas that are dropped already. -SELECT citus_internal_unregister_tenant_schema_globally('regular_schema'::regnamespace, 'regular_schema'); +SELECT citus_internal.unregister_tenant_schema_globally('regular_schema'::regnamespace, 'regular_schema'); ERROR: schema is expected to be already dropped because this function is only expected to be called from Citus drop hook SELECT 1 FROM citus_remove_node('localhost', :worker_2_port); ?column? @@ -1511,10 +1511,10 @@ SELECT pg_reload_conf(); t (1 row) --- Verify that citus_internal_unregister_tenant_schema_globally is a no-op +-- Verify that citus_internal.unregister_tenant_schema_globally is a no-op -- on workers. -SELECT citus_internal_unregister_tenant_schema_globally('tenant_3'::regnamespace, 'tenant_3'); - citus_internal_unregister_tenant_schema_globally +SELECT citus_internal.unregister_tenant_schema_globally('tenant_3'::regnamespace, 'tenant_3'); + unregister_tenant_schema_globally --------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/seclabel.out b/src/test/regress/expected/seclabel.out new file mode 100644 index 00000000000..ca6c6f984d0 --- /dev/null +++ b/src/test/regress/expected/seclabel.out @@ -0,0 +1,226 @@ +-- +-- SECLABEL +-- +-- Test suite for SECURITY LABEL ON ROLE statements +-- +-- first we remove one of the worker nodes to be able to test +-- citus_add_node later +SELECT citus_remove_node('localhost', :worker_2_port); + citus_remove_node +--------------------------------------------------------------------- + +(1 row) + +-- create two roles, one with characters that need escaping +CREATE ROLE user1; +CREATE ROLE "user 2"; +-- check an invalid label for our current dummy hook citus_test_object_relabel +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'invalid_label'; +ERROR: 'invalid_label' is not a valid security label for Citus tests. 
+-- if we disable metadata_sync, the command will not be propagated +SET citus.enable_metadata_sync TO off; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_unclassified'; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | +(2 rows) + +RESET citus.enable_metadata_sync; +-- check that we only support propagating for roles +SET citus.shard_replication_factor to 1; +-- distributed table +CREATE TABLE a (a int); +SELECT create_distributed_table('a', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +-- distributed view +CREATE VIEW v_dist AS SELECT * FROM a; +-- distributed function +CREATE FUNCTION notice(text) RETURNS void LANGUAGE plpgsql AS $$ + BEGIN RAISE NOTICE '%', $1; END; $$; +SECURITY LABEL ON TABLE a IS 'citus_classified'; +NOTICE: not propagating SECURITY LABEL commands whose object type is not role +HINT: Connect to worker nodes directly to manually run the same SECURITY LABEL command. +SECURITY LABEL ON FUNCTION notice IS 'citus_unclassified'; +NOTICE: not propagating SECURITY LABEL commands whose object type is not role +HINT: Connect to worker nodes directly to manually run the same SECURITY LABEL command. +SECURITY LABEL ON VIEW v_dist IS 'citus_classified'; +NOTICE: not propagating SECURITY LABEL commands whose object type is not role +HINT: Connect to worker nodes directly to manually run the same SECURITY LABEL command. +SELECT node_type, result FROM get_citus_tests_label_provider_labels('a') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "table", "provider": "citus '!tests_label_provider"} + worker_1 | +(2 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('notice(text)') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "function", "provider": "citus '!tests_label_provider"} + worker_1 | +(2 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('v_dist') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "view", "provider": "citus '!tests_label_provider"} + worker_1 | +(2 rows) + +\c - - - :worker_1_port +SECURITY LABEL ON TABLE a IS 'citus_classified'; +SECURITY LABEL ON FUNCTION notice IS 'citus_unclassified'; +SECURITY LABEL ON VIEW v_dist IS 'citus_classified'; +\c - - - :master_port +SELECT node_type, result FROM get_citus_tests_label_provider_labels('a') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "table", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "table", "provider": "citus '!tests_label_provider"} +(2 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('notice(text)') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": 
"citus_unclassified", "objtype": "function", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_unclassified", "objtype": "function", "provider": "citus '!tests_label_provider"} +(2 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('v_dist') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "view", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "view", "provider": "citus '!tests_label_provider"} +(2 rows) + +DROP TABLE a CASCADE; +NOTICE: drop cascades to view v_dist +DROP FUNCTION notice; +-- test that SECURITY LABEL statement is actually propagated for ROLES +SET citus.log_remote_commands TO on; +SET citus.grep_remote_commands = '%SECURITY LABEL%'; +-- we have exactly one provider loaded, so we may not include the provider in the command +SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified'; +NOTICE: issuing SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +SECURITY LABEL ON ROLE user1 IS NULL; +NOTICE: issuing SECURITY LABEL ON ROLE user1 IS NULL +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +SECURITY LABEL ON ROLE user1 IS 'citus_unclassified'; +NOTICE: issuing SECURITY LABEL ON ROLE user1 IS 'citus_unclassified' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_classified'; +NOTICE: issuing SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_classified' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +\c - - - :worker_1_port +SET citus.log_remote_commands TO on; +SET citus.grep_remote_commands = '%SECURITY LABEL%'; +-- command from the worker node should be propagated to the coordinator +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(2 rows) + +SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified'; +NOTICE: issuing SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(2 rows) + +RESET citus.log_remote_commands; +SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified'; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus 
'!tests_label_provider"} + worker_1 | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(2 rows) + +\c - - - :master_port +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(2 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(2 rows) + +-- add a new node and check that it also propagates the SECURITY LABEL statement to the new node +SET citus.log_remote_commands TO on; +SET citus.grep_remote_commands = '%SECURITY LABEL%'; +SELECT 1 FROM citus_add_node('localhost', :worker_2_port); +NOTICE: issuing SELECT worker_create_or_alter_role('user1', 'CREATE ROLE user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL', 'ALTER ROLE user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL');SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +NOTICE: issuing SELECT worker_create_or_alter_role('user 2', 'CREATE ROLE "user 2" NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL', 'ALTER ROLE "user 2" NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL');SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +-- disable the GUC and check that the command is not propagated +SET citus.enable_alter_role_propagation TO off; +SECURITY LABEL ON ROLE user1 IS 'citus_unclassified'; +NOTICE: not propagating SECURITY LABEL commands to other nodes +HINT: Connect to other nodes directly to manually assign necessary labels. +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +\c - - - :worker_2_port +SET citus.log_remote_commands TO on; +SET citus.grep_remote_commands = '%SECURITY LABEL%'; +SET citus.enable_alter_role_propagation TO off; +SECURITY LABEL ON ROLE user1 IS 'citus ''!unclassified'; +NOTICE: not propagating SECURITY LABEL commands to other nodes +HINT: Connect to other nodes directly to manually assign necessary labels. 
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +RESET citus.enable_alter_role_propagation; +-- cleanup +RESET citus.log_remote_commands; +DROP ROLE user1, "user 2"; diff --git a/src/test/regress/expected/seclabel_non_maindb.out b/src/test/regress/expected/seclabel_non_maindb.out new file mode 100644 index 00000000000..48c89fb3119 --- /dev/null +++ b/src/test/regress/expected/seclabel_non_maindb.out @@ -0,0 +1,111 @@ +-- SECLABEL +-- +-- Test suite for running SECURITY LABEL ON ROLE statements from non-main databases +SET citus.enable_create_database_propagation to ON; +CREATE DATABASE database1; +CREATE DATABASE database2; +\c - - - :worker_1_port +SET citus.enable_create_database_propagation to ON; +CREATE DATABASE database_w1; +\c - - - :master_port +CREATE ROLE user1; +\c database1 +SHOW citus.main_db; + citus.main_db +--------------------------------------------------------------------- + regression +(1 row) + +SHOW citus.superuser; + citus.superuser +--------------------------------------------------------------------- + postgres +(1 row) + +CREATE ROLE "user 2"; +-- Set a SECURITY LABEL on a role from a non-main database +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified'; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_unclassified'; +-- Check the result +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +\c database1 +-- Set a SECURITY LABEL on database, it should not be propagated +SECURITY LABEL FOR "citus '!tests_label_provider" ON DATABASE database1 IS 'citus_classified'; +-- Set a SECURITY LABEL on a table, it should not be propagated +CREATE TABLE a (i int); +SECURITY LABEL ON TABLE a IS 'citus_classified'; +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('database1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "database", "provider": "citus '!tests_label_provider"} + worker_1 | + 
worker_2 | +(3 rows) + +-- Check that only the SECURITY LABEL for ROLES is propagated to the non-main databases on other nodes +\c database_w1 - - :worker_1_port +SELECT provider, objtype, label, objname FROM pg_seclabels ORDER BY objname; + provider | objtype | label | objname +--------------------------------------------------------------------- + citus '!tests_label_provider | role | citus_unclassified | "user 2" + citus '!tests_label_provider | role | citus_classified | user1 +(2 rows) + +-- Check the result after a transaction +BEGIN; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_unclassified'; +SECURITY LABEL FOR "citus '!tests_label_provider" ON DATABASE database_w1 IS 'citus_classified'; +COMMIT; +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('database_w1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | + worker_1 | {"label": "citus_classified", "objtype": "database", "provider": "citus '!tests_label_provider"} + worker_2 | +(3 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +BEGIN; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_classified'; +ROLLBACK; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +-- clean up +SET citus.enable_create_database_propagation to ON; +DROP DATABASE database1; +DROP DATABASE database2; +DROP DATABASE database_w1; +DROP ROLE user1; +DROP ROLE "user 2"; +RESET citus.enable_create_database_propagation; diff --git a/src/test/regress/expected/shard_rebalancer.out b/src/test/regress/expected/shard_rebalancer.out index 7997b5e28b5..988fa68be88 100644 --- a/src/test/regress/expected/shard_rebalancer.out +++ b/src/test/regress/expected/shard_rebalancer.out @@ -127,7 +127,7 @@ SELECT pg_sleep(.1); -- wait to make sure the config has changed before running (1 row) SELECT master_drain_node('localhost', :master_port); -ERROR: connection to the remote node foobar:57636 failed with the following error: could not translate host name "foobar" to address: +ERROR: connection to the remote node postgres@foobar:57636 failed with the following error: could not translate host name "foobar" to address: CALL citus_cleanup_orphaned_resources(); ALTER SYSTEM RESET citus.local_hostname; SELECT pg_reload_conf(); @@ -197,7 +197,7 @@ SELECT pg_sleep(.1); -- wait to make sure the config has changed before running (1 row) SELECT replicate_table_shards('dist_table_test_2', max_shard_copies := 4, shard_transfer_mode:='block_writes'); -ERROR: connection to 
the remote node foobar:57636 failed with the following error: could not translate host name "foobar" to address: +ERROR: connection to the remote node postgres@foobar:57636 failed with the following error: could not translate host name "foobar" to address: ALTER SYSTEM RESET citus.local_hostname; SELECT pg_reload_conf(); pg_reload_conf @@ -328,8 +328,8 @@ RESET citus.shard_replication_factor; -- test some more error handling. We create them later there. SET citus.enable_create_role_propagation TO OFF; CREATE USER testrole; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. GRANT ALL ON SCHEMA public TO testrole; ERROR: role "testrole" does not exist CONTEXT: while executing command on localhost:xxxxx @@ -681,7 +681,7 @@ FROM ( FROM pg_dist_shard WHERE logicalrelid = 'rebalance_test_table'::regclass ) T; -ERROR: connection to the remote node foobar:57636 failed with the following error: could not translate host name "foobar" to address: +ERROR: connection to the remote node postgres@foobar:57636 failed with the following error: could not translate host name "foobar" to address: CALL citus_cleanup_orphaned_resources(); ALTER SYSTEM RESET citus.local_hostname; SELECT pg_reload_conf(); @@ -731,8 +731,8 @@ ERROR: target node localhost:xxxxx is not responsive \c - - - :worker_1_port SET citus.enable_create_role_propagation TO OFF; CREATE USER testrole; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. GRANT ALL ON SCHEMA public TO testrole; ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. @@ -745,8 +745,8 @@ ERROR: source node localhost:xxxxx is not responsive \c - - - :worker_2_port SET citus.enable_create_role_propagation TO OFF; CREATE USER testrole; -NOTICE: not propagating CREATE ROLE/USER commands to worker nodes -HINT: Connect to worker nodes directly to manually create all necessary users and roles. +NOTICE: not propagating CREATE ROLE/USER commands to other nodes +HINT: Connect to other nodes directly to manually create all necessary users and roles. GRANT ALL ON SCHEMA public TO testrole; ERROR: operation is not allowed on this node HINT: Connect to the coordinator and run it again. 
@@ -2184,6 +2184,27 @@ SELECT citus_add_rebalance_strategy( 0.1 ); ERROR: default_threshold cannot be smaller than minimum_threshold +SELECT citus_add_rebalance_strategy( + 'test_improvement_threshold', + 'citus_shard_cost_1', + 'capacity_high_worker_2', + 'citus_shard_allowed_on_node_true', + 0.2, + 0.1, + 0.3 + ); + citus_add_rebalance_strategy +--------------------------------------------------------------------- + +(1 row) + +SELECT * FROM pg_dist_rebalance_strategy WHERE name='test_improvement_threshold'; + name | default_strategy | shard_cost_function | node_capacity_function | shard_allowed_on_node_function | default_threshold | minimum_threshold | improvement_threshold +--------------------------------------------------------------------- + test_improvement_threshold | f | citus_shard_cost_1 | capacity_high_worker_2 | citus_shard_allowed_on_node_true | 0.2 | 0.1 | 0.3 +(1 row) + +DELETE FROM pg_catalog.pg_dist_rebalance_strategy WHERE name='test_improvement_threshold'; -- Make it a data node again SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', true); master_set_node_property @@ -2374,6 +2395,74 @@ SELECT count(*) FROM pg_dist_partition; 0 (1 row) +-- verify a system with a new node won't copy distributed table shards without reference tables +SELECT 1 from master_remove_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT public.wait_until_metadata_sync(30000); + wait_until_metadata_sync +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE r1 (a int PRIMARY KEY, b int); +SELECT create_reference_table('r1'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE d1 (a int PRIMARY KEY, b int); +SELECT create_distributed_table('d1', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +ALTER SEQUENCE pg_dist_groupid_seq RESTART WITH 15; +SELECT 1 from master_add_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +-- count the number of placements for the reference table to verify it is not available on +-- all nodes +SELECT count(*) +FROM pg_dist_shard +JOIN pg_dist_shard_placement USING (shardid) +WHERE logicalrelid = 'r1'::regclass; + count +--------------------------------------------------------------------- + 1 +(1 row) + +-- #7426 We can't move shards to the fresh node before we copy reference tables there. +-- rebalance_table_shards() will do the copy, but the low-level +-- citus_move_shard_placement() should raise an error +SELECT citus_move_shard_placement(pg_dist_shard.shardid, nodename, nodeport, 'localhost', :worker_2_port) + FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) + WHERE logicalrelid = 'd1'::regclass AND nodename = 'localhost' AND nodeport = :worker_1_port LIMIT 1; +ERROR: there are missing reference tables on some nodes +SELECT replicate_reference_tables(); + replicate_reference_tables +--------------------------------------------------------------------- + +(1 row) + +-- After replication, the move should succeed. 
+SELECT citus_move_shard_placement(pg_dist_shard.shardid, nodename, nodeport, 'localhost', :worker_2_port) + FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) + WHERE logicalrelid = 'd1'::regclass AND nodename = 'localhost' AND nodeport = :worker_1_port LIMIT 1; + citus_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +DROP TABLE d1, r1; -- verify a system having only reference tables will copy the reference tables when -- executing the rebalancer SELECT 1 from master_remove_node('localhost', :worker_2_port); diff --git a/src/test/regress/expected/single_node.out b/src/test/regress/expected/single_node.out index f485763c5be..522ffb8e804 100644 --- a/src/test/regress/expected/single_node.out +++ b/src/test/regress/expected/single_node.out @@ -88,8 +88,9 @@ SELECT create_distributed_table('failover_to_local', 'a', shard_count=>32); (1 row) CREATE INDEX CONCURRENTLY ON failover_to_local(a); -WARNING: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. - Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index. +WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. +If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, +if applicable, and then re-attempt the original command. ERROR: the total number of connections on the server is more than max_connections(100) HINT: Consider using a higher value for max_connections -- reset global GUC changes diff --git a/src/test/regress/expected/single_node_0.out b/src/test/regress/expected/single_node_0.out index 321d283f85d..12b385e9651 100644 --- a/src/test/regress/expected/single_node_0.out +++ b/src/test/regress/expected/single_node_0.out @@ -88,8 +88,9 @@ SELECT create_distributed_table('failover_to_local', 'a', shard_count=>32); (1 row) CREATE INDEX CONCURRENTLY ON failover_to_local(a); -WARNING: CONCURRENTLY-enabled index commands can fail partially, leaving behind an INVALID index. - Use DROP INDEX CONCURRENTLY IF EXISTS to remove the invalid index. +WARNING: Commands that are not transaction-safe may result in partial failure, potentially leading to an inconsistent state. +If the problematic command is a CREATE operation, consider using the 'IF EXISTS' syntax to drop the object, +if applicable, and then re-attempt the original command. 
ERROR: the total number of connections on the server is more than max_connections(100) HINT: Consider using a higher value for max_connections -- reset global GUC changes diff --git a/src/test/regress/expected/subquery_and_cte.out b/src/test/regress/expected/subquery_and_cte.out index c15e9b9d78b..89686086560 100644 --- a/src/test/regress/expected/subquery_and_cte.out +++ b/src/test/regress/expected/subquery_and_cte.out @@ -527,7 +527,7 @@ FROM ) as bar WHERE foo.user_id = bar.user_id ORDER BY 1 DESC; -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column CREATE TABLE ref_table_1 (a int); SELECT create_reference_table('ref_table_1'); create_reference_table diff --git a/src/test/regress/expected/system_queries.out b/src/test/regress/expected/system_queries.out new file mode 100644 index 00000000000..cd2aef4d26e --- /dev/null +++ b/src/test/regress/expected/system_queries.out @@ -0,0 +1,33 @@ +-- The following query retrieves the foreign key constraints of the table "pg_dist_background_job" +-- along with their details. This modification includes a fix for a null pointer exception that occurred +-- in the "HasRangeTableRef" method of "worker_shard_visibility". The issue was resolved with PR #7604. +select + ct.conname as constraint_name, + a.attname as column_name, + fc.relname as foreign_table_name, + fns.nspname as foreign_table_schema +from + (SELECT ct.conname, ct.conrelid, ct.confrelid, ct.conkey, ct.contype, +ct.confkey, generate_subscripts(ct.conkey, 1) AS s + FROM pg_constraint ct + ) AS ct + inner join pg_class c on c.oid=ct.conrelid + inner join pg_namespace ns on c.relnamespace=ns.oid + inner join pg_attribute a on a.attrelid=ct.conrelid and a.attnum = +ct.conkey[ct.s] + left join pg_class fc on fc.oid=ct.confrelid + left join pg_namespace fns on fc.relnamespace=fns.oid + left join pg_attribute fa on fa.attrelid=ct.confrelid and fa.attnum = +ct.confkey[ct.s] +where + ct.contype='f' + and fc.relname='pg_dist_background_job' + and ns.nspname='pg_catalog' +order by + fns.nspname, fc.relname, a.attnum; + constraint_name | column_name | foreign_table_name | foreign_table_schema +--------------------------------------------------------------------- + pg_dist_background_task_job_id_fkey | job_id | pg_dist_background_job | pg_catalog + pg_dist_background_task_depend_job_id_fkey | job_id | pg_dist_background_job | pg_catalog +(2 rows) + diff --git a/src/test/regress/expected/text_search.out b/src/test/regress/expected/text_search.out index b9934a1d412..6c5b387ba2b 100644 --- a/src/test/regress/expected/text_search.out +++ b/src/test/regress/expected/text_search.out @@ -374,12 +374,21 @@ SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config3'::regconfig; (2 rows) -- verify they are all removed locally -SELECT 'text_search.config1'::regconfig; -ERROR: text search configuration "text_search.config1" does not exist -SELECT 'text_search.config2'::regconfig; -ERROR: text search configuration "text_search.config2" does not exist -SELECT 'text_search.config3'::regconfig; -ERROR: text search configuration "text_search.config3" does not exist +SELECT 1 FROM pg_ts_config WHERE cfgname = 'config1' AND cfgnamespace = 'text_search'::regnamespace; + ?column? +--------------------------------------------------------------------- +(0 rows) + +SELECT 1 FROM pg_ts_config WHERE cfgname = 'config2' AND cfgnamespace = 'text_search'::regnamespace; + ?column? 
+--------------------------------------------------------------------- +(0 rows) + +SELECT 1 FROM pg_ts_config WHERE cfgname = 'config3' AND cfgnamespace = 'text_search'::regnamespace; + ?column? +--------------------------------------------------------------------- +(0 rows) + -- verify that indexes created concurrently that would propagate a TEXT SEARCH CONFIGURATION object SET citus.enable_ddl_propagation TO off; CREATE TEXT SEARCH CONFIGURATION concurrent_index_config ( PARSER = default ); @@ -434,12 +443,12 @@ $$) ORDER BY 1,2; CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_wrongly ( copy = french ); -- now we expect manually_created_wrongly(citus_backup_XXX) to show up when querying the configurations SELECT * FROM run_command_on_workers($$ - SELECT array_agg(cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_wrongly%'; + SELECT array_agg(cfgname ORDER BY cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_wrongly%'; $$) ORDER BY 1,2; nodename | nodeport | success | result --------------------------------------------------------------------- - localhost | 57637 | t | {manually_created_wrongly(citus_backup_0),manually_created_wrongly} - localhost | 57638 | t | {manually_created_wrongly(citus_backup_0),manually_created_wrongly} + localhost | 57637 | t | {manually_created_wrongly,manually_created_wrongly(citus_backup_0)} + localhost | 57638 | t | {manually_created_wrongly,manually_created_wrongly(citus_backup_0)} (2 rows) -- verify the objects get reused appropriately when the specification is the same @@ -458,7 +467,7 @@ CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_correct ( copy = f -- now we don't expect manually_created_correct(citus_backup_XXX) to show up when querying the configurations as the -- original one is reused SELECT * FROM run_command_on_workers($$ - SELECT array_agg(cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_correct%'; + SELECT array_agg(cfgname ORDER BY cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_correct%'; $$) ORDER BY 1,2; nodename | nodeport | success | result --------------------------------------------------------------------- diff --git a/src/test/regress/expected/upgrade_basic_after.out b/src/test/regress/expected/upgrade_basic_after.out index 1bfbfc989ee..7c0ebfb2909 100644 --- a/src/test/regress/expected/upgrade_basic_after.out +++ b/src/test/regress/expected/upgrade_basic_after.out @@ -9,100 +9,6 @@ SELECT * FROM pg_indexes WHERE schemaname = 'upgrade_basic' and tablename NOT LI upgrade_basic | tp | tp_pkey | | CREATE UNIQUE INDEX tp_pkey ON upgrade_basic.tp USING btree (a) (3 rows) -SELECT nextval('pg_dist_shardid_seq') > MAX(shardid) FROM pg_dist_shard; - ?column? ---------------------------------------------------------------------- - t -(1 row) - -SELECT nextval('pg_dist_placement_placementid_seq') > MAX(placementid) FROM pg_dist_placement; - ?column? ---------------------------------------------------------------------- - t -(1 row) - -SELECT nextval('pg_dist_groupid_seq') > MAX(groupid) FROM pg_dist_node; - ?column? ---------------------------------------------------------------------- - t -(1 row) - -SELECT nextval('pg_dist_node_nodeid_seq') > MAX(nodeid) FROM pg_dist_node; - ?column? ---------------------------------------------------------------------- - t -(1 row) - -SELECT nextval('pg_dist_colocationid_seq') > MAX(colocationid) FROM pg_dist_colocation; - ?column? 
---------------------------------------------------------------------- - t -(1 row) - --- while testing sequences on pg_dist_cleanup, they return null in pg upgrade schedule --- but return a valid value in citus upgrade schedule --- that's why we accept both NULL and MAX()+1 here -SELECT - CASE WHEN MAX(operation_id) IS NULL - THEN true - ELSE nextval('pg_dist_operationid_seq') > MAX(operation_id) - END AS check_operationid - FROM pg_dist_cleanup; - check_operationid ---------------------------------------------------------------------- - t -(1 row) - -SELECT - CASE WHEN MAX(record_id) IS NULL - THEN true - ELSE nextval('pg_dist_cleanup_recordid_seq') > MAX(record_id) - END AS check_recordid - FROM pg_dist_cleanup; - check_recordid ---------------------------------------------------------------------- - t -(1 row) - -SELECT nextval('pg_dist_background_job_job_id_seq') > COALESCE(MAX(job_id), 0) FROM pg_dist_background_job; - ?column? ---------------------------------------------------------------------- - t -(1 row) - -SELECT nextval('pg_dist_background_task_task_id_seq') > COALESCE(MAX(task_id), 0) FROM pg_dist_background_task; - ?column? ---------------------------------------------------------------------- - t -(1 row) - -SELECT last_value > 0 FROM pg_dist_clock_logical_seq; - ?column? ---------------------------------------------------------------------- - t -(1 row) - --- If this query gives output it means we've added a new sequence that should --- possibly be restored after upgrades. -SELECT sequence_name FROM information_schema.sequences - WHERE sequence_name LIKE 'pg_dist_%' - AND sequence_name NOT IN ( - -- these ones are restored above - 'pg_dist_shardid_seq', - 'pg_dist_placement_placementid_seq', - 'pg_dist_groupid_seq', - 'pg_dist_node_nodeid_seq', - 'pg_dist_colocationid_seq', - 'pg_dist_operationid_seq', - 'pg_dist_cleanup_recordid_seq', - 'pg_dist_background_job_job_id_seq', - 'pg_dist_background_task_task_id_seq', - 'pg_dist_clock_logical_seq' - ); - sequence_name ---------------------------------------------------------------------- -(0 rows) - SELECT logicalrelid FROM pg_dist_partition JOIN pg_depend ON logicalrelid=objid JOIN pg_catalog.pg_class ON logicalrelid=oid diff --git a/src/test/regress/expected/upgrade_basic_after_non_mixed.out b/src/test/regress/expected/upgrade_basic_after_non_mixed.out new file mode 100644 index 00000000000..8dbc13babf8 --- /dev/null +++ b/src/test/regress/expected/upgrade_basic_after_non_mixed.out @@ -0,0 +1,94 @@ +SELECT nextval('pg_dist_shardid_seq') > MAX(shardid) FROM pg_dist_shard; + ?column? +--------------------------------------------------------------------- + t +(1 row) + +SELECT nextval('pg_dist_placement_placementid_seq') > MAX(placementid) FROM pg_dist_placement; + ?column? +--------------------------------------------------------------------- + t +(1 row) + +SELECT nextval('pg_dist_groupid_seq') > MAX(groupid) FROM pg_dist_node; + ?column? +--------------------------------------------------------------------- + t +(1 row) + +SELECT nextval('pg_dist_node_nodeid_seq') > MAX(nodeid) FROM pg_dist_node; + ?column? +--------------------------------------------------------------------- + t +(1 row) + +SELECT nextval('pg_dist_colocationid_seq') > MAX(colocationid) FROM pg_dist_colocation; + ?column? 
+--------------------------------------------------------------------- + t +(1 row) + +-- while testing sequences on pg_dist_cleanup, they return null in pg upgrade schedule +-- but return a valid value in citus upgrade schedule +-- that's why we accept both NULL and MAX()+1 here +SELECT + CASE WHEN MAX(operation_id) IS NULL + THEN true + ELSE nextval('pg_dist_operationid_seq') > MAX(operation_id) + END AS check_operationid + FROM pg_dist_cleanup; + check_operationid +--------------------------------------------------------------------- + t +(1 row) + +SELECT + CASE WHEN MAX(record_id) IS NULL + THEN true + ELSE nextval('pg_dist_cleanup_recordid_seq') > MAX(record_id) + END AS check_recordid + FROM pg_dist_cleanup; + check_recordid +--------------------------------------------------------------------- + t +(1 row) + +SELECT nextval('pg_dist_background_job_job_id_seq') > COALESCE(MAX(job_id), 0) FROM pg_dist_background_job; + ?column? +--------------------------------------------------------------------- + t +(1 row) + +SELECT nextval('pg_dist_background_task_task_id_seq') > COALESCE(MAX(task_id), 0) FROM pg_dist_background_task; + ?column? +--------------------------------------------------------------------- + t +(1 row) + +SELECT last_value > 0 FROM pg_dist_clock_logical_seq; + ?column? +--------------------------------------------------------------------- + t +(1 row) + +-- If this query gives output it means we've added a new sequence that should +-- possibly be restored after upgrades. +SELECT sequence_name FROM information_schema.sequences + WHERE sequence_name LIKE 'pg_dist_%' + AND sequence_name NOT IN ( + -- these ones are restored above + 'pg_dist_shardid_seq', + 'pg_dist_placement_placementid_seq', + 'pg_dist_groupid_seq', + 'pg_dist_node_nodeid_seq', + 'pg_dist_colocationid_seq', + 'pg_dist_operationid_seq', + 'pg_dist_cleanup_recordid_seq', + 'pg_dist_background_job_job_id_seq', + 'pg_dist_background_task_task_id_seq', + 'pg_dist_clock_logical_seq' + ); + sequence_name +--------------------------------------------------------------------- +(0 rows) + diff --git a/src/test/regress/expected/upgrade_basic_before_non_mixed.out b/src/test/regress/expected/upgrade_basic_before_non_mixed.out new file mode 100644 index 00000000000..e69de29bb2d diff --git a/src/test/regress/expected/upgrade_list_citus_objects.out b/src/test/regress/expected/upgrade_list_citus_objects.out index 36bd504e88d..ca31b222bb1 100644 --- a/src/test/regress/expected/upgrade_list_citus_objects.out +++ b/src/test/regress/expected/upgrade_list_citus_objects.out @@ -56,13 +56,41 @@ ORDER BY 1; function citus_get_active_worker_nodes() function citus_get_node_clock() function citus_get_transaction_clock() + function citus_internal.acquire_citus_advisory_object_class_lock(integer,cstring) + function citus_internal.add_colocation_metadata(integer,integer,integer,regtype,oid) + function citus_internal.add_object_metadata(text,text[],text[],integer,integer,boolean) + function citus_internal.add_partition_metadata(regclass,"char",text,integer,"char") + function citus_internal.add_placement_metadata(bigint,bigint,integer,bigint) + function citus_internal.add_shard_metadata(regclass,bigint,"char",text,text) + function citus_internal.add_tenant_schema(oid,integer) + function citus_internal.adjust_local_clock_to_remote(cluster_clock) + function citus_internal.commit_management_command_2pc() + function citus_internal.database_command(text) + function citus_internal.delete_colocation_metadata(integer) + function 
citus_internal.delete_partition_metadata(regclass) + function citus_internal.delete_placement_metadata(bigint) + function citus_internal.delete_shard_metadata(bigint) + function citus_internal.delete_tenant_schema(oid) + function citus_internal.execute_command_on_remote_nodes_as_user(text,text) function citus_internal.find_groupid_for_node(text,integer) + function citus_internal.global_blocked_processes() + function citus_internal.is_replication_origin_tracking_active() + function citus_internal.local_blocked_processes() + function citus_internal.mark_node_not_synced(integer,integer) + function citus_internal.mark_object_distributed(oid,text,oid,text) function citus_internal.pg_dist_node_trigger_func() function citus_internal.pg_dist_rebalance_strategy_trigger_func() function citus_internal.pg_dist_shard_placement_trigger_func() function citus_internal.refresh_isolation_tester_prepared_statement() function citus_internal.replace_isolation_tester_func() function citus_internal.restore_isolation_tester_func() + function citus_internal.start_management_transaction(xid8) + function citus_internal.start_replication_origin_tracking() + function citus_internal.stop_replication_origin_tracking() + function citus_internal.unregister_tenant_schema_globally(oid,text) + function citus_internal.update_none_dist_table_metadata(oid,"char",bigint,boolean) + function citus_internal.update_placement_metadata(bigint,integer,integer) + function citus_internal.update_relation_colocation(oid,integer) function citus_internal_add_colocation_metadata(integer,integer,integer,regtype,oid) function citus_internal_add_object_metadata(text,text[],text[],integer,integer,boolean) function citus_internal_add_partition_metadata(regclass,"char",text,integer,"char") @@ -146,7 +174,7 @@ ORDER BY 1; function citus_text_send_as_jsonb(text) function citus_total_relation_size(regclass,boolean) function citus_truncate_trigger() - function citus_unmark_object_distributed(oid,oid,integer) + function citus_unmark_object_distributed(oid,oid,integer,boolean) function citus_update_node(integer,text,integer,boolean,integer) function citus_update_shard_statistics(bigint) function citus_update_table_statistics(regclass) @@ -343,5 +371,5 @@ ORDER BY 1; view citus_stat_tenants_local view pg_dist_shard_placement view time_partitions -(333 rows) +(361 rows) diff --git a/src/test/regress/expected/upgrade_post_11_after.out b/src/test/regress/expected/upgrade_post_11_after.out index 422bc846fd7..49bd204324b 100644 --- a/src/test/regress/expected/upgrade_post_11_after.out +++ b/src/test/regress/expected/upgrade_post_11_after.out @@ -67,6 +67,20 @@ SELECT 1 FROM run_command_on_workers($$SELECT pg_reload_conf()$$); 1 (2 rows) +-- In the version that we use for upgrade tests (v10.2.0), we propagate +-- "valid until" to the workers as "infinity" even if it's not set. And +-- given that "postgres" role is created in the older version, "valid until" +-- is set to "infinity" on the workers while this is not the case for +-- coordinator. See https://github.com/citusdata/citus/issues/7533. +-- +-- We're fixing this for new versions of Citus and we'll probably backport +-- this to some older versions too. However, v10.2.0 won't ever have this +-- fix. +-- +-- For this reason, here we set "valid until" to "infinity" for all the +-- nodes so that below query doesn't report any difference between the +-- metadata on coordinator and workers. 
+ALTER ROLE postgres WITH VALID UNTIL 'infinity'; -- make sure that the metadata is consistent across all nodes -- we exclude the distributed_object_data as they are -- not sorted in the same order (as OIDs differ on the nodes) diff --git a/src/test/regress/expected/upgrade_rebalance_strategy_after.out b/src/test/regress/expected/upgrade_rebalance_strategy_after.out index da822fffd74..c7ea5cc4e0b 100644 --- a/src/test/regress/expected/upgrade_rebalance_strategy_after.out +++ b/src/test/regress/expected/upgrade_rebalance_strategy_after.out @@ -1,8 +1,9 @@ SELECT * FROM pg_catalog.pg_dist_rebalance_strategy ORDER BY name; - name | default_strategy | shard_cost_function | node_capacity_function | shard_allowed_on_node_function | default_threshold | minimum_threshold | improvement_threshold + name | default_strategy | shard_cost_function | node_capacity_function | shard_allowed_on_node_function | default_threshold | minimum_threshold | improvement_threshold --------------------------------------------------------------------- - by_disk_size | f | citus_shard_cost_by_disk_size | citus_node_capacity_1 | citus_shard_allowed_on_node_true | 0.1 | 0.01 | 0.5 - by_shard_count | f | citus_shard_cost_1 | citus_node_capacity_1 | citus_shard_allowed_on_node_true | 0 | 0 | 0 - custom_strategy | t | upgrade_rebalance_strategy.shard_cost_2 | upgrade_rebalance_strategy.capacity_high_worker_1 | upgrade_rebalance_strategy.only_worker_2 | 0.5 | 0.2 | 0 -(3 rows) + by_disk_size | f | citus_shard_cost_by_disk_size | citus_node_capacity_1 | citus_shard_allowed_on_node_true | 0.1 | 0.01 | 0.5 + by_shard_count | f | citus_shard_cost_1 | citus_node_capacity_1 | citus_shard_allowed_on_node_true | 0 | 0 | 0 + custom_strategy | t | upgrade_rebalance_strategy.shard_cost_2 | upgrade_rebalance_strategy.capacity_high_worker_1 | upgrade_rebalance_strategy.only_worker_2 | 0.5 | 0.2 | 0.3 + invalid_strategy | f | 1234567 | upgrade_rebalance_strategy.capacity_high_worker_1 | upgrade_rebalance_strategy.only_worker_2 | 0.5 | 0.2 | 0.3 +(4 rows) diff --git a/src/test/regress/expected/upgrade_rebalance_strategy_before.out b/src/test/regress/expected/upgrade_rebalance_strategy_before.out index cf1d122b3cd..85b458389c1 100644 --- a/src/test/regress/expected/upgrade_rebalance_strategy_before.out +++ b/src/test/regress/expected/upgrade_rebalance_strategy_before.out @@ -35,3 +35,23 @@ SELECT citus_set_default_rebalance_strategy('custom_strategy'); (1 row) +-- Disable the trigger temporarily to allow the invalid strategy to be added. +-- Normally an invalid strategy can end up in the table by deleting one of the +-- functions it depends on. But we do directly in this test because we want to +-- have a consistent OID, so we get consistent test output. 
+ALTER TABLE pg_catalog.pg_dist_rebalance_strategy DISABLE TRIGGER pg_dist_rebalance_strategy_validation_trigger; +SELECT citus_add_rebalance_strategy( + 'invalid_strategy', + 1234567, + 'capacity_high_worker_1', + 'only_worker_2', + 0.5, + 0.2, + 0.3 + ); + citus_add_rebalance_strategy +--------------------------------------------------------------------- + +(1 row) + +ALTER TABLE pg_catalog.pg_dist_rebalance_strategy ENABLE TRIGGER pg_dist_rebalance_strategy_validation_trigger; diff --git a/src/test/regress/expected/validate_constraint.out b/src/test/regress/expected/validate_constraint.out index 08b03a2bfb2..b38e835fd99 100644 --- a/src/test/regress/expected/validate_constraint.out +++ b/src/test/regress/expected/validate_constraint.out @@ -133,12 +133,6 @@ ORDER BY 1, 2; validatable_constraint_8000016 | t (10 rows) -DROP TABLE constrained_table; -DROP TABLE referenced_table CASCADE; -DROP TABLE referencing_table; +SET client_min_messages TO WARNING; DROP SCHEMA validate_constraint CASCADE; -NOTICE: drop cascades to 3 other objects -DETAIL: drop cascades to type constraint_validity -drop cascades to view constraint_validations_in_workers -drop cascades to view constraint_validations SET search_path TO DEFAULT; diff --git a/src/test/regress/expected/with_basics.out b/src/test/regress/expected/with_basics.out index 4eefb883755..78ed17317da 100644 --- a/src/test/regress/expected/with_basics.out +++ b/src/test/regress/expected/with_basics.out @@ -664,14 +664,14 @@ WITH RECURSIVE basic_recursive(x) AS ( SELECT user_id + 1 FROM users_table JOIN basic_recursive ON (user_id = x) WHERE user_id < 100 ) SELECT sum(x) FROM basic_recursive; -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column WITH RECURSIVE basic_recursive AS ( SELECT -1 as user_id, '2017-11-22 20:16:16.614779'::timestamp, -1, -1, -1, -1 UNION ALL SELECT basic_recursive.* FROM users_table JOIN basic_recursive USING (user_id) WHERE user_id>1 ) SELECT * FROM basic_recursive ORDER BY user_id LIMIT 1; -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column -- basic_recursive in FROM should error out SELECT * @@ -682,7 +682,7 @@ FROM SELECT basic_recursive.* FROM users_table JOIN basic_recursive USING (user_id) WHERE user_id>1 ) SELECT * FROM basic_recursive ORDER BY user_id LIMIT 1) cte_rec; -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column -- basic_recursive in WHERE with UNION ALL SELECT * @@ -696,7 +696,7 @@ WHERE SELECT basic_recursive.* FROM users_table JOIN basic_recursive USING (user_id) WHERE user_id>1 ) SELECT * FROM basic_recursive ORDER BY user_id LIMIT 1); -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column -- one recursive one regular CTE should error out WITH RECURSIVE basic_recursive(x) AS( VALUES (1) @@ -707,7 +707,7 @@ basic AS ( SELECT count(user_id) FROM users_table ) SELECT x FROM basic, basic_recursive; -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column -- one recursive one regular which SELECTs from the recursive CTE under a simple SELECT WITH RECURSIVE basic_recursive(x) AS( VALUES (1) 
@@ -718,7 +718,7 @@ basic AS ( SELECT count(x) FROM basic_recursive ) SELECT * FROM basic; -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column -- recursive CTE in a NESTED manner WITH regular_cte AS ( WITH regular_2 AS ( @@ -732,7 +732,7 @@ WITH regular_cte AS ( SELECT * FROM regular_2 ) SELECT * FROM regular_cte; -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column -- CTEs should work with VIEWs as well CREATE VIEW basic_view AS SELECT * FROM users_table; diff --git a/src/test/regress/expected/worker_split_binary_copy_test.out b/src/test/regress/expected/worker_split_binary_copy_test.out index f23dc2043f0..e161b7f67b0 100644 --- a/src/test/regress/expected/worker_split_binary_copy_test.out +++ b/src/test/regress/expected/worker_split_binary_copy_test.out @@ -3,43 +3,6 @@ SET search_path TO worker_split_binary_copy_test; SET citus.shard_count TO 1; SET citus.shard_replication_factor TO 1; SET citus.next_shard_id TO 81060000; --- Remove extra nodes added, otherwise GetLocalNodeId() does not bahave correctly. -SELECT citus_remove_node('localhost', 8887); - citus_remove_node ---------------------------------------------------------------------- - -(1 row) - -SELECT citus_remove_node('localhost', 9995); - citus_remove_node ---------------------------------------------------------------------- - -(1 row) - -SELECT citus_remove_node('localhost', 9992); - citus_remove_node ---------------------------------------------------------------------- - -(1 row) - -SELECT citus_remove_node('localhost', 9998); - citus_remove_node ---------------------------------------------------------------------- - -(1 row) - -SELECT citus_remove_node('localhost', 9997); - citus_remove_node ---------------------------------------------------------------------- - -(1 row) - -SELECT citus_remove_node('localhost', 8888); - citus_remove_node ---------------------------------------------------------------------- - -(1 row) - -- BEGIN: Create distributed table and insert data. 
CREATE TABLE worker_split_binary_copy_test.shard_to_split_copy ( l_orderkey bigint not null, diff --git a/src/test/regress/failure_schedule b/src/test/regress/failure_schedule index afc4780bf2b..8b992422ef2 100644 --- a/src/test/regress/failure_schedule +++ b/src/test/regress/failure_schedule @@ -34,6 +34,8 @@ test: failure_multi_row_insert test: failure_mx_metadata_sync test: failure_mx_metadata_sync_multi_trans test: failure_connection_establishment +test: failure_non_main_db_2pc +test: failure_create_database # this test syncs metadata to the workers test: failure_failover_to_local_execution diff --git a/src/test/regress/isolation_schedule b/src/test/regress/isolation_schedule index d8cc77c73f3..1b0f1427a42 100644 --- a/src/test/regress/isolation_schedule +++ b/src/test/regress/isolation_schedule @@ -77,6 +77,7 @@ test: isolation_global_pid test: isolation_citus_locks test: isolation_reference_table test: isolation_schema_based_sharding +test: isolation_database_cmd_from_any_node test: isolation_citus_pause_node test: isolation_citus_schema_distribute_undistribute diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index 4dead5be390..015f7497316 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -27,12 +27,20 @@ test: multi_cluster_management test: non_super_user_object_metadata test: propagate_foreign_servers test: alter_role_propagation +test: role_command_from_any_node test: propagate_extension_commands test: escape_extension_name test: ref_citus_local_fkeys test: alter_database_owner +test: seclabel test: distributed_triggers test: create_single_shard_table +test: create_drop_database_propagation +test: create_drop_database_propagation_pg15 +test: create_drop_database_propagation_pg16 +test: comment_on_database +test: comment_on_role +test: metadata_sync_from_non_maindb # don't parallelize single_shard_table_udfs to make sure colocation ids are sequential test: single_shard_table_udfs test: schema_based_sharding @@ -50,9 +58,12 @@ test: multi_metadata_attributes test: multi_read_from_secondaries -test: grant_on_database_propagation +test: grant_on_database_propagation grant_on_database_propagation_from_non_maindb test: alter_database_propagation +test: citus_shards +test: reassign_owned + # ---------- # multi_citus_tools tests utility functions written for citus tools # ---------- @@ -160,7 +171,8 @@ test: with_executors with_join with_partitioning with_transactions with_dml # Tests around DDL statements run on distributed tables # ---------- test: multi_index_statements -test: multi_alter_table_statements alter_table_add_column +test: multi_alter_table_statements +test: alter_table_add_column test: multi_alter_table_add_constraints test: multi_alter_table_add_constraints_without_name test: multi_alter_table_add_foreign_key_without_name @@ -199,7 +211,8 @@ test: citus_copy_shard_placement # multi_utilities cannot be run in parallel with other tests because it checks # global locks test: multi_utilities -test: foreign_key_to_reference_table validate_constraint +test: foreign_key_to_reference_table +test: validate_constraint test: multi_repartition_udt multi_repartitioned_subquery_udf multi_subtransactions test: multi_modifying_xacts @@ -290,12 +303,14 @@ test: multi_foreign_key_relation_graph # Replicating reference tables to coordinator. Add coordinator to pg_dist_node # and rerun some of the tests. 
# -------- +test: remove_coordinator_from_metadata test: add_coordinator test: replicate_reference_tables_to_coordinator test: citus_local_tables test: mixed_relkind_tests test: multi_row_router_insert create_distributed_table_concurrently -test: multi_reference_table citus_local_tables_queries +test: multi_reference_table +test: citus_local_tables_queries test: citus_local_table_triggers test: coordinator_shouldhaveshards test: local_shard_utility_command_execution diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule index 65a27256687..7f0c7ca57f6 100644 --- a/src/test/regress/multi_schedule +++ b/src/test/regress/multi_schedule @@ -1,6 +1,7 @@ test: multi_test_helpers multi_test_helpers_superuser test: multi_cluster_management test: create_role_propagation +test: pg16 test: multi_create_fdw test: multi_test_catalog_views test: replicated_table_disable_node @@ -65,7 +66,6 @@ test: pg13 pg12 test: pg14 test: pg15 test: pg15_jsonpath detect_conn_close -test: pg16 test: drop_column_partitioned_table test: tableam @@ -79,11 +79,12 @@ test: multi_basic_queries cross_join multi_complex_expressions multi_subquery mu test: multi_subquery_complex_reference_clause multi_subquery_window_functions multi_view multi_sql_function multi_prepare_sql test: sql_procedure multi_function_in_join row_types materialized_view test: multi_subquery_in_where_reference_clause adaptive_executor propagate_set_commands geqo -test: forcedelegation_functions +test: forcedelegation_functions system_queries # this should be run alone as it gets too many clients test: join_pushdown test: multi_subquery_union multi_subquery_in_where_clause multi_subquery_misc statement_cancel_error_message -test: multi_agg_distinct multi_limit_clause_approximate multi_outer_join_reference multi_single_relation_subquery multi_prepare_plsql set_role_in_transaction +test: multi_agg_distinct +test: multi_limit_clause_approximate multi_outer_join_reference multi_single_relation_subquery multi_prepare_plsql set_role_in_transaction test: multi_reference_table multi_select_for_update relation_access_tracking pg13_with_ties test: custom_aggregate_support aggregate_support tdigest_aggregate_support test: multi_average_expression multi_working_columns multi_having_pushdown having_subquery @@ -102,17 +103,21 @@ test: multi_dropped_column_aliases foreign_key_restriction_enforcement test: binary_protocol test: alter_table_set_access_method test: alter_distributed_table -test: issue_5248 issue_5099 issue_5763 issue_6543 issue_6758 +test: issue_5248 issue_5099 issue_5763 issue_6543 issue_6758 issue_7477 test: object_propagation_debug test: undistribute_table test: run_command_on_all_nodes test: background_task_queue_monitor +test: other_databases grant_role_from_non_maindb role_operations_from_non_maindb seclabel_non_maindb +test: citus_internal_access +test: function_with_case_when # Causal clock test test: clock # MERGE tests -test: merge pgmerge merge_repartition2 +test: merge pgmerge +test: merge_repartition2 test: merge_repartition1 merge_schema_sharding test: merge_partition_tables diff --git a/src/test/regress/multi_schedule_hyperscale b/src/test/regress/multi_schedule_hyperscale index 8849e81f2af..86ac16d4f2f 100644 --- a/src/test/regress/multi_schedule_hyperscale +++ b/src/test/regress/multi_schedule_hyperscale @@ -154,7 +154,8 @@ test: multi_outer_join # --- test: multi_complex_count_distinct test: multi_upsert multi_simple_queries -test: foreign_key_to_reference_table validate_constraint +test: 
foreign_key_to_reference_table +test: validate_constraint # --------- # creates hash and range-partitioned tables and performs COPY diff --git a/src/test/regress/multi_schedule_hyperscale_superuser b/src/test/regress/multi_schedule_hyperscale_superuser index 052b937865c..f5cddfc05fe 100644 --- a/src/test/regress/multi_schedule_hyperscale_superuser +++ b/src/test/regress/multi_schedule_hyperscale_superuser @@ -150,7 +150,9 @@ test: multi_outer_join test: multi_create_fdw test: multi_generate_ddl_commands multi_create_shards multi_prune_shard_list test: multi_upsert multi_simple_queries multi_data_types -test: multi_utilities foreign_key_to_reference_table validate_constraint +test: multi_utilities +test: foreign_key_to_reference_table +test: validate_constraint test: multi_repartition_udt multi_repartitioned_subquery_udf # --------- diff --git a/src/test/regress/pg_regress_multi.pl b/src/test/regress/pg_regress_multi.pl index 4cc022198c4..35671ad265b 100755 --- a/src/test/regress/pg_regress_multi.pl +++ b/src/test/regress/pg_regress_multi.pl @@ -90,7 +90,6 @@ () my $serversAreShutdown = "TRUE"; my $usingWindows = 0; my $mitmPid = 0; -my $workerCount = 2; if ($Config{osname} eq "MSWin32") { @@ -297,10 +296,12 @@ sub generate_hba open(my $fh, ">", catfile($TMP_CHECKDIR, $nodename, "data", "pg_hba.conf")) or die "could not open pg_hba.conf"; - print $fh "host all alice,bob localhost md5\n"; + print $fh "host all alice,bob 127.0.0.1/32 md5\n"; + print $fh "host all alice,bob ::1/128 md5\n"; print $fh "host all all 127.0.0.1/32 trust\n"; print $fh "host all all ::1/128 trust\n"; - print $fh "host replication postgres localhost trust\n"; + print $fh "host replication postgres 127.0.0.1/32 trust\n"; + print $fh "host replication postgres ::1/128 trust\n"; close $fh; } @@ -491,6 +492,8 @@ sub generate_hba push(@pgOptions, "citus.enable_change_data_capture=on"); push(@pgOptions, "citus.stat_tenants_limit = 2"); push(@pgOptions, "citus.stat_tenants_track = 'ALL'"); +push(@pgOptions, "citus.main_db = 'regression'"); +push(@pgOptions, "citus.superuser = 'postgres'"); # Some tests look at shards in pg_class, make sure we can usually see them: push(@pgOptions, "citus.show_shards_for_app_name_prefixes='pg_regress'"); @@ -510,6 +513,12 @@ sub generate_hba # we disable some restrictions for local objects like local views to not break postgres vanilla test behaviour. push(@pgOptions, "citus.enforce_object_restrictions_for_local_objects=false"); } +else +{ + # We currently need this config for isolation tests and security label tests + # this option loads a security label provider, which we don't want in vanilla tests + push(@pgOptions, "citus.running_under_citus_test_suite=true"); +} if ($useMitmproxy) { @@ -560,7 +569,6 @@ sub generate_hba push(@pgOptions, "citus.metadata_sync_interval=1000"); push(@pgOptions, "citus.metadata_sync_retry_interval=100"); push(@pgOptions, "client_min_messages='warning'"); # pg12 introduced notice showing during isolation tests - push(@pgOptions, "citus.running_under_isolation_test=true"); # Disable all features of the maintenance daemon. Otherwise queries might # randomly show temporarily as "waiting..." 
because they are waiting for the @@ -630,7 +638,7 @@ sub generate_hba } } -for my $tablespace ("ts0", "ts1", "ts2") +for my $tablespace ("ts0", "ts1", "ts2", "ts3", "ts4", "ts5") { if (-e catfile($TMP_CHECKDIR, $tablespace)) { @@ -1120,16 +1128,33 @@ sub RunVanillaTests system("mkdir", ("-p", "$pgregressOutputdir/sql")) == 0 or die "Could not create vanilla sql dir."; - $exitcode = system("$plainRegress", - ("--dlpath", $dlpath), - ("--inputdir", $pgregressInputdir), - ("--outputdir", $pgregressOutputdir), - ("--schedule", catfile("$pgregressInputdir", "parallel_schedule")), - ("--use-existing"), - ("--host","$host"), - ("--port","$masterPort"), - ("--user","$user"), - ("--dbname", "$dbName")); + if ($majorversion >= "16") + { + $exitcode = system("$plainRegress", + ("--dlpath", $dlpath), + ("--inputdir", $pgregressInputdir), + ("--outputdir", $pgregressOutputdir), + ("--expecteddir", $pgregressOutputdir), + ("--schedule", catfile("$pgregressInputdir", "parallel_schedule")), + ("--use-existing"), + ("--host","$host"), + ("--port","$masterPort"), + ("--user","$user"), + ("--dbname", "$dbName")); + } + else + { + $exitcode = system("$plainRegress", + ("--dlpath", $dlpath), + ("--inputdir", $pgregressInputdir), + ("--outputdir", $pgregressOutputdir), + ("--schedule", catfile("$pgregressInputdir", "parallel_schedule")), + ("--use-existing"), + ("--host","$host"), + ("--port","$masterPort"), + ("--user","$user"), + ("--dbname", "$dbName")); + } } if ($useMitmproxy) { diff --git a/src/test/regress/spec/isolation_database_cmd_from_any_node.spec b/src/test/regress/spec/isolation_database_cmd_from_any_node.spec new file mode 100644 index 00000000000..8637a8942b6 --- /dev/null +++ b/src/test/regress/spec/isolation_database_cmd_from_any_node.spec @@ -0,0 +1,106 @@ +setup +{ + -- OCLASS for database changed in PG 16 from 25 to 26 + SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS value INTO oclass_database; + + SELECT 1 FROM citus_add_node('localhost', 57636, 0); +} + +teardown +{ + DROP TABLE IF EXISTS oclass_database; + + select 1 from citus_remove_node('localhost', 57636); +} + +session "s1" + +setup { SET citus.enable_create_database_propagation TO ON; } + +step "s1-begin" { BEGIN; } +step "s1-commit" { COMMIT; } +step "s1-rollback" { ROLLBACK; } + +step "s1-create-user-dbuser" { CREATE USER dbuser; } +step "s1-drop-user-dbuser" { DROP USER dbuser; } + +step "s1-acquire-citus-adv-oclass-lock" { SELECT citus_internal.acquire_citus_advisory_object_class_lock(value, NULL) FROM oclass_database; } +step "s1-acquire-citus-adv-oclass-lock-with-oid-testdb1" { SELECT citus_internal.acquire_citus_advisory_object_class_lock(value, 'testdb1') FROM oclass_database; } + +step "s1-create-testdb1" { CREATE DATABASE testdb1; } +step "s1-drop-testdb1" { DROP DATABASE testdb1; } +step "s1-alter-testdb1-rename-to-db1" { ALTER DATABASE testdb1 RENAME TO db1; } +step "s1-grant-on-testdb1-to-dbuser" { GRANT ALL ON DATABASE testdb1 TO dbuser;} + +step "s1-drop-testdb2" { DROP DATABASE testdb2; } +step "s1-grant-on-testdb2-to-dbuser" { GRANT ALL ON DATABASE testdb2 TO dbuser;} + +step "s1-create-db1" { CREATE DATABASE db1; } +step "s1-drop-db1" { DROP DATABASE db1; } + +session "s2" + +setup { SET citus.enable_create_database_propagation TO ON; } + +step "s2-begin" { BEGIN; } +step "s2-commit" { COMMIT; } +step "s2-rollback" { ROLLBACK; } + +step "s2-acquire-citus-adv-oclass-lock" { SELECT citus_internal.acquire_citus_advisory_object_class_lock(value, NULL) FROM oclass_database; } +step 
"s2-acquire-citus-adv-oclass-lock-with-oid-testdb1" { SELECT citus_internal.acquire_citus_advisory_object_class_lock(value, 'testdb1') FROM oclass_database; } +step "s2-acquire-citus-adv-oclass-lock-with-oid-testdb2" { SELECT citus_internal.acquire_citus_advisory_object_class_lock(value, 'testdb2') FROM oclass_database; } + +step "s2-alter-testdb1-rename-to-db1" { ALTER DATABASE testdb1 RENAME TO db1; } + +step "s2-create-testdb2" { CREATE DATABASE testdb2; } +step "s2-drop-testdb2" { DROP DATABASE testdb2; } +step "s2-alter-testdb2-rename-to-db1" { ALTER DATABASE testdb2 RENAME TO db1; } +step "s2-alter-testdb2-rename-to-db2" { ALTER DATABASE testdb2 RENAME TO db2; } +step "s2-alter-testdb2-set-lc_monetary" { ALTER DATABASE testdb2 SET lc_monetary TO 'C'; } + +step "s2-drop-db1" { DROP DATABASE db1; } + +step "s2-drop-db2" { DROP DATABASE db2; } + +// Given that we cannot execute CREATE / DROP DATABASE commands in a transaction block, we instead acquire the +// underlying advisory lock in some of below tests. + +// e.g., CREATE DATABASE vs CREATE DATABASE +permutation "s1-begin" "s2-begin" "s1-acquire-citus-adv-oclass-lock" "s2-acquire-citus-adv-oclass-lock" "s1-commit" "s2-commit" + +// e.g., DROP DATABASE vs DROP DATABASE +// dropping the same database +permutation "s1-create-testdb1" "s1-begin" "s2-begin" "s1-acquire-citus-adv-oclass-lock-with-oid-testdb1" "s2-acquire-citus-adv-oclass-lock-with-oid-testdb1" "s1-commit" "s2-commit" "s1-drop-testdb1" +// dropping a different database +permutation "s1-create-testdb1" "s2-create-testdb2" "s1-begin" "s2-begin" "s1-acquire-citus-adv-oclass-lock-with-oid-testdb1" "s2-acquire-citus-adv-oclass-lock-with-oid-testdb2" "s1-commit" "s2-commit" "s1-drop-testdb1" "s2-drop-testdb2" + +// CREATE DATABASE vs DROP DATABASE +permutation "s2-create-testdb2" "s1-begin" "s2-begin" "s1-acquire-citus-adv-oclass-lock" "s2-acquire-citus-adv-oclass-lock-with-oid-testdb2" "s1-commit" "s2-commit" "s2-drop-testdb2" + +// CREATE DATABASE vs ALTER DATABASE SET +permutation "s2-create-testdb2" "s2-begin" "s2-alter-testdb2-set-lc_monetary" "s1-create-db1" "s2-rollback" "s2-drop-testdb2" "s1-drop-db1" + +// GRANT .. ON DATABASE .. TO ... vs ALTER DATABASE SET +// on the same database +permutation "s2-create-testdb2" "s2-begin" "s2-alter-testdb2-set-lc_monetary" "s1-create-user-dbuser" "s1-grant-on-testdb2-to-dbuser" "s2-rollback" "s2-drop-testdb2" "s1-drop-user-dbuser" +// on a different database +permutation "s2-create-testdb2" "s2-begin" "s2-alter-testdb2-set-lc_monetary" "s1-create-testdb1" "s1-create-user-dbuser" "s1-grant-on-testdb1-to-dbuser" "s2-rollback" "s2-drop-testdb2" "s1-drop-testdb1" "s1-drop-user-dbuser" + +// ALTER DATABASE .. RENAME TO .. vs ALTER DATABASE .. RENAME TO .. 
+// try to rename different databases to the same name +permutation "s1-create-testdb1" "s2-create-testdb2" "s1-begin" "s2-begin" "s1-alter-testdb1-rename-to-db1" "s2-alter-testdb2-rename-to-db1" "s1-commit" "s2-rollback" "s1-drop-db1" "s2-drop-testdb2" +permutation "s1-create-testdb1" "s2-create-testdb2" "s1-begin" "s2-begin" "s1-alter-testdb1-rename-to-db1" "s2-alter-testdb2-rename-to-db1" "s1-rollback" "s2-commit" "s1-drop-testdb1" "s2-drop-db1" +// try to rename same database +permutation "s1-create-testdb1" "s1-begin" "s2-begin" "s1-alter-testdb1-rename-to-db1" "s2-alter-testdb1-rename-to-db1" "s1-commit" "s2-rollback" "s1-drop-db1" +permutation "s1-create-testdb1" "s1-begin" "s2-begin" "s1-alter-testdb1-rename-to-db1" "s2-alter-testdb1-rename-to-db1" "s1-rollback" "s2-commit" "s2-drop-db1" + +// CREATE DATABASE vs ALTER DATABASE .. RENAME TO .. +permutation "s2-create-testdb2" "s2-begin" "s2-alter-testdb2-rename-to-db1" "s1-create-db1" "s2-rollback" "s2-drop-testdb2" "s1-drop-db1" +permutation "s2-create-testdb2" "s2-begin" "s2-alter-testdb2-rename-to-db1" "s1-create-db1" "s2-commit" "s2-drop-db1" +permutation "s2-create-testdb2" "s2-begin" "s2-alter-testdb2-rename-to-db2" "s1-create-db1" "s2-commit" "s2-drop-db2" "s1-drop-db1" + +// DROP DATABASE vs ALTER DATABASE .. RENAME TO .. +// try to rename the same database +permutation "s2-create-testdb2" "s2-begin" "s2-alter-testdb2-rename-to-db1" "s1-drop-testdb2" "s2-rollback" +// try to rename a different database +permutation "s2-create-testdb2" "s1-create-db1" "s2-begin" "s2-alter-testdb2-rename-to-db2" "s1-drop-db1" "s2-commit" "s2-drop-db2" diff --git a/src/test/regress/spec/isolation_get_all_active_transactions.spec b/src/test/regress/spec/isolation_get_all_active_transactions.spec index 497b3a58af1..8a2d5a5c6ce 100644 --- a/src/test/regress/spec/isolation_get_all_active_transactions.spec +++ b/src/test/regress/spec/isolation_get_all_active_transactions.spec @@ -107,6 +107,29 @@ step "s3-show-activity" select count(*) from get_all_active_transactions() where process_id IN (SELECT * FROM selected_pid); } +step "s3-wait-backend-termination" +{ + SET ROLE postgres; + + DO $$ + DECLARE + i int; + BEGIN + i := 0; + + -- try for 5 sec then timeout + WHILE (select count(*) > 0 from get_all_active_transactions() where process_id IN (SELECT * FROM selected_pid)) + LOOP + PERFORM pg_sleep(0.1); + i := i + 1; + IF i > 50 THEN + RAISE EXCEPTION 'Timeout while waiting for backend to terminate'; + END IF; + END LOOP; + END; + $$; +} + session "s4" step "s4-record-pid" @@ -123,4 +146,4 @@ step "s5-kill" permutation "s1-grant" "s1-begin-insert" "s2-begin-insert" "s3-as-admin" "s3-as-user-1" "s3-as-readonly" "s3-as-monitor" "s1-commit" "s2-commit" -permutation "s4-record-pid" "s3-show-activity" "s5-kill" "s3-show-activity" +permutation "s4-record-pid" "s3-show-activity" "s5-kill" "s3-wait-backend-termination" diff --git a/src/test/regress/spec/isolation_metadata_sync_deadlock.spec b/src/test/regress/spec/isolation_metadata_sync_deadlock.spec index 67c20a2b213..411faf8893f 100644 --- a/src/test/regress/spec/isolation_metadata_sync_deadlock.spec +++ b/src/test/regress/spec/isolation_metadata_sync_deadlock.spec @@ -22,6 +22,7 @@ setup teardown { + SELECT wait_until_metadata_sync(); DROP FUNCTION trigger_metadata_sync(); DROP TABLE deadlock_detection_test; DROP TABLE t2; diff --git a/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec b/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec index 
fce3794274a..9683935bed0 100644 --- a/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec +++ b/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec @@ -90,7 +90,7 @@ step "s2-view-worker" ('%pg_prepared_xacts%'), ('%COMMIT%'), ('%dump_local_%'), - ('%citus_internal_local_blocked_processes%'), + ('%citus_internal.local_blocked_processes%'), ('%add_node%'), ('%csa_from_one_node%'), ('%pg_locks%')) diff --git a/src/test/regress/spec/isolation_update_node.spec b/src/test/regress/spec/isolation_update_node.spec index d6be6bfdcaa..ccbbbec1bb8 100644 --- a/src/test/regress/spec/isolation_update_node.spec +++ b/src/test/regress/spec/isolation_update_node.spec @@ -3,6 +3,8 @@ setup -- revert back to pg_isolation_test_session_is_blocked until the tests are fixed SELECT citus_internal.restore_isolation_tester_func(); + ALTER SEQUENCE pg_dist_node_nodeid_seq RESTART 22; + SELECT 1 FROM master_add_node('localhost', 57637); SELECT 1 FROM master_add_node('localhost', 57638); diff --git a/src/test/regress/split_schedule b/src/test/regress/split_schedule index b47acd8282f..53c422eab9c 100644 --- a/src/test/regress/split_schedule +++ b/src/test/regress/split_schedule @@ -10,6 +10,7 @@ test: foreign_key_to_reference_table # Split tests go here. test: split_shard test: worker_split_copy_test +test: remove_non_default_nodes test: worker_split_binary_copy_test test: worker_split_text_copy_test test: citus_split_shard_by_split_points_negative diff --git a/src/test/regress/sql/add_coordinator.sql b/src/test/regress/sql/add_coordinator.sql index 81b77bfcd8b..2dba7806405 100644 --- a/src/test/regress/sql/add_coordinator.sql +++ b/src/test/regress/sql/add_coordinator.sql @@ -3,8 +3,6 @@ -- -- node trying to add itself without specifying groupid => 0 should error out --- first remove the coordinator to for testing master_add_node for coordinator -SELECT master_remove_node('localhost', :master_port); SELECT master_add_node('localhost', :master_port); SELECT master_add_node('localhost', :master_port, groupid => 0) AS master_nodeid \gset diff --git a/src/test/regress/sql/alter_database_propagation.sql b/src/test/regress/sql/alter_database_propagation.sql index 2b9d3ac3318..9a8b1fab8af 100644 --- a/src/test/regress/sql/alter_database_propagation.sql +++ b/src/test/regress/sql/alter_database_propagation.sql @@ -1,20 +1,12 @@ set citus.log_remote_commands = true; set citus.grep_remote_commands = '%ALTER DATABASE%'; - --- since ALLOW_CONNECTIONS alter option should be executed in a different database --- and since we don't have a multiple database support for now, --- this statement will get error -alter database regression ALLOW_CONNECTIONS false; - - alter database regression with CONNECTION LIMIT 100; alter database regression with IS_TEMPLATE true CONNECTION LIMIT 50; alter database regression with CONNECTION LIMIT -1; alter database regression with IS_TEMPLATE true; alter database regression with IS_TEMPLATE false; --- this statement will get error since we don't have a multiple database support for now -alter database regression rename to regression2; + alter database regression set default_transaction_read_only = true; @@ -56,4 +48,68 @@ alter database regression set lock_timeout from current; alter database regression set lock_timeout to DEFAULT; alter database regression RESET lock_timeout; +set citus.enable_create_database_propagation=on; +SET citus.next_operation_id TO 3000; +create database "regression!'2"; +alter database "regression!'2" with CONNECTION 
LIMIT 100; +alter database "regression!'2" with IS_TEMPLATE true CONNECTION LIMIT 50; +alter database "regression!'2" with IS_TEMPLATE false; + + + + +\set alter_db_tablespace :abs_srcdir '/tmp_check/ts3' +CREATE TABLESPACE alter_db_tablespace LOCATION :'alter_db_tablespace'; + +\c - - - :worker_1_port +\set alter_db_tablespace :abs_srcdir '/tmp_check/ts4' +CREATE TABLESPACE alter_db_tablespace LOCATION :'alter_db_tablespace'; + +\c - - - :worker_2_port +\set alter_db_tablespace :abs_srcdir '/tmp_check/ts5' +CREATE TABLESPACE alter_db_tablespace LOCATION :'alter_db_tablespace'; + +\c - - - :master_port + +set citus.log_remote_commands = true; +set citus.grep_remote_commands = '%ALTER DATABASE%'; + +alter database "regression!'2" set TABLESPACE alter_db_tablespace; + +set citus.enable_create_database_propagation=on; +alter database "regression!'2" rename to regression3; + +-- check that the local database rename and alter command is not propagated +set citus.enable_create_database_propagation=off; +CREATE database local_regression; + +alter DATABASE local_regression with CONNECTION LIMIT 100; +alter DATABASE local_regression rename to local_regression2; +drop database local_regression2; + +set citus.enable_create_database_propagation=on; + +drop database regression3; + +SET citus.next_operation_id TO 3100; +create database "regression!'4"; + + +SELECT result FROM run_command_on_all_nodes( + $$ + ALTER TABLESPACE alter_db_tablespace RENAME TO "ts-needs\!escape" + $$ +); + +alter database "regression!'4" set TABLESPACE "ts-needs\!escape"; + +drop database "regression!'4"; + set citus.log_remote_commands = false; +set citus.enable_create_database_propagation=off; + +SELECT result FROM run_command_on_all_nodes( + $$ + drop tablespace "ts-needs\!escape" + $$ +); diff --git a/src/test/regress/sql/alter_table_add_column.sql b/src/test/regress/sql/alter_table_add_column.sql index 255e7714f33..355667842ad 100644 --- a/src/test/regress/sql/alter_table_add_column.sql +++ b/src/test/regress/sql/alter_table_add_column.sql @@ -41,6 +41,10 @@ ALTER TABLE referencing ADD COLUMN "test_\'!7" "simple_!\'custom_type"; ALTER TABLE referencing ADD COLUMN test_8 integer CHECK (test_8 > 0); ALTER TABLE referencing ADD COLUMN test_8 integer CONSTRAINT check_test_8 CHECK (test_8 > 0); +-- error out properly even if the REFERENCES does not include the column list of the referenced table +ALTER TABLE referencing ADD COLUMN test_9 bool, ADD COLUMN test_10 int REFERENCES referenced; +ALTER TABLE referencing ADD COLUMN test_9 bool, ADD COLUMN test_10 int REFERENCES referenced(int_col); + -- try to add test_6 again, but with IF NOT EXISTS ALTER TABLE referencing ADD COLUMN IF NOT EXISTS test_6 text; ALTER TABLE referencing ADD COLUMN IF NOT EXISTS test_6 integer; diff --git a/src/test/regress/sql/citus_internal_access.sql b/src/test/regress/sql/citus_internal_access.sql new file mode 100644 index 00000000000..8e97448f335 --- /dev/null +++ b/src/test/regress/sql/citus_internal_access.sql @@ -0,0 +1,10 @@ +--- Create a non-superuser role and check if it can access citus_internal schema functions +CREATE USER nonsuperuser CREATEROLE; + +SET ROLE nonsuperuser; +--- The non-superuser role should not be able to access citus_internal functions +SELECT citus_internal.commit_management_command_2pc(); +SELECT citus_internal.replace_isolation_tester_func(); + +RESET ROLE; +DROP USER nonsuperuser; diff --git a/src/test/regress/sql/citus_non_blocking_split_shard_cleanup.sql
b/src/test/regress/sql/citus_non_blocking_split_shard_cleanup.sql index ba3f952154f..480d81b88b3 100644 --- a/src/test/regress/sql/citus_non_blocking_split_shard_cleanup.sql +++ b/src/test/regress/sql/citus_non_blocking_split_shard_cleanup.sql @@ -79,6 +79,8 @@ SELECT pg_catalog.citus_split_shard_by_split_points( ARRAY[:worker_2_node, :worker_2_node, :worker_2_node], 'force_logical'); +SELECT public.wait_for_resource_cleanup(); + \c - - - :worker_2_port SET search_path TO "citus_split_test_schema"; -- Replication slots should be cleaned up diff --git a/src/test/regress/sql/citus_schema_distribute_undistribute.sql b/src/test/regress/sql/citus_schema_distribute_undistribute.sql index 1008b90b207..a7e9bf05115 100644 --- a/src/test/regress/sql/citus_schema_distribute_undistribute.sql +++ b/src/test/regress/sql/citus_schema_distribute_undistribute.sql @@ -185,7 +185,7 @@ SELECT citus_schema_undistribute('tenant1'); -- assign all tables to dummyregular except table5 SET role tenantuser; -SELECT result FROM run_command_on_all_nodes($$ REASSIGN OWNED BY tenantuser TO dummyregular; $$); +REASSIGN OWNED BY tenantuser TO dummyregular; CREATE TABLE tenant1.table5(id int); -- table owner check fails the distribution @@ -219,7 +219,7 @@ SELECT result FROM run_command_on_all_nodes($$ SELECT COUNT(*)=0 FROM pg_dist_co SELECT result FROM run_command_on_all_nodes($$ SELECT array_agg(logicalrelid ORDER BY logicalrelid) FROM pg_dist_partition WHERE logicalrelid::text LIKE 'tenant1.%' AND colocationid > 0 $$); RESET role; -SELECT result FROM run_command_on_all_nodes($$ REASSIGN OWNED BY dummyregular TO tenantuser; $$); +REASSIGN OWNED BY dummyregular TO tenantuser; DROP USER dummyregular; CREATE USER dummysuper superuser; diff --git a/src/test/regress/sql/citus_schema_move.sql b/src/test/regress/sql/citus_schema_move.sql index 8240feff761..bdf0d20ffac 100644 --- a/src/test/regress/sql/citus_schema_move.sql +++ b/src/test/regress/sql/citus_schema_move.sql @@ -147,7 +147,7 @@ SELECT citus_schema_move('s2', 'dummy_node', 1234); -- assign all tables to regularuser RESET ROLE; -SELECT result FROM run_command_on_all_nodes($$ REASSIGN OWNED BY tenantuser TO regularuser; $$); +REASSIGN OWNED BY tenantuser TO regularuser; GRANT USAGE ON SCHEMA citus_schema_move TO regularuser; diff --git a/src/test/regress/sql/citus_shards.sql b/src/test/regress/sql/citus_shards.sql new file mode 100644 index 00000000000..9234ffd2e3a --- /dev/null +++ b/src/test/regress/sql/citus_shards.sql @@ -0,0 +1,17 @@ +CREATE SCHEMA citus_shards; +SET search_path TO citus_shards; +SET citus.shard_count TO 4; +SET citus.shard_replication_factor TO 1; +SET citus.next_shard_id TO 99456900; +ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 456900; + +CREATE TABLE t1 (i int); +SELECT create_distributed_table('t1', 'i'); +CREATE TABLE "t with space" (i int); +SELECT create_distributed_table('"t with space"', 'i'); +INSERT INTO t1 SELECT generate_series(1, 100); +INSERT INTO "t with space" SELECT generate_series(1, 1000); +SELECT * FROM citus_shards; + +SET client_min_messages TO WARNING; +DROP SCHEMA citus_shards CASCADE; diff --git a/src/test/regress/sql/citus_split_shard_by_split_points_negative.sql b/src/test/regress/sql/citus_split_shard_by_split_points_negative.sql index fe37777c792..4c180052f60 100644 --- a/src/test/regress/sql/citus_split_shard_by_split_points_negative.sql +++ b/src/test/regress/sql/citus_split_shard_by_split_points_negative.sql @@ -113,4 +113,5 @@ SELECT citus_split_shard_by_split_points( --BEGIN : Cleanup \c - 
postgres - :master_port DROP SCHEMA "citus_split_shard_by_split_points_negative" CASCADE; +SELECT public.wait_for_resource_cleanup(); --END : Cleanup diff --git a/src/test/regress/sql/columnar_create.sql b/src/test/regress/sql/columnar_create.sql index 408ce126e36..a0708aeac55 100644 --- a/src/test/regress/sql/columnar_create.sql +++ b/src/test/regress/sql/columnar_create.sql @@ -136,22 +136,34 @@ CREATE TEMPORARY TABLE columnar_temp(i int) USING columnar; -- reserve some chunks and a stripe INSERT INTO columnar_temp SELECT i FROM generate_series(1,5) i; -SELECT columnar.get_storage_id(oid) AS columnar_temp_storage_id -FROM pg_class WHERE relname='columnar_temp' \gset - -SELECT pg_backend_pid() AS val INTO old_backend_pid; +SELECT columnar.get_storage_id(oid) as oid INTO columnar_temp_storage_id +FROM pg_class WHERE relname='columnar_temp'; \c - - - :master_port SET search_path TO columnar_create; --- wait until old backend to expire to make sure that temp table cleanup is complete -SELECT columnar_test_helpers.pg_waitpid(val) FROM old_backend_pid; +-- wait until temporary table and its metadata is removed +DO $$ +DECLARE + loop_wait_count integer := 0; +BEGIN + WHILE ( + (SELECT COUNT(*) > 0 FROM pg_class WHERE relname='columnar_temp') OR + (SELECT columnar_test_helpers.columnar_metadata_has_storage_id(oid) FROM columnar_temp_storage_id) + ) + LOOP + IF loop_wait_count > 1000 THEN + RAISE EXCEPTION 'Timeout while waiting for temporary table to be dropped'; + END IF; + + PERFORM pg_sleep(0.001); -DROP TABLE old_backend_pid; + loop_wait_count := loop_wait_count + 1; + END LOOP; +END; +$$ language plpgsql; --- show that temporary table itself and its metadata is removed -SELECT COUNT(*)=0 FROM pg_class WHERE relname='columnar_temp'; -SELECT columnar_test_helpers.columnar_metadata_has_storage_id(:columnar_temp_storage_id); +DROP TABLE columnar_temp_storage_id; -- connect to another session and create a temp table with same name CREATE TEMPORARY TABLE columnar_temp(i int) USING columnar; diff --git a/src/test/regress/sql/comment_on_database.sql b/src/test/regress/sql/comment_on_database.sql new file mode 100644 index 00000000000..2c5ced81f66 --- /dev/null +++ b/src/test/regress/sql/comment_on_database.sql @@ -0,0 +1,73 @@ +set citus.log_remote_commands to on; + +set citus.enable_create_database_propagation to on; +set citus.grep_remote_commands to 'COMMENT ON DATABASE'; + +create database "test1-\!escape"; + +comment on DATABASE "test1-\!escape" is 'test-comment'; + +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT ds.description AS database_comment + FROM pg_database d + LEFT JOIN pg_shdescription ds ON d.oid = ds.objoid + WHERE d.datname = 'test1-\!escape'; + $$ +); + +comment on DATABASE "test1-\!escape" is 'comment-needs\!escape'; + +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT ds.description AS database_comment + FROM pg_database d + LEFT JOIN pg_shdescription ds ON d.oid = ds.objoid + WHERE d.datname = 'test1-\!escape'; + $$ +); + +comment on DATABASE "test1-\!escape" is null; + +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT ds.description AS database_comment + FROM pg_database d + LEFT JOIN pg_shdescription ds ON d.oid = ds.objoid + WHERE d.datname = 'test1-\!escape'; + $$ +); + +drop DATABASE "test1-\!escape"; + +--test metadata sync +select 1 from citus_remove_node('localhost', :worker_2_port); +create database "test1-\!escape"; +comment on DATABASE "test1-\!escape" is 'test-comment'; + +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT 
ds.description AS database_comment + FROM pg_database d + LEFT JOIN pg_shdescription ds ON d.oid = ds.objoid + WHERE d.datname = 'test1-\!escape'; + $$ +); + +select 1 from citus_add_node('localhost', :worker_2_port); + +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT ds.description AS database_comment + FROM pg_database d + LEFT JOIN pg_shdescription ds ON d.oid = ds.objoid + WHERE d.datname = 'test1-\!escape'; + $$ +); + +drop DATABASE "test1-\!escape"; + + +reset citus.enable_create_database_propagation; +reset citus.grep_remote_commands; +reset citus.log_remote_commands; diff --git a/src/test/regress/sql/comment_on_role.sql b/src/test/regress/sql/comment_on_role.sql new file mode 100644 index 00000000000..d65d57ccac4 --- /dev/null +++ b/src/test/regress/sql/comment_on_role.sql @@ -0,0 +1,72 @@ +set citus.log_remote_commands to on; + +set citus.grep_remote_commands to 'COMMENT ON ROLE'; + +create role "role1-\!escape"; + +comment on ROLE "role1-\!escape" is 'test-comment'; + +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT ds.description AS role_comment + FROM pg_roles r + LEFT JOIN pg_shdescription ds ON r.oid = ds.objoid + WHERE r.rolname = 'role1-\!escape'; + $$ +); + +comment on role "role1-\!escape" is 'comment-needs\!escape'; + +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT ds.description AS role_comment + FROM pg_roles r + LEFT JOIN pg_shdescription ds ON r.oid = ds.objoid + WHERE r.rolname = 'role1-\!escape'; + $$ +); + +comment on role "role1-\!escape" is NULL; + +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT ds.description AS role_comment + FROM pg_roles r + LEFT JOIN pg_shdescription ds ON r.oid = ds.objoid + WHERE r.rolname = 'role1-\!escape'; + $$ +); + +drop role "role1-\!escape"; + + +--test metadata sync + +select 1 from citus_remove_node('localhost', :worker_2_port); +create role "role1-\!escape"; +comment on ROLE "role1-\!escape" is 'test-comment'; + +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT ds.description AS role_comment + FROM pg_roles r + LEFT JOIN pg_shdescription ds ON r.oid = ds.objoid + WHERE r.rolname = 'role1-\!escape'; + $$ +); + +select 1 from citus_add_node('localhost', :worker_2_port); + +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT ds.description AS role_comment + FROM pg_roles r + LEFT JOIN pg_shdescription ds ON r.oid = ds.objoid + WHERE r.rolname = 'role1-\!escape'; + $$ +); + +drop role "role1-\!escape"; + +reset citus.grep_remote_commands; +reset citus.log_remote_commands; diff --git a/src/test/regress/sql/create_drop_database_propagation.sql b/src/test/regress/sql/create_drop_database_propagation.sql new file mode 100644 index 00000000000..de55258c3c5 --- /dev/null +++ b/src/test/regress/sql/create_drop_database_propagation.sql @@ -0,0 +1,819 @@ + +-- Test for create/drop database propagation. +-- This test is only executed for Postgres versions < 15. +-- For versions >= 15, pg15_create_drop_database_propagation.sql is used. +-- For versions >= 16, pg16_create_drop_database_propagation.sql is used. + +-- Test the UDF that we use to issue database command during metadata sync.
+SELECT citus_internal.database_command(null); + +CREATE ROLE test_db_commands WITH LOGIN; +ALTER SYSTEM SET citus.enable_manual_metadata_changes_for_user TO 'test_db_commands'; +SELECT pg_reload_conf(); +SELECT pg_sleep(0.1); +SET ROLE test_db_commands; + +-- fails on null input +SELECT citus_internal.database_command(null); + +-- fails on non create / drop db command +SELECT citus_internal.database_command('CREATE TABLE foo_bar(a int)'); +SELECT citus_internal.database_command('SELECT 1'); +SELECT citus_internal.database_command('asfsfdsg'); +SELECT citus_internal.database_command(''); + +RESET ROLE; +ALTER ROLE test_db_commands nocreatedb; +SET ROLE test_db_commands; + +-- make sure that citus_internal.database_command doesn't cause privilege escalation +SELECT citus_internal.database_command('CREATE DATABASE no_permissions'); + +RESET ROLE; +DROP USER test_db_commands; +ALTER SYSTEM RESET citus.enable_manual_metadata_changes_for_user; +SELECT pg_reload_conf(); +SELECT pg_sleep(0.1); + +\set create_drop_db_tablespace :abs_srcdir '/tmp_check/ts3' +CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace'; + +\c - - - :worker_1_port +\set create_drop_db_tablespace :abs_srcdir '/tmp_check/ts4' +CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace'; + +\c - - - :worker_2_port +\set create_drop_db_tablespace :abs_srcdir '/tmp_check/ts5' +CREATE TABLESPACE create_drop_db_tablespace LOCATION :'create_drop_db_tablespace'; + +\c - - - :master_port +CREATE DATABASE local_database; + +-- check that it's only created for coordinator +SELECT * FROM public.check_database_on_all_nodes('local_database') ORDER BY node_type; + +DROP DATABASE local_database; + +-- and is dropped +SELECT * FROM public.check_database_on_all_nodes('local_database') ORDER BY node_type; + +\c - - - :worker_1_port +CREATE DATABASE local_database; + +-- check that it's only created for coordinator +SELECT * FROM public.check_database_on_all_nodes('local_database') ORDER BY node_type; + +DROP DATABASE local_database; + +-- and is dropped +SELECT * FROM public.check_database_on_all_nodes('local_database') ORDER BY node_type; + +\c - - - :master_port +create user create_drop_db_test_user; + +set citus.enable_create_database_propagation=on; + +-- Tests for create database propagation with template0 which should fail +CREATE DATABASE mydatabase + WITH OWNER = create_drop_db_test_user + TEMPLATE = 'template0' + ENCODING = 'UTF8' + CONNECTION LIMIT = 10 + TABLESPACE = create_drop_db_tablespace + ALLOW_CONNECTIONS = true + IS_TEMPLATE = false; + +CREATE DATABASE mydatabase_1 + WITH template=template1 + OWNER = create_drop_db_test_user + ENCODING = 'UTF8' + CONNECTION LIMIT = 10 + TABLESPACE = create_drop_db_tablespace + ALLOW_CONNECTIONS = true + IS_TEMPLATE = false; + +SELECT * FROM public.check_database_on_all_nodes('mydatabase_1') ORDER BY node_type; + +-- Test LC / LOCALE settings that don't match the ones provided in template db. +-- All should throw an error on the coordinator. +CREATE DATABASE lc_collate_test LC_COLLATE = 'C.UTF-8'; +CREATE DATABASE lc_ctype_test LC_CTYPE = 'C.UTF-8'; +CREATE DATABASE locale_test LOCALE = 'C.UTF-8'; +CREATE DATABASE lc_collate_lc_ctype_test LC_COLLATE = 'C.UTF-8' LC_CTYPE = 'C.UTF-8'; + +-- Test LC / LOCALE settings that match the ones provided in template db. 
+CREATE DATABASE lc_collate_test LC_COLLATE = 'C'; +CREATE DATABASE lc_ctype_test LC_CTYPE = 'C'; +CREATE DATABASE locale_test LOCALE = 'C'; +CREATE DATABASE lc_collate_lc_ctype_test LC_COLLATE = 'C' LC_CTYPE = 'C'; + +SELECT * FROM public.check_database_on_all_nodes('lc_collate_test') ORDER BY node_type; +SELECT * FROM public.check_database_on_all_nodes('lc_ctype_test') ORDER BY node_type; +SELECT * FROM public.check_database_on_all_nodes('locale_test') ORDER BY node_type; +SELECT * FROM public.check_database_on_all_nodes('lc_collate_lc_ctype_test') ORDER BY node_type; + +DROP DATABASE lc_collate_test; +DROP DATABASE lc_ctype_test; +DROP DATABASE locale_test; +DROP DATABASE lc_collate_lc_ctype_test; + +-- ALTER TABLESPACE .. RENAME TO .. is not supported, so we need to rename it manually. +SELECT result FROM run_command_on_all_nodes( + $$ + ALTER TABLESPACE create_drop_db_tablespace RENAME TO "ts-needs\!escape" + $$ +); + +CREATE USER "role-needs\!escape"; + +CREATE DATABASE "db-needs\!escape" owner "role-needs\!escape" tablespace "ts-needs\!escape"; + +-- Rename it to make check_database_on_all_nodes happy. + +ALTER DATABASE "db-needs\!escape" RENAME TO db_needs_escape; +SELECT * FROM public.check_database_on_all_nodes('db_needs_escape') ORDER BY node_type; + +-- test database syncing after node addition + +select 1 from citus_remove_node('localhost', :worker_2_port); + +--test with is_template true and allow connections false +CREATE DATABASE mydatabase + OWNER = create_drop_db_test_user + CONNECTION LIMIT = 10 + ENCODING = 'UTF8' + TABLESPACE = "ts-needs\!escape" + ALLOW_CONNECTIONS = false + IS_TEMPLATE = false; + +SELECT * FROM public.check_database_on_all_nodes('mydatabase') ORDER BY node_type; + +SET citus.metadata_sync_mode to 'transactional'; +select 1 from citus_add_node('localhost', :worker_2_port); + +SELECT * FROM public.check_database_on_all_nodes('mydatabase') ORDER BY node_type; +SELECT * FROM public.check_database_on_all_nodes('mydatabase_1') ORDER BY node_type; +SELECT * FROM public.check_database_on_all_nodes('db_needs_escape') ORDER BY node_type; + +select 1 from citus_remove_node('localhost', :worker_2_port); + +SET citus.metadata_sync_mode to 'nontransactional'; +select 1 from citus_add_node('localhost', :worker_2_port); + +RESET citus.metadata_sync_mode; + +SELECT * FROM public.check_database_on_all_nodes('mydatabase') ORDER BY node_type; +SELECT * FROM public.check_database_on_all_nodes('mydatabase_1') ORDER BY node_type; +SELECT * FROM public.check_database_on_all_nodes('db_needs_escape') ORDER BY node_type; + +SELECT citus_disable_node_and_wait('localhost', :worker_1_port, true); + +CREATE DATABASE test_node_activation; +SELECT 1 FROM citus_activate_node('localhost', :worker_1_port); + +SELECT * FROM public.check_database_on_all_nodes('mydatabase') ORDER BY node_type; +SELECT * FROM public.check_database_on_all_nodes('mydatabase_1') ORDER BY node_type; +SELECT * FROM public.check_database_on_all_nodes('db_needs_escape') ORDER BY node_type; +SELECT * FROM public.check_database_on_all_nodes('test_node_activation') ORDER BY node_type; + +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%DROP DATABASE%'; +drop database mydatabase; + +SET citus.log_remote_commands = false; + +-- check that we actually drop the database +drop database mydatabase_1; + +SELECT * FROM public.check_database_on_all_nodes('mydatabase_1') ORDER BY node_type; + +SELECT * FROM public.check_database_on_all_nodes('mydatabase') ORDER BY node_type; + +-- create a template 
database with all options set and allow connections false +CREATE DATABASE my_template_database + WITH OWNER = create_drop_db_test_user + ENCODING = 'UTF8' + TABLESPACE = "ts-needs\!escape" + ALLOW_CONNECTIONS = false + IS_TEMPLATE = true; + +SELECT * FROM public.check_database_on_all_nodes('my_template_database') ORDER BY node_type; + +--template databases could not be dropped so we need to change the template flag +SELECT result from run_command_on_all_nodes( + $$ + UPDATE pg_database SET datistemplate = false WHERE datname = 'my_template_database' + $$ +) ORDER BY result; + +SET citus.log_remote_commands = true; + +set citus.grep_remote_commands = '%DROP DATABASE%'; +drop database my_template_database; + +SET citus.log_remote_commands = false; + +SELECT * FROM public.check_database_on_all_nodes('my_template_database') ORDER BY node_type; + +--tests for special characters in database name +set citus.enable_create_database_propagation=on; +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%DATABASE%'; +SET citus.next_operation_id TO 2000; + +create database "mydatabase#1'2"; + +set citus.grep_remote_commands = '%DROP DATABASE%'; +drop database if exists "mydatabase#1'2"; + +reset citus.grep_remote_commands; +reset citus.log_remote_commands; + +-- it doesn't fail thanks to "if exists" +drop database if exists "mydatabase#1'2"; + +-- recreate it to verify that it's actually dropped +create database "mydatabase#1'2"; +drop database "mydatabase#1'2"; + +-- second time we try to drop it, it fails due to lack of "if exists" +drop database "mydatabase#1'2"; + +\c - - - :worker_1_port + +SET citus.enable_create_database_propagation TO ON; + +-- show that dropping the database from workers is allowed when citus.enable_create_database_propagation is on +DROP DATABASE db_needs_escape; + +-- and the same applies to create database too +create database error_test; +drop database error_test; + +\c - - - :master_port + +SET citus.enable_create_database_propagation TO ON; + +DROP DATABASE test_node_activation; +DROP USER "role-needs\!escape"; + +-- drop database with force options test + +create database db_force_test; + +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%DROP DATABASE%'; + +drop database db_force_test with (force); + +reset citus.log_remote_commands; +reset citus.grep_remote_commands; + +SELECT * FROM public.check_database_on_all_nodes('db_force_test') ORDER BY node_type; + +-- test that we won't propagate non-distributed databases in citus_add_node + +select 1 from citus_remove_node('localhost', :worker_2_port); +SET citus.enable_create_database_propagation TO off; +CREATE DATABASE non_distributed_db; +SET citus.enable_create_database_propagation TO on; +create database distributed_db; + +select 1 from citus_add_node('localhost', :worker_2_port); + +--non_distributed_db should not be propagated to worker_2 +SELECT * FROM public.check_database_on_all_nodes('non_distributed_db') ORDER BY node_type; +--distributed_db should be propagated to worker_2 +SELECT * FROM public.check_database_on_all_nodes('distributed_db') ORDER BY node_type; + +--clean up resources created by this test +drop database distributed_db; + +set citus.enable_create_database_propagation TO off; +drop database non_distributed_db; + +-- test role grants on DATABASE in metadata sync + +SELECT result from run_command_on_all_nodes( + $$ + create database db_role_grants_test_non_distributed + $$ +) ORDER BY result; + +SELECT result from run_command_on_all_nodes( + $$ + revoke 
connect,temp,temporary,create on database db_role_grants_test_non_distributed from public + $$ +) ORDER BY result; + +SET citus.enable_create_database_propagation TO on; + +CREATE ROLE db_role_grants_test_role_exists_on_node_2; + +select 1 from citus_remove_node('localhost', :worker_2_port); + +CREATE DATABASE db_role_grants_test; + +revoke connect,temp,temporary,create on database db_role_grants_test from public; + +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%CREATE ROLE%'; +CREATE ROLE db_role_grants_test_role_missing_on_node_2; + +RESET citus.log_remote_commands ; +RESET citus.grep_remote_commands; + +SET citus.log_remote_commands = true; +set citus.grep_remote_commands = '%GRANT%'; +grant CONNECT,TEMPORARY,CREATE on DATABASE db_role_grants_test to db_role_grants_test_role_exists_on_node_2; +grant CONNECT,TEMPORARY,CREATE on DATABASE db_role_grants_test to db_role_grants_test_role_missing_on_node_2; + +grant CONNECT,TEMPORARY,CREATE on DATABASE db_role_grants_test_non_distributed to db_role_grants_test_role_exists_on_node_2; +grant CONNECT,TEMPORARY,CREATE on DATABASE db_role_grants_test_non_distributed to db_role_grants_test_role_missing_on_node_2; + +-- check the privileges before add_node for database db_role_grants_test, +-- role db_role_grants_test_role_exists_on_node_2 + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test', 'CREATE') + $$ +) ORDER BY result; + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test', 'TEMPORARY') + $$ +) ORDER BY result; + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test', 'CONNECT') + $$ +) ORDER BY result; + +-- check the privileges before add_node for database db_role_grants_test, +-- role db_role_grants_test_role_missing_on_node_2 + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test', 'CREATE') + $$ +) ORDER BY result; + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test', 'TEMPORARY') + $$ +) ORDER BY result; + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test', 'CONNECT') + $$ +) ORDER BY result; + +-- check the privileges before add_node for database db_role_grants_test_non_distributed, +-- role db_role_grants_test_role_exists_on_node_2 +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test_non_distributed', 'CREATE') + $$ +) ORDER BY result; + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test_non_distributed', 'TEMPORARY') + $$ +) ORDER BY result; + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test_non_distributed', 'CONNECT') + $$ +) ORDER BY result; + +-- check the privileges before add_node for database db_role_grants_test_non_distributed, +-- role db_role_grants_test_role_missing_on_node_2 + +SELECT result from run_command_on_all_nodes( + $$ + select 
has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test_non_distributed', 'CREATE') + $$ +) ORDER BY result; + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test_non_distributed', 'TEMPORARY') + $$ +) ORDER BY result; + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test_non_distributed', 'CONNECT') + $$ +) ORDER BY result; + +RESET citus.log_remote_commands; +RESET citus.grep_remote_commands; + +select 1 from citus_add_node('localhost', :worker_2_port); + +-- check the privileges after add_node for database db_role_grants_test, +-- role db_role_grants_test_role_exists_on_node_2 + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test', 'CREATE') + $$ +) ORDER BY result; + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test', 'TEMPORARY') + $$ +) ORDER BY result; + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test', 'CONNECT') + $$ +) ORDER BY result; + +-- check the privileges after add_node for database db_role_grants_test, +-- role db_role_grants_test_role_missing_on_node_2 + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test', 'CREATE') + $$ +) ORDER BY result; + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test', 'TEMPORARY') + $$ +) ORDER BY result; + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test', 'CONNECT') + $$ +) ORDER BY result; + +-- check the privileges after add_node for database db_role_grants_test_non_distributed, +-- role db_role_grants_test_role_exists_on_node_2 +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test_non_distributed', 'CREATE') + $$ +) ORDER BY result; + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test_non_distributed', 'TEMPORARY') + $$ +) ORDER BY result; + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_exists_on_node_2','db_role_grants_test_non_distributed', 'CONNECT') + $$ +) ORDER BY result; + +-- check the privileges after add_node for database db_role_grants_test_non_distributed, +-- role db_role_grants_test_role_missing_on_node_2 + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test_non_distributed', 'CREATE') + $$ +) ORDER BY result; + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test_non_distributed', 'TEMPORARY') + $$ +) ORDER BY result; + +SELECT result from run_command_on_all_nodes( + $$ + select has_database_privilege('db_role_grants_test_role_missing_on_node_2','db_role_grants_test_non_distributed', 
'CONNECT') + $$ +) ORDER BY result; + +grant connect,temp,temporary,create on database db_role_grants_test to public; + +DROP DATABASE db_role_grants_test; + +SELECT result from run_command_on_all_nodes( + $$ + drop database db_role_grants_test_non_distributed + $$ +) ORDER BY result; +DROP ROLE db_role_grants_test_role_exists_on_node_2; +DROP ROLE db_role_grants_test_role_missing_on_node_2; + +select 1 from citus_remove_node('localhost', :worker_2_port); + +set citus.enable_create_role_propagation TO off; +create role non_propagated_role; +set citus.enable_create_role_propagation TO on; + +set citus.enable_create_database_propagation TO on; + +-- Make sure that we propagate non_propagated_role because it's a dependency of test_db. +-- And hence it becomes a distributed object. +create database test_db OWNER non_propagated_role; + +create role propagated_role; +grant connect on database test_db to propagated_role; + +SELECT 1 FROM citus_add_node('localhost', :worker_2_port); + +SELECT * FROM public.check_database_on_all_nodes('test_db') ORDER BY node_type; + +REVOKE CONNECT ON DATABASE test_db FROM propagated_role; +DROP DATABASE test_db; +DROP ROLE propagated_role, non_propagated_role; + +-- show that we don't try to propagate commands on non-distributed databases +SET citus.enable_create_database_propagation TO OFF; +CREATE DATABASE local_database_1; +SET citus.enable_create_database_propagation TO ON; + +CREATE ROLE local_role_1; + +GRANT CONNECT, TEMPORARY, CREATE ON DATABASE local_database_1 TO local_role_1; +ALTER DATABASE local_database_1 SET default_transaction_read_only = 'true'; + +REVOKE CONNECT, TEMPORARY, CREATE ON DATABASE local_database_1 FROM local_role_1; +DROP ROLE local_role_1; +DROP DATABASE local_database_1; + +-- test create / drop database commands from workers + +-- remove one of the workers to test node activation too +SELECT 1 from citus_remove_node('localhost', :worker_2_port); + +\c - - - :worker_1_port + +CREATE DATABASE local_worker_db; + +SET citus.enable_create_database_propagation TO ON; + +CREATE DATABASE db_created_from_worker + WITH template=template1 + OWNER = create_drop_db_test_user + ENCODING = 'UTF8' + CONNECTION LIMIT = 42 + TABLESPACE = "ts-needs\!escape" + ALLOW_CONNECTIONS = false; + +\c - - - :master_port + +SET citus.enable_create_database_propagation TO ON; + +SELECT 1 FROM citus_add_node('localhost', :worker_2_port); + +\c - - - :worker_1_port + +SET citus.enable_create_database_propagation TO ON; + +SELECT * FROM public.check_database_on_all_nodes('local_worker_db') ORDER BY node_type; +SELECT * FROM public.check_database_on_all_nodes('db_created_from_worker') ORDER BY node_type; + +DROP DATABASE db_created_from_worker; + +SELECT * FROM public.check_database_on_all_nodes('db_created_from_worker') ORDER BY node_type; + +-- drop the local database while the GUC is on +DROP DATABASE local_worker_db; +SELECT * FROM public.check_database_on_all_nodes('local_worker_db') ORDER BY node_type; + +SET citus.enable_create_database_propagation TO OFF; + +CREATE DATABASE local_worker_db; + +-- drop the local database while the GUC is off +DROP DATABASE local_worker_db; +SELECT * FROM public.check_database_on_all_nodes('local_worker_db') ORDER BY node_type; + +SET citus.enable_create_database_propagation TO ON; + +CREATE DATABASE another_db_created_from_worker; + +\c - - - :master_port + +SELECT 1 FROM citus_remove_node('localhost', :master_port); + +\c - - - :worker_1_port + +SET citus.enable_create_database_propagation TO ON; + +-- fails because 
coordinator is not added into metadata +DROP DATABASE another_db_created_from_worker; + +-- fails because coordinator is not added into metadata +CREATE DATABASE new_db; + +\c - - - :master_port + +SET client_min_messages TO WARNING; +SELECT 1 FROM citus_add_node('localhost', :master_port, 0); +RESET client_min_messages; + +SET citus.enable_create_database_propagation TO ON; + +-- dropping a database that was created from a worker via a different node works fine +DROP DATABASE another_db_created_from_worker; +SELECT * FROM public.check_database_on_all_nodes('another_db_created_from_worker') ORDER BY node_type; + +-- Show that we automatically propagate the dependencies (only roles atm) when +-- creating a database from workers too. + +SELECT 1 from citus_remove_node('localhost', :worker_2_port); + +\c - - - :worker_1_port + +set citus.enable_create_role_propagation TO off; +create role non_propagated_role; +set citus.enable_create_role_propagation TO on; + +set citus.enable_create_database_propagation TO on; + +create database test_db OWNER non_propagated_role; + +create role propagated_role; + +\c - - - :master_port + +-- not supported from workers, so need to execute this via coordinator +grant connect on database test_db to propagated_role; + +SET citus.enable_create_database_propagation TO ON; + +SELECT 1 FROM citus_add_node('localhost', :worker_2_port); + +SELECT * FROM public.check_database_on_all_nodes('test_db') ORDER BY node_type; + +REVOKE CONNECT ON DATABASE test_db FROM propagated_role; +DROP DATABASE test_db; +DROP ROLE propagated_role, non_propagated_role; + +-- test citus_internal.acquire_citus_advisory_object_class_lock with null input +SELECT citus_internal.acquire_citus_advisory_object_class_lock(null, 'regression'); +SELECT citus_internal.acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), null); + +-- OCLASS_DATABASE +SELECT citus_internal.acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), NULL); +SELECT citus_internal.acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), 'regression'); +SELECT citus_internal.acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), ''); +SELECT citus_internal.acquire_citus_advisory_object_class_lock((SELECT CASE WHEN substring(version(), '\d+')::integer < 16 THEN 25 ELSE 26 END AS oclass_database), 'no_such_db'); + +-- invalid OCLASS +SELECT citus_internal.acquire_citus_advisory_object_class_lock(-1, NULL); +SELECT citus_internal.acquire_citus_advisory_object_class_lock(-1, 'regression'); + +-- invalid OCLASS +SELECT citus_internal.acquire_citus_advisory_object_class_lock(100, NULL); +SELECT citus_internal.acquire_citus_advisory_object_class_lock(100, 'regression'); + +-- another valid OCLASS, but not implemented yet +SELECT citus_internal.acquire_citus_advisory_object_class_lock(10, NULL); +SELECT citus_internal.acquire_citus_advisory_object_class_lock(10, 'regression'); + +SELECT 1 FROM run_command_on_all_nodes('ALTER SYSTEM SET citus.enable_create_database_propagation TO ON'); +SELECT 1 FROM run_command_on_all_nodes('SELECT pg_reload_conf()'); +SELECT pg_sleep(0.1); + +-- only one of them succeeds and we don't run into a distributed deadlock +SELECT COUNT(*) FROM 
run_command_on_all_nodes('CREATE DATABASE concurrent_create_db') WHERE success; +SELECT * FROM public.check_database_on_all_nodes('concurrent_create_db') ORDER BY node_type; + +SELECT COUNT(*) FROM run_command_on_all_nodes('DROP DATABASE concurrent_create_db') WHERE success; +SELECT * FROM public.check_database_on_all_nodes('concurrent_create_db') ORDER BY node_type; + +-- revert the system wide change that enables citus.enable_create_database_propagation on all nodes +SELECT 1 FROM run_command_on_all_nodes('ALTER SYSTEM SET citus.enable_create_database_propagation TO OFF'); +SELECT 1 FROM run_command_on_all_nodes('SELECT pg_reload_conf()'); +SELECT pg_sleep(0.1); + +-- but keep it enabled for coordinator for the rest of the tests +SET citus.enable_create_database_propagation TO ON; + +CREATE DATABASE distributed_db; + +CREATE USER no_createdb; +SET ROLE no_createdb; +SET citus.enable_create_database_propagation TO ON; + +CREATE DATABASE no_createdb; +ALTER DATABASE distributed_db RENAME TO rename_test; +DROP DATABASE distributed_db; +ALTER DATABASE distributed_db SET TABLESPACE pg_default; +ALTER DATABASE distributed_db SET timezone TO 'UTC'; +ALTER DATABASE distributed_db RESET timezone; +GRANT ALL ON DATABASE distributed_db TO postgres; + +RESET ROLE; + +ALTER ROLE no_createdb createdb; + +SET ROLE no_createdb; + +CREATE DATABASE no_createdb; + +ALTER DATABASE distributed_db RENAME TO rename_test; + +RESET ROLE; + +SELECT 1 FROM run_command_on_all_nodes($$GRANT ALL ON TABLESPACE pg_default TO no_createdb$$); +ALTER DATABASE distributed_db OWNER TO no_createdb; + +SET ROLE no_createdb; + +ALTER DATABASE distributed_db SET TABLESPACE pg_default; +ALTER DATABASE distributed_db SET timezone TO 'UTC'; +ALTER DATABASE distributed_db RESET timezone; +GRANT ALL ON DATABASE distributed_db TO postgres; +ALTER DATABASE distributed_db RENAME TO rename_test; +DROP DATABASE rename_test; + +RESET ROLE; + +SELECT 1 FROM run_command_on_all_nodes($$REVOKE ALL ON TABLESPACE pg_default FROM no_createdb$$); + +DROP DATABASE no_createdb; +DROP USER no_createdb; + +-- Test a failure scenario by trying to create a distributed database that +-- already exists on one of the nodes. 
+ +\c - - - :worker_1_port +CREATE DATABASE "test_\!failure"; + +\c - - - :master_port + +SET citus.enable_create_database_propagation TO ON; + +CREATE DATABASE "test_\!failure"; + +SET client_min_messages TO WARNING; +CALL citus_cleanup_orphaned_resources(); +RESET client_min_messages; + +SELECT result AS database_cleanedup_on_node FROM run_command_on_all_nodes($$SELECT COUNT(*)=0 FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$); +SELECT * FROM public.check_database_on_all_nodes($$test_\!failure$$) ORDER BY node_type, result; + +SET citus.enable_create_database_propagation TO OFF; +CREATE DATABASE "test_\!failure1"; + +\c - - - :worker_1_port +DROP DATABASE "test_\!failure"; + +SET citus.enable_create_database_propagation TO ON; + +CREATE DATABASE "test_\!failure1"; + +SET client_min_messages TO WARNING; +CALL citus_cleanup_orphaned_resources(); +RESET client_min_messages; + +SELECT result AS database_cleanedup_on_node FROM run_command_on_all_nodes($$SELECT COUNT(*)=0 FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$); +SELECT * FROM public.check_database_on_all_nodes($$test_\!failure1$$) ORDER BY node_type, result; + +\c - - - :master_port + +-- Before dropping local "test_\!failure1" database, test a failure scenario +-- by trying to create a distributed database that already exists "on local +-- node" this time. + +SET citus.enable_create_database_propagation TO ON; + +CREATE DATABASE "test_\!failure1"; + +SET client_min_messages TO WARNING; +CALL citus_cleanup_orphaned_resources(); +RESET client_min_messages; + +SELECT result AS database_cleanedup_on_node FROM run_command_on_all_nodes($$SELECT COUNT(*)=0 FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$); +SELECT * FROM public.check_database_on_all_nodes($$test_\!failure1$$) ORDER BY node_type, result; + +SET citus.enable_create_database_propagation TO OFF; + +DROP DATABASE "test_\!failure1"; + +SET citus.enable_create_database_propagation TO ON; + +--clean up resources created by this test + +-- DROP TABLESPACE is not supported, so we need to drop it manually. 
+SELECT result FROM run_command_on_all_nodes( + $$ + drop tablespace "ts-needs\!escape" + $$ +); + +drop user create_drop_db_test_user; +reset citus.enable_create_database_propagation; diff --git a/src/test/regress/sql/create_drop_database_propagation_pg15.sql b/src/test/regress/sql/create_drop_database_propagation_pg15.sql new file mode 100644 index 00000000000..4e006c54fa8 --- /dev/null +++ b/src/test/regress/sql/create_drop_database_propagation_pg15.sql @@ -0,0 +1,73 @@ +-- +-- PG15 +-- +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int >= 15 AS server_version_ge_15 +\gset +\if :server_version_ge_15 +\else +\q +\endif + +-- create/drop database for pg >= 15 + +set citus.enable_create_database_propagation=on; + +CREATE DATABASE mydatabase + WITH OID = 966345; + +CREATE DATABASE mydatabase + WITH strategy file_copy; + +CREATE DATABASE st_wal_log + WITH strategy WaL_LoG; + +SELECT * FROM public.check_database_on_all_nodes('st_wal_log') ORDER BY node_type; + +drop database st_wal_log; + +select 1 from citus_remove_node('localhost', :worker_2_port); + +-- test COLLATION_VERSION + +CREATE DATABASE test_collation_version + WITH ENCODING = 'UTF8' + COLLATION_VERSION = '1.0' + ALLOW_CONNECTIONS = false; + +select 1 from citus_add_node('localhost', :worker_2_port); + +SELECT * FROM public.check_database_on_all_nodes('test_collation_version') ORDER BY node_type; + +drop database test_collation_version; + +SET client_min_messages TO WARNING; +-- test LOCALE_PROVIDER & ICU_LOCALE +CREATE DATABASE test_locale_provider + WITH ENCODING = 'UTF8' + LOCALE_PROVIDER = 'icu' + ICU_LOCALE = 'en_US'; +RESET client_min_messages; + +CREATE DATABASE test_locale_provider + WITH ENCODING = 'UTF8' + LOCALE_PROVIDER = 'libc' + ICU_LOCALE = 'en_US'; + +CREATE DATABASE test_locale_provider + WITH ENCODING = 'UTF8' + LOCALE_PROVIDER = 'libc'; + +SELECT * FROM public.check_database_on_all_nodes('test_locale_provider') ORDER BY node_type; + +\c test_locale_provider - - :worker_2_port + +set citus.enable_create_database_propagation to on; +create database unsupported_option_from_non_main_db with oid = 12345; + +\c regression - - :master_port + +set citus.enable_create_database_propagation to on; +drop database test_locale_provider; + +\c - - - :master_port diff --git a/src/test/regress/sql/create_drop_database_propagation_pg16.sql b/src/test/regress/sql/create_drop_database_propagation_pg16.sql new file mode 100644 index 00000000000..cec55381325 --- /dev/null +++ b/src/test/regress/sql/create_drop_database_propagation_pg16.sql @@ -0,0 +1,22 @@ +-- +-- PG16 +-- +SHOW server_version \gset +SELECT substring(:'server_version', '\d+')::int >= 16 AS server_version_ge_16 +\gset +\if :server_version_ge_16 +\else +\q +\endif + +-- create/drop database for pg >= 16 + +set citus.enable_create_database_propagation=on; + +-- test icu_rules +-- +-- practically we don't support it but better to test + +CREATE DATABASE citus_icu_rules_test WITH icu_rules='de_DE@collation=phonebook'; +CREATE DATABASE citus_icu_rules_test WITH icu_rules='de_DE@collation=phonebook' locale_provider='icu'; +CREATE DATABASE citus_icu_rules_test WITH icu_rules='de_DE@collation=phonebook' locale_provider='icu' icu_locale = 'de_DE'; diff --git a/src/test/regress/sql/create_ref_dist_from_citus_local.sql b/src/test/regress/sql/create_ref_dist_from_citus_local.sql index 7c10abce6ab..e9610d65fbe 100644 --- a/src/test/regress/sql/create_ref_dist_from_citus_local.sql +++ b/src/test/regress/sql/create_ref_dist_from_citus_local.sql @@ -219,8 
+219,8 @@ ROLLBACK; -- Test the UDFs that we use to convert Citus local tables to single-shard tables and -- reference tables. -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', 1, true); -SELECT pg_catalog.citus_internal_delete_placement_metadata(1); +SELECT citus_internal.update_none_dist_table_metadata(1, 't', 1, true); +SELECT citus_internal.delete_placement_metadata(1); CREATE ROLE test_user_create_ref_dist WITH LOGIN; GRANT ALL ON SCHEMA create_ref_dist_from_citus_local TO test_user_create_ref_dist; @@ -234,18 +234,18 @@ SET citus.next_placement_id TO 8510000; SET citus.shard_replication_factor TO 1; SET search_path TO create_ref_dist_from_citus_local; -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(null, 't', 1, true); -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, null, 1, true); -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', null, true); -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', 1, null); +SELECT citus_internal.update_none_dist_table_metadata(null, 't', 1, true); +SELECT citus_internal.update_none_dist_table_metadata(1, null, 1, true); +SELECT citus_internal.update_none_dist_table_metadata(1, 't', null, true); +SELECT citus_internal.update_none_dist_table_metadata(1, 't', 1, null); -SELECT pg_catalog.citus_internal_delete_placement_metadata(null); +SELECT citus_internal.delete_placement_metadata(null); CREATE TABLE udf_test (col_1 int); SELECT citus_add_local_table_to_metadata('udf_test'); BEGIN; - SELECT pg_catalog.citus_internal_update_none_dist_table_metadata('create_ref_dist_from_citus_local.udf_test'::regclass, 'k', 99999, true); + SELECT citus_internal.update_none_dist_table_metadata('create_ref_dist_from_citus_local.udf_test'::regclass, 'k', 99999, true); SELECT COUNT(*)=1 FROM pg_dist_partition WHERE logicalrelid = 'create_ref_dist_from_citus_local.udf_test'::regclass AND repmodel = 'k' AND colocationid = 99999 AND autoconverted = true; @@ -253,7 +253,7 @@ BEGIN; SELECT placementid AS udf_test_placementid FROM pg_dist_shard_placement WHERE shardid = get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.udf_test') \gset - SELECT pg_catalog.citus_internal_delete_placement_metadata(:udf_test_placementid); + SELECT citus_internal.delete_placement_metadata(:udf_test_placementid); SELECT COUNT(*)=0 FROM pg_dist_placement WHERE placementid = :udf_test_placementid; ROLLBACK; diff --git a/src/test/regress/sql/create_role_propagation.sql b/src/test/regress/sql/create_role_propagation.sql index 027e4f72e74..bd2951b175c 100644 --- a/src/test/regress/sql/create_role_propagation.sql +++ b/src/test/regress/sql/create_role_propagation.sql @@ -25,15 +25,10 @@ SELECT master_remove_node('localhost', :worker_2_port); CREATE ROLE create_role_with_everything SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION LIMIT 105 PASSWORD 'strong_password123^' VALID UNTIL '2045-05-05 00:00:00.00+00' IN ROLE create_role, create_group ROLE create_user, create_group_2 ADMIN create_role_2, create_user_2; CREATE ROLE create_role_with_nothing NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT 3 PASSWORD 'weakpassword' VALID UNTIL '2015-05-05 00:00:00.00+00'; --- show that creating role from worker node is only allowed when create role --- propagation is off +-- show that creating role from worker node is allowed \c - - - :worker_1_port CREATE ROLE role_on_worker; - -BEGIN; -SET 
citus.enable_create_role_propagation TO off; -CREATE ROLE role_on_worker; -ROLLBACK; +DROP ROLE role_on_worker; \c - - - :master_port @@ -80,6 +75,8 @@ SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::t \c - - - :master_port +create role test_admin_role; + -- test grants with distributed and non-distributed roles SELECT master_remove_node('localhost', :worker_2_port); @@ -89,6 +86,8 @@ CREATE ROLE dist_role_2; CREATE ROLE dist_role_3; CREATE ROLE dist_role_4; + + SET citus.enable_create_role_propagation TO OFF; CREATE ROLE non_dist_role_1 SUPERUSER; @@ -98,28 +97,71 @@ CREATE ROLE non_dist_role_4; SET citus.enable_create_role_propagation TO ON; + +grant dist_role_3,dist_role_1 to test_admin_role with admin option; + SET ROLE dist_role_1; GRANT non_dist_role_1 TO non_dist_role_2; SET citus.enable_create_role_propagation TO OFF; +grant dist_role_1 to non_dist_role_1 with admin option; SET ROLE non_dist_role_1; -GRANT dist_role_1 TO dist_role_2; +GRANT dist_role_1 TO dist_role_2 granted by non_dist_role_1; RESET ROLE; SET citus.enable_create_role_propagation TO ON; -GRANT dist_role_3 TO non_dist_role_3; + +GRANT dist_role_3 TO non_dist_role_3 granted by test_admin_role; GRANT non_dist_role_4 TO dist_role_4; +GRANT dist_role_3 TO dist_role_4 granted by test_admin_role; + SELECT 1 FROM master_add_node('localhost', :worker_2_port); -SELECT roleid::regrole::text AS role, member::regrole::text, (grantor::regrole::text IN ('postgres', 'non_dist_role_1', 'dist_role_1')) AS grantor, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT json_agg(q.* ORDER BY member) FROM ( + SELECT member::regrole::text, roleid::regrole::text AS role, grantor::regrole::text, admin_option + FROM pg_auth_members WHERE roleid::regrole::text = 'dist_role_3' + ) q; + $$ +); + +REVOKE dist_role_3 from dist_role_4 granted by test_admin_role cascade; + +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT json_agg(q.* ORDER BY member) FROM ( + SELECT member::regrole::text, roleid::regrole::text AS role, grantor::regrole::text, admin_option + FROM pg_auth_members WHERE roleid::regrole::text = 'dist_role_3' + order by member::regrole::text + ) q; + $$ +); + +SELECT roleid::regrole::text AS role, member::regrole::text, (grantor::regrole::text IN ('postgres', 'non_dist_role_1', 'dist_role_1','test_admin_role')) AS grantor, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%dist\_%' ORDER BY 1; +REVOKE dist_role_3 from non_dist_role_3 granted by test_admin_role cascade; + +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT json_agg(q.* ORDER BY member) FROM ( + SELECT member::regrole::text, roleid::regrole::text AS role, grantor::regrole::text, admin_option + FROM pg_auth_members WHERE roleid::regrole::text = 'dist_role_3' + order by member::regrole::text + ) q; + $$ +); + +revoke dist_role_3,dist_role_1 from test_admin_role cascade; +drop role test_admin_role; + \c - - - :worker_1_port SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist\_%' ORDER BY 1; @@ -277,3 +319,5 @@ SELECT rolname FROM pg_authid WHERE rolname LIKE '%existing%' ORDER BY 1; 
\c - - - :worker_1_port SELECT rolname FROM pg_authid WHERE rolname LIKE '%existing%' ORDER BY 1; \c - - - :master_port + +DROP ROLE nondist_cascade_1, nondist_cascade_2, nondist_cascade_3, dist_cascade; diff --git a/src/test/regress/sql/distributed_domain.sql b/src/test/regress/sql/distributed_domain.sql index 5bf3bd6a80b..0850c99ee83 100644 --- a/src/test/regress/sql/distributed_domain.sql +++ b/src/test/regress/sql/distributed_domain.sql @@ -349,10 +349,7 @@ SELECT * FROM use_age_invalid ORDER BY 1; ALTER DOMAIN age_invalid VALIDATE CONSTRAINT check_age_positive; -- test changing the owner of a domain -SET client_min_messages TO error; -SELECT 1 FROM run_command_on_workers($$ CREATE ROLE domain_owner; $$); CREATE ROLE domain_owner; -RESET client_min_messages; CREATE DOMAIN alter_domain_owner AS int; ALTER DOMAIN alter_domain_owner OWNER TO domain_owner; diff --git a/src/test/regress/sql/failure_create_database.sql b/src/test/regress/sql/failure_create_database.sql new file mode 100644 index 00000000000..d117dc81192 --- /dev/null +++ b/src/test/regress/sql/failure_create_database.sql @@ -0,0 +1,128 @@ +SET citus.enable_create_database_propagation TO ON; +SET client_min_messages TO WARNING; + +SELECT 1 FROM citus_add_node('localhost', :master_port, 0); + +CREATE FUNCTION get_temp_databases_on_nodes() +RETURNS TEXT AS $func$ + SELECT array_agg(DISTINCT result ORDER BY result) AS temp_databases_on_nodes FROM run_command_on_all_nodes($$SELECT datname FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$) WHERE result != ''; +$func$ +LANGUAGE sql; + +CREATE FUNCTION count_db_cleanup_records() +RETURNS TABLE(object_name TEXT, count INTEGER) AS $func$ + SELECT object_name, COUNT(*) FROM pg_dist_cleanup WHERE object_name LIKE 'citus_temp_database_%' GROUP BY object_name; +$func$ +LANGUAGE sql; + +CREATE FUNCTION ensure_no_temp_databases_on_any_nodes() +RETURNS BOOLEAN AS $func$ + SELECT bool_and(result::boolean) AS no_temp_databases_on_any_nodes FROM run_command_on_all_nodes($$SELECT COUNT(*)=0 FROM pg_database WHERE datname LIKE 'citus_temp_database_%'$$); +$func$ +LANGUAGE sql; + +-- cleanup any orphaned resources from previous runs +CALL citus_cleanup_orphaned_resources(); + +SET citus.next_operation_id TO 4000; + +ALTER SYSTEM SET citus.defer_shard_delete_interval TO -1; +SELECT pg_reload_conf(); +SELECT pg_sleep(0.1); + +SELECT citus.mitmproxy('conn.kill()'); +CREATE DATABASE db1; +SELECT citus.mitmproxy('conn.allow()'); + +SELECT get_temp_databases_on_nodes(); +SELECT * FROM count_db_cleanup_records(); +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + +SELECT citus.mitmproxy('conn.onQuery(query="^CREATE DATABASE").cancel(' || pg_backend_pid() || ')'); +CREATE DATABASE db1; +SELECT citus.mitmproxy('conn.allow()'); + +SELECT get_temp_databases_on_nodes(); +SELECT * FROM count_db_cleanup_records(); +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + +SELECT citus.mitmproxy('conn.onQuery(query="^ALTER DATABASE").cancel(' || pg_backend_pid() || ')'); +CREATE DATABASE db1; +SELECT citus.mitmproxy('conn.allow()'); + +SELECT get_temp_databases_on_nodes(); +SELECT * FROM count_db_cleanup_records(); +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); +SELECT * FROM 
public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + +SELECT citus.mitmproxy('conn.onQuery(query="^BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED").kill()'); +CREATE DATABASE db1; +SELECT citus.mitmproxy('conn.allow()'); + +SELECT get_temp_databases_on_nodes(); +SELECT * FROM count_db_cleanup_records(); +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + +SELECT citus.mitmproxy('conn.onQuery(query="^PREPARE TRANSACTION").kill()'); +CREATE DATABASE db1; +SELECT citus.mitmproxy('conn.allow()'); + +SELECT get_temp_databases_on_nodes(); +SELECT * FROM count_db_cleanup_records(); +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + +SELECT citus.mitmproxy('conn.onQuery(query="^COMMIT PREPARED").kill()'); +CREATE DATABASE db1; +SELECT citus.mitmproxy('conn.allow()'); + +-- not call citus_cleanup_orphaned_resources() but recover the prepared transactions this time +SELECT 1 FROM recover_prepared_transactions(); +SELECT ensure_no_temp_databases_on_any_nodes(); +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + +DROP DATABASE db1; + +-- after recovering the prepared transactions, cleanup records should also be removed +SELECT * FROM count_db_cleanup_records(); + +SELECT citus.mitmproxy('conn.onQuery(query="^SELECT citus_internal.acquire_citus_advisory_object_class_lock").kill()'); +CREATE DATABASE db1; +SELECT citus.mitmproxy('conn.allow()'); + +SELECT get_temp_databases_on_nodes(); +SELECT * FROM count_db_cleanup_records(); +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + +SELECT citus.mitmproxy('conn.onParse(query="^WITH distributed_object_data").kill()'); +CREATE DATABASE db1; +SELECT citus.mitmproxy('conn.allow()'); + +SELECT get_temp_databases_on_nodes(); +SELECT * FROM count_db_cleanup_records(); +CALL citus_cleanup_orphaned_resources(); +SELECT ensure_no_temp_databases_on_any_nodes(); +SELECT * FROM public.check_database_on_all_nodes($$db1$$) ORDER BY node_type, result; + +CREATE DATABASE db1; + +-- show that a successful database creation doesn't leave any pg_dist_cleanup records behind +SELECT * FROM count_db_cleanup_records(); + +DROP DATABASE db1; + +DROP FUNCTION get_temp_databases_on_nodes(); +DROP FUNCTION ensure_no_temp_databases_on_any_nodes(); +DROP FUNCTION count_db_cleanup_records(); + +SELECT 1 FROM citus_remove_node('localhost', :master_port); diff --git a/src/test/regress/sql/failure_distributed_results.sql b/src/test/regress/sql/failure_distributed_results.sql index 95e4d5513bf..93e4a9a3391 100644 --- a/src/test/regress/sql/failure_distributed_results.sql +++ b/src/test/regress/sql/failure_distributed_results.sql @@ -15,6 +15,8 @@ SET client_min_messages TO WARNING; SELECT citus.mitmproxy('conn.allow()'); SET citus.next_shard_id TO 100800; +-- Needed because of issue #7306 +SET citus.force_max_query_parallelization TO true; -- always try the 1st replica before the 2nd replica. 
SET citus.task_assignment_policy TO 'first-replica'; diff --git a/src/test/regress/sql/failure_mx_metadata_sync.sql b/src/test/regress/sql/failure_mx_metadata_sync.sql index 90e882fe519..d8f82296f18 100644 --- a/src/test/regress/sql/failure_mx_metadata_sync.sql +++ b/src/test/regress/sql/failure_mx_metadata_sync.sql @@ -56,10 +56,10 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port; -- Check failures on DDL command propagation CREATE TABLE t2 (id int PRIMARY KEY); -SELECT citus.mitmproxy('conn.onParse(query="citus_internal_add_placement_metadata").kill()'); +SELECT citus.mitmproxy('conn.onParse(query="citus_internal.add_placement_metadata").kill()'); SELECT create_distributed_table('t2', 'id'); -SELECT citus.mitmproxy('conn.onParse(query="citus_internal_add_shard_metadata").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onParse(query="citus_internal.add_shard_metadata").cancel(' || :pid || ')'); SELECT create_distributed_table('t2', 'id'); -- Verify that the table was not distributed diff --git a/src/test/regress/sql/failure_mx_metadata_sync_multi_trans.sql b/src/test/regress/sql/failure_mx_metadata_sync_multi_trans.sql index c0e575c14e9..afe6a64e905 100644 --- a/src/test/regress/sql/failure_mx_metadata_sync_multi_trans.sql +++ b/src/test/regress/sql/failure_mx_metadata_sync_multi_trans.sql @@ -279,33 +279,33 @@ SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE mx_metadata_sync_multi_t SELECT citus_activate_node('localhost', :worker_2_proxy_port); -- Failure to add partition metadata -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_partition_metadata").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_partition_metadata").cancel(' || :pid || ')'); SELECT citus_activate_node('localhost', :worker_2_proxy_port); -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_partition_metadata").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_partition_metadata").kill()'); SELECT citus_activate_node('localhost', :worker_2_proxy_port); -- Failure to add shard metadata -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_shard_metadata").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_shard_metadata").cancel(' || :pid || ')'); SELECT citus_activate_node('localhost', :worker_2_proxy_port); -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_shard_metadata").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_shard_metadata").kill()'); SELECT citus_activate_node('localhost', :worker_2_proxy_port); -- Failure to add placement metadata -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_placement_metadata").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_placement_metadata").cancel(' || :pid || ')'); SELECT citus_activate_node('localhost', :worker_2_proxy_port); -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_placement_metadata").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_placement_metadata").kill()'); SELECT citus_activate_node('localhost', :worker_2_proxy_port); -- Failure to add colocation metadata -SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.citus_internal_add_colocation_metadata").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT 
citus_internal.add_colocation_metadata").cancel(' || :pid || ')'); SELECT citus_activate_node('localhost', :worker_2_proxy_port); -SELECT citus.mitmproxy('conn.onQuery(query="SELECT pg_catalog.citus_internal_add_colocation_metadata").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_colocation_metadata").kill()'); SELECT citus_activate_node('localhost', :worker_2_proxy_port); -- Failure to add distributed object metadata -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_object_metadata").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_object_metadata").cancel(' || :pid || ')'); SELECT citus_activate_node('localhost', :worker_2_proxy_port); -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_object_metadata").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_object_metadata").kill()'); SELECT citus_activate_node('localhost', :worker_2_proxy_port); -- Failure to mark function as distributed diff --git a/src/test/regress/sql/failure_non_main_db_2pc.sql b/src/test/regress/sql/failure_non_main_db_2pc.sql new file mode 100644 index 00000000000..74061ae3488 --- /dev/null +++ b/src/test/regress/sql/failure_non_main_db_2pc.sql @@ -0,0 +1,75 @@ +SELECT citus.mitmproxy('conn.allow()'); + +CREATE SCHEMA failure_non_main_db_2pc; +SET SEARCH_PATH TO 'failure_non_main_db_2pc'; + +CREATE DATABASE other_db1; + +SELECT citus.mitmproxy('conn.onQuery(query="COMMIT PREPARED").kill()'); + +\c other_db1 + +CREATE USER user_1; + +\c regression + +SELECT citus.mitmproxy('conn.allow()'); + +SELECT nodeid, result FROM run_command_on_all_nodes($$SELECT rolname FROM pg_roles WHERE rolname::TEXT = 'user_1'$$) ORDER BY 1; + +SELECT recover_prepared_transactions(); + +SELECT nodeid, result FROM run_command_on_all_nodes($$SELECT rolname FROM pg_roles WHERE rolname::TEXT = 'user_1'$$) ORDER BY 1; + + +SELECT citus.mitmproxy('conn.onQuery(query="CREATE USER user_2").kill()'); + +\c other_db1 + +CREATE USER user_2; + +\c regression + +SELECT citus.mitmproxy('conn.allow()'); + +SELECT nodeid, result FROM run_command_on_all_nodes($$SELECT rolname FROM pg_roles WHERE rolname::TEXT = 'user_2'$$) ORDER BY 1; + +SELECT recover_prepared_transactions(); + +SELECT nodeid, result FROM run_command_on_all_nodes($$SELECT rolname FROM pg_roles WHERE rolname::TEXT = 'user_2'$$) ORDER BY 1; + +DROP DATABASE other_db1; +-- user_2 should not exist because the query to create it will fail +-- but let's make sure we try to drop it just in case +DROP USER IF EXISTS user_1, user_2; + +SELECT citus_set_coordinator_host('localhost'); + +\c - - - :worker_1_port + +CREATE DATABASE other_db2; + +SELECT citus.mitmproxy('conn.onQuery(query="COMMIT PREPARED").kill()'); + +\c other_db2 + +CREATE USER user_3; + +\c regression + +SELECT citus.mitmproxy('conn.allow()'); + +SELECT result FROM run_command_on_all_nodes($$SELECT rolname FROM pg_roles WHERE rolname::TEXT = 'user_3'$$) ORDER BY 1; + +SELECT recover_prepared_transactions(); + +SELECT result FROM run_command_on_all_nodes($$SELECT rolname FROM pg_roles WHERE rolname::TEXT = 'user_3'$$) ORDER BY 1; + +DROP DATABASE other_db2; +DROP USER user_3; + +\c - - - :master_port + +SELECT result FROM run_command_on_all_nodes($$DELETE FROM pg_dist_node WHERE groupid = 0$$); + +DROP SCHEMA failure_non_main_db_2pc; diff --git a/src/test/regress/sql/failure_on_create_subscription.sql b/src/test/regress/sql/failure_on_create_subscription.sql index 
3a0ae3b5e7d..60af71e4742 100644 --- a/src/test/regress/sql/failure_on_create_subscription.sql +++ b/src/test/regress/sql/failure_on_create_subscription.sql @@ -34,9 +34,9 @@ SELECT * FROM shards_in_workers; -- Failure on creating the subscription -- Failing exactly on CREATE SUBSCRIPTION is causing flaky test where we fail with either: --- 1) ERROR: connection to the remote node localhost:xxxxx failed with the following error: ERROR: subscription "citus_shard_move_subscription_xxxxxxx" does not exist +-- 1) ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: ERROR: subscription "citus_shard_move_subscription_xxxxxxx" does not exist -- another command is already in progress --- 2) ERROR: connection to the remote node localhost:xxxxx failed with the following error: another command is already in progress +-- 2) ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: another command is already in progress -- Instead fail on the next step (ALTER SUBSCRIPTION) instead which is also required logically as part of uber CREATE SUBSCRIPTION operation. SELECT citus.mitmproxy('conn.onQuery(query="ALTER SUBSCRIPTION").kill()'); diff --git a/src/test/regress/sql/failure_split_cleanup.sql b/src/test/regress/sql/failure_split_cleanup.sql index 1b85d3d171e..cefeeb05db2 100644 --- a/src/test/regress/sql/failure_split_cleanup.sql +++ b/src/test/regress/sql/failure_split_cleanup.sql @@ -38,7 +38,7 @@ SELECT create_distributed_table('table_to_split', 'id'); ARRAY[:worker_1_node, :worker_2_node], 'force_logical'); SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; -- we need to allow connection so that we can connect to proxy SELECT citus.mitmproxy('conn.allow()'); @@ -58,7 +58,7 @@ SELECT create_distributed_table('table_to_split', 'id'); \c - postgres - :master_port SELECT public.wait_for_resource_cleanup(); SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; \c - - - :worker_2_proxy_port SET search_path TO "citus_failure_split_cleanup_schema", public, pg_catalog; @@ -90,7 +90,7 @@ SELECT create_distributed_table('table_to_split', 'id'); RESET client_min_messages; SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; -- we need to allow connection so that we can connect to proxy SELECT citus.mitmproxy('conn.allow()'); @@ -109,7 +109,7 @@ SELECT create_distributed_table('table_to_split', 'id'); \c - postgres - :master_port SELECT public.wait_for_resource_cleanup(); SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; \c - - - :worker_2_proxy_port SET search_path TO "citus_failure_split_cleanup_schema", public, pg_catalog; @@ -136,7 +136,7 @@ SELECT create_distributed_table('table_to_split', 'id'); ARRAY[:worker_1_node, :worker_2_node], 'force_logical'); SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM 
pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; -- we need to allow connection so that we can connect to proxy SELECT citus.mitmproxy('conn.allow()'); @@ -155,7 +155,7 @@ SELECT create_distributed_table('table_to_split', 'id'); \c - postgres - :master_port SELECT public.wait_for_resource_cleanup(); SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; \c - - - :worker_2_proxy_port SET search_path TO "citus_failure_split_cleanup_schema", public, pg_catalog; @@ -182,7 +182,7 @@ SELECT create_distributed_table('table_to_split', 'id'); ARRAY[:worker_1_node, :worker_2_node], 'force_logical'); SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; -- we need to allow connection so that we can connect to proxy SELECT citus.mitmproxy('conn.allow()'); @@ -201,7 +201,7 @@ SELECT create_distributed_table('table_to_split', 'id'); \c - postgres - :master_port SELECT public.wait_for_resource_cleanup(); SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; \c - - - :worker_2_proxy_port SET search_path TO "citus_failure_split_cleanup_schema", public, pg_catalog; @@ -228,7 +228,7 @@ SELECT create_distributed_table('table_to_split', 'id'); ARRAY[:worker_1_node, :worker_2_node], 'force_logical'); SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; -- we need to allow connection so that we can connect to proxy SELECT citus.mitmproxy('conn.allow()'); @@ -247,7 +247,7 @@ SELECT create_distributed_table('table_to_split', 'id'); \c - postgres - :master_port SELECT public.wait_for_resource_cleanup(); SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; \c - - - :worker_2_proxy_port SET search_path TO "citus_failure_split_cleanup_schema", public, pg_catalog; @@ -275,7 +275,7 @@ SELECT create_distributed_table('table_to_split', 'id'); 'force_logical'); SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; SELECT relname FROM pg_class where relname LIKE '%table_to_split_%' AND relkind = 'r' order by relname; -- we need to allow connection so that we can connect to proxy SELECT citus.mitmproxy('conn.allow()'); @@ -295,7 +295,7 @@ SELECT create_distributed_table('table_to_split', 'id'); \c - postgres - :master_port SELECT public.wait_for_resource_cleanup(); SELECT operation_id, object_type, object_name, node_group_id, policy_type - FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name; + FROM pg_dist_cleanup where operation_id = 777 ORDER BY object_name, node_group_id; \c - - 
- :worker_2_proxy_port SET search_path TO "citus_failure_split_cleanup_schema", public, pg_catalog; diff --git a/src/test/regress/sql/foreign_key_to_reference_shard_rebalance.sql b/src/test/regress/sql/foreign_key_to_reference_shard_rebalance.sql index 7791001e05f..b1017768d6b 100644 --- a/src/test/regress/sql/foreign_key_to_reference_shard_rebalance.sql +++ b/src/test/regress/sql/foreign_key_to_reference_shard_rebalance.sql @@ -84,6 +84,7 @@ create table partitioned_tbl_with_fkey (x int, y int, t timestamptz default now( select create_distributed_table('partitioned_tbl_with_fkey','x'); create table partition_1_with_fkey partition of partitioned_tbl_with_fkey for values from ('2022-01-01') to ('2022-12-31'); create table partition_2_with_fkey partition of partitioned_tbl_with_fkey for values from ('2023-01-01') to ('2023-12-31'); +create table partition_3_with_fkey partition of partitioned_tbl_with_fkey DEFAULT; insert into partitioned_tbl_with_fkey (x,y) select s,s%10 from generate_series(1,100) s; ALTER TABLE partitioned_tbl_with_fkey ADD CONSTRAINT fkey_to_ref_tbl FOREIGN KEY (y) REFERENCES ref_table_with_fkey(id); diff --git a/src/test/regress/sql/function_with_case_when.sql b/src/test/regress/sql/function_with_case_when.sql new file mode 100644 index 00000000000..03c6678e435 --- /dev/null +++ b/src/test/regress/sql/function_with_case_when.sql @@ -0,0 +1,27 @@ +CREATE SCHEMA function_with_case; +SET search_path TO function_with_case; + +-- create function +CREATE OR REPLACE FUNCTION test_err(v1 text) + RETURNS text + LANGUAGE plpgsql + SECURITY DEFINER +AS $function$ + +begin + return v1 || ' - ok'; +END; +$function$; +do $$ declare + lNewValues text; + val text; +begin + val = 'test'; + lNewValues = test_err(v1 => case when val::text = 'test'::text then 'yes' else 'no' end); + raise notice 'lNewValues= %', lNewValues; +end;$$ ; + +-- call function +SELECT test_err('test'); + +DROP SCHEMA function_with_case CASCADE; diff --git a/src/test/regress/sql/global_cancel.sql b/src/test/regress/sql/global_cancel.sql index 848c3b01aa9..12330baf235 100644 --- a/src/test/regress/sql/global_cancel.sql +++ b/src/test/regress/sql/global_cancel.sql @@ -5,9 +5,9 @@ RESET client_min_messages; -- Kill maintenance daemon so it gets restarted and gets a gpid containing our -- nodeid -SELECT pg_terminate_backend(pid) +SELECT COUNT(pg_terminate_backend(pid)) >= 0 FROM pg_stat_activity -WHERE application_name = 'Citus Maintenance Daemon' \gset +WHERE application_name = 'Citus Maintenance Daemon'; -- reconnect to make sure we get a session with the gpid containing our nodeid \c - - - - @@ -58,6 +58,8 @@ SELECT pg_cancel_backend(citus_backend_gpid()); \c - postgres - :master_port +DROP USER global_cancel_user; + SET client_min_messages TO DEBUG; -- 10000000000 is the node id multiplier for global pid diff --git a/src/test/regress/sql/grant_on_database_propagation_from_non_maindb.sql b/src/test/regress/sql/grant_on_database_propagation_from_non_maindb.sql new file mode 100644 index 00000000000..f83472b3655 --- /dev/null +++ b/src/test/regress/sql/grant_on_database_propagation_from_non_maindb.sql @@ -0,0 +1,246 @@ +-- Public role has connect,temp,temporary privileges on database +-- To test these scenarios, we need to revoke these privileges from public role +-- since public role privileges are inherited by new roles/users +set citus.enable_create_database_propagation to on; +create database test_2pc_db; +show citus.main_db; +revoke connect,temp,temporary on database test_2pc_db from public; + +CREATE SCHEMA 
grant_on_database_propagation_non_maindb; +SET search_path TO grant_on_database_propagation_non_maindb; + +-- test grant/revoke CREATE privilege propagation on database +create user "myuser'_test"; + +\c test_2pc_db - - :master_port +grant create on database test_2pc_db to "myuser'_test"; + +\c regression - - :master_port; +select check_database_privileges('myuser''_test','test_2pc_db',ARRAY['CREATE']); + +\c test_2pc_db - - :master_port +revoke create on database test_2pc_db from "myuser'_test"; + +\c regression - - :master_port; +select check_database_privileges('myuser''_test','test_2pc_db',ARRAY['CREATE']); + +drop user "myuser'_test"; +----------------------------------------------------------------------- + +-- test grant/revoke CONNECT privilege propagation on database +\c regression - - :master_port +create user myuser2; + +\c test_2pc_db - - :master_port +grant CONNECT on database test_2pc_db to myuser2; + +\c regression - - :master_port; +select check_database_privileges('myuser2','test_2pc_db',ARRAY['CONNECT']); + +\c test_2pc_db - - :master_port +revoke connect on database test_2pc_db from myuser2; + +\c regression - - :master_port +select check_database_privileges('myuser2','test_2pc_db',ARRAY['CONNECT']); + +drop user myuser2; + +----------------------------------------------------------------------- + +-- test grant/revoke TEMP privilege propagation on database +\c regression - - :master_port +create user myuser3; + +-- test grant/revoke temp on database +\c test_2pc_db - - :master_port +grant TEMP on database test_2pc_db to myuser3; + +\c regression - - :master_port; +select check_database_privileges('myuser3','test_2pc_db',ARRAY['TEMP']); + + +\c test_2pc_db - - :worker_1_port +revoke TEMP on database test_2pc_db from myuser3; + +\c regression - - :master_port; +select check_database_privileges('myuser3','test_2pc_db',ARRAY['TEMP']); + +drop user myuser3; + +----------------------------------------------------------------------- + +\c regression - - :master_port +-- test temporary privilege on database +create user myuser4; + +-- test grant/revoke temporary on database +\c test_2pc_db - - :worker_1_port +grant TEMPORARY on database test_2pc_db to myuser4; + +\c regression - - :master_port +select check_database_privileges('myuser4','test_2pc_db',ARRAY['TEMPORARY']); + +\c test_2pc_db - - :master_port +revoke TEMPORARY on database test_2pc_db from myuser4; + +\c regression - - :master_port; +select check_database_privileges('myuser4','test_2pc_db',ARRAY['TEMPORARY']); + +drop user myuser4; +----------------------------------------------------------------------- + +-- test ALL privileges with ALL statement on database +create user myuser5; + +grant ALL on database test_2pc_db to myuser5; + +\c regression - - :master_port +select check_database_privileges('myuser5','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + + +\c test_2pc_db - - :master_port +revoke ALL on database test_2pc_db from myuser5; + +\c regression - - :master_port +select check_database_privileges('myuser5','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + +drop user myuser5; +----------------------------------------------------------------------- + +-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database +create user myuser6; + +\c test_2pc_db - - :master_port +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser6; + +\c regression - - :master_port +select check_database_privileges('myuser6','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 
'TEMPORARY']);
+
+\c test_2pc_db - - :master_port
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser6;
+
+\c regression - - :master_port
+select check_database_privileges('myuser6','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+
+
+drop user myuser6;
+-----------------------------------------------------------------------
+
+-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database with grant option
+create user myuser7;
+create user myuser_1;
+
+\c test_2pc_db - - :master_port
+grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser7;
+
+set role myuser7;
+-- since myuser7 does not have the grant option, this grant should fail
+grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser_1;
+
+\c regression - - :master_port
+select check_database_privileges('myuser_1','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+
+\c test_2pc_db - - :master_port
+
+RESET ROLE;
+
+grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser7 with grant option;
+set role myuser7;
+
+-- since myuser7 now has the grant option, this grant should succeed
+grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser_1 granted by myuser7;
+
+\c regression - - :master_port
+select check_database_privileges('myuser_1','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+
+\c test_2pc_db - - :master_port
+
+RESET ROLE;
+
+-- the revoke below should fail with an error since myuser_1 still has the dependent privileges
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 restrict;
+-- the revoke below should also fail with an error since myuser_1 still has the dependent privileges
+revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 restrict ;
+
+-- the revoke below should succeed without an error since myuser_1's privileges are revoked with cascade
+revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 cascade ;
+
+-- here we test whether myuser7 still has the privileges after the grant option was revoked
+
+\c regression - - :master_port
+select check_database_privileges('myuser7','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+
+
+\c test_2pc_db - - :master_port
+
+reset role;
+
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7;
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser_1;
+
+\c regression - - :master_port
+drop user myuser_1;
+drop user myuser7;
+
+-----------------------------------------------------------------------
+
+-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one with multiple databases
+-- and multiple users
+\c regression - - :master_port
+create user myuser8;
+create user myuser_2;
+
+set citus.enable_create_database_propagation to on;
+create database test_db;
+
+revoke connect,temp,temporary on database test_db from public;
+
+\c test_2pc_db - - :master_port
+grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db to myuser8,myuser_2;
+
+\c regression - - :master_port
+select check_database_privileges('myuser8','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+select check_database_privileges('myuser8','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+select check_database_privileges('myuser_2','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+select check_database_privileges('myuser_2','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+
+
+\c test_2pc_db
- - :master_port + +RESET ROLE; +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser8 ; + +--below test should succeed and should not throw any error +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser_2; + +--below test should succeed and should not throw any error +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser8 cascade; + +\c regression - - :master_port +select check_database_privileges('myuser8','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); +select check_database_privileges('myuser8','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); +select check_database_privileges('myuser_2','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); +select check_database_privileges('myuser_2','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + + +\c test_2pc_db - - :master_port + +reset role; + +\c regression - - :master_port +drop user myuser_2; +drop user myuser8; + +set citus.enable_create_database_propagation to on; +drop database test_db; + +--------------------------------------------------------------------------- +-- rollbacks public role database privileges to original state +grant connect,temp,temporary on database test_2pc_db to public; +drop database test_2pc_db; +set citus.enable_create_database_propagation to off; +DROP SCHEMA grant_on_database_propagation_non_maindb CASCADE; + +reset citus.enable_create_database_propagation; +reset search_path; +--------------------------------------------------------------------------- diff --git a/src/test/regress/sql/grant_role_from_non_maindb.sql b/src/test/regress/sql/grant_role_from_non_maindb.sql new file mode 100644 index 00000000000..b74b5092d20 --- /dev/null +++ b/src/test/regress/sql/grant_role_from_non_maindb.sql @@ -0,0 +1,147 @@ +CREATE SCHEMA grant_role2pc; +SET search_path TO grant_role2pc; +set citus.enable_create_database_propagation to on; + +CREATE DATABASE grant_role2pc_db; + +\c grant_role2pc_db +SHOW citus.main_db; + +SET citus.superuser TO 'postgres'; +CREATE USER grant_role2pc_user1; +CREATE USER grant_role2pc_user2; +CREATE USER grant_role2pc_user3; +CREATE USER grant_role2pc_user4; +CREATE USER grant_role2pc_user5; +CREATE USER grant_role2pc_user6; +CREATE USER grant_role2pc_user7; + +\c grant_role2pc_db + +--test with empty superuser +SET citus.superuser TO ''; +grant grant_role2pc_user1 to grant_role2pc_user2; + +SET citus.superuser TO 'postgres'; +grant grant_role2pc_user1 to grant_role2pc_user2 with admin option granted by CURRENT_USER; + +\c regression + +select result FROM run_command_on_all_nodes( + $$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text = 'grant_role2pc_user2' + order by member::regrole::text, roleid::regrole::text + ) t + $$ +); + +\c grant_role2pc_db +--test grant under transactional context with multiple operations +BEGIN; +grant grant_role2pc_user1,grant_role2pc_user2 to grant_role2pc_user3 WITH ADMIN OPTION; +grant grant_role2pc_user1 to grant_role2pc_user4 granted by grant_role2pc_user3 ; +COMMIT; + +BEGIN; +grant grant_role2pc_user1 to grant_role2pc_user5 WITH ADMIN OPTION granted by grant_role2pc_user3; +grant grant_role2pc_user1 to grant_role2pc_user6; +ROLLBACK; + + + +BEGIN; +grant grant_role2pc_user1 to grant_role2pc_user7; +SELECT 1/0; +commit; + + +\c regression + +select result FROM run_command_on_all_nodes($$ 
+SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user3','grant_role2pc_user4','grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7') + order by member::regrole::text, roleid::regrole::text +) t +$$); + + +\c grant_role2pc_db + +grant grant_role2pc_user1,grant_role2pc_user2 to grant_role2pc_user5,grant_role2pc_user6,grant_role2pc_user7 granted by grant_role2pc_user3; + +\c regression + +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7') + order by member::regrole::text, roleid::regrole::text +) t +$$); + +\c grant_role2pc_db +revoke admin option for grant_role2pc_user1 from grant_role2pc_user5 granted by grant_role2pc_user3; + +--test revoke under transactional context with multiple operations +BEGIN; +revoke grant_role2pc_user1 from grant_role2pc_user5 granted by grant_role2pc_user3 ; +revoke grant_role2pc_user1 from grant_role2pc_user4 granted by grant_role2pc_user3; +COMMIT; +\c grant_role2pc_db - - :worker_1_port +BEGIN; +revoke grant_role2pc_user1 from grant_role2pc_user6,grant_role2pc_user7 granted by grant_role2pc_user3; +revoke grant_role2pc_user1 from grant_role2pc_user3 cascade; +COMMIT; + +\c regression + +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user2','grant_role2pc_user3','grant_role2pc_user4','grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7') + order by member::regrole::text, roleid::regrole::text +) t +$$); + +\c grant_role2pc_db - - :worker_1_port +BEGIN; +grant grant_role2pc_user1 to grant_role2pc_user5 WITH ADMIN OPTION; +grant grant_role2pc_user1 to grant_role2pc_user6; +COMMIT; + +\c regression - - :master_port + +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user5','grant_role2pc_user6') + order by member::regrole::text, roleid::regrole::text +) t +$$); + +revoke grant_role2pc_user1 from grant_role2pc_user5,grant_role2pc_user6; + +--clean resources +DROP SCHEMA grant_role2pc; +set citus.enable_create_database_propagation to on; +DROP DATABASE grant_role2pc_db; +drop user grant_role2pc_user2,grant_role2pc_user3,grant_role2pc_user4,grant_role2pc_user5,grant_role2pc_user6,grant_role2pc_user7; +drop user grant_role2pc_user1; +reset citus.enable_create_database_propagation; diff --git a/src/test/regress/sql/insert_select_connection_leak.sql b/src/test/regress/sql/insert_select_connection_leak.sql index 05afb10a0f0..e138f6c4da4 100644 --- a/src/test/regress/sql/insert_select_connection_leak.sql +++ b/src/test/regress/sql/insert_select_connection_leak.sql @@ -33,12 +33,12 @@ INSERT INTO target_table SELECT * FROM source_table; INSERT INTO target_table SELECT * FROM source_table; INSERT INTO target_table SELECT * FROM source_table; INSERT INTO target_table SELECT * FROM source_table; -SELECT 
worker_connection_count(:worker_1_port) - :worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :worker_2_connections) AS leaked_worker_2_connections; END; -SELECT worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections) AS leaked_worker_2_connections; -- ROLLBACK BEGIN; @@ -46,8 +46,8 @@ INSERT INTO target_table SELECT * FROM source_table; INSERT INTO target_table SELECT * FROM source_table; ROLLBACK; -SELECT worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections) AS leaked_worker_2_connections; \set VERBOSITY TERSE @@ -59,12 +59,12 @@ SELECT worker_connection_count(:worker_1_port) AS worker_1_connections, SAVEPOINT s1; INSERT INTO target_table SELECT a, CASE WHEN a < 50 THEN b ELSE null END FROM source_table; ROLLBACK TO SAVEPOINT s1; -SELECT worker_connection_count(:worker_1_port) - :worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :worker_2_connections) AS leaked_worker_2_connections; END; -SELECT worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections AS leaked_worker_1_connections, - worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections AS leaked_worker_2_connections; +SELECT GREATEST(0, worker_connection_count(:worker_1_port) - :pre_xact_worker_1_connections) AS leaked_worker_1_connections, + GREATEST(0, worker_connection_count(:worker_2_port) - :pre_xact_worker_2_connections) AS leaked_worker_2_connections; SET client_min_messages TO WARNING; DROP SCHEMA insert_select_connection_leak CASCADE; diff --git a/src/test/regress/sql/issue_7477.sql b/src/test/regress/sql/issue_7477.sql new file mode 100644 index 00000000000..b9c1578e9b9 --- /dev/null +++ b/src/test/regress/sql/issue_7477.sql @@ -0,0 +1,44 @@ + +--- Test for updating a table that has a foreign key reference to another reference table. 
+--- Issue #7477: Distributed deadlock after issuing a simple UPDATE statement +--- https://github.com/citusdata/citus/issues/7477 + +CREATE TABLE table1 (id INT PRIMARY KEY); +SELECT create_reference_table('table1'); +INSERT INTO table1 VALUES (1); + +CREATE TABLE table2 ( + id INT, + info TEXT, + CONSTRAINT table1_id_fk FOREIGN KEY (id) REFERENCES table1 (id) + ); +SELECT create_reference_table('table2'); +INSERT INTO table2 VALUES (1, 'test'); + +--- Runs the update command in parallel on workers. +--- Due to bug #7477, before the fix, the result is non-deterministic +--- and have several rows of the form: +--- localhost | 57638 | f | ERROR: deadlock detected +--- localhost | 57637 | f | ERROR: deadlock detected +--- localhost | 57637 | f | ERROR: canceling the transaction since it was involved in a distributed deadlock + +SELECT * FROM master_run_on_worker( + ARRAY['localhost', 'localhost','localhost', 'localhost','localhost', + 'localhost','localhost', 'localhost','localhost', 'localhost']::text[], + ARRAY[57638, 57637, 57637, 57638, 57637, 57638, 57637, 57638, 57638, 57637]::int[], + ARRAY['UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1', + 'UPDATE table2 SET info = ''test_update'' WHERE id = 1' + ]::text[], + true); + +--- cleanup +DROP TABLE table2; +DROP TABLE table1; diff --git a/src/test/regress/sql/limit_intermediate_size.sql b/src/test/regress/sql/limit_intermediate_size.sql index 8f64c31fd06..38ef734e7eb 100644 --- a/src/test/regress/sql/limit_intermediate_size.sql +++ b/src/test/regress/sql/limit_intermediate_size.sql @@ -17,7 +17,8 @@ cte2 AS MATERIALIZED ( SELECT cte.user_id, cte.value_2 FROM cte,cte2 ORDER BY 1,2 LIMIT 10; -SET citus.max_intermediate_result_size TO 17; +SET citus.max_intermediate_result_size TO 9; +-- regular adaptive executor CTE should fail WITH cte AS MATERIALIZED ( SELECT diff --git a/src/test/regress/sql/logical_replication.sql b/src/test/regress/sql/logical_replication.sql index 3f8e048ca4b..a85c70b0852 100644 --- a/src/test/regress/sql/logical_replication.sql +++ b/src/test/regress/sql/logical_replication.sql @@ -35,17 +35,17 @@ CREATE SUBSCRIPTION citus_shard_move_subscription_:postgres_oid WITH (enabled=false, slot_name=citus_shard_move_slot_:postgres_oid); -SELECT count(*) from pg_subscription; -SELECT count(*) from pg_publication; -SELECT count(*) from pg_replication_slots; +SELECT subname from pg_subscription; +SELECT pubname from pg_publication; +SELECT slot_name from pg_replication_slots; SELECT count(*) FROM dist; \c - - - :worker_1_port SET search_path TO logical_replication; -SELECT count(*) from pg_subscription; -SELECT count(*) from pg_publication; -SELECT count(*) from pg_replication_slots; +SELECT subname from pg_subscription; +SELECT pubname from pg_publication; +SELECT slot_name from pg_replication_slots; SELECT count(*) FROM dist; \c - - - :master_port @@ -53,11 +53,13 @@ SET search_path TO logical_replication; select citus_move_shard_placement(6830002, 'localhost', :worker_1_port, 'localhost', :worker_2_port, 'force_logical'); +SELECT 
public.wait_for_resource_cleanup(); + -- the subscription is still there, as there is no cleanup record for it -- we have created it manually -SELECT count(*) from pg_subscription; -SELECT count(*) from pg_publication; -SELECT count(*) from pg_replication_slots; +SELECT subname from pg_subscription; +SELECT pubname from pg_publication; +SELECT slot_name from pg_replication_slots; SELECT count(*) from dist; \c - - - :worker_1_port @@ -65,9 +67,9 @@ SET search_path TO logical_replication; -- the publication and repslot are still there, as there are no cleanup records for them -- we have created them manually -SELECT count(*) from pg_subscription; -SELECT count(*) from pg_publication; -SELECT count(*) from pg_replication_slots; +SELECT subname from pg_subscription; +SELECT pubname from pg_publication; +SELECT slot_name from pg_replication_slots; SELECT count(*) from dist; DROP PUBLICATION citus_shard_move_publication_:postgres_oid; @@ -76,9 +78,9 @@ SELECT pg_drop_replication_slot('citus_shard_move_slot_' || :postgres_oid); \c - - - :worker_2_port SET search_path TO logical_replication; -SELECT count(*) from pg_subscription; -SELECT count(*) from pg_publication; -SELECT count(*) from pg_replication_slots; +SELECT subname from pg_subscription; +SELECT pubname from pg_publication; +SELECT slot_name from pg_replication_slots; SELECT count(*) from dist; \c - - - :master_port diff --git a/src/test/regress/sql/merge.sql b/src/test/regress/sql/merge.sql index a41e8084145..5316b5233ae 100644 --- a/src/test/regress/sql/merge.sql +++ b/src/test/regress/sql/merge.sql @@ -1206,6 +1206,139 @@ SET citus.log_remote_commands to false; SELECT compare_tables(); ROLLBACK; + +-- let's create source and target tables +ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 13000; +CREATE TABLE source_pushdowntest (id integer); +CREATE TABLE target_pushdowntest (id integer ); + +-- let's distribute both tables on the id field +SELECT create_distributed_table('source_pushdowntest', 'id'); +SELECT create_distributed_table('target_pushdowntest', 'id'); + +-- we are running this on a single-node setup, so let's check the colocation id of both tables. +-- both have the same colocation id, so they are colocated. +WITH colocations AS ( + SELECT colocationid + FROM pg_dist_partition + WHERE logicalrelid = 'source_pushdowntest'::regclass + OR logicalrelid = 'target_pushdowntest'::regclass +) +SELECT + CASE + WHEN COUNT(DISTINCT colocationid) = 1 THEN 'Same' + ELSE 'Different' + END AS colocation_status +FROM colocations; + +SET client_min_messages TO DEBUG1; +-- Test 1 : tables are colocated AND query is multisharded AND Join On distributed column : should push down to workers. + +EXPLAIN (costs off, timing off, summary off) +MERGE INTO target_pushdowntest t +USING source_pushdowntest s +ON t.id = s.id +WHEN NOT MATCHED THEN + INSERT (id) + VALUES (s.id); + +-- Test 2 : tables are colocated AND source query is not multisharded : should push down to worker. +-- DEBUG logs show that the query is getting pushed down +MERGE INTO target_pushdowntest t +USING (SELECT * from source_pushdowntest where id = 1) s +on t.id = s.id +WHEN NOT MATCHED THEN + INSERT (id) + VALUES (s.id); + + +-- Test 3 : tables are colocated AND source query is single sharded but does not use the source distributed column for the inserted value : should not push down.
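+-- Here the value written into the target's distribution column comes from a constant (s.somekey)
+-- rather than from the source's distribution column, so colocation of the new row with the source
+-- shard cannot be guaranteed and the MERGE is not pushed down.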
+INSERT INTO source_pushdowntest (id) VALUES (3); + +EXPLAIN (costs off, timing off, summary off) +MERGE INTO target_pushdowntest t +USING (SELECT 1 as somekey, id from source_pushdowntest where id = 1) s +on t.id = s.somekey +WHEN NOT MATCHED THEN + INSERT (id) + VALUES (s.somekey); + + +-- let's verify that when we use some other column from the source as the value of the target's distribution column, +-- the row is inserted into the correct shard of the target. +CREATE TABLE source_withdata (id integer, some_number integer); +CREATE TABLE target_table (id integer, name text); +SELECT create_distributed_table('source_withdata', 'id'); +SELECT create_distributed_table('target_table', 'id'); + +INSERT INTO source_withdata (id, some_number) VALUES (1, 3); + +-- we will use the some_number column from source_withdata as the value of the target's distribution column. +-- the value of some_number is 3; let's verify which shard it should go to. +select worker_hash(3); + +-- it should go to the second shard of the target, as the target has 4 shards and hash "-28094569" falls in the range of the second shard. +MERGE INTO target_table t +USING (SELECT id, some_number from source_withdata where id = 1) s +on t.id = s.some_number +WHEN NOT MATCHED THEN + INSERT (id, name) + VALUES (s.some_number, 'parag'); + +-- let's verify that the data was inserted into the second shard of the target. +EXPLAIN (analyze on, costs off, timing off, summary off) SELECT * FROM target_table; + +-- let's verify target data too. +SELECT * FROM target_table; + + +-- test UPDATE : when source is single sharded and tables are colocated +MERGE INTO target_table t +USING (SELECT id, some_number from source_withdata where id = 1) s +on t.id = s.some_number +WHEN MATCHED THEN + UPDATE SET name = 'parag jain'; + +-- let's verify that the data was updated properly. +SELECT * FROM target_table; + +-- let's see what happens when we try to update the distributed key of the target table +MERGE INTO target_table t +USING (SELECT id, some_number from source_withdata where id = 1) s +on t.id = s.some_number +WHEN MATCHED THEN + UPDATE SET id = 1500; + +SELECT * FROM target_table; + +-- test DELETE : when source is single sharded and tables are colocated +MERGE INTO target_table t +USING (SELECT id, some_number from source_withdata where id = 1) s +on t.id = s.some_number +WHEN MATCHED THEN + DELETE; + +-- let's verify that the data was deleted properly.
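+-- (the MERGE above matched the only remaining row in target_table, id = 3, so the table should now be empty)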
+SELECT * FROM target_table; + +-- +DELETE FROM source_withdata; +DELETE FROM target_table; +INSERT INTO source VALUES (1,1); + +merge into target_table sda +using source_withdata sdn +on sda.id = sdn.id AND sda.id = 1 +when not matched then + insert (id) + values (10000); + +SELECT * FROM target_table WHERE id = 10000; + +RESET client_min_messages; + + + -- This will prune shards with restriction information as NOT MATCHED is void BEGIN; SET citus.log_remote_commands to true; diff --git a/src/test/regress/sql/metadata_sync_from_non_maindb.sql b/src/test/regress/sql/metadata_sync_from_non_maindb.sql new file mode 100644 index 00000000000..62760c6cc41 --- /dev/null +++ b/src/test/regress/sql/metadata_sync_from_non_maindb.sql @@ -0,0 +1,188 @@ +CREATE SCHEMA metadata_sync_2pc_schema; +SET search_path TO metadata_sync_2pc_schema; +set citus.enable_create_database_propagation to on; +CREATE DATABASE metadata_sync_2pc_db; + +revoke connect,temp,temporary on database metadata_sync_2pc_db from public; + +\c metadata_sync_2pc_db +SHOW citus.main_db; + +CREATE USER "grant_role2pc'_user1"; +CREATE USER "grant_role2pc'_user2"; +CREATE USER "grant_role2pc'_user3"; +CREATE USER grant_role2pc_user4; +CREATE USER grant_role2pc_user5; + +\c regression +select 1 from citus_remove_node('localhost', :worker_2_port); + +\c metadata_sync_2pc_db +grant "grant_role2pc'_user1","grant_role2pc'_user2" to "grant_role2pc'_user3" WITH ADMIN OPTION; +-- This section was originally testing a scenario where a user with the 'admin option' grants the same role to another user, also with the 'admin option'. +-- However, we encountered inconsistent errors because the 'admin option' grant is executed after the grant below. +-- Once we establish the correct order of granting, we will reintroduce the 'granted by' clause. +-- For now, we are commenting out the grant below that includes 'granted by', and instead, we are adding a grant without the 'granted by' clause. 
+-- grant "grant_role2pc'_user1","grant_role2pc'_user2" to grant_role2pc_user4,grant_role2pc_user5 granted by "grant_role2pc'_user3"; +grant "grant_role2pc'_user1","grant_role2pc'_user2" to grant_role2pc_user4,grant_role2pc_user5; + +--test for grant on database +\c metadata_sync_2pc_db - - :master_port +grant create on database metadata_sync_2pc_db to "grant_role2pc'_user1"; +grant connect on database metadata_sync_2pc_db to "grant_role2pc'_user2"; +grant ALL on database metadata_sync_2pc_db to "grant_role2pc'_user3"; + +\c regression +select check_database_privileges('grant_role2pc''_user1','metadata_sync_2pc_db',ARRAY['CREATE']); +select check_database_privileges('grant_role2pc''_user2','metadata_sync_2pc_db',ARRAY['CONNECT']); +select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db',ARRAY['CREATE','CONNECT','TEMP','TEMPORARY']); + +-- test for security label on role +\c metadata_sync_2pc_db - - :master_port +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE grant_role2pc_user4 IS 'citus_unclassified'; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "grant_role2pc'_user1" IS 'citus_classified'; + +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('grant_role2pc_user4') ORDER BY node_type; +SELECT node_type, result FROM get_citus_tests_label_provider_labels($$"grant_role2pc''_user1"$$) ORDER BY node_type; + +set citus.enable_create_database_propagation to on; +select 1 from citus_add_node('localhost', :worker_2_port); + +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('"grant_role2pc''_user2"','"grant_role2pc''_user3"','grant_role2pc_user4','grant_role2pc_user5') + order by member::regrole::text +) t +$$); + +select check_database_privileges('grant_role2pc''_user1','metadata_sync_2pc_db',ARRAY['CREATE']); +select check_database_privileges('grant_role2pc''_user2','metadata_sync_2pc_db',ARRAY['CONNECT']); +select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db',ARRAY['CREATE','CONNECT','TEMP','TEMPORARY']); + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('grant_role2pc_user4') ORDER BY node_type; +SELECT node_type, result FROM get_citus_tests_label_provider_labels($$"grant_role2pc''_user1"$$) ORDER BY node_type; + +\c metadata_sync_2pc_db +revoke "grant_role2pc'_user1","grant_role2pc'_user2" from grant_role2pc_user4,grant_role2pc_user5 ; + +revoke admin option for "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3"; + +revoke "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3"; +revoke ALL on database metadata_sync_2pc_db from "grant_role2pc'_user3"; +revoke CONNECT on database metadata_sync_2pc_db from "grant_role2pc'_user2"; +revoke CREATE on database metadata_sync_2pc_db from "grant_role2pc'_user1"; + +\c regression + +drop user "grant_role2pc'_user1","grant_role2pc'_user2","grant_role2pc'_user3",grant_role2pc_user4,grant_role2pc_user5; +--test for user operations + +--test for create user +\c regression - - :master_port +select 1 from citus_remove_node('localhost', :worker_2_port); + +\c metadata_sync_2pc_db - - :master_port +CREATE ROLE test_role1 WITH LOGIN PASSWORD 'password1'; + +\c metadata_sync_2pc_db - - :worker_1_port +CREATE USER "test_role2-needs\!escape" +WITH + SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION 
BYPASSRLS CONNECTION +LIMIT 10 VALID UNTIL '2023-01-01' IN ROLE test_role1; + +create role test_role3; + +\c regression - - :master_port +select 1 from citus_add_node('localhost', :worker_2_port); + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','test_role3') + ORDER BY rolname + ) t +$$); + +--test for alter user +select 1 from citus_remove_node('localhost', :worker_2_port); +\c metadata_sync_2pc_db - - :master_port +-- Test ALTER ROLE with various options +ALTER ROLE test_role1 WITH PASSWORD 'new_password1'; + +\c metadata_sync_2pc_db - - :worker_1_port +ALTER USER "test_role2-needs\!escape" +WITH + NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION +LIMIT 5 VALID UNTIL '2024-01-01'; + +\c regression - - :master_port +select 1 from citus_add_node('localhost', :worker_2_port); + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','test_role3') + ORDER BY rolname + ) t +$$); + +--test for drop user +select 1 from citus_remove_node('localhost', :worker_2_port); + +\c metadata_sync_2pc_db - - :worker_1_port +DROP ROLE test_role1, "test_role2-needs\!escape"; + +\c metadata_sync_2pc_db - - :master_port +DROP ROLE test_role3; + +\c regression - - :master_port +select 1 from citus_add_node('localhost', :worker_2_port); + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','test_role3') + ORDER BY rolname + ) t +$$); + +-- Clean up: drop the database on worker node 2 +\c regression - - :worker_2_port +DROP ROLE if exists test_role1, "test_role2-needs\!escape", test_role3; + +\c regression - - :master_port + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','test_role3') + ORDER BY rolname + ) t +$$); + +set citus.enable_create_database_propagation to on; +drop database metadata_sync_2pc_db; +drop schema metadata_sync_2pc_schema; +reset citus.enable_create_database_propagation; +reset search_path; diff --git a/src/test/regress/sql/metadata_sync_helpers.sql b/src/test/regress/sql/metadata_sync_helpers.sql index a4044bab3de..dae331d258b 100644 --- a/src/test/regress/sql/metadata_sync_helpers.sql +++ b/src/test/regress/sql/metadata_sync_helpers.sql @@ -15,21 +15,20 @@ SET search_path TO metadata_sync_helpers; CREATE TABLE test(col_1 int); -- not in a distributed transaction -SELECT 
citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); -SELECT citus_internal_update_relation_colocation ('test'::regclass, 1); +SELECT citus_internal.add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); +SELECT citus_internal.update_relation_colocation ('test'::regclass, 1); -- in a distributed transaction, but the application name is not Citus BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; --- in a distributed transaction and the application name is Citus --- but we are on the coordinator, so still not allowed +-- in a distributed transaction and the application name is Citus, allowed. BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; \c - postgres - \c - - - :worker_1_port @@ -48,14 +47,14 @@ SET search_path TO metadata_sync_helpers; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; -- we do not own the relation BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation ('test'::regclass, 10); + SELECT citus_internal.update_relation_colocation ('test'::regclass, 10); ROLLBACK; -- finally, a user can only add its own tables to the metadata @@ -64,7 +63,7 @@ CREATE TABLE test_3(col_1 int, col_2 int); BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); SELECT count(*) FROM pg_dist_partition WHERE logicalrelid = 'metadata_sync_helpers.test_2'::regclass; ROLLBACK; @@ -72,84 +71,84 @@ ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_rebalancer gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; -- application_name with incorrect gpid BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=not a correct gpid'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata 
('test_2'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; -- also faills if done by the rebalancer BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_rebalancer gpid=not a correct gpid'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; -- application_name with suffix is ok (e.g. pgbouncer might add this) BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001 - from 10.12.14.16:10370'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; -- application_name with empty gpid BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid='; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; -- empty application_name BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to ''; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; -- application_name with incorrect prefix BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; -- fails because there is no X distribution method BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); ROLLBACK; -- fails because there is the column does not exist BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'non_existing_col', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'non_existing_col', 0, 's'); ROLLBACK; --- fails because we do not allow NULL parameters BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata (NULL, 'h', 'non_existing_col', 0, 's'); + SELECT citus_internal.add_partition_metadata (NULL, 'h', 'non_existing_col', 0, 's'); ROLLBACK; -- fails because colocationId cannot be negative BEGIN TRANSACTION ISOLATION 
LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', -1, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', -1, 's'); ROLLBACK; -- fails because there is no X replication model BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 'X'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 'X'); ROLLBACK; -- the same table cannot be added twice, that is enforced by a primary key @@ -157,8 +156,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; -- the same table cannot be added twice, that is enforced by a primary key even if distribution key changes @@ -166,8 +165,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_2', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_2', 0, 's'); ROLLBACK; -- hash distributed table cannot have NULL distribution key @@ -175,7 +174,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', NULL, 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', NULL, 0, 's'); ROLLBACK; -- even if metadata_sync_helper_role is not owner of the table test @@ -195,7 +194,7 @@ SET search_path TO metadata_sync_helpers; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); ROLLBACK; -- should throw error even if we skip the checks, there are no such nodes @@ -203,7 +202,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420007, 10000, 11111); + 
SELECT citus_internal.update_placement_metadata(1420007, 10000, 11111); ROLLBACK; -- non-existing users should fail to pass the checks @@ -219,7 +218,7 @@ SET search_path TO metadata_sync_helpers; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); ROLLBACK; \c - postgres - :worker_1_port @@ -237,21 +236,21 @@ CREATE TABLE test_ref(col_1 int, col_2 int); BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_ref'::regclass, 'n', 'col_1', 0, 's'); ROLLBACK; -- non-valid replication model BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'A'); + SELECT citus_internal.add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'A'); ROLLBACK; -- not-matching replication model for reference table BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'c'); + SELECT citus_internal.add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'c'); ROLLBACK; -- add entry for super user table @@ -261,7 +260,7 @@ CREATE TABLE super_user_table(col_1 int); BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('super_user_table'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('super_user_table'::regclass, 'h', 'col_1', 0, 's'); COMMIT; -- now, lets check shard metadata @@ -277,7 +276,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('super_user_table'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ROLLBACK; -- the user is only allowed to add a shard for add a table which is in pg_dist_partition @@ -287,23 +286,23 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, 
storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ROLLBACK; -- ok, now add the table to the pg_dist_partition BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 250, 's'); - SELECT citus_internal_add_partition_metadata ('test_3'::regclass, 'h', 'col_1', 251, 's'); - SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 't'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 250, 's'); + SELECT citus_internal.add_partition_metadata ('test_3'::regclass, 'h', 'col_1', 251, 's'); + SELECT citus_internal.add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 't'); COMMIT; -- we can update to a non-existing colocation group (e.g., colocate_with:=none) BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation ('test_2'::regclass, 1231231232); + SELECT citus_internal.update_relation_colocation ('test_2'::regclass, 1231231232); ROLLBACK; -- invalid shard ids are not allowed @@ -313,7 +312,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, -1, 't'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ROLLBACK; -- invalid storage types are not allowed @@ -323,7 +322,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000, 'X'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ROLLBACK; -- NULL shard ranges are not allowed for hash distributed tables @@ -333,7 +332,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000, 't'::"char", NULL, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ROLLBACK; -- non-integer shard ranges are not allowed @@ -343,7 +342,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", 'non-int'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, 
storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ROLLBACK; -- shardMinValue should be smaller than shardMaxValue @@ -353,7 +352,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-1610612737'::text, '-2147483648'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ROLLBACK; -- we do not allow overlapping shards for the same table @@ -365,7 +364,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text), ('test_2'::regclass, 1420001::bigint, 't'::"char", '20'::text, '30'::text), ('test_2'::regclass, 1420002::bigint, 't'::"char", '10'::text, '50'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ROLLBACK; -- Now let's check valid pg_dist_object updates @@ -377,7 +376,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('non_existing_type', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false)) - SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; + SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; ROLLBACK; -- check the sanity of distributionArgumentIndex and colocationId @@ -387,7 +386,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -100, 0, false)) - SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; + SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -396,7 +395,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -1, -1, false)) - SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; + SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; ROLLBACK; -- check with non-existing object @@ -406,10 +405,10 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES 
('role', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false)) - SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; + SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; ROLLBACK; --- since citus_internal_add_object_metadata is strict function returns NULL +-- since citus_internal.add_object_metadata is a strict function, it returns NULL -- if any parameter is NULL BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); @@ -417,12 +416,12 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], 0, NULL::int, false)) - SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; + SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; ROLLBACK; \c - postgres - :worker_1_port --- Show that citus_internal_add_object_metadata only works for object types +-- Show that citus_internal.add_object_metadata only works for object types -- which is known how to distribute BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); @@ -438,10 +437,10 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET ROLE metadata_sync_helper_role; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('operator', ARRAY['===']::text[], ARRAY['int','int']::text[], -1, 0, false)) - SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; + SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; ROLLBACK; --- Show that citus_internal_add_object_metadata checks the priviliges +-- Show that citus_internal.add_object_metadata checks the privileges BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; @@ -455,7 +454,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET ROLE metadata_sync_helper_role; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('function', ARRAY['distribution_test_function']::text[], ARRAY['integer']::text[], -1, 0, false)) - SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; + SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -469,7 +468,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET ROLE metadata_sync_helper_role; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS
(VALUES ('type', ARRAY['distributed_test_type']::text[], ARRAY[]::text[], -1, 0, false)) - SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; + SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; ROLLBACK; -- we do not allow wrong partmethod @@ -483,7 +482,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text), ('test_2'::regclass, 1420001::bigint, 't'::"char", '20'::text, '30'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ROLLBACK; -- we do not allow NULL shardMinMax values @@ -495,12 +494,12 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; -- manually ingest NULL values, otherwise not likely unless metadata is corrupted UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 1420000; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420001::bigint, 't'::"char", '20'::text, '30'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ROLLBACK; \c - metadata_sync_helper_role - :worker_1_port @@ -519,14 +518,14 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; ('test_2'::regclass, 1420004::bigint, 't'::"char", '51'::text, '60'::text), ('test_2'::regclass, 1420005::bigint, 't'::"char", '61'::text, '70'::text), ('test_3'::regclass, 1420008::bigint, 't'::"char", '11'::text, '20'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; COMMIT; -- we cannot mark these two tables colocated because they are not colocated BEGIN; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ROLLBACK; -- now, add few more shards for test_3 to make it colocated with test_2 @@ -540,7 +539,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; ('test_3'::regclass, 1420011::bigint, 't'::"char", '41'::text, '50'::text), ('test_3'::regclass, 1420012::bigint, 't'::"char", '51'::text, '60'::text), ('test_3'::regclass, 1420013::bigint, 't'::"char", '61'::text, '70'::text)) - SELECT 
citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; COMMIT; -- shardMin/MaxValues should be NULL for reference tables @@ -550,7 +549,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420003::bigint, 't'::"char", '-1610612737'::text, NULL)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ROLLBACK; -- reference tables cannot have multiple shards @@ -561,7 +560,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL), ('test_ref'::regclass, 1420007::bigint, 't'::"char", NULL, NULL)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ROLLBACK; -- finally, add a shard for reference tables @@ -571,7 +570,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; COMMIT; \c - postgres - :worker_1_port @@ -584,7 +583,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('super_user_table'::regclass, 1420007::bigint, 't'::"char", '11'::text, '20'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; COMMIT; \c - metadata_sync_helper_role - :worker_1_port @@ -597,9 +596,9 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS - (VALUES (-10, 1, 0::bigint, 1::int, 1500000::bigint)) - SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS + (VALUES (-10, 0::bigint, 1::int, 1500000::bigint)) + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ROLLBACK; -- invalid placementid @@ -609,7 +608,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1420000, 0::bigint, 
1::int, -10)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ROLLBACK; -- non-existing shard @@ -619,7 +618,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1430100, 0::bigint, 1::int, 10)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ROLLBACK; -- non-existing node with non-existing node-id 123123123 @@ -629,7 +628,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES ( 1420000, 0::bigint, 123123123::int, 1500000)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ROLLBACK; -- create a volatile function that returns the local node id @@ -656,7 +655,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1420000, 0::bigint, get_node_id(), 1500000), (1420000, 0::bigint, get_node_id(), 1500001)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ROLLBACK; -- shard is not owned by us @@ -666,7 +665,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1420007, 0::bigint, get_node_id(), 1500000)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ROLLBACK; -- sucessfully add placements @@ -687,14 +686,14 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1420011, 0::bigint, get_node_id(), 1500009), (1420012, 0::bigint, get_node_id(), 1500010), (1420013, 0::bigint, get_node_id(), 1500011)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; COMMIT; -- we should be able to colocate both tables now BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ROLLBACK; -- try to update placements @@ -704,7 +703,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420000, get_node_id(), get_node_id()+1000); + SELECT citus_internal.update_placement_metadata(1420000, get_node_id(), get_node_id()+1000); COMMIT; -- 
fails because the source node doesn't contain the shard @@ -712,7 +711,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420000, get_node_id()+10000, get_node_id()); + SELECT citus_internal.update_placement_metadata(1420000, get_node_id()+10000, get_node_id()); COMMIT; -- fails because shard does not exist @@ -720,7 +719,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(0, get_node_id(), get_node_id()+1); + SELECT citus_internal.update_placement_metadata(0, get_node_id(), get_node_id()+1); COMMIT; -- fails because none-existing shard @@ -728,7 +727,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(213123123123, get_node_id(), get_node_id()+1); + SELECT citus_internal.update_placement_metadata(213123123123, get_node_id(), get_node_id()+1); COMMIT; -- fails because we do not own the shard @@ -736,7 +735,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420007, get_node_id(), get_node_id()+1); + SELECT citus_internal.update_placement_metadata(1420007, get_node_id(), get_node_id()+1); COMMIT; -- the user only allowed to delete their own shards @@ -746,7 +745,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420007)) - SELECT citus_internal_delete_shard_metadata(shardid) FROM shard_data; + SELECT citus_internal.delete_shard_metadata(shardid) FROM shard_data; ROLLBACK; -- the user cannot delete non-existing shards @@ -756,7 +755,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420100)) - SELECT citus_internal_delete_shard_metadata(shardid) FROM shard_data; + SELECT citus_internal.delete_shard_metadata(shardid) FROM shard_data; ROLLBACK; @@ -771,7 +770,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420000)) - SELECT citus_internal_delete_shard_metadata(shardid) FROM shard_data; + SELECT citus_internal.delete_shard_metadata(shardid) FROM shard_data; SELECT count(*) FROM pg_dist_shard WHERE shardid = 1420000; SELECT count(*) FROM pg_dist_placement WHERE shardid = 1420000; @@ -789,7 +788,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; -- so that making two tables colocated fails UPDATE pg_dist_partition SET repmodel = 't' WHERE logicalrelid = 'test_2'::regclass; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ROLLBACK; @@ -811,7 +810,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; WHERE logicalrelid = 'test_2'::regclass; \endif - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT 
citus_internal.update_relation_colocation('test_2'::regclass, 251); ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -821,7 +820,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; -- so that making two tables colocated fails UPDATE pg_dist_partition SET partmethod = '' WHERE logicalrelid = 'test_2'::regclass; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -831,7 +830,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; -- so that making two tables colocated fails UPDATE pg_dist_partition SET partmethod = 'a' WHERE logicalrelid = 'test_2'::regclass; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ROLLBACK; -- colocated hash distributed table should have the same dist key columns @@ -842,8 +841,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_add_partition_metadata ('test_5'::regclass, 'h', 'int_col', 500, 's'); - SELECT citus_internal_add_partition_metadata ('test_6'::regclass, 'h', 'text_col', 500, 's'); + SELECT citus_internal.add_partition_metadata ('test_5'::regclass, 'h', 'int_col', 500, 's'); + SELECT citus_internal.add_partition_metadata ('test_6'::regclass, 'h', 'text_col', 500, 's'); ROLLBACK; @@ -859,8 +858,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_add_partition_metadata ('test_7'::regclass, 'h', 'text_col', 500, 's'); - SELECT citus_internal_add_partition_metadata ('test_8'::regclass, 'h', 'text_col', 500, 's'); + SELECT citus_internal.add_partition_metadata ('test_7'::regclass, 'h', 'text_col', 500, 's'); + SELECT citus_internal.add_partition_metadata ('test_8'::regclass, 'h', 'text_col', 500, 's'); ROLLBACK; -- we don't need the table/schema anymore diff --git a/src/test/regress/sql/multi_alter_table_statements.sql b/src/test/regress/sql/multi_alter_table_statements.sql index 10e52cb3735..0674a68a60f 100644 --- a/src/test/regress/sql/multi_alter_table_statements.sql +++ b/src/test/regress/sql/multi_alter_table_statements.sql @@ -727,6 +727,32 @@ ALTER TABLE table_without_sequence ADD COLUMN x BIGINT DEFAULT nextval('test_sch SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation.seq_10'::regclass); SELECT pg_identify_object_as_address(classid, objid, objsubid) from pg_catalog.pg_dist_object WHERE objid IN ('test_schema_for_sequence_propagation'::regnamespace); +-- Bug: https://github.com/citusdata/citus/issues/7378 + +-- Create a reference table +CREATE TABLE tbl_ref(row_id integer primary key); +INSERT INTO tbl_ref VALUES (1), (2); +SELECT create_reference_table('tbl_ref'); + +-- Create a distributed table +CREATE TABLE tbl_dist(series_id integer); +INSERT INTO tbl_dist VALUES (1), (1), (2), (2); +SELECT create_distributed_table('tbl_dist', 'series_id'); + +-- Create a view that joins the distributed table with the reference table on the distribution key. 
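+-- Before the fix for issue #7378, queries through this view broke once columns were
+-- added to or dropped from the reference table below.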
+CREATE VIEW vw_citus_views as +SELECT d.series_id FROM tbl_dist d JOIN tbl_ref r ON d.series_id = r.row_id; + +-- The view initially works fine +SELECT * FROM vw_citus_views ORDER BY 1; +-- Now, alter the table +ALTER TABLE tbl_ref ADD COLUMN category1 varchar(50); +SELECT * FROM vw_citus_views ORDER BY 1; +ALTER TABLE tbl_ref ADD COLUMN category2 varchar(50); +SELECT * FROM vw_citus_views ORDER BY 1; +ALTER TABLE tbl_ref DROP COLUMN category1; +SELECT * FROM vw_citus_views ORDER BY 1; + SET client_min_messages TO WARNING; DROP SCHEMA test_schema_for_sequence_propagation CASCADE; DROP TABLE table_without_sequence; diff --git a/src/test/regress/sql/multi_cluster_management.sql b/src/test/regress/sql/multi_cluster_management.sql index 9ec0eb28e17..86fbd15b683 100644 --- a/src/test/regress/sql/multi_cluster_management.sql +++ b/src/test/regress/sql/multi_cluster_management.sql @@ -39,7 +39,7 @@ SELECT master_get_active_worker_nodes(); SELECT 1 FROM master_add_node('localhost', :worker_2_port); SELECT citus_disable_node('localhost', :worker_2_port); -SELECT public.wait_until_metadata_sync(60000); +SELECT public.wait_until_metadata_sync(20000); SELECT master_get_active_worker_nodes(); -- add some shard placements to the cluster @@ -328,7 +328,7 @@ SELECT 1 FROM master_add_inactive_node('localhost', 9996, groupid => :worker_2_g SELECT master_add_inactive_node('localhost', 9999, groupid => :worker_2_group, nodecluster => 'olap', noderole => 'secondary'); SELECT master_activate_node('localhost', 9999); SELECT citus_disable_node('localhost', 9999); -SELECT public.wait_until_metadata_sync(60000); +SELECT public.wait_until_metadata_sync(20000); SELECT master_remove_node('localhost', 9999); -- check that you can't manually add two primaries to a group @@ -530,3 +530,10 @@ RESET citus.metadata_sync_mode; -- verify that at the end of this file, all primary nodes have metadata synced SELECT bool_and(hasmetadata) AND bool_and(metadatasynced) FROM pg_dist_node WHERE isactive = 't' and noderole = 'primary'; + +-- Grant all on public schema to public +-- +-- That's the default on Postgres versions < 15 and we want to +-- keep permissions compatible across versions, in regression +-- tests.
+GRANT ALL ON SCHEMA public TO PUBLIC; diff --git a/src/test/regress/sql/multi_multiuser_auth.sql b/src/test/regress/sql/multi_multiuser_auth.sql index 43cb3c11f2f..1cd566b50bf 100644 --- a/src/test/regress/sql/multi_multiuser_auth.sql +++ b/src/test/regress/sql/multi_multiuser_auth.sql @@ -16,9 +16,9 @@ \set bob_worker_2_pw omnibus-plectrum-comet-sneezy-ensile \set bob_fallback_pw :bob_worker_1_pw -SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port; +SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port \gset -SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port; +SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port \gset -- alice is a superuser so she can update own password diff --git a/src/test/regress/sql/multi_mx_add_coordinator.sql b/src/test/regress/sql/multi_mx_add_coordinator.sql index 47053cd28b4..a7ab2749a44 100644 --- a/src/test/regress/sql/multi_mx_add_coordinator.sql +++ b/src/test/regress/sql/multi_mx_add_coordinator.sql @@ -41,23 +41,33 @@ CREATE TABLE ref(groupid int); SELECT create_reference_table('ref'); \c - - - :worker_1_port --- alter role from mx worker isn't allowed when alter role propagation is on -SET citus.enable_alter_role_propagation TO ON; -ALTER ROLE reprefuser WITH CREATEROLE; --- to alter role locally disable alter role propagation first +-- to alter role locally, disable alter role propagation first SET citus.enable_alter_role_propagation TO OFF; ALTER ROLE reprefuser WITH CREATEROLE; -SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser'; -RESET citus.enable_alter_role_propagation; -\c - - - :worker_2_port --- show that altering role locally on worker doesn't propagated to other worker -SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser'; +SELECT result from run_command_on_all_nodes( + $$ + SELECT to_jsonb(q2.*) FROM ( + SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser' + ) q2 + $$ +) ORDER BY result; + +-- alter role from mx worker is allowed +SET citus.enable_alter_role_propagation TO ON; +ALTER ROLE reprefuser WITH CREATEROLE; + +-- show that altering role locally on worker is propagated to coordinator and to other workers too +SELECT result from run_command_on_all_nodes( + $$ + SELECT to_jsonb(q2.*) FROM ( + SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser' + ) q2 + $$ +) ORDER BY result; \c - - - :master_port SET search_path TO mx_add_coordinator,public; --- show that altering role locally on worker doesn't propagated to coordinator -SELECT rolcreatedb, rolcreaterole FROM pg_roles WHERE rolname = 'reprefuser'; SET citus.log_local_commands TO ON; SET client_min_messages TO DEBUG; @@ -67,7 +77,7 @@ SET client_min_messages TO DEBUG; SELECT count(*) FROM ref; SELECT count(*) FROM ref; --- test that distributed functions also use local execution +-- test that distributed functions also use sequential execution CREATE OR REPLACE FUNCTION my_group_id() RETURNS void LANGUAGE plpgsql @@ -190,5 +200,6 @@ SELECT verify_metadata('localhost', :worker_1_port), SET client_min_messages TO error; DROP SCHEMA mx_add_coordinator CASCADE; +DROP USER reprefuser; SET search_path TO DEFAULT; RESET client_min_messages; diff --git a/src/test/regress/sql/multi_mx_create_table.sql b/src/test/regress/sql/multi_mx_create_table.sql index de346841567..4fb6eadbbc5 100644 
--- a/src/test/regress/sql/multi_mx_create_table.sql +++ b/src/test/regress/sql/multi_mx_create_table.sql @@ -5,9 +5,15 @@ ALTER SEQUENCE pg_catalog.pg_dist_shardid_seq RESTART 1220000; ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 1220000; +SET client_min_messages TO WARNING; + SELECT start_metadata_sync_to_node('localhost', :worker_1_port); SELECT start_metadata_sync_to_node('localhost', :worker_2_port); +-- cannot drop them at the end of the test file as other tests depend on them +DROP SCHEMA IF EXISTS citus_mx_test_schema, citus_mx_test_schema_join_1, citus_mx_test_schema_join_2 CASCADE; +DROP TABLE IF EXISTS nation_hash, lineitem_mx, orders_mx, customer_mx, nation_mx, part_mx, supplier_mx, mx_ddl_table, limit_orders_mx, multiple_hash_mx, app_analytics_events_mx, researchers_mx, labs_mx, objects_mx, articles_hash_mx, articles_single_shard_hash_mx, company_employees_mx; + -- create schema to test schema support CREATE SCHEMA citus_mx_test_schema; CREATE SCHEMA citus_mx_test_schema_join_1; @@ -38,7 +44,7 @@ END; $$ LANGUAGE 'plpgsql' IMMUTABLE; -CREATE FUNCTION public.immutable_append_mx(old_values int[], new_value int) +CREATE OR REPLACE FUNCTION public.immutable_append_mx(old_values int[], new_value int) RETURNS int[] AS $$ SELECT old_values || new_value $$ LANGUAGE SQL IMMUTABLE; CREATE OPERATOR citus_mx_test_schema.=== ( @@ -67,14 +73,16 @@ SELECT quote_ident(current_setting('lc_collate')) as current_locale \gset CREATE COLLATION citus_mx_test_schema.english (LOCALE=:current_locale); CREATE TYPE citus_mx_test_schema.new_composite_type as (key1 text, key2 text); -CREATE TYPE order_side_mx AS ENUM ('buy', 'sell'); +CREATE TYPE citus_mx_test_schema.order_side_mx AS ENUM ('buy', 'sell'); -- now create required stuff in the worker 1 \c - - - :worker_1_port +SET client_min_messages TO WARNING; -- show that we do not support creating citus local tables from mx workers for now CREATE TABLE citus_local_table(a int); SELECT citus_add_local_table_to_metadata('citus_local_table'); +DROP TABLE citus_local_table; SET search_path TO citus_mx_test_schema; -- create operator @@ -89,6 +97,7 @@ CREATE OPERATOR citus_mx_test_schema.=== ( -- now create required stuff in the worker 2 \c - - - :worker_2_port +SET client_min_messages TO WARNING; SET search_path TO citus_mx_test_schema; @@ -104,6 +113,7 @@ CREATE OPERATOR citus_mx_test_schema.=== ( -- connect back to the master, and do some more tests \c - - - :master_port +SET client_min_messages TO WARNING; SET citus.shard_replication_factor TO 1; SET search_path TO public; @@ -308,7 +318,7 @@ CREATE TABLE limit_orders_mx ( symbol text NOT NULL, bidder_id bigint NOT NULL, placed_at timestamp NOT NULL, - kind order_side_mx NOT NULL, + kind citus_mx_test_schema.order_side_mx NOT NULL, limit_price decimal NOT NULL DEFAULT 0.00 CHECK (limit_price >= 0.00) ); @@ -386,6 +396,7 @@ FROM citus_tables ORDER BY table_name::text; \c - - - :worker_1_port +SET client_min_messages TO WARNING; SELECT table_name, citus_table_type, distribution_column, shard_count, table_owner FROM citus_tables @@ -394,4 +405,4 @@ ORDER BY table_name::text; SELECT shard_name, table_name, citus_table_type, shard_size FROM citus_shards ORDER BY shard_name::text; -- Show that altering type name is not supported from worker node -ALTER TYPE order_side_mx RENAME TO temp_order_side_mx; +ALTER TYPE citus_mx_test_schema.order_side_mx RENAME TO temp_order_side_mx; diff --git a/src/test/regress/sql/multi_mx_hide_shard_names.sql b/src/test/regress/sql/multi_mx_hide_shard_names.sql index 
e5213a41bf4..addc7f90ede 100644 --- a/src/test/regress/sql/multi_mx_hide_shard_names.sql +++ b/src/test/regress/sql/multi_mx_hide_shard_names.sql @@ -50,6 +50,24 @@ prepare transaction 'take-aggressive-lock'; -- shards are hidden when using psql as application_name SELECT relname FROM pg_catalog.pg_class WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname; +-- Even when using a subquery and having no existing quals on pg_class +SELECT relname FROM (SELECT relname, relnamespace FROM pg_catalog.pg_class) AS q WHERE relnamespace = 'mx_hide_shard_names'::regnamespace ORDER BY relname; + +-- Check that inserts into pg_class don't add the filter +EXPLAIN (COSTS OFF) INSERT INTO pg_class VALUES (1); +-- Unless it's an INSERT SELECT that queries from pg_class; +EXPLAIN (COSTS OFF) INSERT INTO pg_class SELECT * FROM pg_class; + +-- Check that the query that psql runs for "\d test_table" gets optimized to an index +-- scan +EXPLAIN (COSTS OFF) SELECT c.oid, + n.nspname, + c.relname +FROM pg_catalog.pg_class c + LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace +WHERE c.relname OPERATOR(pg_catalog.~) '^(test_table)$' COLLATE pg_catalog.default + AND pg_catalog.pg_table_is_visible(c.oid) +ORDER BY 2, 3; commit prepared 'take-aggressive-lock'; diff --git a/src/test/regress/sql/multi_mx_insert_select_repartition.sql b/src/test/regress/sql/multi_mx_insert_select_repartition.sql index 4a9c8c96fa9..b206c6e4e77 100644 --- a/src/test/regress/sql/multi_mx_insert_select_repartition.sql +++ b/src/test/regress/sql/multi_mx_insert_select_repartition.sql @@ -55,6 +55,8 @@ SET citus.log_local_commands to on; -- INSERT .. SELECT via repartitioning with local execution BEGIN; select count(*) from source_table WHERE a = 1; + -- we omit the "SELECT bytes FROM fetch_intermediate_results..." line since it is flaky + SET LOCAL citus.grep_remote_commands TO '%multi_mx_insert_select_repartition%'; insert into target_table SELECT a*2 FROM source_table RETURNING a; ROLLBACK; diff --git a/src/test/regress/sql/multi_mx_node_metadata.sql b/src/test/regress/sql/multi_mx_node_metadata.sql index 45b4edae108..e0d765a204b 100644 --- a/src/test/regress/sql/multi_mx_node_metadata.sql +++ b/src/test/regress/sql/multi_mx_node_metadata.sql @@ -14,7 +14,7 @@ SET citus.shard_replication_factor TO 1; \set VERBOSITY terse -- Simulates a readonly node by setting default_transaction_read_only.
-CREATE FUNCTION mark_node_readonly(hostname TEXT, port INTEGER, isreadonly BOOLEAN) +CREATE OR REPLACE FUNCTION mark_node_readonly(hostname TEXT, port INTEGER, isreadonly BOOLEAN) RETURNS TEXT LANGUAGE sql AS $$ @@ -35,7 +35,7 @@ CREATE OR REPLACE FUNCTION raise_error_in_metadata_sync() LANGUAGE C STRICT AS 'citus'; -CREATE PROCEDURE wait_until_process_count(appname text, target_count int) AS $$ +CREATE OR REPLACE PROCEDURE wait_until_process_count(appname text, target_count int) AS $$ declare counter integer := -1; begin @@ -378,7 +378,22 @@ SELECT trigger_metadata_sync(); SELECT datname FROM pg_stat_activity WHERE application_name LIKE 'Citus Met%'; -DROP DATABASE db_to_drop; +DO $$ +DECLARE + i int := 0; +BEGIN + WHILE NOT (SELECT bool_and(success) from run_command_on_all_nodes('DROP DATABASE IF EXISTS db_to_drop')) + LOOP + BEGIN + i := i + 1; + IF i > 5 THEN + RAISE EXCEPTION 'DROP DATABASE timed out'; + END IF; + PERFORM pg_sleep(1); + END; + END LOOP; +END; +$$; SELECT datname FROM pg_stat_activity WHERE application_name LIKE 'Citus Met%'; diff --git a/src/test/regress/sql/multi_mx_transaction_recovery.sql b/src/test/regress/sql/multi_mx_transaction_recovery.sql index 2a6b4991bd2..e46917f3542 100644 --- a/src/test/regress/sql/multi_mx_transaction_recovery.sql +++ b/src/test/regress/sql/multi_mx_transaction_recovery.sql @@ -47,7 +47,7 @@ INSERT INTO pg_dist_transaction VALUES (122, 'citus_122_should_do_nothing'); SELECT recover_prepared_transactions(); -- delete the citus_122_should_do_nothing transaction -DELETE FROM pg_dist_transaction WHERE gid = 'citus_122_should_do_nothing' RETURNING *; +DELETE FROM pg_dist_transaction WHERE gid = 'citus_122_should_do_nothing' RETURNING groupid, gid; ROLLBACK PREPARED 'citus_122_should_do_nothing'; SELECT count(*) FROM pg_dist_transaction; diff --git a/src/test/regress/sql/multi_poolinfo_usage.sql b/src/test/regress/sql/multi_poolinfo_usage.sql index da039cfcafd..2fbaed2ed14 100644 --- a/src/test/regress/sql/multi_poolinfo_usage.sql +++ b/src/test/regress/sql/multi_poolinfo_usage.sql @@ -7,9 +7,9 @@ SET citus.shard_replication_factor TO 1; SET citus.next_shard_id TO 20000000; -SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port; +SELECT nodeid AS worker_1_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_1_port \gset -SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port; +SELECT nodeid AS worker_2_id FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :worker_2_port \gset CREATE TABLE lotsa_connections (id integer, name text); diff --git a/src/test/regress/sql/multi_prepare_plsql.sql b/src/test/regress/sql/multi_prepare_plsql.sql index 8589e5b5af1..e71e2818e71 100644 --- a/src/test/regress/sql/multi_prepare_plsql.sql +++ b/src/test/regress/sql/multi_prepare_plsql.sql @@ -624,7 +624,7 @@ CREATE TYPE prepare_ddl_type AS (x int, y int); SELECT type_ddl_plpgsql(); -- find all renamed types to verify the schema name didn't leak, nor a crash happened -SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup'; +SELECT nspname, typname FROM pg_type JOIN pg_namespace ON pg_namespace.oid = pg_type.typnamespace WHERE typname = 'prepare_ddl_type_backup' ORDER BY 1; DROP TYPE prepare_ddl_type_backup; RESET search_path; @@ -635,6 +635,7 @@ DROP FUNCTION ddl_in_plpgsql(); DROP FUNCTION copy_in_plpgsql(); DROP TABLE prepare_ddl; DROP TABLE 
local_ddl; +DROP TABLE plpgsql_table; DROP SCHEMA otherschema; -- clean-up functions diff --git a/src/test/regress/sql/multi_size_queries.sql b/src/test/regress/sql/multi_size_queries.sql index ff8d203f163..fdc3f78927e 100644 --- a/src/test/regress/sql/multi_size_queries.sql +++ b/src/test/regress/sql/multi_size_queries.sql @@ -13,10 +13,15 @@ SELECT citus_relation_size(1); SELECT citus_total_relation_size(1); -- Tests with non-distributed table -CREATE TABLE non_distributed_table (x int); +CREATE TABLE non_distributed_table (x int primary key); + SELECT citus_table_size('non_distributed_table'); SELECT citus_relation_size('non_distributed_table'); SELECT citus_total_relation_size('non_distributed_table'); + +SELECT citus_table_size('non_distributed_table_pkey'); +SELECT citus_relation_size('non_distributed_table_pkey'); +SELECT citus_total_relation_size('non_distributed_table_pkey'); DROP TABLE non_distributed_table; -- fix broken placements via disabling the node @@ -26,9 +31,25 @@ SELECT replicate_table_shards('lineitem_hash_part', shard_replication_factor:=2, -- Tests on distributed table with replication factor > 1 VACUUM (FULL) lineitem_hash_part; -SELECT citus_table_size('lineitem_hash_part'); -SELECT citus_relation_size('lineitem_hash_part'); -SELECT citus_total_relation_size('lineitem_hash_part'); +SELECT citus_relation_size('lineitem_hash_part') <= citus_table_size('lineitem_hash_part'); +SELECT citus_table_size('lineitem_hash_part') <= citus_total_relation_size('lineitem_hash_part'); +SELECT citus_relation_size('lineitem_hash_part') > 0; + +CREATE INDEX lineitem_hash_part_idx ON lineitem_hash_part(l_orderkey); +VACUUM (FULL) lineitem_hash_part; + +SELECT citus_relation_size('lineitem_hash_part') <= citus_table_size('lineitem_hash_part'); +SELECT citus_table_size('lineitem_hash_part') <= citus_total_relation_size('lineitem_hash_part'); +SELECT citus_relation_size('lineitem_hash_part') > 0; + +SELECT citus_relation_size('lineitem_hash_part_idx') <= citus_table_size('lineitem_hash_part_idx'); +SELECT citus_table_size('lineitem_hash_part_idx') <= citus_total_relation_size('lineitem_hash_part_idx'); +SELECT citus_relation_size('lineitem_hash_part_idx') > 0; + +SELECT citus_total_relation_size('lineitem_hash_part') >= + citus_table_size('lineitem_hash_part') + citus_table_size('lineitem_hash_part_idx'); + +DROP INDEX lineitem_hash_part_idx; VACUUM (FULL) customer_copy_hash; @@ -40,7 +61,7 @@ SELECT citus_total_relation_size('customer_copy_hash'); -- Make sure we can get multiple sizes in a single query SELECT citus_table_size('customer_copy_hash'), citus_table_size('customer_copy_hash'), - citus_table_size('supplier'); + citus_table_size('customer_copy_hash'); CREATE INDEX index_1 on customer_copy_hash(c_custkey); VACUUM (FULL) customer_copy_hash; @@ -50,6 +71,10 @@ SELECT citus_table_size('customer_copy_hash'); SELECT citus_relation_size('customer_copy_hash'); SELECT citus_total_relation_size('customer_copy_hash'); +SELECT citus_table_size('index_1'); +SELECT citus_relation_size('index_1'); +SELECT citus_total_relation_size('index_1'); + -- Tests on reference table VACUUM (FULL) supplier; @@ -64,6 +89,38 @@ SELECT citus_table_size('supplier'); SELECT citus_relation_size('supplier'); SELECT citus_total_relation_size('supplier'); +SELECT citus_table_size('index_2'); +SELECT citus_relation_size('index_2'); +SELECT citus_total_relation_size('index_2'); + +-- Test on partitioned table +CREATE TABLE split_me (dist_col int, partition_col timestamp) PARTITION BY RANGE (partition_col); 
+CREATE INDEX ON split_me(dist_col); + +-- create 2 partitions +CREATE TABLE m PARTITION OF split_me FOR VALUES FROM ('2018-01-01') TO ('2019-01-01'); +CREATE TABLE e PARTITION OF split_me FOR VALUES FROM ('2019-01-01') TO ('2020-01-01'); + +INSERT INTO split_me SELECT 1, '2018-01-01'::timestamp + i * interval '1 day' FROM generate_series(1, 360) i; +INSERT INTO split_me SELECT 2, '2019-01-01'::timestamp + i * interval '1 day' FROM generate_series(1, 180) i; + +-- before citus +SELECT citus_relation_size('split_me'); +SELECT citus_relation_size('split_me_dist_col_idx'); +SELECT citus_relation_size('m'); +SELECT citus_relation_size('m_dist_col_idx'); + +-- distribute the table(s) +SELECT create_distributed_table('split_me', 'dist_col'); + +-- after citus +SELECT citus_relation_size('split_me'); +SELECT citus_relation_size('split_me_dist_col_idx'); +SELECT citus_relation_size('m'); +SELECT citus_relation_size('m_dist_col_idx'); + +DROP TABLE split_me; + -- Test inside the transaction BEGIN; ALTER TABLE supplier ALTER COLUMN s_suppkey SET NOT NULL; diff --git a/src/test/regress/sql/multi_tenant_isolation_nonblocking.sql b/src/test/regress/sql/multi_tenant_isolation_nonblocking.sql index 1299c928262..994f29f0a98 100644 --- a/src/test/regress/sql/multi_tenant_isolation_nonblocking.sql +++ b/src/test/regress/sql/multi_tenant_isolation_nonblocking.sql @@ -608,5 +608,5 @@ ALTER SEQUENCE pg_catalog.pg_dist_colocationid_seq RESTART 100; ALTER SEQUENCE pg_catalog.pg_dist_placement_placementid_seq RESTART :last_placement_id; -SELECT citus_set_coordinator_host('localhost'); - +-- make sure we don't have any replication objects leftover on the nodes +SELECT public.wait_for_resource_cleanup(); diff --git a/src/test/regress/sql/multi_test_helpers.sql b/src/test/regress/sql/multi_test_helpers.sql index f7c97f1b2d3..7d218361ce7 100644 --- a/src/test/regress/sql/multi_test_helpers.sql +++ b/src/test/regress/sql/multi_test_helpers.sql @@ -550,3 +550,119 @@ BEGIN RETURN result; END; $func$ LANGUAGE plpgsql; + +-- Returns pg_seclabels entries from all nodes in the cluster for which +-- the object name is the input. +CREATE OR REPLACE FUNCTION get_citus_tests_label_provider_labels(object_name text, + master_port INTEGER DEFAULT 57636, + worker_1_port INTEGER DEFAULT 57637, + worker_2_port INTEGER DEFAULT 57638) +RETURNS TABLE ( + node_type text, + result text +) +AS $func$ +DECLARE + pg_seclabels_cmd TEXT := 'SELECT to_jsonb(q.*) FROM (' || + 'SELECT provider, objtype, label FROM pg_seclabels ' || + 'WHERE objname = ''' || object_name || ''') q'; +BEGIN + RETURN QUERY + SELECT + CASE + WHEN nodeport = master_port THEN 'coordinator' + WHEN nodeport = worker_1_port THEN 'worker_1' + WHEN nodeport = worker_2_port THEN 'worker_2' + ELSE 'unexpected_node' + END AS node_type, + a.result + FROM run_command_on_all_nodes(pg_seclabels_cmd) a + JOIN pg_dist_node USING (nodeid) + ORDER BY node_type; +END; +$func$ LANGUAGE plpgsql; + +-- For all nodes, returns database properties of given database, except +-- oid, datfrozenxid and datminmxid. +-- +-- Also returns whether the node has a pg_dist_object record for the database +-- and whether there are any stale pg_dist_object records for a database. 
+CREATE OR REPLACE FUNCTION check_database_on_all_nodes(p_database_name text) +RETURNS TABLE (node_type text, result text) +AS $func$ +DECLARE + pg_ge_15_options text := ''; + pg_ge_16_options text := ''; +BEGIN + IF EXISTS (SELECT 1 FROM pg_attribute WHERE attrelid = 'pg_database'::regclass AND attname = 'datlocprovider') THEN + pg_ge_15_options := ', daticulocale, datcollversion, datlocprovider'; + ELSE + pg_ge_15_options := $$, null as daticulocale, null as datcollversion, 'c' as datlocprovider$$; + END IF; + + IF EXISTS (SELECT 1 FROM pg_attribute WHERE attrelid = 'pg_database'::regclass AND attname = 'daticurules') THEN + pg_ge_16_options := ', daticurules'; + ELSE + pg_ge_16_options := ', null as daticurules'; + END IF; + + RETURN QUERY + SELECT + CASE WHEN (groupid = 0 AND groupid = (SELECT groupid FROM pg_dist_local_group)) THEN 'coordinator (local)' + WHEN (groupid = 0) THEN 'coordinator (remote)' + WHEN (groupid = (SELECT groupid FROM pg_dist_local_group)) THEN 'worker node (local)' + ELSE 'worker node (remote)' + END AS node_type, + q2.result + FROM run_command_on_all_nodes( + format( + $$ + SELECT to_jsonb(q.*) + FROM ( + SELECT + ( + SELECT to_jsonb(database_properties.*) + FROM ( + SELECT datname, pa.rolname as database_owner, + pg_encoding_to_char(pd.encoding) as encoding, + datistemplate, datallowconn, datconnlimit, datacl, + pt.spcname AS tablespace, datcollate, datctype + %2$s -- >= pg15 options + %3$s -- >= pg16 options + FROM pg_database pd + JOIN pg_authid pa ON pd.datdba = pa.oid + JOIN pg_tablespace pt ON pd.dattablespace = pt.oid + WHERE datname = '%1$s' + ) database_properties + ) AS database_properties, + ( + SELECT COUNT(*)=1 + FROM pg_dist_object WHERE objid = (SELECT oid FROM pg_database WHERE datname = '%1$s') + ) AS pg_dist_object_record_for_db_exists, + ( + SELECT COUNT(*) > 0 + FROM pg_dist_object + WHERE classid = 1262 AND objid NOT IN (SELECT oid FROM pg_database) + ) AS stale_pg_dist_object_record_for_a_db_exists + ) q + $$, + p_database_name, pg_ge_15_options, pg_ge_16_options + ) + ) q2 + JOIN pg_dist_node USING (nodeid); +END; +$func$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION check_database_privileges(role_name text, db_name text, permissions text[]) +RETURNS TABLE(permission text, result text) +AS $func$ +DECLARE + permission text; +BEGIN + FOREACH permission IN ARRAY permissions + LOOP + RETURN QUERY EXECUTE format($inner$SELECT %s, result FROM run_command_on_all_nodes($$select has_database_privilege(%s,%s,%s); $$)$inner$, + quote_literal(permission), quote_literal(role_name), quote_literal(db_name), quote_literal(permission)); + END LOOP; +END; +$func$ LANGUAGE plpgsql; diff --git a/src/test/regress/sql/multi_utilities.sql b/src/test/regress/sql/multi_utilities.sql index 9a14ab59016..668e1f32fbf 100644 --- a/src/test/regress/sql/multi_utilities.sql +++ b/src/test/regress/sql/multi_utilities.sql @@ -229,6 +229,8 @@ VACUUM; insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; VACUUM local_vacuum_table; +VACUUM local_vacuum_table; +VACUUM local_vacuum_table; SELECT CASE WHEN s BETWEEN 20000000 AND 25000000 THEN 22500000 ELSE s END FROM pg_total_relation_size('local_vacuum_table') s ; @@ -257,24 +259,53 @@ VACUUM (DISABLE_PAGE_SKIPPING false) local_vacuum_table; insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; VACUUM (INDEX_CLEANUP OFF, PARALLEL 1) local_vacuum_table; +VACUUM (INDEX_CLEANUP OFF, PARALLEL 1) local_vacuum_table; +VACUUM 
(INDEX_CLEANUP OFF, PARALLEL 1) local_vacuum_table; SELECT CASE WHEN s BETWEEN 50000000 AND 70000000 THEN 60000000 ELSE s END size FROM pg_total_relation_size('local_vacuum_table') s ; insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; VACUUM (INDEX_CLEANUP ON, PARALLEL 1) local_vacuum_table; +VACUUM (INDEX_CLEANUP ON, PARALLEL 1) local_vacuum_table; +VACUUM (INDEX_CLEANUP ON, PARALLEL 1) local_vacuum_table; SELECT CASE WHEN s BETWEEN 20000000 AND 49999999 THEN 35000000 ELSE s END size FROM pg_total_relation_size('local_vacuum_table') s ; +-- vacuum (process_toast true) should be vacuuming toast tables (default is true) +select reltoastrelid from pg_class where relname='local_vacuum_table' +\gset + +SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass +\gset +insert into local_vacuum_table select i from generate_series(1,10000) i; +VACUUM (FREEZE, PROCESS_TOAST true) local_vacuum_table; +SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class +WHERE oid=:reltoastrelid::regclass; +delete from local_vacuum_table; + +-- vacuum (process_toast false) should not be vacuuming toast tables (default is true) +SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass +\gset +insert into local_vacuum_table select i from generate_series(1,10000) i; +VACUUM (FREEZE, PROCESS_TOAST false) local_vacuum_table; +SELECT relfrozenxid::text::integer = :frozenxid AS frozen_not_performed FROM pg_class +WHERE oid=:reltoastrelid::regclass; +delete from local_vacuum_table; + -- vacuum (truncate false) should not attempt to truncate off any empty pages at the end of the table (default is true) insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; vacuum (TRUNCATE false) local_vacuum_table; +vacuum (TRUNCATE false) local_vacuum_table; +vacuum (TRUNCATE false) local_vacuum_table; SELECT pg_total_relation_size('local_vacuum_table') as size1 \gset insert into local_vacuum_table select i from generate_series(1,1000000) i; delete from local_vacuum_table; vacuum (TRUNCATE true) local_vacuum_table; +vacuum (TRUNCATE true) local_vacuum_table; +vacuum (TRUNCATE true) local_vacuum_table; SELECT pg_total_relation_size('local_vacuum_table') as size2 \gset SELECT :size1 > :size2 as truncate_less_size; diff --git a/src/test/regress/sql/multi_utility_warnings.sql b/src/test/regress/sql/multi_utility_warnings.sql index 491c6557c8a..49855457c5b 100644 --- a/src/test/regress/sql/multi_utility_warnings.sql +++ b/src/test/regress/sql/multi_utility_warnings.sql @@ -6,3 +6,4 @@ -- databases. 
CREATE DATABASE new_database; +DROP DATABASE new_database; diff --git a/src/test/regress/sql/node_conninfo_reload.sql b/src/test/regress/sql/node_conninfo_reload.sql index 42ba8c9b195..2faaaeeb1ee 100644 --- a/src/test/regress/sql/node_conninfo_reload.sql +++ b/src/test/regress/sql/node_conninfo_reload.sql @@ -205,4 +205,30 @@ show citus.node_conninfo; -- Should work again ALTER TABLE test ADD COLUMN e INT; +-- show that we allow providing "host" param via citus.node_conninfo +ALTER SYSTEM SET citus.node_conninfo = 'sslmode=require host=nosuchhost'; +SELECT pg_reload_conf(); +SELECT pg_sleep(0.1); + +-- fails due to invalid host +SELECT COUNT(*)>=0 FROM test; + +SELECT array_agg(nodeid) as updated_nodeids from pg_dist_node WHERE nodename = 'localhost' \gset +UPDATE pg_dist_node SET nodename = '127.0.0.1' WHERE nodeid = ANY(:'updated_nodeids'::int[]); + +ALTER SYSTEM SET citus.node_conninfo = 'sslmode=require host=localhost'; +SELECT pg_reload_conf(); +SELECT pg_sleep(0.1); + +-- works when hostaddr is specified in pg_dist_node after providing host in citus.node_conninfo +SELECT COUNT(*)>=0 FROM test; + +-- restore original nodenames into pg_dist_node +UPDATE pg_dist_node SET nodename = 'localhost' WHERE nodeid = ANY(:'updated_nodeids'::int[]); + +-- reset it +ALTER SYSTEM RESET citus.node_conninfo; +select pg_reload_conf(); +select pg_sleep(0.1); -- wait for config reload to apply + DROP SCHEMA node_conninfo_reload CASCADE; diff --git a/src/test/regress/sql/other_databases.sql b/src/test/regress/sql/other_databases.sql new file mode 100644 index 00000000000..aa936e50753 --- /dev/null +++ b/src/test/regress/sql/other_databases.sql @@ -0,0 +1,182 @@ +CREATE SCHEMA other_databases; +SET search_path TO other_databases; + +SET citus.next_shard_id TO 10231023; + +CREATE DATABASE other_db1; + +\c other_db1 +SHOW citus.main_db; + +-- check that empty citus.superuser gives error +SET citus.superuser TO ''; +CREATE USER empty_superuser; +SET citus.superuser TO 'postgres'; + +CREATE USER other_db_user1; +CREATE USER other_db_user2; + +BEGIN; +CREATE USER other_db_user3; +CREATE USER other_db_user4; +COMMIT; + +BEGIN; +CREATE USER other_db_user5; +CREATE USER other_db_user6; +ROLLBACK; + +BEGIN; +CREATE USER other_db_user7; +SELECT 1/0; +COMMIT; + +CREATE USER other_db_user8; + +\c regression +SELECT usename FROM pg_user WHERE usename LIKE 'other\_db\_user%' ORDER BY 1; + +\c - - - :worker_1_port +SELECT usename FROM pg_user WHERE usename LIKE 'other\_db\_user%' ORDER BY 1; + +\c - - - :master_port +-- some user creation commands will fail but let's make sure we try to drop them just in case +DROP USER IF EXISTS other_db_user1, other_db_user2, other_db_user3, other_db_user4, other_db_user5, other_db_user6, other_db_user7, other_db_user8; + +-- Make sure non-superuser roles cannot use internal GUCs +-- but they can still create a role +CREATE USER nonsuperuser CREATEROLE; +GRANT ALL ON SCHEMA citus_internal TO nonsuperuser; +SET ROLE nonsuperuser; +SELECT citus_internal.execute_command_on_remote_nodes_as_user($$SELECT 'dangerous query'$$, 'postgres'); + +\c other_db1 +SET citus.local_hostname TO '127.0.0.1'; +SET ROLE nonsuperuser; + +-- Make sure that we don't try to access pg_dist_node. +-- Otherwise, we would get the following error: +-- ERROR: cache lookup failed for pg_dist_node, called too early? 
+CREATE USER other_db_user9; + +RESET ROLE; +RESET citus.local_hostname; +RESET ROLE; +\c regression +SELECT usename FROM pg_user WHERE usename LIKE 'other\_db\_user%' ORDER BY 1; + +\c - - - :worker_1_port +SELECT usename FROM pg_user WHERE usename LIKE 'other\_db\_user%' ORDER BY 1; + +\c - - - :master_port +REVOKE ALL ON SCHEMA citus_internal FROM nonsuperuser; +DROP USER other_db_user9, nonsuperuser; + +-- test from a worker +\c - - - :worker_1_port + +CREATE DATABASE worker_other_db; + +\c worker_other_db + +CREATE USER worker_user1; + +BEGIN; +CREATE USER worker_user2; +COMMIT; + +BEGIN; +CREATE USER worker_user3; +ROLLBACK; + +\c regression +SELECT usename FROM pg_user WHERE usename LIKE 'worker\_user%' ORDER BY 1; + +\c - - - :master_port +SELECT usename FROM pg_user WHERE usename LIKE 'worker\_user%' ORDER BY 1; + +-- some user creation commands will fail but let's make sure we try to drop them just in case +DROP USER IF EXISTS worker_user1, worker_user2, worker_user3; + +-- test creating and dropping a database from a Citus non-main database +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$); +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); +SELECT pg_sleep(0.1); +\c other_db1 +CREATE DATABASE other_db3; + +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db3') ORDER BY node_type; + +\c other_db1 +DROP DATABASE other_db3; + +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db3') ORDER BY node_type; + +\c worker_other_db - - :worker_1_port +CREATE DATABASE other_db4; + +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db4') ORDER BY node_type; + +\c worker_other_db +DROP DATABASE other_db4; + +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db4') ORDER BY node_type; + +DROP DATABASE worker_other_db; + +CREATE DATABASE other_db5; + +-- disable create database propagation for the next test +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$); +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); +SELECT pg_sleep(0.1); + +\c other_db5 - - :worker_2_port + +-- locally create a database +CREATE DATABASE local_db; + +\c regression - - - + +-- re-enable create database propagation +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$); +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); +SELECT pg_sleep(0.1); + +\c other_db5 - - :master_port + +-- Test a scenario where create database fails because the database +-- already exists on another node and we don't crash etc. 
+CREATE DATABASE local_db; + +\c regression - - - + +SELECT * FROM public.check_database_on_all_nodes('local_db') ORDER BY node_type, result; + +\c - - - :worker_2_port + +-- locally drop the database for cleanup purposes +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$); +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); +SELECT pg_sleep(0.1); + +DROP DATABASE local_db; + +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$); +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); +SELECT pg_sleep(0.1); + +\c - - - :master_port + +DROP DATABASE other_db5; + +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$); +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); +SELECT pg_sleep(0.1); + +DROP SCHEMA other_databases; +DROP DATABASE other_db1; diff --git a/src/test/regress/sql/pg14.sql b/src/test/regress/sql/pg14.sql index 8d3f430ce9e..47eb6793029 100644 --- a/src/test/regress/sql/pg14.sql +++ b/src/test/regress/sql/pg14.sql @@ -22,25 +22,6 @@ VACUUM (INDEX_CLEANUP "AUTOX") t1; VACUUM (FULL, FREEZE, VERBOSE false, ANALYZE, SKIP_LOCKED, INDEX_CLEANUP, PROCESS_TOAST, TRUNCATE) t1; VACUUM (FULL, FREEZE false, VERBOSE false, ANALYZE false, SKIP_LOCKED false, INDEX_CLEANUP "Auto", PROCESS_TOAST true, TRUNCATE false) t1; --- vacuum (process_toast true) should be vacuuming toast tables (default is true) -CREATE TABLE local_vacuum_table(name text); -select reltoastrelid from pg_class where relname='local_vacuum_table' -\gset - -SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass -\gset -VACUUM (FREEZE, PROCESS_TOAST true) local_vacuum_table; -SELECT relfrozenxid::text::integer > :frozenxid AS frozen_performed FROM pg_class -WHERE oid=:reltoastrelid::regclass; - --- vacuum (process_toast false) should not be vacuuming toast tables (default is true) -SELECT relfrozenxid AS frozenxid FROM pg_class WHERE oid=:reltoastrelid::regclass -\gset -VACUUM (FREEZE, PROCESS_TOAST false) local_vacuum_table; -SELECT relfrozenxid::text::integer = :frozenxid AS frozen_not_performed FROM pg_class -WHERE oid=:reltoastrelid::regclass; - -DROP TABLE local_vacuum_table; SET citus.log_remote_commands TO OFF; create table dist(a int, b int); @@ -777,4 +758,5 @@ DROP TABLE compression_and_defaults, compression_and_generated_col; set client_min_messages to error; drop extension postgres_fdw cascade; drop schema pg14 cascade; +DROP ROLE role_1, r1; reset client_min_messages; diff --git a/src/test/regress/sql/pg15.sql b/src/test/regress/sql/pg15.sql index fe60222ddc4..cd9dab58c59 100644 --- a/src/test/regress/sql/pg15.sql +++ b/src/test/regress/sql/pg15.sql @@ -968,6 +968,19 @@ ORDER BY is_coordinator DESC, result; set citus.log_remote_commands = true; set citus.grep_remote_commands = '%ALTER DATABASE%'; alter database regression REFRESH COLLATION VERSION; + +SET citus.enable_create_database_propagation TO OFF; +CREATE DATABASE local_database_1; +RESET citus.enable_create_database_propagation; + +CREATE ROLE local_role_1; + +ALTER DATABASE local_database_1 REFRESH COLLATION VERSION; + +REVOKE CONNECT, TEMPORARY, CREATE ON DATABASE local_database_1 FROM local_role_1; +DROP ROLE local_role_1; +DROP DATABASE local_database_1; + set citus.log_remote_commands = false; -- Clean up diff --git a/src/test/regress/sql/pg16.sql 
b/src/test/regress/sql/pg16.sql index 82e9edf1ee6..99024edcba8 100644 --- a/src/test/regress/sql/pg16.sql +++ b/src/test/regress/sql/pg16.sql @@ -588,31 +588,12 @@ REVOKE role1 FROM role2; GRANT role1 TO role2 WITH ADMIN TRUE; REVOKE role1 FROM role2; -RESET citus.log_remote_commands; -RESET citus.grep_remote_commands; - -- -- PG16 added new options to GRANT ROLE -- inherit: https://github.com/postgres/postgres/commit/e3ce2de -- set: https://github.com/postgres/postgres/commit/3d14e17 --- We don't propagate for now in Citus +-- We now propagate these options in Citus -- -GRANT role1 TO role2 WITH INHERIT FALSE; -REVOKE role1 FROM role2; -GRANT role1 TO role2 WITH INHERIT TRUE; -REVOKE role1 FROM role2; -GRANT role1 TO role2 WITH INHERIT OPTION; -REVOKE role1 FROM role2; -GRANT role1 TO role2 WITH SET FALSE; -REVOKE role1 FROM role2; -GRANT role1 TO role2 WITH SET TRUE; -REVOKE role1 FROM role2; -GRANT role1 TO role2 WITH SET OPTION; -REVOKE role1 FROM role2; - --- connect to worker node -GRANT role1 TO role2 WITH ADMIN OPTION, INHERIT FALSE, SET FALSE; - SELECT roleid::regrole::text AS role, member::regrole::text, admin_option, inherit_option, set_option FROM pg_auth_members WHERE roleid::regrole::text = 'role1' ORDER BY 1, 2; @@ -623,22 +604,15 @@ SELECT roleid::regrole::text AS role, member::regrole::text, admin_option, inherit_option, set_option FROM pg_auth_members WHERE roleid::regrole::text = 'role1' ORDER BY 1, 2; -SET citus.enable_ddl_propagation TO off; -GRANT role1 TO role2 WITH ADMIN OPTION, INHERIT FALSE, SET FALSE; -RESET citus.enable_ddl_propagation; - -SELECT roleid::regrole::text AS role, member::regrole::text, -admin_option, inherit_option, set_option FROM pg_auth_members -WHERE roleid::regrole::text = 'role1' ORDER BY 1, 2; - \c - - - :master_port -REVOKE role1 FROM role2; +-- Set GUCs to log remote commands and filter on REVOKE commands +SET citus.log_remote_commands TO on; +SET citus.grep_remote_commands = '%REVOKE%'; -- test REVOKES as well GRANT role1 TO role2; REVOKE SET OPTION FOR role1 FROM role2; REVOKE INHERIT OPTION FOR role1 FROM role2; - DROP ROLE role1, role2; -- test that everything works fine for roles that are not propagated @@ -650,10 +624,90 @@ RESET citus.enable_ddl_propagation; -- by default, admin option is false, inherit is true, set is true GRANT role3 TO role4; GRANT role3 TO role5 WITH ADMIN TRUE, INHERIT FALSE, SET FALSE; -SELECT roleid::regrole::text AS role, member::regrole::text, admin_option, inherit_option, set_option FROM pg_auth_members WHERE roleid::regrole::text = 'role3' ORDER BY 1, 2; +SELECT roleid::regrole::text AS role, member::regrole::text, admin_option, inherit_option, set_option FROM pg_auth_members +WHERE roleid::regrole::text = 'role3' ORDER BY 1, 2; DROP ROLE role3, role4, role5; +-- Test that everything works fine for roles that are propagated +CREATE ROLE role6; +CREATE ROLE role7; +CREATE ROLE role8; +CREATE ROLE role9; +CREATE ROLE role10; +CREATE ROLE role11; +CREATE ROLE role12; +CREATE ROLE role13; +CREATE ROLE role14; +CREATE ROLE role15; +CREATE ROLE role16; +CREATE ROLE role17; +CREATE ROLE role18 NOINHERIT; +CREATE ROLE role19; +CREATE ROLE role20; + +-- Grant role with admin and inherit options set to true +GRANT role6 TO role7 WITH ADMIN OPTION, INHERIT TRUE; +-- GRANT with INHERIT and SET Options +-- note that set is true by default so we don't include it in the propagation +GRANT role7 TO role8 WITH INHERIT TRUE, SET TRUE; +-- Grant role with admin option set to true and inherit option set to false +GRANT role9 
TO role10 WITH ADMIN OPTION, INHERIT FALSE; +-- Grant role with admin option set to true, and inherit/set options set to false +GRANT role11 TO role12 WITH INHERIT FALSE, ADMIN TRUE, SET FALSE; +-- Grant role with inherit set to false +GRANT role13 TO role14 WITH INHERIT FALSE; +-- Grant role with set option set to false +GRANT role15 TO role16 WITH SET FALSE; +-- Handles with default inherit false +-- we created role18 with noinherit option above +GRANT role17 TO role18; +-- Run GRANT/REVOKE commands on worker nodes +\c - - - :worker_1_port +-- Run GRANT command on worker node +GRANT role19 TO role20; +\c - - - :master_port + +SELECT roleid::regrole::text AS role, member::regrole::text, admin_option, inherit_option, set_option +FROM pg_auth_members +WHERE roleid::regrole::text LIKE 'role%' +ORDER BY 1, 2; +\c - - - :worker_1_port +SELECT roleid::regrole::text AS role, member::regrole::text, admin_option, inherit_option, set_option +FROM pg_auth_members +WHERE roleid::regrole::text LIKE 'role%' +ORDER BY 1, 2; + +\c - - - :master_port +DROP ROLE role6, role7, role8, role9, role10, role11, role12, + role13, role14, role15, role16, role17, role18, role19, role20; + +-- here we test that we propagate admin, set and inherit options correctly +-- when adding a new node. + + -- First, we need to remove the node: +SELECT 1 FROM citus_remove_node('localhost', :worker_2_port); + +CREATE ROLE create_role1; +CREATE ROLE create_role2; +CREATE ROLE create_role3; + +-- test grant role +GRANT create_role1 TO create_role2 WITH ADMIN OPTION, INHERIT FALSE, SET FALSE; +GRANT create_role2 TO create_role3 WITH INHERIT TRUE, ADMIN FALSE, SET FALSE; + +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option, inherit_option, set_option FROM pg_auth_members WHERE roleid::regrole::text LIKE 'create\_%' ORDER BY 1, 2; + +-- Add second worker node +SELECT 1 FROM citus_add_node('localhost', :worker_2_port); + +\c - - - :worker_2_port + +SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option, inherit_option, set_option FROM pg_auth_members WHERE roleid::regrole::text LIKE 'create\_%' ORDER BY 1, 2; + +\c - - - :master_port +DROP ROLE create_role1, create_role2, create_role3; + \set VERBOSITY terse SET client_min_messages TO ERROR; DROP EXTENSION postgres_fdw CASCADE; diff --git a/src/test/regress/sql/publication.sql b/src/test/regress/sql/publication.sql index 06bdc39fe52..70baf67267d 100644 --- a/src/test/regress/sql/publication.sql +++ b/src/test/regress/sql/publication.sql @@ -195,6 +195,7 @@ SET client_min_messages TO ERROR; DROP SCHEMA publication CASCADE; DROP SCHEMA "publication-1" CASCADE; DROP SCHEMA citus_schema_1 CASCADE; +SELECT public.wait_for_resource_cleanup(); \q \endif @@ -391,3 +392,5 @@ DROP SCHEMA publication CASCADE; DROP SCHEMA "publication-1" CASCADE; DROP SCHEMA citus_schema_1 CASCADE; DROP SCHEMA publication2 CASCADE; + +SELECT public.wait_for_resource_cleanup(); diff --git a/src/test/regress/sql/reassign_owned.sql b/src/test/regress/sql/reassign_owned.sql new file mode 100644 index 00000000000..0262b643c68 --- /dev/null +++ b/src/test/regress/sql/reassign_owned.sql @@ -0,0 +1,141 @@ +CREATE ROLE distributed_source_role1; +create ROLE "distributed_source_role-\!"; + +CREATE ROLE "distributed_target_role1-\!"; + +set citus.enable_create_role_propagation to off; +create ROLE local_target_role1; + + +\c - - - :worker_1_port +set citus.enable_create_role_propagation to off; +CREATE ROLE local_target_role1; + +\c - 
- - :master_port +set citus.enable_create_role_propagation to off; +create role local_source_role1; +reset citus.enable_create_role_propagation; + +GRANT CREATE ON SCHEMA public TO distributed_source_role1,"distributed_source_role-\!"; + +SET ROLE distributed_source_role1; +CREATE TABLE public.test_table (col1 int); + +set role "distributed_source_role-\!"; +CREATE TABLE public.test_table2 (col2 int); +RESET ROLE; +select create_distributed_table('test_table', 'col1'); +select create_distributed_table('test_table2', 'col2'); + + +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT + schemaname, + tablename, + tableowner + FROM + pg_tables + WHERE + tablename in ('test_table', 'test_table2') + ORDER BY tablename + ) q2 + $$ +) ORDER BY result; + +--tests for REASSIGN OWNED BY with multiple distributed roles and a local role to a distributed role +--local role should be ignored +set citus.log_remote_commands to on; +set citus.grep_remote_commands = '%REASSIGN OWNED BY%'; +REASSIGN OWNED BY distributed_source_role1,"distributed_source_role-\!",local_source_role1 TO "distributed_target_role1-\!"; +reset citus.grep_remote_commands; +reset citus.log_remote_commands; + +--check if the owner changed to "distributed_target_role1-\!" + +RESET citus.log_remote_commands; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT + schemaname, + tablename, + tableowner + FROM + pg_tables + WHERE + tablename in ('test_table', 'test_table2') + ORDER BY tablename + ) q2 + $$ +) ORDER BY result; + +--tests for REASSIGN OWNED BY with multiple distributed roles and a local role to a local role +--local role should be ignored +SET ROLE distributed_source_role1; +CREATE TABLE public.test_table3 (col1 int); + +set role "distributed_source_role-\!"; +CREATE TABLE public.test_table4 (col2 int); +RESET ROLE; +select create_distributed_table('test_table3', 'col1'); +select create_distributed_table('test_table4', 'col2'); + +set citus.log_remote_commands to on; +set citus.grep_remote_commands = '%REASSIGN OWNED BY%'; +set citus.enable_create_role_propagation to off; +set citus.enable_alter_role_propagation to off; +set citus.enable_alter_role_set_propagation to off; +REASSIGN OWNED BY distributed_source_role1,"distributed_source_role-\!",local_source_role1 TO local_target_role1; + +show citus.enable_create_role_propagation; +show citus.enable_alter_role_propagation; +show citus.enable_alter_role_set_propagation; + +reset citus.grep_remote_commands; +reset citus.log_remote_commands; +reset citus.enable_create_role_propagation; +reset citus.enable_alter_role_propagation; +reset citus.enable_alter_role_set_propagation; + + +--check if the owner changed to local_target_role1 +SET citus.log_remote_commands = false; +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT + schemaname, + tablename, + tableowner + FROM + pg_tables + WHERE + tablename in ('test_table3', 'test_table4') + ORDER BY tablename + ) q2 + $$ +) ORDER BY result; + +--clear resources +DROP OWNED BY distributed_source_role1, "distributed_source_role-\!","distributed_target_role1-\!",local_target_role1; + +SELECT result from run_command_on_all_nodes( + $$ + SELECT jsonb_agg(to_jsonb(q2.*)) FROM ( + SELECT + schemaname, + tablename, + tableowner +FROM + pg_tables +WHERE + tablename in ('test_table', 'test_table2', 'test_table3', 'test_table4') + ) q2 + $$ +) ORDER BY result; + + +set client_min_messages to warning;
+drop role distributed_source_role1, "distributed_source_role-\!","distributed_target_role1-\!",local_target_role1,local_source_role1; diff --git a/src/test/regress/sql/remove_non_default_nodes.sql b/src/test/regress/sql/remove_non_default_nodes.sql new file mode 100644 index 00000000000..4175e87dc29 --- /dev/null +++ b/src/test/regress/sql/remove_non_default_nodes.sql @@ -0,0 +1,8 @@ +-- The default nodes for the citus test suite are coordinator and 2 worker nodes +-- Which we identify with master_port, worker_1_port, worker_2_port. +-- When needed in some tests, GetLocalNodeId() does not behave correctly, +-- So we remove the non default nodes. This test expects the non default nodes +-- to not have any active placements. +SELECT any_value(citus_remove_node('localhost', nodeport)) +FROM pg_dist_node +WHERE nodeport NOT IN (:master_port, :worker_1_port, :worker_2_port); diff --git a/src/test/regress/sql/role_command_from_any_node.sql b/src/test/regress/sql/role_command_from_any_node.sql new file mode 100644 index 00000000000..0fd574716e7 --- /dev/null +++ b/src/test/regress/sql/role_command_from_any_node.sql @@ -0,0 +1,174 @@ +-- idempotently remove the coordinator from metadata +SELECT COUNT(citus_remove_node(nodename, nodeport)) >= 0 FROM pg_dist_node WHERE nodename = 'localhost' AND nodeport = :master_port; + +-- make sure that CREATE ROLE from workers is not supported when coordinator is not added to metadata +SELECT result FROM run_command_on_workers('CREATE ROLE test_role'); + +\c - - - :master_port + +CREATE SCHEMA role_command_from_any_node; +SET search_path TO role_command_from_any_node; + +SET client_min_messages TO WARNING; +SELECT 1 FROM citus_add_node('localhost', :master_port, groupid => 0); + +CREATE OR REPLACE FUNCTION check_role_on_all_nodes(p_role_name text) +RETURNS TABLE (node_type text, result text) +AS $func$ +DECLARE + v_worker_query text; +BEGIN + v_worker_query := format( + $$ + SELECT to_jsonb(q1.*) FROM ( + SELECT + ( + SELECT COUNT(*) = 1 FROM pg_roles WHERE rolname = '%s' + ) AS role_exists, + ( + SELECT to_jsonb(q.*) FROM (SELECT * FROM pg_roles WHERE rolname = '%s') q + ) AS role_properties, + ( + SELECT COUNT(*) = 1 + FROM pg_dist_object + WHERE objid = (SELECT oid FROM pg_roles WHERE rolname = '%s') + ) AS pg_dist_object_record_for_role_exists, + ( + SELECT COUNT(*) > 0 + FROM pg_dist_object + WHERE classid = 1260 AND objid NOT IN (SELECT oid FROM pg_roles) + ) AS stale_pg_dist_object_record_for_a_role_exists + ) q1 + $$, + p_role_name, p_role_name, p_role_name + ); + + RETURN QUERY + SELECT + CASE WHEN (groupid = 0 AND groupid = (SELECT groupid FROM pg_dist_local_group)) THEN 'coordinator (local)' + WHEN (groupid = 0) THEN 'coordinator (remote)' + WHEN (groupid = (SELECT groupid FROM pg_dist_local_group)) THEN 'worker node (local)' + ELSE 'worker node (remote)' + END AS node_type, + q2.result + FROM run_command_on_all_nodes(v_worker_query) q2 + JOIN pg_dist_node USING (nodeid); +END; +$func$ LANGUAGE plpgsql; + +\c - - - :worker_1_port + +SET search_path TO role_command_from_any_node; +SET client_min_messages TO NOTICE; + +SET citus.enable_create_role_propagation TO OFF; + +CREATE ROLE test_role; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + +DROP ROLE test_role; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + +CREATE ROLE test_role; +SELECT node_type, (result::jsonb - 'role_properties') as
result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + +SET citus.enable_create_role_propagation TO ON; + +-- doesn't fail even if the role doesn't exist on other nodes +DROP ROLE test_role; + +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + +CREATE ROLE test_role; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + +DROP ROLE test_role; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role') ORDER BY node_type; + +CREATE ROLE test_role; + +SET citus.enable_alter_role_propagation TO OFF; + +ALTER ROLE test_role RENAME TO test_role_renamed; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + +ALTER ROLE test_role_renamed RENAME TO test_role; + +SET citus.enable_alter_role_propagation TO ON; + +ALTER ROLE test_role RENAME TO test_role_renamed; +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + +SET citus.enable_alter_role_propagation TO OFF; +ALTER ROLE test_role_renamed CREATEDB; +SET citus.enable_alter_role_propagation TO ON; + +SELECT node_type, (result::jsonb)->'role_properties'->'rolcreatedb' as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + +ALTER ROLE test_role_renamed CREATEDB; +SELECT node_type, (result::jsonb)->'role_properties'->'rolcreatedb' as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + +SET citus.enable_alter_role_set_propagation TO ON; + +ALTER ROLE current_user IN DATABASE "regression" SET enable_hashjoin TO OFF; + +SELECT result FROM run_command_on_all_nodes('SHOW enable_hashjoin') ORDER BY result; + +SET citus.enable_alter_role_set_propagation TO OFF; + +ALTER ROLE current_user IN DATABASE "regression" SET enable_hashjoin TO ON; + +SELECT result FROM run_command_on_all_nodes('SHOW enable_hashjoin') ORDER BY result; + +SET citus.enable_alter_role_set_propagation TO ON; + +ALTER ROLE current_user IN DATABASE "regression" RESET enable_hashjoin; + +CREATE ROLE another_user; + +SET citus.enable_create_role_propagation TO OFF; + +GRANT another_user TO test_role_renamed; + +SELECT result FROM run_command_on_all_nodes($$ + SELECT COUNT(*)=1 FROM pg_auth_members WHERE roleid = 'another_user'::regrole AND member = 'test_role_renamed'::regrole +$$) ORDER BY result; + +SET citus.enable_create_role_propagation TO ON; + +SET client_min_messages TO ERROR; +GRANT another_user TO test_role_renamed; +SET client_min_messages TO NOTICE; + +SELECT result FROM run_command_on_all_nodes($$ + SELECT COUNT(*)=1 FROM pg_auth_members WHERE roleid = 'another_user'::regrole AND member = 'test_role_renamed'::regrole +$$) ORDER BY result; + +\c - - - :master_port + +SET search_path TO role_command_from_any_node; +SET client_min_messages TO NOTICE; + +SELECT citus_remove_node('localhost', :worker_1_port); +SELECT 1 FROM citus_add_node('localhost', :worker_1_port); + +-- make sure that citus_add_node() propagates the roles created via a worker +SELECT node_type, (result::jsonb - 'role_properties') as result FROM check_role_on_all_nodes('test_role_renamed') ORDER BY node_type; + +SELECT citus_remove_node('localhost', :master_port); + +\c - - - :worker_1_port + +-- they fail because the coordinator is not added to metadata +DROP ROLE test_role_renamed; 
+ALTER ROLE test_role_renamed RENAME TO test_role; +ALTER ROLE test_role_renamed CREATEDB; +ALTER ROLE current_user IN DATABASE "regression" SET enable_hashjoin TO OFF; +GRANT another_user TO test_role_renamed; + +\c - - - :master_port + +DROP ROLE test_role_renamed, another_user; + +SET client_min_messages TO WARNING; +DROP SCHEMA role_command_from_any_node CASCADE; diff --git a/src/test/regress/sql/role_operations_from_non_maindb.sql b/src/test/regress/sql/role_operations_from_non_maindb.sql new file mode 100644 index 00000000000..5f569208b29 --- /dev/null +++ b/src/test/regress/sql/role_operations_from_non_maindb.sql @@ -0,0 +1,106 @@ +-- Create a new database +set citus.enable_create_database_propagation to on; +CREATE DATABASE role_operations_test_db; +SET citus.superuser TO 'postgres'; +-- Connect to the new database +\c role_operations_test_db +-- Test CREATE ROLE with various options +CREATE ROLE test_role1 WITH LOGIN PASSWORD 'password1'; + +\c role_operations_test_db - - :worker_1_port +CREATE USER "test_role2-needs\!escape" +WITH + SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN REPLICATION BYPASSRLS CONNECTION +LIMIT 10 VALID UNTIL '2023-01-01' IN ROLE test_role1; + +\c regression - - :master_port + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape') + ORDER BY rolname + ) t +$$); + +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT r.rolname + FROM pg_dist_object d + JOIN pg_roles r ON d.objid = r.oid + WHERE r.rolname IN ('test_role1', 'test_role2-needs\!escape') + order by r.rolname + ) t +$$); + +\c role_operations_test_db - - :master_port +-- Test ALTER ROLE with various options +ALTER ROLE test_role1 WITH PASSWORD 'new_password1'; + +\c role_operations_test_db - - :worker_1_port +ALTER USER "test_role2-needs\!escape" +WITH + NOSUPERUSER NOCREATEDB NOCREATEROLE NOINHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION +LIMIT 5 VALID UNTIL '2024-01-01'; + +\c regression - - :master_port +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape') + ORDER BY rolname + ) t +$$); + +\c role_operations_test_db - - :master_port +-- Test DROP ROLE +DROP ROLE no_such_role; -- fails nicely +DROP ROLE IF EXISTS no_such_role; -- doesn't fail + +CREATE ROLE new_role; +DROP ROLE IF EXISTS no_such_role, new_role; -- doesn't fail +DROP ROLE IF EXISTS test_role1, "test_role2-needs\!escape"; + +\c regression - - :master_port +--verify that roles and dist_object are dropped +select result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT rolname, rolsuper, rolinherit, rolcreaterole, rolcreatedb, + rolcanlogin, rolreplication, rolbypassrls, rolconnlimit, + (rolpassword != '') as pass_not_empty, DATE(rolvaliduntil) + FROM pg_authid + WHERE rolname in ('test_role1', 'test_role2-needs\!escape','new_role','no_such_role') + ORDER BY rolname + ) t +$$); + +select 
result FROM run_command_on_all_nodes($$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT r.rolname + FROM pg_roles r + WHERE r.rolname IN ('test_role1', 'test_role2-needs\!escape','new_role','no_such_role') + order by r.rolname + ) t +$$); + +SELECT result FROM run_command_on_all_nodes($$ + SELECT count(*) leaked_pg_dist_object_records_for_roles + FROM pg_dist_object LEFT JOIN pg_authid ON (objid = oid) + WHERE classid = 1260 AND oid IS NULL +$$); + +-- Clean up: drop the database +set citus.enable_create_database_propagation to on; +DROP DATABASE role_operations_test_db; +reset citus.enable_create_database_propagation; diff --git a/src/test/regress/sql/schema_based_sharding.sql b/src/test/regress/sql/schema_based_sharding.sql index bd8065ab938..f0b2276df00 100644 --- a/src/test/regress/sql/schema_based_sharding.sql +++ b/src/test/regress/sql/schema_based_sharding.sql @@ -12,15 +12,15 @@ SET client_min_messages TO NOTICE; -- Verify that the UDFs used to sync tenant schema metadata to workers -- fail on NULL input. -SELECT citus_internal_add_tenant_schema(NULL, 1); -SELECT citus_internal_add_tenant_schema(1, NULL); -SELECT citus_internal_delete_tenant_schema(NULL); -SELECT citus_internal_unregister_tenant_schema_globally(1, NULL); -SELECT citus_internal_unregister_tenant_schema_globally(NULL, 'text'); +SELECT citus_internal.add_tenant_schema(NULL, 1); +SELECT citus_internal.add_tenant_schema(1, NULL); +SELECT citus_internal.delete_tenant_schema(NULL); +SELECT citus_internal.unregister_tenant_schema_globally(1, NULL); +SELECT citus_internal.unregister_tenant_schema_globally(NULL, 'text'); --- Verify that citus_internal_unregister_tenant_schema_globally can only +-- Verify that citus_internal.unregister_tenant_schema_globally can only -- be called on schemas that are dropped already. -SELECT citus_internal_unregister_tenant_schema_globally('regular_schema'::regnamespace, 'regular_schema'); +SELECT citus_internal.unregister_tenant_schema_globally('regular_schema'::regnamespace, 'regular_schema'); SELECT 1 FROM citus_remove_node('localhost', :worker_2_port); @@ -1022,9 +1022,9 @@ SELECT pg_reload_conf(); ALTER SYSTEM SET citus.enable_schema_based_sharding TO ON; SELECT pg_reload_conf(); --- Verify that citus_internal_unregister_tenant_schema_globally is a no-op +-- Verify that citus_internal.unregister_tenant_schema_globally is a no-op -- on workers. 
-SELECT citus_internal_unregister_tenant_schema_globally('tenant_3'::regnamespace, 'tenant_3'); +SELECT citus_internal.unregister_tenant_schema_globally('tenant_3'::regnamespace, 'tenant_3'); \c - - - :master_port diff --git a/src/test/regress/sql/seclabel.sql b/src/test/regress/sql/seclabel.sql new file mode 100644 index 00000000000..d39e0118392 --- /dev/null +++ b/src/test/regress/sql/seclabel.sql @@ -0,0 +1,106 @@ +-- +-- SECLABEL +-- +-- Test suite for SECURITY LABEL ON ROLE statements +-- + +-- first we remove one of the worker nodes to be able to test +-- citus_add_node later +SELECT citus_remove_node('localhost', :worker_2_port); + +-- create two roles, one with characters that need escaping +CREATE ROLE user1; +CREATE ROLE "user 2"; + +-- check an invalid label for our current dummy hook citus_test_object_relabel +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'invalid_label'; + +-- if we disable metadata_sync, the command will not be propagated +SET citus.enable_metadata_sync TO off; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_unclassified'; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + +RESET citus.enable_metadata_sync; + +-- check that we only support propagating for roles +SET citus.shard_replication_factor to 1; +-- distributed table +CREATE TABLE a (a int); +SELECT create_distributed_table('a', 'a'); +-- distributed view +CREATE VIEW v_dist AS SELECT * FROM a; +-- distributed function +CREATE FUNCTION notice(text) RETURNS void LANGUAGE plpgsql AS $$ + BEGIN RAISE NOTICE '%', $1; END; $$; + +SECURITY LABEL ON TABLE a IS 'citus_classified'; +SECURITY LABEL ON FUNCTION notice IS 'citus_unclassified'; +SECURITY LABEL ON VIEW v_dist IS 'citus_classified'; + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('a') ORDER BY node_type; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('notice(text)') ORDER BY node_type; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('v_dist') ORDER BY node_type; + +\c - - - :worker_1_port +SECURITY LABEL ON TABLE a IS 'citus_classified'; +SECURITY LABEL ON FUNCTION notice IS 'citus_unclassified'; +SECURITY LABEL ON VIEW v_dist IS 'citus_classified'; + +\c - - - :master_port +SELECT node_type, result FROM get_citus_tests_label_provider_labels('a') ORDER BY node_type; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('notice(text)') ORDER BY node_type; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('v_dist') ORDER BY node_type; + +DROP TABLE a CASCADE; +DROP FUNCTION notice; + +-- test that SECURITY LABEL statement is actually propagated for ROLES +SET citus.log_remote_commands TO on; +SET citus.grep_remote_commands = '%SECURITY LABEL%'; + +-- we have exactly one provider loaded, so we may not include the provider in the command +SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified'; +SECURITY LABEL ON ROLE user1 IS NULL; +SECURITY LABEL ON ROLE user1 IS 'citus_unclassified'; +SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_classified'; + +\c - - - :worker_1_port +SET citus.log_remote_commands TO on; +SET citus.grep_remote_commands = '%SECURITY LABEL%'; +-- command from the worker node should be propagated to the coordinator +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; +SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 
'citus_classified'; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + +RESET citus.log_remote_commands; +SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified'; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; +\c - - - :master_port + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; + +-- add a new node and check that it also propagates the SECURITY LABEL statement to the new node +SET citus.log_remote_commands TO on; +SET citus.grep_remote_commands = '%SECURITY LABEL%'; +SELECT 1 FROM citus_add_node('localhost', :worker_2_port); + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; + +-- disable the GUC and check that the command is not propagated +SET citus.enable_alter_role_propagation TO off; +SECURITY LABEL ON ROLE user1 IS 'citus_unclassified'; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + +\c - - - :worker_2_port +SET citus.log_remote_commands TO on; +SET citus.grep_remote_commands = '%SECURITY LABEL%'; +SET citus.enable_alter_role_propagation TO off; +SECURITY LABEL ON ROLE user1 IS 'citus ''!unclassified'; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; +RESET citus.enable_alter_role_propagation; + +-- cleanup +RESET citus.log_remote_commands; +DROP ROLE user1, "user 2"; diff --git a/src/test/regress/sql/seclabel_non_maindb.sql b/src/test/regress/sql/seclabel_non_maindb.sql new file mode 100644 index 00000000000..1833d419363 --- /dev/null +++ b/src/test/regress/sql/seclabel_non_maindb.sql @@ -0,0 +1,71 @@ +-- SECLABEL +-- +-- Test suite for running SECURITY LABEL ON ROLE statements from non-main databases + +SET citus.enable_create_database_propagation to ON; + +CREATE DATABASE database1; +CREATE DATABASE database2; + +\c - - - :worker_1_port +SET citus.enable_create_database_propagation to ON; +CREATE DATABASE database_w1; + + +\c - - - :master_port +CREATE ROLE user1; +\c database1 +SHOW citus.main_db; +SHOW citus.superuser; + +CREATE ROLE "user 2"; + +-- Set a SECURITY LABEL on a role from a non-main database +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified'; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_unclassified'; + +-- Check the result +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; + +\c database1 +-- Set a SECURITY LABEL on database, it should not be propagated +SECURITY LABEL FOR "citus '!tests_label_provider" ON DATABASE database1 IS 'citus_classified'; + +-- Set a SECURITY LABEL on a table, it should not be propagated +CREATE TABLE a (i int); +SECURITY LABEL ON TABLE a IS 'citus_classified'; + +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('database1') ORDER BY node_type; + +-- Check that only the SECURITY LABEL for ROLES is propagated to the non-main databases on other nodes +\c database_w1 - - :worker_1_port +SELECT provider, objtype, label, objname FROM pg_seclabels 
ORDER BY objname; + + +-- Check the result after a transaction +BEGIN; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_unclassified'; +SECURITY LABEL FOR "citus '!tests_label_provider" ON DATABASE database_w1 IS 'citus_classified'; +COMMIT; + +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('database_w1') ORDER BY node_type; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + +BEGIN; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_classified'; +ROLLBACK; + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; + +-- clean up +SET citus.enable_create_database_propagation to ON; +DROP DATABASE database1; +DROP DATABASE database2; +DROP DATABASE database_w1; +DROP ROLE user1; +DROP ROLE "user 2"; +RESET citus.enable_create_database_propagation; diff --git a/src/test/regress/sql/shard_rebalancer.sql b/src/test/regress/sql/shard_rebalancer.sql index 07efa8617c0..9037f8f7504 100644 --- a/src/test/regress/sql/shard_rebalancer.sql +++ b/src/test/regress/sql/shard_rebalancer.sql @@ -1229,6 +1229,20 @@ SELECT citus_add_rebalance_strategy( 0.1 ); +SELECT citus_add_rebalance_strategy( + 'test_improvement_threshold', + 'citus_shard_cost_1', + 'capacity_high_worker_2', + 'citus_shard_allowed_on_node_true', + 0.2, + 0.1, + 0.3 + ); + +SELECT * FROM pg_dist_rebalance_strategy WHERE name='test_improvement_threshold'; + +DELETE FROM pg_catalog.pg_dist_rebalance_strategy WHERE name='test_improvement_threshold'; + -- Make it a data node again SELECT * from master_set_node_property('localhost', :worker_2_port, 'shouldhaveshards', true); DROP TABLE tab; @@ -1326,6 +1340,43 @@ DROP TABLE t1, r1, r2; -- test suites should clean up their distributed tables. SELECT count(*) FROM pg_dist_partition; +-- verify a system with a new node won't copy distributed table shards without reference tables + +SELECT 1 from master_remove_node('localhost', :worker_2_port); +SELECT public.wait_until_metadata_sync(30000); + +CREATE TABLE r1 (a int PRIMARY KEY, b int); +SELECT create_reference_table('r1'); + +CREATE TABLE d1 (a int PRIMARY KEY, b int); +SELECT create_distributed_table('d1', 'a'); + +ALTER SEQUENCE pg_dist_groupid_seq RESTART WITH 15; +SELECT 1 from master_add_node('localhost', :worker_2_port); + +-- count the number of placements for the reference table to verify it is not available on +-- all nodes +SELECT count(*) +FROM pg_dist_shard +JOIN pg_dist_shard_placement USING (shardid) +WHERE logicalrelid = 'r1'::regclass; + +-- #7426 We can't move shards to the fresh node before we copy reference tables there. +-- rebalance_table_shards() will do the copy, but the low-level +-- citus_move_shard_placement() should raise an error +SELECT citus_move_shard_placement(pg_dist_shard.shardid, nodename, nodeport, 'localhost', :worker_2_port) + FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) + WHERE logicalrelid = 'd1'::regclass AND nodename = 'localhost' AND nodeport = :worker_1_port LIMIT 1; + +SELECT replicate_reference_tables(); + +-- After replication, the move should succeed. 
+SELECT citus_move_shard_placement(pg_dist_shard.shardid, nodename, nodeport, 'localhost', :worker_2_port)
+ FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
+ WHERE logicalrelid = 'd1'::regclass AND nodename = 'localhost' AND nodeport = :worker_1_port LIMIT 1;
+
+DROP TABLE d1, r1;
+
 -- verify a system having only reference tables will copy the reference tables when
 -- executing the rebalancer
diff --git a/src/test/regress/sql/system_queries.sql b/src/test/regress/sql/system_queries.sql
new file mode 100644
index 00000000000..1e1d868765c
--- /dev/null
+++ b/src/test/regress/sql/system_queries.sql
@@ -0,0 +1,27 @@
+-- The following query retrieves the foreign key constraints of the table "pg_dist_background_job"
+-- along with their details. It is a regression test for a null pointer exception that occurred
+-- in the "HasRangeTableRef" function of "worker_shard_visibility"; the issue was fixed in PR #7604.
+select
+ ct.conname as constraint_name,
+ a.attname as column_name,
+ fc.relname as foreign_table_name,
+ fns.nspname as foreign_table_schema
+from
+ (SELECT ct.conname, ct.conrelid, ct.confrelid, ct.conkey, ct.contype,
+ct.confkey, generate_subscripts(ct.conkey, 1) AS s
+ FROM pg_constraint ct
+ ) AS ct
+ inner join pg_class c on c.oid=ct.conrelid
+ inner join pg_namespace ns on c.relnamespace=ns.oid
+ inner join pg_attribute a on a.attrelid=ct.conrelid and a.attnum =
+ct.conkey[ct.s]
+ left join pg_class fc on fc.oid=ct.confrelid
+ left join pg_namespace fns on fc.relnamespace=fns.oid
+ left join pg_attribute fa on fa.attrelid=ct.confrelid and fa.attnum =
+ct.confkey[ct.s]
+where
+ ct.contype='f'
+ and fc.relname='pg_dist_background_job'
+ and ns.nspname='pg_catalog'
+order by
+ fns.nspname, fc.relname, a.attnum;
diff --git a/src/test/regress/sql/text_search.sql b/src/test/regress/sql/text_search.sql
index d0d4b5a6f6c..4a65a5e1a80 100644
--- a/src/test/regress/sql/text_search.sql
+++ b/src/test/regress/sql/text_search.sql
@@ -199,9 +199,9 @@ SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config1'::regconfig;
 SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config2'::regconfig; $$) ORDER BY 1,2;
 SELECT * FROM run_command_on_workers($$ SELECT 'text_search.config3'::regconfig; $$) ORDER BY 1,2;
 -- verify they are all removed locally
-SELECT 'text_search.config1'::regconfig;
-SELECT 'text_search.config2'::regconfig;
-SELECT 'text_search.config3'::regconfig;
+SELECT 1 FROM pg_ts_config WHERE cfgname = 'config1' AND cfgnamespace = 'text_search'::regnamespace;
+SELECT 1 FROM pg_ts_config WHERE cfgname = 'config2' AND cfgnamespace = 'text_search'::regnamespace;
+SELECT 1 FROM pg_ts_config WHERE cfgname = 'config3' AND cfgnamespace = 'text_search'::regnamespace;
 -- verify that indexes created concurrently that would propagate a TEXT SEARCH CONFIGURATION object
 SET citus.enable_ddl_propagation TO off;
@@ -235,7 +235,7 @@ CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_wrongly ( copy = f
 -- now we expect manually_created_wrongly(citus_backup_XXX) to show up when querying the configurations
 SELECT * FROM run_command_on_workers($$
- SELECT array_agg(cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_wrongly%';
+ SELECT array_agg(cfgname ORDER BY cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_wrongly%';
 $$) ORDER BY 1,2;
 -- verify the objects get reused appropriately when the specification is the same
@@ -249,7 +249,7 @@ CREATE TEXT SEARCH CONFIGURATION text_search.manually_created_correct ( copy = f
 --
now we don't expect manually_created_correct(citus_backup_XXX) to show up when querying the configurations as the -- original one is reused SELECT * FROM run_command_on_workers($$ - SELECT array_agg(cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_correct%'; + SELECT array_agg(cfgname ORDER BY cfgname) FROM pg_ts_config WHERE cfgname LIKE 'manually_created_correct%'; $$) ORDER BY 1,2; CREATE SCHEMA "Text Search Requiring Quote's"; diff --git a/src/test/regress/sql/upgrade_basic_after.sql b/src/test/regress/sql/upgrade_basic_after.sql index b40501a1e67..855c060083e 100644 --- a/src/test/regress/sql/upgrade_basic_after.sql +++ b/src/test/regress/sql/upgrade_basic_after.sql @@ -3,48 +3,6 @@ BEGIN; -- We have the tablename filter to avoid adding an alternative output for when the coordinator is in metadata vs when not SELECT * FROM pg_indexes WHERE schemaname = 'upgrade_basic' and tablename NOT LIKE 'r_%' ORDER BY tablename; -SELECT nextval('pg_dist_shardid_seq') > MAX(shardid) FROM pg_dist_shard; -SELECT nextval('pg_dist_placement_placementid_seq') > MAX(placementid) FROM pg_dist_placement; -SELECT nextval('pg_dist_groupid_seq') > MAX(groupid) FROM pg_dist_node; -SELECT nextval('pg_dist_node_nodeid_seq') > MAX(nodeid) FROM pg_dist_node; -SELECT nextval('pg_dist_colocationid_seq') > MAX(colocationid) FROM pg_dist_colocation; --- while testing sequences on pg_dist_cleanup, they return null in pg upgrade schedule --- but return a valid value in citus upgrade schedule --- that's why we accept both NULL and MAX()+1 here -SELECT - CASE WHEN MAX(operation_id) IS NULL - THEN true - ELSE nextval('pg_dist_operationid_seq') > MAX(operation_id) - END AS check_operationid - FROM pg_dist_cleanup; -SELECT - CASE WHEN MAX(record_id) IS NULL - THEN true - ELSE nextval('pg_dist_cleanup_recordid_seq') > MAX(record_id) - END AS check_recordid - FROM pg_dist_cleanup; -SELECT nextval('pg_dist_background_job_job_id_seq') > COALESCE(MAX(job_id), 0) FROM pg_dist_background_job; -SELECT nextval('pg_dist_background_task_task_id_seq') > COALESCE(MAX(task_id), 0) FROM pg_dist_background_task; -SELECT last_value > 0 FROM pg_dist_clock_logical_seq; - --- If this query gives output it means we've added a new sequence that should --- possibly be restored after upgrades. 
-SELECT sequence_name FROM information_schema.sequences - WHERE sequence_name LIKE 'pg_dist_%' - AND sequence_name NOT IN ( - -- these ones are restored above - 'pg_dist_shardid_seq', - 'pg_dist_placement_placementid_seq', - 'pg_dist_groupid_seq', - 'pg_dist_node_nodeid_seq', - 'pg_dist_colocationid_seq', - 'pg_dist_operationid_seq', - 'pg_dist_cleanup_recordid_seq', - 'pg_dist_background_job_job_id_seq', - 'pg_dist_background_task_task_id_seq', - 'pg_dist_clock_logical_seq' - ); - SELECT logicalrelid FROM pg_dist_partition JOIN pg_depend ON logicalrelid=objid JOIN pg_catalog.pg_class ON logicalrelid=oid diff --git a/src/test/regress/sql/upgrade_basic_after_non_mixed.sql b/src/test/regress/sql/upgrade_basic_after_non_mixed.sql new file mode 100644 index 00000000000..17b8367fbbd --- /dev/null +++ b/src/test/regress/sql/upgrade_basic_after_non_mixed.sql @@ -0,0 +1,42 @@ +SELECT nextval('pg_dist_shardid_seq') > MAX(shardid) FROM pg_dist_shard; +SELECT nextval('pg_dist_placement_placementid_seq') > MAX(placementid) FROM pg_dist_placement; +SELECT nextval('pg_dist_groupid_seq') > MAX(groupid) FROM pg_dist_node; +SELECT nextval('pg_dist_node_nodeid_seq') > MAX(nodeid) FROM pg_dist_node; +SELECT nextval('pg_dist_colocationid_seq') > MAX(colocationid) FROM pg_dist_colocation; + +-- while testing sequences on pg_dist_cleanup, they return null in pg upgrade schedule +-- but return a valid value in citus upgrade schedule +-- that's why we accept both NULL and MAX()+1 here +SELECT + CASE WHEN MAX(operation_id) IS NULL + THEN true + ELSE nextval('pg_dist_operationid_seq') > MAX(operation_id) + END AS check_operationid + FROM pg_dist_cleanup; +SELECT + CASE WHEN MAX(record_id) IS NULL + THEN true + ELSE nextval('pg_dist_cleanup_recordid_seq') > MAX(record_id) + END AS check_recordid + FROM pg_dist_cleanup; +SELECT nextval('pg_dist_background_job_job_id_seq') > COALESCE(MAX(job_id), 0) FROM pg_dist_background_job; +SELECT nextval('pg_dist_background_task_task_id_seq') > COALESCE(MAX(task_id), 0) FROM pg_dist_background_task; +SELECT last_value > 0 FROM pg_dist_clock_logical_seq; + +-- If this query gives output it means we've added a new sequence that should +-- possibly be restored after upgrades. +SELECT sequence_name FROM information_schema.sequences + WHERE sequence_name LIKE 'pg_dist_%' + AND sequence_name NOT IN ( + -- these ones are restored above + 'pg_dist_shardid_seq', + 'pg_dist_placement_placementid_seq', + 'pg_dist_groupid_seq', + 'pg_dist_node_nodeid_seq', + 'pg_dist_colocationid_seq', + 'pg_dist_operationid_seq', + 'pg_dist_cleanup_recordid_seq', + 'pg_dist_background_job_job_id_seq', + 'pg_dist_background_task_task_id_seq', + 'pg_dist_clock_logical_seq' + ); diff --git a/src/test/regress/sql/upgrade_basic_before_non_mixed.sql b/src/test/regress/sql/upgrade_basic_before_non_mixed.sql new file mode 100644 index 00000000000..e69de29bb2d diff --git a/src/test/regress/sql/upgrade_post_11_after.sql b/src/test/regress/sql/upgrade_post_11_after.sql index ba9b12f3b8e..6d948ec340b 100644 --- a/src/test/regress/sql/upgrade_post_11_after.sql +++ b/src/test/regress/sql/upgrade_post_11_after.sql @@ -27,6 +27,21 @@ SET datestyle = "ISO, YMD"; SELECT 1 FROM run_command_on_workers($$ALTER SYSTEM SET datestyle = "ISO, YMD";$$); SELECT 1 FROM run_command_on_workers($$SELECT pg_reload_conf()$$); +-- In the version that we use for upgrade tests (v10.2.0), we propagate +-- "valid until" to the workers as "infinity" even if it's not set. 
And
+-- given that the "postgres" role is created in the older version, "valid until"
+-- is set to "infinity" on the workers while this is not the case for the
+-- coordinator. See https://github.com/citusdata/citus/issues/7533.
+--
+-- We're fixing this for new versions of Citus and we'll probably backport
+-- this to some older versions too. However, v10.2.0 won't ever have this
+-- fix.
+--
+-- For this reason, here we set "valid until" to "infinity" for all the
+-- nodes so that the query below doesn't report any difference between the
+-- metadata on the coordinator and the workers.
+ALTER ROLE postgres WITH VALID UNTIL 'infinity';
+
 -- make sure that the metadata is consistent across all nodes
 -- we exclude the distributed_object_data as they are
 -- not sorted in the same order (as OIDs differ on the nodes)
diff --git a/src/test/regress/sql/upgrade_rebalance_strategy_before.sql b/src/test/regress/sql/upgrade_rebalance_strategy_before.sql
index 458fb9cf618..be2012e9ccc 100644
--- a/src/test/regress/sql/upgrade_rebalance_strategy_before.sql
+++ b/src/test/regress/sql/upgrade_rebalance_strategy_before.sql
@@ -29,3 +29,19 @@ SELECT citus_add_rebalance_strategy(
 0.3
 );
 SELECT citus_set_default_rebalance_strategy('custom_strategy');
+
+-- Disable the trigger temporarily to allow the invalid strategy to be added.
+-- Normally an invalid strategy can end up in the table by deleting one of the
+-- functions it depends on. But we do it directly in this test because we want
+-- to have a consistent OID, so we get consistent test output.
+ALTER TABLE pg_catalog.pg_dist_rebalance_strategy DISABLE TRIGGER pg_dist_rebalance_strategy_validation_trigger;
+SELECT citus_add_rebalance_strategy(
+ 'invalid_strategy',
+ 1234567,
+ 'capacity_high_worker_1',
+ 'only_worker_2',
+ 0.5,
+ 0.2,
+ 0.3
+ );
+ALTER TABLE pg_catalog.pg_dist_rebalance_strategy ENABLE TRIGGER pg_dist_rebalance_strategy_validation_trigger;
diff --git a/src/test/regress/sql/validate_constraint.sql b/src/test/regress/sql/validate_constraint.sql
index 294e9a8b2b3..bb63f28544d 100644
--- a/src/test/regress/sql/validate_constraint.sql
+++ b/src/test/regress/sql/validate_constraint.sql
@@ -116,9 +116,6 @@ SELECT * FROM constraint_validations_in_workers ORDER BY 1, 2;
-DROP TABLE constrained_table;
-DROP TABLE referenced_table CASCADE;
-DROP TABLE referencing_table;
-
+SET client_min_messages TO WARNING;
 DROP SCHEMA validate_constraint CASCADE;
 SET search_path TO DEFAULT;
diff --git a/src/test/regress/sql/worker_split_binary_copy_test.sql b/src/test/regress/sql/worker_split_binary_copy_test.sql
index 489ff9dc4d9..d6ca3c9dfc8 100644
--- a/src/test/regress/sql/worker_split_binary_copy_test.sql
+++ b/src/test/regress/sql/worker_split_binary_copy_test.sql
@@ -4,14 +4,6 @@ SET citus.shard_count TO 1;
 SET citus.shard_replication_factor TO 1;
 SET citus.next_shard_id TO 81060000;
--- Remove extra nodes added, otherwise GetLocalNodeId() does not bahave correctly.
-SELECT citus_remove_node('localhost', 8887);
-SELECT citus_remove_node('localhost', 9995);
-SELECT citus_remove_node('localhost', 9992);
-SELECT citus_remove_node('localhost', 9998);
-SELECT citus_remove_node('localhost', 9997);
-SELECT citus_remove_node('localhost', 8888);
-
 -- BEGIN: Create distributed table and insert data.
 CREATE TABLE worker_split_binary_copy_test.shard_to_split_copy (
 l_orderkey bigint not null,