diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 11fb010b703..38055f367e0 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -68,7 +68,7 @@ USER citus # build postgres versions separately for effective parrallelism and caching of already built versions when changing only certain versions FROM base AS pg14 -RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.10 +RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.11 RUN rm .pgenv/src/*.tar* RUN make -C .pgenv/src/postgresql-*/ clean RUN make -C .pgenv/src/postgresql-*/src/include install @@ -80,7 +80,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/ RUN rm .pgenv-staging/config/default.conf FROM base AS pg15 -RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.5 +RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.6 RUN rm .pgenv/src/*.tar* RUN make -C .pgenv/src/postgresql-*/ clean RUN make -C .pgenv/src/postgresql-*/src/include install @@ -92,7 +92,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/ RUN rm .pgenv-staging/config/default.conf FROM base AS pg16 -RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.1 +RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.2 RUN rm .pgenv/src/*.tar* RUN make -C .pgenv/src/postgresql-*/ clean RUN make -C .pgenv/src/postgresql-*/src/include install @@ -210,7 +210,7 @@ COPY --chown=citus:citus .psqlrc . RUN sudo chown --from=root:root citus:citus -R ~ # sets default pg version -RUN pgenv switch 16.1 +RUN pgenv switch 16.2 # make connecting to the coordinator easy ENV PGPORT=9700 diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 16ff091e741..2541296cd83 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -31,11 +31,11 @@ jobs: pgupgrade_image_name: "citus/pgupgradetester" style_checker_image_name: "citus/stylechecker" style_checker_tools_version: "0.8.18" - image_suffix: "-v19b671f" - pg14_version: '{ "major": "14", "full": "14.10" }' - pg15_version: '{ "major": "15", "full": "15.5" }' - pg16_version: '{ "major": "16", "full": "16.1" }' - upgrade_pg_versions: "14.10-15.5-16.1" + image_suffix: "-v390dab3" + pg14_version: '{ "major": "14", "full": "14.11" }' + pg15_version: '{ "major": "15", "full": "15.6" }' + pg16_version: '{ "major": "16", "full": "16.2" }' + upgrade_pg_versions: "14.11-15.6-16.2" steps: # Since GHA jobs needs at least one step we use a noop step here. 
      - name: Set up parameters
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8d979c10420..b28788ef954 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,117 @@
+### citus v11.0.10 (February 15, 2024) ###
+
+* Removes pg_send_cancellation and all references (#7135)
+
+### citus v12.1.2 (February 12, 2024) ###
+
+* Fixes the incorrect column count after ALTER TABLE (#7379)
+
+### citus v12.0.1 (February 12, 2024) ###
+
+* Fixes incorrect default value assumption for VACUUM(PROCESS_TOAST) (#7122)
+
+* Fixes a bug that causes an unexpected error when adding a column
+  with a NULL constraint (#7093)
+
+* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152)
+
+* Fixes a bug with deleting colocation groups (#6929)
+
+* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236)
+
+* Fixes shard size bug with too many shards (#7018)
+
+* Fixes the incorrect column count after ALTER TABLE (#7379)
+
+* Improves citus_tables view performance (#7050)
+
+* Makes sure to disallow creating a replicated distributed table
+  concurrently (#7219)
+
+* Removes pg_send_cancellation and all references (#7135)
+
+### citus v11.3.1 (February 12, 2024) ###
+
+* Disallows MERGE when the query prunes down to zero shards (#6946)
+
+* Fixes a bug related to non-existent objects in DDL commands (#6984)
+
+* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152)
+
+* Fixes a bug with deleting colocation groups (#6929)
+
+* Fixes incorrect results on fetching scrollable WITH HOLD cursors (#7014)
+
+* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236)
+
+* Fixes replicate reference tables task fail when user is superuser (#6930)
+
+* Fixes the incorrect column count after ALTER TABLE (#7379)
+
+* Improves citus_shard_sizes performance (#7050)
+
+* Makes sure to disallow creating a replicated distributed table
+  concurrently (#7219)
+
+* Removes pg_send_cancellation and all references (#7135)
+
+### citus v11.2.2 (February 12, 2024) ###
+
+* Fixes a bug in background shard rebalancer where the replicate
+  reference tables task fails if the current user is not a superuser (#6930)
+
+* Fixes a bug related to non-existent objects in DDL commands (#6984)
+
+* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152)
+
+* Fixes a bug with deleting colocation groups (#6929)
+
+* Fixes incorrect results on fetching scrollable WITH HOLD cursors (#7014)
+
+* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236)
+
+* Fixes the incorrect column count after ALTER TABLE (#7379)
+
+* Improves failure handling of distributed execution (#7090)
+
+* Makes sure to disallow creating a replicated distributed table
+  concurrently (#7219)
+
+* Removes pg_send_cancellation (#7135)
+
+### citus v11.1.7 (February 12, 2024) ###
+
+* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236)
+
+* Fixes a bug related to non-existent objects in DDL commands (#6984)
+
+* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152)
+
+* Fixes a bug with deleting colocation groups (#6929)
+
+* Fixes incorrect results on fetching scrollable WITH HOLD cursors (#7014)
+
+* Fixes the incorrect column count after ALTER TABLE (#7379)
+
+* Improves failure handling of distributed execution (#7090)
+
+* Makes sure to disallow creating a replicated distributed table
+  concurrently (#7219)
+
+* Removes pg_send_cancellation and all references (#7135)
+
+### citus v11.0.9 (February 12, 2024) ###
+
+* Fixes a bug that
+  could cause COPY logic to skip data in case of OOM (#7152)
+
+* Fixes a bug with deleting colocation groups (#6929)
+
+* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236)
+
+* Fixes the incorrect column count after ALTER TABLE (#7462)
+
+* Improves failure handling of distributed execution (#7090)
+
 ### citus v12.1.1 (November 9, 2023) ###

 * Fixes leaking of memory and memory contexts in Citus foreign key cache
diff --git a/ci/check_gucs_are_alphabetically_sorted.sh b/ci/check_gucs_are_alphabetically_sorted.sh
index 214a5c9cf6c..018fc7d35d4 100755
--- a/ci/check_gucs_are_alphabetically_sorted.sh
+++ b/ci/check_gucs_are_alphabetically_sorted.sh
@@ -20,6 +20,6 @@ tail -n +$RegisterCitusConfigVariables_begin_linenumber src/backend/distributed/
 # extract citus gucs in the form of "citus.X"
 grep -P "^[\t][\t]\"citus\.[a-zA-Z_0-9]+\"" RegisterCitusConfigVariables_func_def.out > gucs.out
-sort -c gucs.out
+LC_COLLATE=C sort -c gucs.out
 rm gucs.out
 rm RegisterCitusConfigVariables_func_def.out
diff --git a/src/backend/distributed/README.md b/src/backend/distributed/README.md
index 225b1f9625b..e3a9a7a33dd 100644
--- a/src/backend/distributed/README.md
+++ b/src/backend/distributed/README.md
@@ -1749,8 +1749,6 @@ The reason for handling dependencies and deparsing in post-process step is that

 Not all table DDL is currently deparsed. In that case, the original command sent by the client is used. That is a shortcoming in our DDL logic that causes user-facing issues and should be addressed. We do not directly construct a separate DDL command for each shard. Instead, we call the `worker_apply_shard_ddl_command(shardid bigint, ddl_command text)` function which parses the DDL command, replaces the table names with shard names in the parse tree according to the shard ID, and then executes the command. That also has some shortcomings, because we cannot support more complex DDL commands in this manner (e.g. adding multiple foreign keys). Ideally, all DDL would be deparsed, and for table DDL the deparsed query string would have shard names, similar to regular queries.

-`markDistributed` is used to indicate whether we add a record to `pg_dist_object` to mark the object as "distributed".
-
 ## Defining a new DDL command

 All commands that are propagated by Citus should be defined in DistributeObjectOps struct. Below is a sample DistributeObjectOps for ALTER DATABASE command that is defined in [distribute_object_ops.c](commands/distribute_object_ops.c) file.
@@ -1810,6 +1808,14 @@ GetDistributeObjectOps(Node *node)
 ...
 ```

+Finally, when adding support for propagation of a new DDL command, you also need to make sure that:
+* You use `quote_identifier()` or `quote_literal_cstr()` for fields that might need escaping of special characters or bare quotes when deparsing the DDL command (see the sketch after this list).
+* The code is tolerant to nullable fields within the given `Stmt *` object, i.e., the ones that Postgres allows not specifying at all.
+* You register the object in `pg_dist_object` if it's a CREATE command and delete the object from `pg_dist_object` if it's a DROP command.
+* Node activation (e.g., `citus_add_node()`) properly propagates the object and its dependencies to new nodes.
+* You add test cases for all the scenarios noted above.
+* You add test cases for the different options that can be specified for the command. For example, `CREATE DATABASE .. IS_TEMPLATE = TRUE` and `CREATE DATABASE .. IS_TEMPLATE = FALSE` should be tested separately.
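+
+A minimal, hypothetical sketch of the first two items (quoting and tolerance to nullable fields) is shown below; `MyCreateThingStmt` and `AppendCreateThingStmt` are illustrative names only, not part of the Citus codebase:
+
+```c
+#include "postgres.h"
+
+#include "lib/stringinfo.h"
+#include "utils/builtins.h"
+
+/* hypothetical parse-tree node; the comment field may be NULL */
+typedef struct MyCreateThingStmt
+{
+	char *thingName;    /* always present */
+	char *comment;      /* Postgres allows omitting it, so it may be NULL */
+} MyCreateThingStmt;
+
+static void
+AppendCreateThingStmt(StringInfo buf, MyCreateThingStmt *stmt)
+{
+	/* quote_identifier() escapes names that need quoting */
+	appendStringInfo(buf, "CREATE THING %s",
+					 quote_identifier(stmt->thingName));
+
+	/* tolerate nullable fields instead of dereferencing them blindly */
+	if (stmt->comment != NULL)
+	{
+		/* quote_literal_cstr() handles bare quotes inside the literal */
+		appendStringInfo(buf, " COMMENT %s",
+						 quote_literal_cstr(stmt->comment));
+	}
+}
+```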
+ ## Object & dependency propagation These two topics are closely related, so we'll discuss them together. You can start the topic by reading [Nils' blog](https://www.citusdata.com/blog/2020/06/25/using-custom-types-with-citus-and-postgres/) on the topic. @@ -1885,7 +1891,7 @@ Generally, the process is straightforward: When a new object is created, Citus a Citus employs a universal strategy for dealing with objects. Every object creation, alteration, or deletion event (like custom types, tables, or extensions) is represented by the C struct `DistributeObjectOps`. You can find a list of all supported object types in [`distribute_object_ops.c`](https://github.com/citusdata/citus/blob/2c190d068918d1c457894adf97f550e5b3739184/src/backend/distributed/commands/distribute_object_ops.c#L4). As of Citus 12.1, most Postgres objects are supported, although there are a few exceptions. -Whenever `DistributeObjectOps->markDistributed` is set to true—usually during `CREATE` operations—Citus calls `MarkObjectDistributed()`. Citus also labels the same objects as distributed across all nodes via the `citus_internal_add_object_metadata()` UDF. +Whenever `DistributeObjectOps->markDistributed` is set to true—usually during `CREATE` operations—Citus calls `MarkObjectDistributed()`. Citus also labels the same objects as distributed across all nodes via the `citus_internal.add_object_metadata()` UDF. Here's a simple example: @@ -1895,7 +1901,7 @@ CREATE TYPE type_test AS (a int, b int); ... NOTICE: issuing SELECT worker_create_or_replace_object('CREATE TYPE public.type_test AS (a integer, b integer);'); .... - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; ... -- Then, check pg_dist_object. This should be consistent across all nodes. @@ -2423,7 +2429,7 @@ Cleanup records always need to be committed before creating the actual object. I PostgreSQL supports change data capture (CDC) via the logical decoding interface. The basic idea behind logical decoding is that you make a replication connection (a special type of postgres connection), start replication, and then the backend process reads through the WAL and decodes the WAL records and emits it over the wire in a format defined by the output plugin. If we were to use regular logical decoding on the nodes of a Citus cluster, we would see the name of the shard in each write, and internal data transfers such as shard moves would result in inserts being emitted. We use several techniques to avoid this. -All writes in PostgreSQL are marked with a replication origin (0 by default) and the decoder can make decisions on whether to emit the change based on the replication origin. We use this to filter out internal data transfers. 
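+
+As a minimal sketch of the origin-based filtering described above, an output plugin can register a filter-by-origin callback; the constant below is a stand-in for Citus' `DoNotReplicateId`, whose real definition lives in the Citus sources:
+
+```c
+#include "postgres.h"
+
+#include "replication/logical.h"
+#include "replication/origin.h"
+
+/* stand-in for Citus' DoNotReplicateId; illustrative value only */
+#define DO_NOT_REPLICATE_ID ((RepOriginId) 43)
+
+/*
+ * Returning true tells the logical decoder to skip the change, which is
+ * how writes marked as internal data transfers are hidden from CDC clients.
+ */
+static bool
+example_filter_by_origin_cb(LogicalDecodingContext *ctx, RepOriginId origin_id)
+{
+	return origin_id == DO_NOT_REPLICATE_ID;
+}
+```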
If `citus.enable_change_data_capture` is enabled, all internal data transfers are marked with the special DoNotReplicateId replication origin by calling the `citus_internal_start_replication_origin_tracking()` UDF before writing the data. This replication origin ID is special in the sense that it does not need to be created (which prevents locking issues, especially when dropping replication origins). It is still up to output plugin to decide what to do with changes marked as DoNotReplicateId.
+All writes in PostgreSQL are marked with a replication origin (0 by default) and the decoder can make decisions on whether to emit the change based on the replication origin. We use this to filter out internal data transfers. If `citus.enable_change_data_capture` is enabled, all internal data transfers are marked with the special DoNotReplicateId replication origin by calling the `citus_internal.start_replication_origin_tracking()` UDF before writing the data. This replication origin ID is special in the sense that it does not need to be created (which prevents locking issues, especially when dropping replication origins). It is still up to the output plugin to decide what to do with changes marked as DoNotReplicateId.

 We have very minimal control over replication commands like `CREATE_REPLICATION_SLOT`, since there are no direct hooks, and decoder names (e.g. “pgoutput”) are typically hard-coded in the client. The only method we found of overriding logical decoding behaviour is to overload the output plugin name in the dynamic library path.
diff --git a/src/backend/distributed/clock/causal_clock.c b/src/backend/distributed/clock/causal_clock.c
index 3d64757e3a3..eb4b8d9d362 100644
--- a/src/backend/distributed/clock/causal_clock.c
+++ b/src/backend/distributed/clock/causal_clock.c
@@ -397,7 +397,7 @@ AdjustClocksToTransactionHighest(List *nodeConnectionList,

 	/* Set the clock value on participating worker nodes */
 	appendStringInfo(queryToSend,
-					 "SELECT pg_catalog.citus_internal_adjust_local_clock_to_remote"
+					 "SELECT citus_internal.adjust_local_clock_to_remote"
 					 "('(%lu, %u)'::pg_catalog.cluster_clock);",
 					 transactionClockValue->logical,
 					 transactionClockValue->counter);
diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c
index 0eb87ec1982..33223f41686 100644
--- a/src/backend/distributed/commands/database.c
+++ b/src/backend/distributed/commands/database.c
@@ -481,9 +481,7 @@ PreprocessCreateDatabaseStmt(Node *node, const char *queryString,
 /*
  * PostprocessCreateDatabaseStmt is executed after the statement is applied to the local
  * postgres instance. In this stage we prepare the commands that need to be run on
- * all workers to create the database. Since the CREATE DATABASE statement gives error
- * in a transaction block, we need to use NontransactionalNodeDDLTaskList to send the
- * CREATE DATABASE statement to the workers.
+ * all workers to create the database.
 *
 */
List *
@@ -508,20 +506,25 @@ PostprocessCreateDatabaseStmt(Node *node, const char *queryString)

	char *createDatabaseCommand = DeparseTreeNode(node);

-	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
-								(void *) createDatabaseCommand,
-								ENABLE_DDL_PROPAGATION);
+	List *createDatabaseCommands = list_make3(DISABLE_DDL_PROPAGATION,
+											  (void *) createDatabaseCommand,
+											  ENABLE_DDL_PROPAGATION);

-	return NontransactionalNodeDDLTaskList(REMOTE_NODES, commands);
+	/*
+	 * Since CREATE DATABASE statements cannot be executed in a transaction
+	 * block, we need to use NontransactionalNodeDDLTaskList() to send the
+	 * CREATE DATABASE statement to the workers.
+	 */
+	List *createDatabaseDDLJobList =
+		NontransactionalNodeDDLTaskList(REMOTE_NODES, createDatabaseCommands);
+	return createDatabaseDDLJobList;
 }


 /*
  * PreprocessDropDatabaseStmt is executed before the statement is applied to the local
  * postgres instance. In this stage we can prepare the commands that need to be run on
- * all workers to drop the database. Since the DROP DATABASE statement gives error in
- * transaction context, we need to use NontransactionalNodeDDLTaskList to send the
- * DROP DATABASE statement to the workers.
+ * all workers to drop the database.
  *
  * We also serialize database commands globally by acquiring a Citus specific advisory
  * lock based on OCLASS_DATABASE on the first primary worker node.
@@ -559,11 +562,18 @@ PreprocessDropDatabaseStmt(Node *node, const char *queryString,

	char *dropDatabaseCommand = DeparseTreeNode(node);

-	List *commands = list_make3(DISABLE_DDL_PROPAGATION,
-								(void *) dropDatabaseCommand,
-								ENABLE_DDL_PROPAGATION);
+	List *dropDatabaseCommands = list_make3(DISABLE_DDL_PROPAGATION,
+											(void *) dropDatabaseCommand,
+											ENABLE_DDL_PROPAGATION);

-	return NontransactionalNodeDDLTaskList(REMOTE_NODES, commands);
+	/*
+	 * For the same reason stated in PostprocessCreateDatabaseStmt(), we need
+	 * to use NontransactionalNodeDDLTaskList() to send the DROP DATABASE
+	 * statement to the workers.
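+	 *
+	 * (The underlying restriction is PostgreSQL's own: createdb() and
+	 * dropdb() call PreventInTransactionBlock(), declared in access/xact.h,
+	 * which errors out with "... cannot run inside a transaction block";
+	 * that guard is why these commands cannot ride along in a coordinated
+	 * transaction and must be sent non-transactionally.)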
+ */ + List *dropDatabaseDDLJobList = + NontransactionalNodeDDLTaskList(REMOTE_NODES, dropDatabaseCommands); + return dropDatabaseDDLJobList; } @@ -890,7 +900,7 @@ CreateDatabaseDDLCommand(Oid dbId) /* Generate the CREATE DATABASE statement */ appendStringInfo(outerDbStmt, - "SELECT pg_catalog.citus_internal_database_command(%s)", + "SELECT citus_internal.database_command(%s)", quote_literal_cstr(createStmt)); ReleaseSysCache(tuple); diff --git a/src/backend/distributed/commands/multi_copy.c b/src/backend/distributed/commands/multi_copy.c index 0284ea64dbe..23847ac0150 100644 --- a/src/backend/distributed/commands/multi_copy.c +++ b/src/backend/distributed/commands/multi_copy.c @@ -2663,7 +2663,6 @@ CreateLocalColocatedIntermediateFile(CitusCopyDestReceiver *copyDest, CreateIntermediateResultsDirectory(); const int fileFlags = (O_CREAT | O_RDWR | O_TRUNC); - const int fileMode = (S_IRUSR | S_IWUSR); StringInfo filePath = makeStringInfo(); appendStringInfo(filePath, "%s_%ld", copyDest->colocatedIntermediateResultIdPrefix, @@ -2671,7 +2670,7 @@ CreateLocalColocatedIntermediateFile(CitusCopyDestReceiver *copyDest, const char *fileName = QueryResultFileName(filePath->data); shardState->fileDest = - FileCompatFromFileStart(FileOpenForTransmit(fileName, fileFlags, fileMode)); + FileCompatFromFileStart(FileOpenForTransmit(fileName, fileFlags)); CopyOutState localFileCopyOutState = shardState->copyOutState; bool isBinaryCopy = localFileCopyOutState->binary; diff --git a/src/backend/distributed/commands/role.c b/src/backend/distributed/commands/role.c index d0b33ccb9b2..f2b567e6e70 100644 --- a/src/backend/distributed/commands/role.c +++ b/src/backend/distributed/commands/role.c @@ -886,6 +886,14 @@ GenerateGrantRoleStmtsOfRole(Oid roleid) { Form_pg_auth_members membership = (Form_pg_auth_members) GETSTRUCT(tuple); + ObjectAddress *roleAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*roleAddress, AuthIdRelationId, membership->grantor); + if (!IsAnyObjectDistributed(list_make1(roleAddress))) + { + /* we only need to propagate the grant if the grantor is distributed */ + continue; + } + GrantRoleStmt *grantRoleStmt = makeNode(GrantRoleStmt); grantRoleStmt->is_grant = true; @@ -901,7 +909,11 @@ GenerateGrantRoleStmtsOfRole(Oid roleid) granteeRole->rolename = GetUserNameFromId(membership->member, true); grantRoleStmt->grantee_roles = list_make1(granteeRole); - grantRoleStmt->grantor = NULL; + RoleSpec *grantorRole = makeNode(RoleSpec); + grantorRole->roletype = ROLESPEC_CSTRING; + grantorRole->location = -1; + grantorRole->rolename = GetUserNameFromId(membership->grantor, false); + grantRoleStmt->grantor = grantorRole; #if PG_VERSION_NUM >= PG_VERSION_16 @@ -1241,12 +1253,6 @@ PreprocessGrantRoleStmt(Node *node, const char *queryString, return NIL; } - /* - * Postgres don't seem to use the grantor. Even dropping the grantor doesn't - * seem to affect the membership. If this changes, we might need to add grantors - * to the dependency resolution too. For now we just don't propagate it. 
-	 */
-	stmt->grantor = NULL;
	stmt->grantee_roles = distributedGranteeRoles;
	char *sql = DeparseTreeNode((Node *) stmt);
	stmt->grantee_roles = allGranteeRoles;
diff --git a/src/backend/distributed/commands/seclabel.c b/src/backend/distributed/commands/seclabel.c
index 3e1847dc9ee..1d274a05627 100644
--- a/src/backend/distributed/commands/seclabel.c
+++ b/src/backend/distributed/commands/seclabel.c
@@ -29,7 +29,7 @@ List *
 PostprocessSecLabelStmt(Node *node, const char *queryString)
 {
-	if (!ShouldPropagate())
+	if (!EnableAlterRolePropagation || !ShouldPropagate())
 	{
 		return NIL;
 	}
@@ -59,21 +59,17 @@ PostprocessSecLabelStmt(Node *node, const char *queryString)
 		return NIL;
 	}

-	if (!EnableCreateRolePropagation)
-	{
-		return NIL;
-	}
-
-	EnsureCoordinator();
+	EnsurePropagationToCoordinator();
 	EnsureAllObjectDependenciesExistOnAllNodes(objectAddresses);

-	const char *sql = DeparseTreeNode((Node *) secLabelStmt);
+	const char *secLabelCommands = DeparseTreeNode((Node *) secLabelStmt);

 	List *commandList = list_make3(DISABLE_DDL_PROPAGATION,
-								   (void *) sql,
+								   (void *) secLabelCommands,
 								   ENABLE_DDL_PROPAGATION);

-	return NodeDDLTaskList(NON_COORDINATOR_NODES, commandList);
+	return NodeDDLTaskList(REMOTE_NODES, commandList);
 }
diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c
index a72b0dcc3ba..005ee523535 100644
--- a/src/backend/distributed/commands/utility_hook.c
+++ b/src/backend/distributed/commands/utility_hook.c
@@ -43,7 +43,6 @@
 #include "foreign/foreign.h"
 #include "lib/stringinfo.h"
 #include "nodes/makefuncs.h"
-#include "nodes/nodes.h"
 #include "nodes/parsenodes.h"
 #include "nodes/pg_list.h"
 #include "postmaster/postmaster.h"
@@ -143,6 +142,40 @@ static const NonMainDbDistributedStatementInfo NonMainDbSupportedStatements[] =
	  sizeof(supportedObjectTypesForGrantStmt) / sizeof(ObjectType) }
 };

+/*
+ * NonMainDbDistributedStatementInfo is used to determine whether a statement is
+ * supported from non-main databases and whether it should be marked as
+ * distributed explicitly (*).
+ *
+ * (*) We always have to mark such objects as "distributed"; while for some
+ * object types we can delegate this to the main database, for others we have
+ * to explicitly send a command to all nodes in this code path to achieve this.
+ */
+typedef struct NonMainDbDistributedStatementInfo
+{
+	int statementType;
+	bool explicitlyMarkAsDistributed;
+
+	/*
+	 * checkSupportedObjectTypes is a callback function that checks whether
+	 * the type of the object referred to by the given statement is supported.
+	 *
+	 * Can be NULL if not applicable for the statement type.
+	 */
+	bool (*checkSupportedObjectTypes)(Node *node);
+} NonMainDbDistributedStatementInfo;
+
+/*
+ * MarkObjectDistributedParams is used to pass parameters to the
+ * MarkObjectDistributedFromNonMainDb function.
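+ *
+ * For example, for CREATE ROLE foo, GetMarkObjectDistributedParams() further
+ * down fills these as name = "foo", id = the role's OID, and catalogRelId =
+ * AuthIdRelationId (pg_authid).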
+ */
+typedef struct MarkObjectDistributedParams
+{
+	char *name;
+	Oid id;
+	uint16 catalogRelId;
+} MarkObjectDistributedParams;
+

 bool EnableDDLPropagation = true; /* ddl propagation is enabled */
 int CreateObjectPropagationMode = CREATE_OBJECT_PROPAGATION_IMMEDIATE;
@@ -171,17 +204,42 @@ static void PostStandardProcessUtility(Node *parsetree);
 static void DecrementUtilityHookCountersIfNecessary(Node *parsetree);
 static bool IsDropSchemaOrDB(Node *parsetree);
 static bool ShouldCheckUndistributeCitusLocalTables(void);
-static void RunPreprocessMainDBCommand(Node *parsetree, const char *queryString);
+
+
+/*
+ * Functions to support commands used to manage node-wide objects from non-main
+ * databases.
+ */
+static bool IsCommandToCreateOrDropMainDB(Node *parsetree);
+static void RunPreprocessMainDBCommand(Node *parsetree);
 static void RunPostprocessMainDBCommand(Node *parsetree);
-static bool IsObjectTypeSupported(Node *parsetree, NonMainDbDistributedStatementInfo
-								  nonMainDbDistributedStatementInfo);
-static bool IsStatementSupportedInNonMainDb(Node *parsetree);
+static bool IsStatementSupportedFromNonMainDb(Node *parsetree);
 static bool StatementRequiresMarkDistributedFromNonMainDb(Node *parsetree);
-static bool StatementRequiresUnmarkDistributedFromNonMainDb(Node *parsetree);
-static List * GetObjectInfoList(Node *parsetree);
-static void MarkObjectDistributedInNonMainDb(Node *parsetree, ObjectInfo *objectInfo);
+static void MarkObjectDistributedFromNonMainDb(Node *parsetree);
+static MarkObjectDistributedParams GetMarkObjectDistributedParams(Node *parsetree);
 static void UnmarkObjectDistributedInNonMainDb(List *objectInfoList);

+/*
+ * checkSupportedObjectTypes callbacks for
+ * NonMainDbDistributedStatementInfo objects.
+ */
+static bool NonMainDbCheckSupportedObjectTypeForGrant(Node *node);
+
+
+/*
+ * NonMainDbSupportedStatements is an array of statements that are supported
+ * from non-main databases.
+ */
+ObjectType supportedObjectTypesForGrantStmt[] = { OBJECT_DATABASE };
+static const NonMainDbDistributedStatementInfo NonMainDbSupportedStatements[] = {
+	{ T_GrantRoleStmt, false, NULL },
+	{ T_CreateRoleStmt, true, NULL },
+	{ T_GrantStmt, false, NonMainDbCheckSupportedObjectTypeForGrant },
+	{ T_CreatedbStmt, false, NULL },
+	{ T_DropdbStmt, false, NULL },
+};
+
+
 /*
  * ProcessUtilityParseTree is a convenience method to create a PlannedStmt out of
  * pieces of a utility statement before invoking ProcessUtility.
@@ -312,9 +370,24 @@ citus_ProcessUtility(PlannedStmt *pstmt,

 	if (!CitusHasBeenLoaded())
 	{
-		if (!IsMainDB)
+		/*
+		 * We always execute CREATE/DROP DATABASE from the main database. There are no
+		 * transactional visibility issues, since these commands are non-transactional.
+		 * And this way we only have to consider one codepath when creating databases.
+		 * We don't try to send the query to the main database if the CREATE/DROP DATABASE
+		 * command is for the main database itself; this is a very rare case but it's
+		 * exercised by our test suite.
+ */ + if (!IsMainDB && + !IsCommandToCreateOrDropMainDB(parsetree)) { - RunPreprocessMainDBCommand(parsetree, queryString); + RunPreprocessMainDBCommand(parsetree); + + if (IsA(parsetree, CreatedbStmt) || + IsA(parsetree, DropdbStmt)) + { + return; + } } /* @@ -795,6 +868,13 @@ citus_ProcessUtilityInternal(PlannedStmt *pstmt, errhint("Connect to other nodes directly to manually create all" " necessary users and roles."))); } + else if (IsA(parsetree, SecLabelStmt) && !EnableAlterRolePropagation) + { + ereport(NOTICE, (errmsg("not propagating SECURITY LABEL commands to other" + " nodes"), + errhint("Connect to other nodes directly to manually assign" + " necessary labels."))); + } /* * Make sure that on DROP EXTENSION we terminate the background daemon @@ -1654,194 +1734,110 @@ DropSchemaOrDBInProgress(void) /* - * RunPreprocessMainDBCommand runs the necessary commands for a query, in main - * database before query is run on the local node with PrevProcessUtility + * IsCommandToCreateOrDropMainDB checks if this query creates or drops the + * main database, so we can make an exception and not send this query to + * the main database. */ -static void -RunPreprocessMainDBCommand(Node *parsetree, const char *queryString) +static bool +IsCommandToCreateOrDropMainDB(Node *parsetree) { - if (!IsStatementSupportedInNonMainDb(parsetree)) + if (IsA(parsetree, CreatedbStmt)) { - return; + CreatedbStmt *createdbStmt = castNode(CreatedbStmt, parsetree); + return strcmp(createdbStmt->dbname, MainDb) == 0; } - - StringInfo mainDBQuery = makeStringInfo(); - appendStringInfo(mainDBQuery, - START_MANAGEMENT_TRANSACTION, - GetCurrentFullTransactionId().value); - RunCitusMainDBQuery(mainDBQuery->data); - mainDBQuery = makeStringInfo(); - appendStringInfo(mainDBQuery, - EXECUTE_COMMAND_ON_REMOTE_NODES_AS_USER, - quote_literal_cstr(queryString), - quote_literal_cstr(CurrentUserName())); - - if (StatementRequiresUnmarkDistributedFromNonMainDb(parsetree)) + else if (IsA(parsetree, DropdbStmt)) { - List *objectInfoList = GetObjectInfoList(parsetree); - - UnmarkObjectDistributedInNonMainDb(objectInfoList); + DropdbStmt *dropdbStmt = castNode(DropdbStmt, parsetree); + return strcmp(dropdbStmt->dbname, MainDb) == 0; } - RunCitusMainDBQuery(mainDBQuery->data); + return false; } /* - * RunPostprocessMainDBCommand runs the necessary commands for a query, in main - * database after query is run on the local node with PrevProcessUtility + * RunPreprocessMainDBCommand runs the necessary commands for a query, in main + * database before query is run on the local node with PrevProcessUtility */ static void -RunPostprocessMainDBCommand(Node *parsetree) +RunPreprocessMainDBCommand(Node *parsetree) { - if (IsStatementSupportedInNonMainDb(parsetree) && - StatementRequiresMarkDistributedFromNonMainDb(parsetree)) + if (!IsStatementSupportedFromNonMainDb(parsetree)) { - List *objectInfoList = GetObjectInfoList(parsetree); - ObjectInfo *objectInfo = NULL; - - if (objectInfoList != NIL) - { - objectInfo = (ObjectInfo *) linitial(objectInfoList); - MarkObjectDistributedInNonMainDb(parsetree, objectInfo); - } + return; } -} + char *queryString = DeparseTreeNode(parsetree); -/* - * GetObjectInfoList returns the names and oids of the object in the given parsetree. 
- */ -List * -GetObjectInfoList(Node *parsetree) -{ - List *infoList = NIL; - - if (IsA(parsetree, CreateRoleStmt)) - { - CreateRoleStmt *stmt = castNode(CreateRoleStmt, parsetree); - ObjectInfo *info = palloc(sizeof(ObjectInfo)); - info->name = stmt->role; - info->id = get_role_oid(stmt->role, false); - infoList = lappend(infoList, info); - } - else if (IsA(parsetree, DropRoleStmt)) + if (IsA(parsetree, CreatedbStmt) || + IsA(parsetree, DropdbStmt)) { - DropRoleStmt *stmt = castNode(DropRoleStmt, parsetree); - RoleSpec *roleSpec; - foreach_ptr(roleSpec, stmt->roles) - { - ObjectInfo *info = palloc(sizeof(ObjectInfo)); - info->name = roleSpec->rolename; - info->id = get_role_oid(roleSpec->rolename, false); - infoList = lappend(infoList, info); - } + IsMainDBCommandInXact = false; + RunCitusMainDBQuery((char *) queryString); + return; } + - /* Add else if branches for other statement types */ + if (StatementRequiresUnmarkDistributedFromNonMainDb(parsetree)) + { + List *objectInfoList = GetObjectInfoList(parsetree); - return infoList; -} + UnmarkObjectDistributedInNonMainDb(objectInfoList); + } + IsMainDBCommandInXact = true; -/* - * MarkObjectDistributedInNonMainDb marks the given object as distributed on the - * non-main database. - */ -static void -MarkObjectDistributedInNonMainDb(Node *parsetree, ObjectInfo *objectInfo) -{ StringInfo mainDBQuery = makeStringInfo(); appendStringInfo(mainDBQuery, - MARK_OBJECT_DISTRIBUTED, - AuthIdRelationId, - quote_literal_cstr(objectInfo->name), - objectInfo->id, + START_MANAGEMENT_TRANSACTION, + GetCurrentFullTransactionId().value); + RunCitusMainDBQuery(mainDBQuery->data); + mainDBQuery = makeStringInfo(); + appendStringInfo(mainDBQuery, + EXECUTE_COMMAND_ON_REMOTE_NODES_AS_USER, + quote_literal_cstr(queryString), quote_literal_cstr(CurrentUserName())); RunCitusMainDBQuery(mainDBQuery->data); } /* - * UnmarkObjectDistributedInNonMainDb unmarks the given object as distributed on the - * non-main database. + * RunPostprocessMainDBCommand runs the necessary commands for a query, in main + * database after query is run on the local node with PrevProcessUtility */ static void -UnmarkObjectDistributedInNonMainDb(List *objectInfoList) +RunPostprocessMainDBCommand(Node *parsetree) { - ObjectInfo *objectInfo = NULL; - int subObjectId = 0; - char *checkObjectExistence = "false"; - foreach_ptr(objectInfo, objectInfoList) - { - StringInfo query = makeStringInfo(); - appendStringInfo(query, - UNMARK_OBJECT_DISTRIBUTED, - AuthIdRelationId, - objectInfo->id, - subObjectId, checkObjectExistence); - RunCitusMainDBQuery(query->data); + if (IsStatementSupportedFromNonMainDb(parsetree) && + StatementRequiresMarkDistributedFromNonMainDb(parsetree)) + { + MarkObjectDistributedFromNonMainDb(parsetree); } } /* - * IsStatementSupportedIn2Pc returns true if the statement is supported from a + * IsStatementSupportedFromNonMainDb returns true if the statement is supported from a * non-main database. 
*/ static bool -IsStatementSupportedInNonMainDb(Node *parsetree) +IsStatementSupportedFromNonMainDb(Node *parsetree) { NodeTag type = nodeTag(parsetree); for (int i = 0; i < sizeof(NonMainDbSupportedStatements) / sizeof(NonMainDbSupportedStatements[0]); i++) { - if (type == NonMainDbSupportedStatements[i].statementType) + if (type != NonMainDbSupportedStatements[i].statementType) { - if (NonMainDbSupportedStatements[i].supportedObjectTypes == NULL) - { - return true; - } - else - { - if (type == T_GrantStmt) - { - return IsObjectTypeSupported(parsetree, - NonMainDbSupportedStatements[i]); - } - } + continue; } - } - - return false; -} - - -/* - * IsObjectTypeSupported returns true if the object type is supported in 2pc - */ -bool -IsObjectTypeSupported(Node *parsetree, NonMainDbDistributedStatementInfo - nonMainDbDistributedStatementInfo) -{ - NodeTag type = nodeTag(parsetree); - if (type == T_GrantStmt) - { - GrantStmt *stmt = castNode(GrantStmt, parsetree); - /* check if stmt->objtype is in supportedObjectTypes */ - for (int j = 0; j < nonMainDbDistributedStatementInfo.supportedObjectTypesSize; - j++) - { - if (stmt->objtype == - nonMainDbDistributedStatementInfo.supportedObjectTypes[j]) - { - return true; - } - } - return false; + return !NonMainDbSupportedStatements[i].checkSupportedObjectTypes || + NonMainDbSupportedStatements[i].checkSupportedObjectTypes(parsetree); } + return false; } @@ -1860,8 +1856,7 @@ StatementRequiresMarkDistributedFromNonMainDb(Node *parsetree) { if (type == NonMainDbSupportedStatements[i].statementType) { - return NonMainDbSupportedStatements[i].distributedOperation == - MARK_DISTRIBUTED; + return NonMainDbSupportedStatements[i].explicitlyMarkAsDistributed; } } @@ -1870,23 +1865,57 @@ StatementRequiresMarkDistributedFromNonMainDb(Node *parsetree) /* - * StatementRequiresUnmarkDistributedFromNonMainDb returns true if the statement should be marked - * as distributed when executed from a non-main database. + * MarkObjectDistributedFromNonMainDb marks the given object as distributed on the + * non-main database. */ -static bool -StatementRequiresUnmarkDistributedFromNonMainDb(Node *parsetree) +static void +MarkObjectDistributedFromNonMainDb(Node *parsetree) { - NodeTag type = nodeTag(parsetree); + MarkObjectDistributedParams markObjectDistributedParams = + GetMarkObjectDistributedParams(parsetree); + StringInfo mainDBQuery = makeStringInfo(); + appendStringInfo(mainDBQuery, + MARK_OBJECT_DISTRIBUTED, + markObjectDistributedParams.catalogRelId, + quote_literal_cstr(markObjectDistributedParams.name), + markObjectDistributedParams.id, + quote_literal_cstr(CurrentUserName())); + RunCitusMainDBQuery(mainDBQuery->data); +} - for (int i = 0; i < sizeof(NonMainDbSupportedStatements) / - sizeof(NonMainDbSupportedStatements[0]); i++) + +/* + * GetMarkObjectDistributedParams returns MarkObjectDistributedParams for the target + * object of given parsetree. 
+ */ +static MarkObjectDistributedParams +GetMarkObjectDistributedParams(Node *parsetree) +{ + if (IsA(parsetree, CreateRoleStmt)) { - if (type == NonMainDbSupportedStatements[i].statementType) - { - return NonMainDbSupportedStatements[i].distributedOperation == - UNMARK_DISTRIBUTED; - } + CreateRoleStmt *stmt = castNode(CreateRoleStmt, parsetree); + MarkObjectDistributedParams info = { + .name = stmt->role, + .catalogRelId = AuthIdRelationId, + .id = get_role_oid(stmt->role, false) + }; + + return info; } - return false; + /* Add else if branches for other statement types */ + + elog(ERROR, "unsupported statement type"); +} + + +/* + * NonMainDbCheckSupportedObjectTypeForGrant implements checkSupportedObjectTypes + * callback for GrantStmt. + */ +static bool +NonMainDbCheckSupportedObjectTypeForGrant(Node *node) +{ + GrantStmt *stmt = castNode(GrantStmt, node); + return stmt->objtype == OBJECT_DATABASE; } diff --git a/src/backend/distributed/deparser/citus_grantutils.c b/src/backend/distributed/deparser/citus_grantutils.c index c944013f6d2..8354e047944 100644 --- a/src/backend/distributed/deparser/citus_grantutils.c +++ b/src/backend/distributed/deparser/citus_grantutils.c @@ -74,7 +74,7 @@ AppendGrantRestrictAndCascade(StringInfo buf, GrantStmt *stmt) void AppendGrantedByInGrantForRoleSpec(StringInfo buf, RoleSpec *grantor, bool isGrant) { - if (isGrant && grantor) + if (grantor) { appendStringInfo(buf, " GRANTED BY %s", RoleSpecString(grantor, true)); } diff --git a/src/backend/distributed/deparser/deparse_role_stmts.c b/src/backend/distributed/deparser/deparse_role_stmts.c index b868413457f..a4a085026c9 100644 --- a/src/backend/distributed/deparser/deparse_role_stmts.c +++ b/src/backend/distributed/deparser/deparse_role_stmts.c @@ -488,7 +488,6 @@ AppendGrantRoleStmt(StringInfo buf, GrantRoleStmt *stmt) AppendGrantWithAdminOption(buf, stmt); AppendGrantedByInGrantForRoleSpec(buf, stmt->grantor, stmt->is_grant); AppendGrantRestrictAndCascadeForRoleSpec(buf, stmt->behavior, stmt->is_grant); - AppendGrantedByInGrantForRoleSpec(buf, stmt->grantor, stmt->is_grant); appendStringInfo(buf, ";"); } diff --git a/src/backend/distributed/executor/intermediate_results.c b/src/backend/distributed/executor/intermediate_results.c index 0e18d4416f7..daf707b2462 100644 --- a/src/backend/distributed/executor/intermediate_results.c +++ b/src/backend/distributed/executor/intermediate_results.c @@ -295,7 +295,6 @@ PrepareIntermediateResultBroadcast(RemoteFileDestReceiver *resultDest) if (resultDest->writeLocalFile) { const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY); - const int fileMode = (S_IRUSR | S_IWUSR); /* make sure the directory exists */ CreateIntermediateResultsDirectory(); @@ -303,8 +302,7 @@ PrepareIntermediateResultBroadcast(RemoteFileDestReceiver *resultDest) const char *fileName = QueryResultFileName(resultId); resultDest->fileCompat = FileCompatFromFileStart(FileOpenForTransmit(fileName, - fileFlags, - fileMode)); + fileFlags)); } WorkerNode *workerNode = NULL; @@ -606,7 +604,7 @@ CreateIntermediateResultsDirectory(void) { char *resultDirectory = IntermediateResultsDirectory(); - int makeOK = mkdir(resultDirectory, S_IRWXU); + int makeOK = MakePGDirectory(resultDirectory); if (makeOK != 0) { if (errno == EEXIST) @@ -976,7 +974,6 @@ FetchRemoteIntermediateResult(MultiConnection *connection, char *resultId) StringInfo copyCommand = makeStringInfo(); const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY); - const int fileMode = (S_IRUSR | S_IWUSR); 
	PGconn *pgConn = connection->pgConn;
	int socket = PQsocket(pgConn);
@@ -998,7 +995,7 @@ FetchRemoteIntermediateResult(MultiConnection *connection, char *resultId)

	PQclear(result);

-	File fileDesc = FileOpenForTransmit(localPath, fileFlags, fileMode);
+	File fileDesc = FileOpenForTransmit(localPath, fileFlags);
	FileCompat fileCompat = FileCompatFromFileStart(fileDesc);

	while (true)
diff --git a/src/backend/distributed/executor/transmit.c b/src/backend/distributed/executor/transmit.c
index a10ae4fbf0b..224d8e5892d 100644
--- a/src/backend/distributed/executor/transmit.c
+++ b/src/backend/distributed/executor/transmit.c
@@ -17,6 +17,7 @@
 #include "pgstat.h"

 #include "commands/defrem.h"
+#include "common/file_perm.h"
 #include "libpq/libpq.h"
 #include "libpq/pqformat.h"
 #include "storage/fd.h"
@@ -48,8 +49,7 @@ RedirectCopyDataToRegularFile(const char *filename)
 {
	StringInfo copyData = makeStringInfo();
	const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY);
-	const int fileMode = (S_IRUSR | S_IWUSR);
-	File fileDesc = FileOpenForTransmit(filename, fileFlags, fileMode);
+	File fileDesc = FileOpenForTransmit(filename, fileFlags);
	FileCompat fileCompat = FileCompatFromFileStart(fileDesc);

	SendCopyInStart();
@@ -92,7 +92,7 @@ SendRegularFile(const char *filename)
	const int fileMode = 0;

	/* we currently do not check if the caller has permissions for this file */
-	File fileDesc = FileOpenForTransmit(filename, fileFlags, fileMode);
+	File fileDesc = FileOpenForTransmitPerm(filename, fileFlags, fileMode);
	FileCompat fileCompat = FileCompatFromFileStart(fileDesc);

	/*
@@ -136,12 +136,23 @@ FreeStringInfo(StringInfo stringInfo)

 /*
- * FileOpenForTransmit opens file with the given filename and flags. On success,
- * the function returns the internal file handle for the opened file. On failure
- * the function errors out.
+ * FileOpenForTransmit opens a file with FileOpenForTransmitPerm(), passing
+ * the default file mode for the fileMode parameter.
  */
 File
-FileOpenForTransmit(const char *filename, int fileFlags, int fileMode)
+FileOpenForTransmit(const char *filename, int fileFlags)
+{
+	return FileOpenForTransmitPerm(filename, fileFlags, pg_file_create_mode);
+}
+
+
+/*
+ * FileOpenForTransmitPerm opens a file with the given filename and flags. On
+ * success, the function returns the internal file handle for the opened file.
+ * On failure the function errors out.
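+ *
+ * (pg_file_create_mode comes from common/file_perm.h: it is 0600 by default
+ * and 0640 when the cluster was initialized with group read access, so the
+ * FileOpenForTransmit() wrapper above keeps transmitted files consistent
+ * with the rest of the data directory instead of hard-coding
+ * S_IRUSR | S_IWUSR.)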
+ */ +File +FileOpenForTransmitPerm(const char *filename, int fileFlags, int fileMode) { struct stat fileStat; diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c index 7b58c6d05b3..14f5b462404 100644 --- a/src/backend/distributed/metadata/metadata_sync.c +++ b/src/backend/distributed/metadata/metadata_sync.c @@ -999,7 +999,7 @@ MarkObjectsDistributedCreateCommand(List *addresses, appendStringInfo(insertDistributedObjectsCommand, ") "); appendStringInfo(insertDistributedObjectsCommand, - "SELECT citus_internal_add_object_metadata(" + "SELECT citus_internal.add_object_metadata(" "typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) " "FROM distributed_object_data;"); @@ -1134,7 +1134,7 @@ DistributionCreateCommand(CitusTableCacheEntry *cacheEntry) } appendStringInfo(insertDistributionCommand, - "SELECT citus_internal_add_partition_metadata " + "SELECT citus_internal.add_partition_metadata " "(%s::regclass, '%c', %s, %d, '%c')", quote_literal_cstr(qualifiedRelationName), distributionMethod, @@ -1176,7 +1176,7 @@ DistributionDeleteMetadataCommand(Oid relationId) char *qualifiedRelationName = generate_qualified_relation_name(relationId); appendStringInfo(deleteCommand, - "SELECT pg_catalog.citus_internal_delete_partition_metadata(%s)", + "SELECT citus_internal.delete_partition_metadata(%s)", quote_literal_cstr(qualifiedRelationName)); return deleteCommand->data; @@ -1259,7 +1259,7 @@ ShardListInsertCommand(List *shardIntervalList) appendStringInfo(insertPlacementCommand, ") "); appendStringInfo(insertPlacementCommand, - "SELECT citus_internal_add_placement_metadata(" + "SELECT citus_internal.add_placement_metadata(" "shardid, shardlength, groupid, placementid) " "FROM placement_data;"); @@ -1315,7 +1315,7 @@ ShardListInsertCommand(List *shardIntervalList) appendStringInfo(insertShardCommand, ") "); appendStringInfo(insertShardCommand, - "SELECT citus_internal_add_shard_metadata(relationname, shardid, " + "SELECT citus_internal.add_shard_metadata(relationname, shardid, " "storagetype, shardminvalue, shardmaxvalue) " "FROM shard_data;"); @@ -1354,7 +1354,7 @@ ShardDeleteCommandList(ShardInterval *shardInterval) StringInfo deleteShardCommand = makeStringInfo(); appendStringInfo(deleteShardCommand, - "SELECT citus_internal_delete_shard_metadata(%ld);", shardId); + "SELECT citus_internal.delete_shard_metadata(%ld);", shardId); return list_make1(deleteShardCommand->data); } @@ -1424,7 +1424,7 @@ ColocationIdUpdateCommand(Oid relationId, uint32 colocationId) StringInfo command = makeStringInfo(); char *qualifiedRelationName = generate_qualified_relation_name(relationId); appendStringInfo(command, - "SELECT citus_internal_update_relation_colocation(%s::regclass, %d)", + "SELECT citus_internal.update_relation_colocation(%s::regclass, %d)", quote_literal_cstr(qualifiedRelationName), colocationId); return command->data; @@ -4056,7 +4056,7 @@ citus_internal_database_command(PG_FUNCTION_ARGS) } else { - ereport(ERROR, (errmsg("citus_internal_database_command() can only be used " + ereport(ERROR, (errmsg("citus_internal.database_command() can only be used " "for CREATE DATABASE command by Citus."))); } @@ -4209,7 +4209,7 @@ ColocationGroupDeleteCommand(uint32 colocationId) StringInfo deleteColocationCommand = makeStringInfo(); appendStringInfo(deleteColocationCommand, - "SELECT pg_catalog.citus_internal_delete_colocation_metadata(%d)", + "SELECT citus_internal.delete_colocation_metadata(%d)", 
colocationId); return deleteColocationCommand->data; @@ -4225,7 +4225,7 @@ TenantSchemaInsertCommand(Oid schemaId, uint32 colocationId) { StringInfo command = makeStringInfo(); appendStringInfo(command, - "SELECT pg_catalog.citus_internal_add_tenant_schema(%s, %u)", + "SELECT citus_internal.add_tenant_schema(%s, %u)", RemoteSchemaIdExpressionById(schemaId), colocationId); return command->data; @@ -4241,7 +4241,7 @@ TenantSchemaDeleteCommand(char *schemaName) { StringInfo command = makeStringInfo(); appendStringInfo(command, - "SELECT pg_catalog.citus_internal_delete_tenant_schema(%s)", + "SELECT citus_internal.delete_tenant_schema(%s)", RemoteSchemaIdExpressionByName(schemaName)); return command->data; @@ -4258,7 +4258,7 @@ UpdateNoneDistTableMetadataCommand(Oid relationId, char replicationModel, { StringInfo command = makeStringInfo(); appendStringInfo(command, - "SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(%s, '%c', %u, %s)", + "SELECT citus_internal.update_none_dist_table_metadata(%s, '%c', %u, %s)", RemoteTableIdExpression(relationId), replicationModel, colocationId, autoConverted ? "true" : "false"); @@ -4276,7 +4276,7 @@ AddPlacementMetadataCommand(uint64 shardId, uint64 placementId, { StringInfo command = makeStringInfo(); appendStringInfo(command, - "SELECT citus_internal_add_placement_metadata(%ld, %ld, %d, %ld)", + "SELECT citus_internal.add_placement_metadata(%ld, %ld, %d, %ld)", shardId, shardLength, groupId, placementId); return command->data; } @@ -4291,7 +4291,7 @@ DeletePlacementMetadataCommand(uint64 placementId) { StringInfo command = makeStringInfo(); appendStringInfo(command, - "SELECT pg_catalog.citus_internal_delete_placement_metadata(%ld)", + "SELECT citus_internal.delete_placement_metadata(%ld)", placementId); return command->data; } @@ -4957,7 +4957,7 @@ SendTenantSchemaMetadataCommands(MetadataSyncContext *context) StringInfo insertTenantSchemaCommand = makeStringInfo(); appendStringInfo(insertTenantSchemaCommand, - "SELECT pg_catalog.citus_internal_add_tenant_schema(%s, %u)", + "SELECT citus_internal.add_tenant_schema(%s, %u)", RemoteSchemaIdExpressionById(tenantSchemaForm->schemaid), tenantSchemaForm->colocationid); diff --git a/src/backend/distributed/metadata/node_metadata.c b/src/backend/distributed/metadata/node_metadata.c index 094986c855e..d93b133eaed 100644 --- a/src/backend/distributed/metadata/node_metadata.c +++ b/src/backend/distributed/metadata/node_metadata.c @@ -507,7 +507,13 @@ citus_disable_node(PG_FUNCTION_ARGS) { text *nodeNameText = PG_GETARG_TEXT_P(0); int32 nodePort = PG_GETARG_INT32(1); - bool synchronousDisableNode = PG_GETARG_BOOL(2); + + bool synchronousDisableNode = 1; + Assert(PG_NARGS() == 2 || PG_NARGS() == 3); + if (PG_NARGS() == 3) + { + synchronousDisableNode = PG_GETARG_BOOL(2); + } char *nodeName = text_to_cstring(nodeNameText); WorkerNode *workerNode = ModifiableWorkerNode(nodeName, nodePort); @@ -1692,7 +1698,7 @@ EnsureParentSessionHasExclusiveLockOnPgDistNode(pid_t parentSessionPid) if (!parentHasExclusiveLock) { ereport(ERROR, (errmsg("lock is not held by the caller. 
								"Unexpected caller "
-								"for citus_internal_mark_node_not_synced")));
+								"for citus_internal.mark_node_not_synced")));
	}
}

diff --git a/src/backend/distributed/operations/delete_protocol.c b/src/backend/distributed/operations/delete_protocol.c
index c36121b00ba..39651715853 100644
--- a/src/backend/distributed/operations/delete_protocol.c
+++ b/src/backend/distributed/operations/delete_protocol.c
@@ -426,10 +426,9 @@ ExecuteDropShardPlacementCommandRemotely(ShardPlacement *shardPlacement,
						  errdetail("Marking this shard placement for "
									"deletion")));

-		InsertCleanupRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
-												shardRelationName,
-												shardPlacement->groupId,
-												CLEANUP_DEFERRED_ON_SUCCESS);
+		InsertCleanupOnSuccessRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
+														 shardRelationName,
+														 shardPlacement->groupId);

		return;
	}
diff --git a/src/backend/distributed/operations/shard_cleaner.c b/src/backend/distributed/operations/shard_cleaner.c
index 79041453022..db1cad6bcae 100644
--- a/src/backend/distributed/operations/shard_cleaner.c
+++ b/src/backend/distributed/operations/shard_cleaner.c
@@ -452,15 +452,15 @@ CompareCleanupRecordsByObjectType(const void *leftElement, const void *rightElem


 /*
- * InsertCleanupRecordInCurrentTransaction inserts a new pg_dist_cleanup entry
+ * InsertCleanupOnSuccessRecordInCurrentTransaction inserts a new pg_dist_cleanup entry
  * as part of the current transaction. This is primarily useful for deferred drop scenarios,
- * since these records would roll back in case of operation failure.
+ * since these records would roll back in case of operation failure. For the same reason,
+ * it always sets the policy type to CLEANUP_DEFERRED_ON_SUCCESS.
  */
 void
-InsertCleanupRecordInCurrentTransaction(CleanupObject objectType,
-										char *objectName,
-										int nodeGroupId,
-										CleanupPolicy policy)
+InsertCleanupOnSuccessRecordInCurrentTransaction(CleanupObject objectType,
+												 char *objectName,
+												 int nodeGroupId)
 {
	/* We must have a valid OperationId. Any operation requring cleanup
	 * will call RegisterOperationNeedingCleanup.
	 */
@@ -482,7 +482,8 @@ InsertCleanupRecordInCurrentTransaction(CleanupObject objectType,
	values[Anum_pg_dist_cleanup_object_type - 1] = Int32GetDatum(objectType);
	values[Anum_pg_dist_cleanup_object_name - 1] = CStringGetTextDatum(objectName);
	values[Anum_pg_dist_cleanup_node_group_id - 1] = Int32GetDatum(nodeGroupId);
-	values[Anum_pg_dist_cleanup_policy_type - 1] = Int32GetDatum(policy);
+	values[Anum_pg_dist_cleanup_policy_type - 1] =
+		Int32GetDatum(CLEANUP_DEFERRED_ON_SUCCESS);

	/* open cleanup relation and insert new tuple */
	Oid relationId = DistCleanupRelationId();
@@ -499,23 +500,27 @@ InsertCleanupRecordInCurrentTransaction(CleanupObject objectType,


 /*
- * InsertCleanupRecordInSubtransaction inserts a new pg_dist_cleanup entry in a
+ * InsertCleanupRecordOutsideTransaction inserts a new pg_dist_cleanup entry in a
  * separate transaction to ensure the record persists after rollback. We should
  * delete these records if the operation completes successfully.
  *
- * For failure scenarios, use a subtransaction (direct insert via localhost).
+ * This is used in scenarios where we need to clean up resources on operation
+ * completion (CLEANUP_ALWAYS) or on failure (CLEANUP_ON_FAILURE).
*/ void -InsertCleanupRecordInSubtransaction(CleanupObject objectType, - char *objectName, - int nodeGroupId, - CleanupPolicy policy) +InsertCleanupRecordOutsideTransaction(CleanupObject objectType, + char *objectName, + int nodeGroupId, + CleanupPolicy policy) { /* We must have a valid OperationId. Any operation requring cleanup * will call RegisterOperationNeedingCleanup. */ Assert(CurrentOperationId != INVALID_OPERATION_ID); + /* assert the circumstance noted in function comment */ + Assert(policy == CLEANUP_ALWAYS || policy == CLEANUP_ON_FAILURE); + StringInfo sequenceName = makeStringInfo(); appendStringInfo(sequenceName, "%s.%s", PG_CATALOG, diff --git a/src/backend/distributed/operations/shard_split.c b/src/backend/distributed/operations/shard_split.c index cf9f301b746..4baf0fb241b 100644 --- a/src/backend/distributed/operations/shard_split.c +++ b/src/backend/distributed/operations/shard_split.c @@ -733,11 +733,11 @@ CreateSplitShardsForShardGroup(List *shardGroupSplitIntervalListList, workerPlacementNode->workerPort))); } - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - ConstructQualifiedShardName( - shardInterval), - workerPlacementNode->groupId, - CLEANUP_ON_FAILURE); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, + ConstructQualifiedShardName( + shardInterval), + workerPlacementNode->groupId, + CLEANUP_ON_FAILURE); /* Create new split child shard on the specified placement list */ CreateObjectOnPlacement(splitShardCreationCommandList, @@ -1314,7 +1314,7 @@ DropShardListMetadata(List *shardIntervalList) { ListCell *commandCell = NULL; - /* send the commands one by one (calls citus_internal_delete_shard_metadata internally) */ + /* send the commands one by one (calls citus_internal.delete_shard_metadata internally) */ List *shardMetadataDeleteCommandList = ShardDeleteCommandList(shardInterval); foreach(commandCell, shardMetadataDeleteCommandList) { @@ -1717,11 +1717,11 @@ CreateDummyShardsForShardGroup(HTAB *mapOfPlacementToDummyShardList, /* Log shard in pg_dist_cleanup. Given dummy shards are transient resources, * we want to cleanup irrespective of operation success or failure. */ - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - ConstructQualifiedShardName( - shardInterval), - workerPlacementNode->groupId, - CLEANUP_ALWAYS); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, + ConstructQualifiedShardName( + shardInterval), + workerPlacementNode->groupId, + CLEANUP_ALWAYS); /* Create dummy source shard on the specified placement list */ CreateObjectOnPlacement(splitShardCreationCommandList, @@ -1780,11 +1780,11 @@ CreateDummyShardsForShardGroup(HTAB *mapOfPlacementToDummyShardList, /* Log shard in pg_dist_cleanup. Given dummy shards are transient resources, * we want to cleanup irrespective of operation success or failure. 
		 */
-		InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
-											ConstructQualifiedShardName(
-												shardInterval),
-											sourceWorkerNode->groupId,
-											CLEANUP_ALWAYS);
+		InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
+											  ConstructQualifiedShardName(
+												  shardInterval),
+											  sourceWorkerNode->groupId,
+											  CLEANUP_ALWAYS);

		/* Create dummy split child shard on source worker node */
		CreateObjectOnPlacement(splitShardCreationCommandList, sourceWorkerNode);
diff --git a/src/backend/distributed/operations/shard_transfer.c b/src/backend/distributed/operations/shard_transfer.c
index 805ef39d706..737086752d6 100644
--- a/src/backend/distributed/operations/shard_transfer.c
+++ b/src/backend/distributed/operations/shard_transfer.c
@@ -294,6 +294,17 @@ citus_move_shard_placement(PG_FUNCTION_ARGS)
	CheckCitusVersion(ERROR);
	EnsureCoordinator();

+	List *referenceTableIdList = NIL;
+
+	if (HasNodesWithMissingReferenceTables(&referenceTableIdList))
+	{
+		ereport(ERROR, (errmsg("there are missing reference tables on some nodes"),
+						errhint("Copy reference tables first with "
+								"replicate_reference_tables() or use "
+								"citus_rebalance_start(), which will do it "
+								"automatically.")));
+	}
+
	int64 shardId = PG_GETARG_INT64(0);
	char *sourceNodeName = text_to_cstring(PG_GETARG_TEXT_P(1));
	int32 sourceNodePort = PG_GETARG_INT32(2);
@@ -593,10 +604,10 @@ InsertDeferredDropCleanupRecordsForShards(List *shardIntervalList)
		 * We also log cleanup record in the current transaction. If the current transaction rolls back,
		 * we do not generate a record at all.
		 */
-		InsertCleanupRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
-												qualifiedShardName,
-												placement->groupId,
-												CLEANUP_DEFERRED_ON_SUCCESS);
+		InsertCleanupOnSuccessRecordInCurrentTransaction(
+			CLEANUP_OBJECT_SHARD_PLACEMENT,
+			qualifiedShardName,
+			placement->groupId);
		}
	}
}
@@ -623,10 +634,9 @@ InsertCleanupRecordsForShardPlacementsOnNode(List *shardIntervalList,
		 * We also log cleanup record in the current transaction. If the current transaction rolls back,
		 * we do not generate a record at all.
*/ - InsertCleanupRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - qualifiedShardName, - groupId, - CLEANUP_DEFERRED_ON_SUCCESS); + InsertCleanupOnSuccessRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, + qualifiedShardName, + groupId); } } @@ -1382,10 +1392,11 @@ CopyShardTablesViaLogicalReplication(List *shardIntervalList, char *sourceNodeNa char *tableOwner = TableOwner(shardInterval->relationId); /* drop the shard we created on the target, in case of failure */ - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - ConstructQualifiedShardName(shardInterval), - GroupForNode(targetNodeName, targetNodePort), - CLEANUP_ON_FAILURE); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, + ConstructQualifiedShardName(shardInterval), + GroupForNode(targetNodeName, + targetNodePort), + CLEANUP_ON_FAILURE); SendCommandListToWorkerOutsideTransaction(targetNodeName, targetNodePort, tableOwner, @@ -1455,10 +1466,11 @@ CopyShardTablesViaBlockWrites(List *shardIntervalList, char *sourceNodeName, char *tableOwner = TableOwner(shardInterval->relationId); /* drop the shard we created on the target, in case of failure */ - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - ConstructQualifiedShardName(shardInterval), - GroupForNode(targetNodeName, targetNodePort), - CLEANUP_ON_FAILURE); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, + ConstructQualifiedShardName(shardInterval), + GroupForNode(targetNodeName, + targetNodePort), + CLEANUP_ON_FAILURE); SendCommandListToWorkerOutsideTransaction(targetNodeName, targetNodePort, tableOwner, ddlCommandList); @@ -2035,7 +2047,7 @@ UpdateColocatedShardPlacementMetadataOnWorkers(int64 shardId, StringInfo updateCommand = makeStringInfo(); appendStringInfo(updateCommand, - "SELECT citus_internal_update_placement_metadata(%ld, %d, %d)", + "SELECT citus_internal.update_placement_metadata(%ld, %d, %d)", colocatedShard->shardId, sourceGroupId, targetGroupId); SendCommandToWorkersWithMetadata(updateCommand->data); diff --git a/src/backend/distributed/planner/recursive_planning.c b/src/backend/distributed/planner/recursive_planning.c index 6c42046ff5c..9f520fa5f51 100644 --- a/src/backend/distributed/planner/recursive_planning.c +++ b/src/backend/distributed/planner/recursive_planning.c @@ -1097,8 +1097,8 @@ RecursivelyPlanCTEs(Query *query, RecursivePlanningContext *planningContext) if (query->hasRecursive) { return DeferredError(ERRCODE_FEATURE_NOT_SUPPORTED, - "recursive CTEs are not supported in distributed " - "queries", + "recursive CTEs are only supported when they " + "contain a filter on the distribution column", NULL, NULL); } diff --git a/src/backend/distributed/replication/multi_logical_replication.c b/src/backend/distributed/replication/multi_logical_replication.c index 056bc9a455f..08e6c557308 100644 --- a/src/backend/distributed/replication/multi_logical_replication.c +++ b/src/backend/distributed/replication/multi_logical_replication.c @@ -1335,10 +1335,10 @@ CreatePublications(MultiConnection *connection, WorkerNode *worker = FindWorkerNode(connection->hostname, connection->port); - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_PUBLICATION, - entry->name, - worker->groupId, - CLEANUP_ALWAYS); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_PUBLICATION, + entry->name, + worker->groupId, + CLEANUP_ALWAYS); ExecuteCriticalRemoteCommand(connection, DISABLE_DDL_PROPAGATION); ExecuteCriticalRemoteCommand(connection, createPublicationCommand->data); @@ 
-1435,10 +1435,10 @@ CreateReplicationSlots(MultiConnection *sourceConnection, WorkerNode *worker = FindWorkerNode(sourceConnection->hostname, sourceConnection->port); - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_REPLICATION_SLOT, - replicationSlot->name, - worker->groupId, - CLEANUP_ALWAYS); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_REPLICATION_SLOT, + replicationSlot->name, + worker->groupId, + CLEANUP_ALWAYS); if (!firstReplicationSlot) { @@ -1506,10 +1506,10 @@ CreateSubscriptions(MultiConnection *sourceConnection, quote_identifier(GetUserNameFromId(ownerId, false)) ))); - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_USER, - target->subscriptionOwnerName, - worker->groupId, - CLEANUP_ALWAYS); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_USER, + target->subscriptionOwnerName, + worker->groupId, + CLEANUP_ALWAYS); StringInfo conninfo = makeStringInfo(); appendStringInfo(conninfo, "host='%s' port=%d user='%s' dbname='%s' " @@ -1567,10 +1567,10 @@ CreateSubscriptions(MultiConnection *sourceConnection, pfree(createSubscriptionCommand->data); pfree(createSubscriptionCommand); - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SUBSCRIPTION, - target->subscriptionName, - worker->groupId, - CLEANUP_ALWAYS); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SUBSCRIPTION, + target->subscriptionName, + worker->groupId, + CLEANUP_ALWAYS); ExecuteCriticalRemoteCommand(target->superuserConnection, psprintf( "ALTER SUBSCRIPTION %s OWNER TO %s", diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index ad5a14a2555..45e212e8bd6 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -895,22 +895,13 @@ DecrementExternalClientBackendCounterAtExit(int code, Datum arg) static void CreateRequiredDirectories(void) { - const char *subdirs[] = { - "pg_foreign_file", - "pg_foreign_file/cached", - ("base/" PG_JOB_CACHE_DIR) - }; + const char *subdir = ("base/" PG_JOB_CACHE_DIR); - for (int dirNo = 0; dirNo < lengthof(subdirs); dirNo++) + if (MakePGDirectory(subdir) != 0 && errno != EEXIST) { - int ret = mkdir(subdirs[dirNo], S_IRWXU); - - if (ret != 0 && errno != EEXIST) - { - ereport(ERROR, (errcode_for_file_access(), - errmsg("could not create directory \"%s\": %m", - subdirs[dirNo]))); - } + ereport(ERROR, (errcode_for_file_access(), + errmsg("could not create directory \"%s\": %m", + subdir))); } } diff --git a/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql b/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql index 6a8481e8be2..070d46dded9 100644 --- a/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql +++ b/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql @@ -28,3 +28,28 @@ REVOKE ALL ON FUNCTION citus_internal.start_management_transaction FROM PUBLIC; #include "udfs/citus_internal_add_colocation_metadata/12.2-1.sql" #include "udfs/citus_unmark_object_distributed/12.2-1.sql" +#include "udfs/citus_internal_add_object_metadata/12.2-1.sql" +#include "udfs/citus_internal_add_partition_metadata/12.2-1.sql" +#include "udfs/citus_internal_add_placement_metadata/12.2-1.sql" +#include "udfs/citus_internal_add_shard_metadata/12.2-1.sql" +#include "udfs/citus_internal_add_tenant_schema/12.2-1.sql" +#include "udfs/citus_internal_adjust_local_clock_to_remote/12.2-1.sql" +#include "udfs/citus_internal_delete_colocation_metadata/12.2-1.sql" +#include "udfs/citus_internal_delete_partition_metadata/12.2-1.sql" +#include 
"udfs/citus_internal_delete_placement_metadata/12.2-1.sql" +#include "udfs/citus_internal_delete_shard_metadata/12.2-1.sql" +#include "udfs/citus_internal_delete_tenant_schema/12.2-1.sql" +#include "udfs/citus_internal_local_blocked_processes/12.2-1.sql" +#include "udfs/citus_internal_global_blocked_processes/12.2-1.sql" +#include "udfs/citus_blocking_pids/12.2-1.sql" +#include "udfs/citus_isolation_test_session_is_blocked/12.2-1.sql" +DROP VIEW IF EXISTS pg_catalog.citus_lock_waits; +#include "udfs/citus_lock_waits/12.2-1.sql" + +#include "udfs/citus_internal_mark_node_not_synced/12.2-1.sql" +#include "udfs/citus_internal_unregister_tenant_schema_globally/12.2-1.sql" +#include "udfs/citus_drop_trigger/12.2-1.sql" +#include "udfs/citus_internal_update_none_dist_table_metadata/12.2-1.sql" +#include "udfs/citus_internal_update_placement_metadata/12.2-1.sql" +#include "udfs/citus_internal_update_relation_colocation/12.2-1.sql" +#include "udfs/repl_origin_helper/12.2-1.sql" diff --git a/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql b/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql index f82b12d1eab..7c5e543c39b 100644 --- a/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql +++ b/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql @@ -1,6 +1,6 @@ -- citus--12.2-1--12.1-1 -DROP FUNCTION pg_catalog.citus_internal_database_command(text); +DROP FUNCTION citus_internal.database_command(text); DROP FUNCTION citus_internal.acquire_citus_advisory_object_class_lock(int, cstring); #include "../udfs/citus_add_rebalance_strategy/10.1-1.sql" @@ -24,6 +24,33 @@ ALTER TABLE pg_catalog.pg_dist_transaction DROP COLUMN outer_xid; REVOKE USAGE ON SCHEMA citus_internal FROM PUBLIC; DROP FUNCTION citus_internal.add_colocation_metadata(int, int, int, regtype, oid); +DROP FUNCTION citus_internal.add_object_metadata(text, text[], text[], integer, integer, boolean); +DROP FUNCTION citus_internal.add_partition_metadata(regclass, "char", text, integer, "char"); +DROP FUNCTION citus_internal.add_placement_metadata(bigint, bigint, integer, bigint); +DROP FUNCTION citus_internal.add_shard_metadata(regclass, bigint, "char", text, text); +DROP FUNCTION citus_internal.add_tenant_schema(oid, integer); +DROP FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock); +DROP FUNCTION citus_internal.delete_colocation_metadata(int); +DROP FUNCTION citus_internal.delete_partition_metadata(regclass); +DROP FUNCTION citus_internal.delete_placement_metadata(bigint); +DROP FUNCTION citus_internal.delete_shard_metadata(bigint); +DROP FUNCTION citus_internal.delete_tenant_schema(oid); +DROP FUNCTION citus_internal.local_blocked_processes(); +#include "../udfs/citus_blocking_pids/11.0-1.sql" +#include "../udfs/citus_isolation_test_session_is_blocked/11.1-1.sql" +DROP VIEW IF EXISTS pg_catalog.citus_lock_waits; +#include "../udfs/citus_lock_waits/11.0-1.sql" +DROP FUNCTION citus_internal.global_blocked_processes(); + +DROP FUNCTION citus_internal.mark_node_not_synced(int, int); +DROP FUNCTION citus_internal.unregister_tenant_schema_globally(oid, text); +#include "../udfs/citus_drop_trigger/12.0-1.sql" +DROP FUNCTION citus_internal.update_none_dist_table_metadata(oid, "char", bigint, boolean); +DROP FUNCTION citus_internal.update_placement_metadata(bigint, integer, integer); +DROP FUNCTION citus_internal.update_relation_colocation(oid, int); +DROP FUNCTION citus_internal.start_replication_origin_tracking(); +DROP FUNCTION 
citus_internal.stop_replication_origin_tracking(); +DROP FUNCTION citus_internal.is_replication_origin_tracking_active(); #include "../udfs/citus_internal_add_colocation_metadata/11.0-1.sql" diff --git a/src/backend/distributed/sql/udfs/citus_blocking_pids/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_blocking_pids/12.2-1.sql new file mode 100644 index 00000000000..4e747ff4fc9 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_blocking_pids/12.2-1.sql @@ -0,0 +1,34 @@ +DROP FUNCTION pg_catalog.citus_blocking_pids; +CREATE FUNCTION pg_catalog.citus_blocking_pids(pBlockedPid integer) +RETURNS int4[] AS $$ + DECLARE + mLocalBlockingPids int4[]; + mRemoteBlockingPids int4[]; + mLocalGlobalPid int8; + BEGIN + SELECT pg_catalog.old_pg_blocking_pids(pBlockedPid) INTO mLocalBlockingPids; + + IF (array_length(mLocalBlockingPids, 1) > 0) THEN + RETURN mLocalBlockingPids; + END IF; + + -- pg says we're not blocked locally; check whether we're blocked globally. + SELECT global_pid INTO mLocalGlobalPid + FROM get_all_active_transactions() WHERE process_id = pBlockedPid; + + SELECT array_agg(global_pid) INTO mRemoteBlockingPids FROM ( + WITH activeTransactions AS ( + SELECT global_pid FROM get_all_active_transactions() + ), blockingTransactions AS ( + SELECT blocking_global_pid FROM citus_internal.global_blocked_processes() + WHERE waiting_global_pid = mLocalGlobalPid + ) + SELECT activeTransactions.global_pid FROM activeTransactions, blockingTransactions + WHERE activeTransactions.global_pid = blockingTransactions.blocking_global_pid + ) AS sub; + + RETURN mRemoteBlockingPids; + END; +$$ LANGUAGE plpgsql; + +REVOKE ALL ON FUNCTION citus_blocking_pids(integer) FROM PUBLIC; diff --git a/src/backend/distributed/sql/udfs/citus_blocking_pids/latest.sql b/src/backend/distributed/sql/udfs/citus_blocking_pids/latest.sql index c7e607c1c34..4e747ff4fc9 100644 --- a/src/backend/distributed/sql/udfs/citus_blocking_pids/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_blocking_pids/latest.sql @@ -20,7 +20,7 @@ RETURNS int4[] AS $$ WITH activeTransactions AS ( SELECT global_pid FROM get_all_active_transactions() ), blockingTransactions AS ( - SELECT blocking_global_pid FROM citus_internal_global_blocked_processes() + SELECT blocking_global_pid FROM citus_internal.global_blocked_processes() WHERE waiting_global_pid = mLocalGlobalPid ) SELECT activeTransactions.global_pid FROM activeTransactions, blockingTransactions diff --git a/src/backend/distributed/sql/udfs/citus_drop_trigger/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_drop_trigger/12.2-1.sql new file mode 100644 index 00000000000..6e4c52209a6 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_drop_trigger/12.2-1.sql @@ -0,0 +1,68 @@ +CREATE OR REPLACE FUNCTION pg_catalog.citus_drop_trigger() + RETURNS event_trigger + LANGUAGE plpgsql + SET search_path = pg_catalog + AS $cdbdt$ +DECLARE + constraint_event_count INTEGER; + v_obj record; + dropped_table_is_a_partition boolean := false; +BEGIN + FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects() + WHERE object_type IN ('table', 'foreign table') + LOOP + -- first drop the table and metadata on the workers + -- then drop all the shards on the workers + -- finally remove the pg_dist_partition entry on the coordinator + PERFORM master_remove_distributed_table_metadata_from_workers(v_obj.objid, v_obj.schema_name, v_obj.object_name); + + -- If both original and normal values are false, the dropped table was a partition + -- that was dropped as a result of its parent being 
dropped + -- NOTE: the other way around is not true: + -- the table being a partition doesn't imply both original and normal values are false + SELECT (v_obj.original = false AND v_obj.normal = false) INTO dropped_table_is_a_partition; + + -- The partition's shards will be dropped when dropping the parent's shards, so we can skip: + -- i.e. we call citus_drop_all_shards with drop_shards_metadata_only parameter set to true + IF dropped_table_is_a_partition + THEN + PERFORM citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := true); + ELSE + PERFORM citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false); + END IF; + + PERFORM master_remove_partition_metadata(v_obj.objid, v_obj.schema_name, v_obj.object_name); + END LOOP; + + FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects() + LOOP + -- Remove entries from pg_catalog.pg_dist_schema for all dropped tenant schemas. + -- Also delete the corresponding colocation group from pg_catalog.pg_dist_colocation. + -- + -- Although normally we automatically delete the colocation groups when they become empty, + -- we don't do so for the colocation groups that are created for tenant schemas. For this + -- reason, here we need to delete the colocation group when the tenant schema is dropped. + IF v_obj.object_type = 'schema' AND EXISTS (SELECT 1 FROM pg_catalog.pg_dist_schema WHERE schemaid = v_obj.objid) + THEN + PERFORM citus_internal.unregister_tenant_schema_globally(v_obj.objid, v_obj.object_name); + END IF; + + -- remove entries from citus.pg_dist_object for all dropped root (objsubid = 0) objects + PERFORM master_unmark_object_distributed(v_obj.classid, v_obj.objid, v_obj.objsubid); + END LOOP; + + SELECT COUNT(*) INTO constraint_event_count + FROM pg_event_trigger_dropped_objects() + WHERE object_type IN ('table constraint'); + + IF constraint_event_count > 0 + THEN + -- Tell utility hook that a table constraint is dropped so we might + -- need to undistribute some of the citus local tables that are not + -- connected to any reference tables. + PERFORM notify_constraint_dropped(); + END IF; +END; +$cdbdt$; +COMMENT ON FUNCTION pg_catalog.citus_drop_trigger() + IS 'perform checks and actions at the end of DROP actions'; diff --git a/src/backend/distributed/sql/udfs/citus_drop_trigger/latest.sql b/src/backend/distributed/sql/udfs/citus_drop_trigger/latest.sql index 312099aeb54..6e4c52209a6 100644 --- a/src/backend/distributed/sql/udfs/citus_drop_trigger/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_drop_trigger/latest.sql @@ -44,7 +44,7 @@ BEGIN -- reason, here we need to delete the colocation group when the tenant schema is dropped. 
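-- Illustrative aside (not part of this patch): the end-to-end effect relied on here is
-- that dropping a tenant schema removes both its pg_dist_schema row and its colocation
-- group; "tenant_s1" below is a hypothetical schema name.
--   DROP SCHEMA tenant_s1 CASCADE;
--   SELECT * FROM pg_dist_schema;      -- the tenant_s1 row should be gone
--   SELECT * FROM pg_dist_colocation;  -- ... and so should its colocation group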
IF v_obj.object_type = 'schema' AND EXISTS (SELECT 1 FROM pg_catalog.pg_dist_schema WHERE schemaid = v_obj.objid) THEN - PERFORM pg_catalog.citus_internal_unregister_tenant_schema_globally(v_obj.objid, v_obj.object_name); + PERFORM citus_internal.unregister_tenant_schema_globally(v_obj.objid, v_obj.object_name); END IF; -- remove entries from citus.pg_dist_object for all dropped root (objsubid = 0) objects diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_colocation_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_add_colocation_metadata/12.2-1.sql index 66b876e251a..e054448011c 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_add_colocation_metadata/12.2-1.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_add_colocation_metadata/12.2-1.sql @@ -11,3 +11,17 @@ CREATE OR REPLACE FUNCTION citus_internal.add_colocation_metadata( COMMENT ON FUNCTION citus_internal.add_colocation_metadata(int,int,int,regtype,oid) IS 'Inserts a co-location group into pg_dist_colocation'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_colocation_metadata( + colocation_id int, + shard_count int, + replication_factor int, + distribution_column_type regtype, + distribution_column_collation oid) + RETURNS void + LANGUAGE C + STRICT + AS 'MODULE_PATHNAME'; + +COMMENT ON FUNCTION pg_catalog.citus_internal_add_colocation_metadata(int,int,int,regtype,oid) IS + 'Inserts a co-location group into pg_dist_colocation'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_colocation_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_add_colocation_metadata/latest.sql index 66b876e251a..e054448011c 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_add_colocation_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_add_colocation_metadata/latest.sql @@ -11,3 +11,17 @@ CREATE OR REPLACE FUNCTION citus_internal.add_colocation_metadata( COMMENT ON FUNCTION citus_internal.add_colocation_metadata(int,int,int,regtype,oid) IS 'Inserts a co-location group into pg_dist_colocation'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_colocation_metadata( + colocation_id int, + shard_count int, + replication_factor int, + distribution_column_type regtype, + distribution_column_collation oid) + RETURNS void + LANGUAGE C + STRICT + AS 'MODULE_PATHNAME'; + +COMMENT ON FUNCTION pg_catalog.citus_internal_add_colocation_metadata(int,int,int,regtype,oid) IS + 'Inserts a co-location group into pg_dist_colocation'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_object_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_add_object_metadata/12.2-1.sql new file mode 100644 index 00000000000..560bce69072 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_add_object_metadata/12.2-1.sql @@ -0,0 +1,29 @@ +CREATE OR REPLACE FUNCTION citus_internal.add_object_metadata( + typeText text, + objNames text[], + objArgs text[], + distribution_argument_index int, + colocationid int, + force_delegation bool) + RETURNS void + LANGUAGE C + STRICT + AS 'MODULE_PATHNAME', $$citus_internal_add_object_metadata$$; + +COMMENT ON FUNCTION citus_internal.add_object_metadata(text,text[],text[],int,int,bool) IS + 'Inserts distributed object into pg_dist_object'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_object_metadata( + typeText text, + objNames text[], + objArgs text[], + distribution_argument_index int, + colocationid int, + force_delegation bool) + RETURNS void + 
LANGUAGE C + STRICT + AS 'MODULE_PATHNAME'; + +COMMENT ON FUNCTION pg_catalog.citus_internal_add_object_metadata(text,text[],text[],int,int,bool) IS + 'Inserts distributed object into pg_dist_object'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_object_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_add_object_metadata/latest.sql index d35198f9005..560bce69072 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_add_object_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_add_object_metadata/latest.sql @@ -1,3 +1,18 @@ +CREATE OR REPLACE FUNCTION citus_internal.add_object_metadata( + typeText text, + objNames text[], + objArgs text[], + distribution_argument_index int, + colocationid int, + force_delegation bool) + RETURNS void + LANGUAGE C + STRICT + AS 'MODULE_PATHNAME', $$citus_internal_add_object_metadata$$; + +COMMENT ON FUNCTION citus_internal.add_object_metadata(text,text[],text[],int,int,bool) IS + 'Inserts distributed object into pg_dist_object'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_object_metadata( typeText text, objNames text[], diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_partition_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_add_partition_metadata/12.2-1.sql new file mode 100644 index 00000000000..511a5b1c171 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_add_partition_metadata/12.2-1.sql @@ -0,0 +1,22 @@ +CREATE OR REPLACE FUNCTION citus_internal.add_partition_metadata( + relation_id regclass, distribution_method "char", + distribution_column text, colocation_id integer, + replication_model "char") + RETURNS void + LANGUAGE C + AS 'MODULE_PATHNAME', $$citus_internal_add_partition_metadata$$; + +COMMENT ON FUNCTION citus_internal.add_partition_metadata(regclass, "char", text, integer, "char") IS + 'Inserts into pg_dist_partition with user checks'; + + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_partition_metadata( + relation_id regclass, distribution_method "char", + distribution_column text, colocation_id integer, + replication_model "char") + RETURNS void + LANGUAGE C + AS 'MODULE_PATHNAME'; + +COMMENT ON FUNCTION pg_catalog.citus_internal_add_partition_metadata(regclass, "char", text, integer, "char") IS + 'Inserts into pg_dist_partition with user checks'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_partition_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_add_partition_metadata/latest.sql index ed4f853a68c..511a5b1c171 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_add_partition_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_add_partition_metadata/latest.sql @@ -1,3 +1,15 @@ +CREATE OR REPLACE FUNCTION citus_internal.add_partition_metadata( + relation_id regclass, distribution_method "char", + distribution_column text, colocation_id integer, + replication_model "char") + RETURNS void + LANGUAGE C + AS 'MODULE_PATHNAME', $$citus_internal_add_partition_metadata$$; + +COMMENT ON FUNCTION citus_internal.add_partition_metadata(regclass, "char", text, integer, "char") IS + 'Inserts into pg_dist_partition with user checks'; + + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_partition_metadata( relation_id regclass, distribution_method "char", distribution_column text, colocation_id integer, diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/12.2-1.sql 
b/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/12.2-1.sql new file mode 100644 index 00000000000..339fc2948c5 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/12.2-1.sql @@ -0,0 +1,36 @@ +-- create a new function, without shardstate +CREATE OR REPLACE FUNCTION citus_internal.add_placement_metadata( + shard_id bigint, + shard_length bigint, group_id integer, + placement_id bigint) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_add_placement_metadata$$; + +COMMENT ON FUNCTION citus_internal.add_placement_metadata(bigint, bigint, integer, bigint) IS + 'Inserts into pg_dist_shard_placement with user checks'; + +-- create a new function, without shardstate +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_placement_metadata( + shard_id bigint, + shard_length bigint, group_id integer, + placement_id bigint) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_add_placement_metadata$$; + +COMMENT ON FUNCTION pg_catalog.citus_internal_add_placement_metadata(bigint, bigint, integer, bigint) IS + 'Inserts into pg_dist_shard_placement with user checks'; + +-- replace the old one so it would call the old C function with shard_state +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_placement_metadata( + shard_id bigint, shard_state integer, + shard_length bigint, group_id integer, + placement_id bigint) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_add_placement_metadata_legacy$$; + +COMMENT ON FUNCTION pg_catalog.citus_internal_add_placement_metadata(bigint, integer, bigint, integer, bigint) IS + 'Inserts into pg_dist_shard_placement with user checks'; + diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/latest.sql index 9d1dd4ffa36..339fc2948c5 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_add_placement_metadata/latest.sql @@ -1,3 +1,15 @@ +-- create a new function, without shardstate +CREATE OR REPLACE FUNCTION citus_internal.add_placement_metadata( + shard_id bigint, + shard_length bigint, group_id integer, + placement_id bigint) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_add_placement_metadata$$; + +COMMENT ON FUNCTION citus_internal.add_placement_metadata(bigint, bigint, integer, bigint) IS + 'Inserts into pg_dist_shard_placement with user checks'; + -- create a new function, without shardstate CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_placement_metadata( shard_id bigint, diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/12.2-1.sql new file mode 100644 index 00000000000..82c29f054dc --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/12.2-1.sql @@ -0,0 +1,21 @@ +CREATE OR REPLACE FUNCTION citus_internal.add_shard_metadata( + relation_id regclass, shard_id bigint, + storage_type "char", shard_min_value text, + shard_max_value text + ) + RETURNS void + LANGUAGE C + AS 'MODULE_PATHNAME', $$citus_internal_add_shard_metadata$$; +COMMENT ON FUNCTION citus_internal.add_shard_metadata(regclass, bigint, "char", text, text) IS + 'Inserts into pg_dist_shard with user checks'; + +CREATE OR REPLACE FUNCTION 
pg_catalog.citus_internal_add_shard_metadata( + relation_id regclass, shard_id bigint, + storage_type "char", shard_min_value text, + shard_max_value text + ) + RETURNS void + LANGUAGE C + AS 'MODULE_PATHNAME'; +COMMENT ON FUNCTION pg_catalog.citus_internal_add_shard_metadata(regclass, bigint, "char", text, text) IS + 'Inserts into pg_dist_shard with user checks'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/latest.sql index 7411d917981..82c29f054dc 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_add_shard_metadata/latest.sql @@ -1,3 +1,14 @@ +CREATE OR REPLACE FUNCTION citus_internal.add_shard_metadata( + relation_id regclass, shard_id bigint, + storage_type "char", shard_min_value text, + shard_max_value text + ) + RETURNS void + LANGUAGE C + AS 'MODULE_PATHNAME', $$citus_internal_add_shard_metadata$$; +COMMENT ON FUNCTION citus_internal.add_shard_metadata(regclass, bigint, "char", text, text) IS + 'Inserts into pg_dist_shard with user checks'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_shard_metadata( relation_id regclass, shard_id bigint, storage_type "char", shard_min_value text, diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_tenant_schema/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_add_tenant_schema/12.2-1.sql new file mode 100644 index 00000000000..028848f90c3 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_add_tenant_schema/12.2-1.sql @@ -0,0 +1,17 @@ +CREATE OR REPLACE FUNCTION citus_internal.add_tenant_schema(schema_id Oid, colocation_id int) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME', $$citus_internal_add_tenant_schema$$; + +COMMENT ON FUNCTION citus_internal.add_tenant_schema(Oid, int) IS + 'insert given tenant schema into pg_dist_schema with given colocation id'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_tenant_schema(schema_id Oid, colocation_id int) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME'; + +COMMENT ON FUNCTION pg_catalog.citus_internal_add_tenant_schema(Oid, int) IS + 'insert given tenant schema into pg_dist_schema with given colocation id'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_add_tenant_schema/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_add_tenant_schema/latest.sql index 56b3cae8449..028848f90c3 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_add_tenant_schema/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_add_tenant_schema/latest.sql @@ -1,3 +1,12 @@ +CREATE OR REPLACE FUNCTION citus_internal.add_tenant_schema(schema_id Oid, colocation_id int) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME', $$citus_internal_add_tenant_schema$$; + +COMMENT ON FUNCTION citus_internal.add_tenant_schema(Oid, int) IS + 'insert given tenant schema into pg_dist_schema with given colocation id'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_add_tenant_schema(schema_id Oid, colocation_id int) RETURNS void LANGUAGE C diff --git a/src/backend/distributed/sql/udfs/citus_internal_adjust_local_clock_to_remote/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_adjust_local_clock_to_remote/12.2-1.sql new file mode 100644 index 00000000000..36d37a9e66d --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_adjust_local_clock_to_remote/12.2-1.sql @@ 
-0,0 +1,17 @@ +CREATE OR REPLACE FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock) + RETURNS void + LANGUAGE C STABLE PARALLEL SAFE STRICT + AS 'MODULE_PATHNAME', $$citus_internal_adjust_local_clock_to_remote$$; +COMMENT ON FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock) + IS 'Internal UDF used to adjust the local clock to the maximum of nodes in the cluster'; + +REVOKE ALL ON FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock) FROM PUBLIC; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_adjust_local_clock_to_remote(pg_catalog.cluster_clock) + RETURNS void + LANGUAGE C STABLE PARALLEL SAFE STRICT + AS 'MODULE_PATHNAME', $$citus_internal_adjust_local_clock_to_remote$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_adjust_local_clock_to_remote(pg_catalog.cluster_clock) + IS 'Internal UDF used to adjust the local clock to the maximum of nodes in the cluster'; + +REVOKE ALL ON FUNCTION pg_catalog.citus_internal_adjust_local_clock_to_remote(pg_catalog.cluster_clock) FROM PUBLIC; diff --git a/src/backend/distributed/sql/udfs/citus_internal_adjust_local_clock_to_remote/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_adjust_local_clock_to_remote/latest.sql index 240f7a9b74b..36d37a9e66d 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_adjust_local_clock_to_remote/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_adjust_local_clock_to_remote/latest.sql @@ -1,3 +1,12 @@ +CREATE OR REPLACE FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock) + RETURNS void + LANGUAGE C STABLE PARALLEL SAFE STRICT + AS 'MODULE_PATHNAME', $$citus_internal_adjust_local_clock_to_remote$$; +COMMENT ON FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock) + IS 'Internal UDF used to adjust the local clock to the maximum of nodes in the cluster'; + +REVOKE ALL ON FUNCTION citus_internal.adjust_local_clock_to_remote(pg_catalog.cluster_clock) FROM PUBLIC; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_adjust_local_clock_to_remote(pg_catalog.cluster_clock) RETURNS void LANGUAGE C STABLE PARALLEL SAFE STRICT diff --git a/src/backend/distributed/sql/udfs/citus_internal_database_command/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_database_command/12.2-1.sql index 9f6d873cc7d..2c6e916c016 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_database_command/12.2-1.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_database_command/12.2-1.sql @@ -1,10 +1,10 @@ -- --- citus_internal_database_command run given database command without transaction block restriction. +-- citus_internal.database_command runs the given database command without transaction block restrictions.
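-- A minimal usage sketch (illustrative only, not part of this patch, assuming a
-- superuser / maintenance context; "mydb" is a hypothetical database name):
--   SELECT citus_internal.database_command('CREATE DATABASE mydb');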
-CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_database_command(command text) +CREATE OR REPLACE FUNCTION citus_internal.database_command(command text) RETURNS void LANGUAGE C VOLATILE AS 'MODULE_PATHNAME', $$citus_internal_database_command$$; -COMMENT ON FUNCTION pg_catalog.citus_internal_database_command(text) IS +COMMENT ON FUNCTION citus_internal.database_command(text) IS 'run a database command without transaction block restrictions'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_database_command/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_database_command/latest.sql index 9f6d873cc7d..2c6e916c016 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_database_command/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_database_command/latest.sql @@ -1,10 +1,10 @@ -- --- citus_internal_database_command run given database command without transaction block restriction. +-- citus_internal.database_command runs the given database command without transaction block restrictions. -CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_database_command(command text) +CREATE OR REPLACE FUNCTION citus_internal.database_command(command text) RETURNS void LANGUAGE C VOLATILE AS 'MODULE_PATHNAME', $$citus_internal_database_command$$; -COMMENT ON FUNCTION pg_catalog.citus_internal_database_command(text) IS +COMMENT ON FUNCTION citus_internal.database_command(text) IS 'run a database command without transaction block restrictions'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_colocation_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_colocation_metadata/12.2-1.sql new file mode 100644 index 00000000000..cb56a25cd3e --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_colocation_metadata/12.2-1.sql @@ -0,0 +1,19 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_colocation_metadata( + colocation_id int) + RETURNS void + LANGUAGE C + STRICT + AS 'MODULE_PATHNAME', $$citus_internal_delete_colocation_metadata$$; + +COMMENT ON FUNCTION citus_internal.delete_colocation_metadata(int) IS + 'deletes a co-location group from pg_dist_colocation'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_colocation_metadata( + colocation_id int) + RETURNS void + LANGUAGE C + STRICT + AS 'MODULE_PATHNAME'; + +COMMENT ON FUNCTION pg_catalog.citus_internal_delete_colocation_metadata(int) IS + 'deletes a co-location group from pg_dist_colocation'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_colocation_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_colocation_metadata/latest.sql index d4c3f1be957..cb56a25cd3e 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_delete_colocation_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_colocation_metadata/latest.sql @@ -1,3 +1,13 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_colocation_metadata( + colocation_id int) + RETURNS void + LANGUAGE C + STRICT + AS 'MODULE_PATHNAME', $$citus_internal_delete_colocation_metadata$$; + +COMMENT ON FUNCTION citus_internal.delete_colocation_metadata(int) IS + 'deletes a co-location group from pg_dist_colocation'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_colocation_metadata( colocation_id int) RETURNS void diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_partition_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_partition_metadata/12.2-1.sql new
file mode 100644 index 00000000000..693815abf3e --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_partition_metadata/12.2-1.sql @@ -0,0 +1,14 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_partition_metadata(table_name regclass) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_delete_partition_metadata$$; +COMMENT ON FUNCTION citus_internal.delete_partition_metadata(regclass) IS + 'Deletes a row from pg_dist_partition with table ownership checks'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_partition_metadata(table_name regclass) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME'; +COMMENT ON FUNCTION pg_catalog.citus_internal_delete_partition_metadata(regclass) IS + 'Deletes a row from pg_dist_partition with table ownership checks'; + diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_partition_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_partition_metadata/latest.sql index c7cb5455d6b..693815abf3e 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_delete_partition_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_partition_metadata/latest.sql @@ -1,3 +1,10 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_partition_metadata(table_name regclass) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_delete_partition_metadata$$; +COMMENT ON FUNCTION citus_internal.delete_partition_metadata(regclass) IS + 'Deletes a row from pg_dist_partition with table ownership checks'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_partition_metadata(table_name regclass) RETURNS void LANGUAGE C STRICT diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/12.2-1.sql new file mode 100644 index 00000000000..f78c9a08e98 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/12.2-1.sql @@ -0,0 +1,19 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_placement_metadata( + placement_id bigint) +RETURNS void +LANGUAGE C +VOLATILE +AS 'MODULE_PATHNAME', +$$citus_internal_delete_placement_metadata$$; +COMMENT ON FUNCTION citus_internal.delete_placement_metadata(bigint) + IS 'Delete placement with given id from pg_dist_placement metadata table.'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_placement_metadata( + placement_id bigint) +RETURNS void +LANGUAGE C +VOLATILE +AS 'MODULE_PATHNAME', +$$citus_internal_delete_placement_metadata$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_delete_placement_metadata(bigint) + IS 'Delete placement with given id from pg_dist_placement metadata table.'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/latest.sql index 5af65f0bebb..f78c9a08e98 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_placement_metadata/latest.sql @@ -1,3 +1,13 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_placement_metadata( + placement_id bigint) +RETURNS void +LANGUAGE C +VOLATILE +AS 'MODULE_PATHNAME', +$$citus_internal_delete_placement_metadata$$; +COMMENT ON FUNCTION citus_internal.delete_placement_metadata(bigint) + IS 'Delete placement with given id from pg_dist_placement 
metadata table.'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_placement_metadata( placement_id bigint) RETURNS void diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_shard_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_shard_metadata/12.2-1.sql new file mode 100644 index 00000000000..bcd121b0de6 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_shard_metadata/12.2-1.sql @@ -0,0 +1,14 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_shard_metadata(shard_id bigint) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_delete_shard_metadata$$; +COMMENT ON FUNCTION citus_internal.delete_shard_metadata(bigint) IS + 'Deletes rows from pg_dist_shard and pg_dist_shard_placement with user checks'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_shard_metadata(shard_id bigint) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME'; +COMMENT ON FUNCTION pg_catalog.citus_internal_delete_shard_metadata(bigint) IS + 'Deletes rows from pg_dist_shard and pg_dist_shard_placement with user checks'; + diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_shard_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_shard_metadata/latest.sql index 7bfd86bdd9a..bcd121b0de6 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_delete_shard_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_shard_metadata/latest.sql @@ -1,3 +1,10 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_shard_metadata(shard_id bigint) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_delete_shard_metadata$$; +COMMENT ON FUNCTION citus_internal.delete_shard_metadata(bigint) IS + 'Deletes rows from pg_dist_shard and pg_dist_shard_placement with user checks'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_shard_metadata(shard_id bigint) RETURNS void LANGUAGE C STRICT diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_tenant_schema/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_tenant_schema/12.2-1.sql new file mode 100644 index 00000000000..2c36108b4c6 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_tenant_schema/12.2-1.sql @@ -0,0 +1,17 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_tenant_schema(schema_id Oid) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME', $$citus_internal_delete_tenant_schema$$; + +COMMENT ON FUNCTION citus_internal.delete_tenant_schema(Oid) IS + 'delete given tenant schema from pg_dist_schema'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_tenant_schema(schema_id Oid) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME'; + +COMMENT ON FUNCTION pg_catalog.citus_internal_delete_tenant_schema(Oid) IS + 'delete given tenant schema from pg_dist_schema'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_delete_tenant_schema/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_delete_tenant_schema/latest.sql index 4a2bf00675d..2c36108b4c6 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_delete_tenant_schema/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_delete_tenant_schema/latest.sql @@ -1,3 +1,12 @@ +CREATE OR REPLACE FUNCTION citus_internal.delete_tenant_schema(schema_id Oid) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME', $$citus_internal_delete_tenant_schema$$; + +COMMENT ON FUNCTION 
citus_internal.delete_tenant_schema(Oid) IS + 'delete given tenant schema from pg_dist_schema'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_delete_tenant_schema(schema_id Oid) RETURNS void LANGUAGE C diff --git a/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/12.2-1.sql new file mode 100644 index 00000000000..da8e98c20cf --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/12.2-1.sql @@ -0,0 +1,35 @@ +CREATE OR REPLACE FUNCTION citus_internal.global_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_global_blocked_processes$$; +COMMENT ON FUNCTION citus_internal.global_blocked_processes() +IS 'returns a global list of blocked backends originating from this node'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_global_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_global_blocked_processes$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_global_blocked_processes() +IS 'returns a global list of blocked backends originating from this node'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/latest.sql index 510cdf93d4d..da8e98c20cf 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/latest.sql @@ -1,3 +1,21 @@ +CREATE OR REPLACE FUNCTION citus_internal.global_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_global_blocked_processes$$; +COMMENT ON FUNCTION citus_internal.global_blocked_processes() +IS 'returns a global list of blocked backends originating from this node'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_global_blocked_processes( OUT waiting_global_pid int8, OUT waiting_pid int4, diff --git a/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/12.2-1.sql new file mode 100644 index 00000000000..b27f16d5316 --- /dev/null +++ 
b/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/12.2-1.sql @@ -0,0 +1,35 @@ +CREATE OR REPLACE FUNCTION citus_internal.local_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_local_blocked_processes$$; +COMMENT ON FUNCTION citus_internal.local_blocked_processes() +IS 'returns all local lock wait chains, that start from any citus backend'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_local_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_local_blocked_processes$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_local_blocked_processes() +IS 'returns all local lock wait chains, that start from any citus backend'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/latest.sql index 3157a9aad83..b27f16d5316 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/latest.sql @@ -1,3 +1,21 @@ +CREATE OR REPLACE FUNCTION citus_internal.local_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_local_blocked_processes$$; +COMMENT ON FUNCTION citus_internal.local_blocked_processes() +IS 'returns all local lock wait chains, that start from any citus backend'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_local_blocked_processes( OUT waiting_global_pid int8, OUT waiting_pid int4, diff --git a/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/12.2-1.sql new file mode 100644 index 00000000000..8635b969946 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/12.2-1.sql @@ -0,0 +1,13 @@ +CREATE OR REPLACE FUNCTION citus_internal.mark_node_not_synced(parent_pid int, nodeid int) + RETURNS VOID + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_mark_node_not_synced$$; +COMMENT ON FUNCTION citus_internal.mark_node_not_synced(int, int) + IS 'marks given node not synced by unsetting metadatasynced column at the start of the nontransactional sync.'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_mark_node_not_synced(parent_pid int, 
nodeid int) + RETURNS VOID + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_mark_node_not_synced$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_mark_node_not_synced(int, int) + IS 'marks given node not synced by unsetting metadatasynced column at the start of the nontransactional sync.'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/latest.sql index 0d90c8f1afe..8635b969946 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/latest.sql @@ -1,3 +1,10 @@ +CREATE OR REPLACE FUNCTION citus_internal.mark_node_not_synced(parent_pid int, nodeid int) + RETURNS VOID + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_mark_node_not_synced$$; +COMMENT ON FUNCTION citus_internal.mark_node_not_synced(int, int) + IS 'marks given node not synced by unsetting metadatasynced column at the start of the nontransactional sync.'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_mark_node_not_synced(parent_pid int, nodeid int) RETURNS VOID LANGUAGE C STRICT diff --git a/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/12.2-1.sql new file mode 100644 index 00000000000..b07eb425d9e --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/12.2-1.sql @@ -0,0 +1,15 @@ +CREATE OR REPLACE FUNCTION citus_internal.unregister_tenant_schema_globally(schema_id Oid, schema_name text) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME', $$citus_internal_unregister_tenant_schema_globally$$; +COMMENT ON FUNCTION citus_internal.unregister_tenant_schema_globally(schema_id Oid, schema_name text) IS + 'Delete a tenant schema and the corresponding colocation group from metadata tables.'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_unregister_tenant_schema_globally(schema_id Oid, schema_name text) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME'; +COMMENT ON FUNCTION pg_catalog.citus_internal_unregister_tenant_schema_globally(schema_id Oid, schema_name text) IS + 'Delete a tenant schema and the corresponding colocation group from metadata tables.'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/latest.sql index 1863f1ddf6f..b07eb425d9e 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/latest.sql @@ -1,3 +1,11 @@ +CREATE OR REPLACE FUNCTION citus_internal.unregister_tenant_schema_globally(schema_id Oid, schema_name text) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME', $$citus_internal_unregister_tenant_schema_globally$$; +COMMENT ON FUNCTION citus_internal.unregister_tenant_schema_globally(schema_id Oid, schema_name text) IS + 'Delete a tenant schema and the corresponding colocation group from metadata tables.'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_unregister_tenant_schema_globally(schema_id Oid, schema_name text) RETURNS void LANGUAGE C diff --git a/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/12.2-1.sql
b/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/12.2-1.sql new file mode 100644 index 00000000000..cab96054412 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/12.2-1.sql @@ -0,0 +1,23 @@ +CREATE OR REPLACE FUNCTION citus_internal.update_none_dist_table_metadata( + relation_id oid, + replication_model "char", + colocation_id bigint, + auto_converted boolean) +RETURNS void +LANGUAGE C +VOLATILE +AS 'MODULE_PATHNAME', $$citus_internal_update_none_dist_table_metadata$$; +COMMENT ON FUNCTION citus_internal.update_none_dist_table_metadata(oid, "char", bigint, boolean) + IS 'Update pg_dist_partition metadata table for given none-distributed table, to convert it to another type of none-distributed table.'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_none_dist_table_metadata( + relation_id oid, + replication_model "char", + colocation_id bigint, + auto_converted boolean) +RETURNS void +LANGUAGE C +VOLATILE +AS 'MODULE_PATHNAME'; +COMMENT ON FUNCTION pg_catalog.citus_internal_update_none_dist_table_metadata(oid, "char", bigint, boolean) + IS 'Update pg_dist_partition metadata table for given none-distributed table, to convert it to another type of none-distributed table.'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/latest.sql index bcd05d8d014..cab96054412 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/latest.sql @@ -1,3 +1,15 @@ +CREATE OR REPLACE FUNCTION citus_internal.update_none_dist_table_metadata( + relation_id oid, + replication_model "char", + colocation_id bigint, + auto_converted boolean) +RETURNS void +LANGUAGE C +VOLATILE +AS 'MODULE_PATHNAME', $$citus_internal_update_none_dist_table_metadata$$; +COMMENT ON FUNCTION citus_internal.update_none_dist_table_metadata(oid, "char", bigint, boolean) + IS 'Update pg_dist_partition metadata table for given none-distributed table, to convert it to another type of none-distributed table.'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_none_dist_table_metadata( relation_id oid, replication_model "char", diff --git a/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/12.2-1.sql new file mode 100644 index 00000000000..b7c47364744 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/12.2-1.sql @@ -0,0 +1,19 @@ +CREATE OR REPLACE FUNCTION citus_internal.update_placement_metadata( + shard_id bigint, source_group_id integer, + target_group_id integer) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_update_placement_metadata$$; + +COMMENT ON FUNCTION citus_internal.update_placement_metadata(bigint, integer, integer) IS + 'Updates into pg_dist_placement with user checks'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_placement_metadata( + shard_id bigint, source_group_id integer, + target_group_id integer) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME'; + +COMMENT ON FUNCTION pg_catalog.citus_internal_update_placement_metadata(bigint, integer, integer) IS + 'Updates into pg_dist_placement with user checks'; diff --git 
a/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/latest.sql index 7cb71774011..b7c47364744 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/latest.sql @@ -1,3 +1,13 @@ +CREATE OR REPLACE FUNCTION citus_internal.update_placement_metadata( + shard_id bigint, source_group_id integer, + target_group_id integer) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_update_placement_metadata$$; + +COMMENT ON FUNCTION citus_internal.update_placement_metadata(bigint, integer, integer) IS + 'Updates into pg_dist_placement with user checks'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_placement_metadata( shard_id bigint, source_group_id integer, target_group_id integer) diff --git a/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/12.2-1.sql new file mode 100644 index 00000000000..2266895296f --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/12.2-1.sql @@ -0,0 +1,14 @@ +CREATE OR REPLACE FUNCTION citus_internal.update_relation_colocation(relation_id Oid, target_colocation_id int) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_update_relation_colocation$$; +COMMENT ON FUNCTION citus_internal.update_relation_colocation(oid, int) IS + 'Updates colocationId field of pg_dist_partition for the relation_id'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_relation_colocation(relation_id Oid, target_colocation_id int) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME'; +COMMENT ON FUNCTION pg_catalog.citus_internal_update_relation_colocation(oid, int) IS + 'Updates colocationId field of pg_dist_partition for the relation_id'; + diff --git a/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/latest.sql index a7f2ec1c604..2266895296f 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/latest.sql @@ -1,3 +1,10 @@ +CREATE OR REPLACE FUNCTION citus_internal.update_relation_colocation(relation_id Oid, target_colocation_id int) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_update_relation_colocation$$; +COMMENT ON FUNCTION citus_internal.update_relation_colocation(oid, int) IS + 'Updates colocationId field of pg_dist_partition for the relation_id'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_relation_colocation(relation_id Oid, target_colocation_id int) RETURNS void LANGUAGE C STRICT diff --git a/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/12.2-1.sql new file mode 100644 index 00000000000..6f494fa74a5 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/12.2-1.sql @@ -0,0 +1,45 @@ +CREATE OR REPLACE FUNCTION pg_catalog.citus_isolation_test_session_is_blocked(pBlockedPid integer, pInterestingPids integer[]) +RETURNS boolean AS $$ + DECLARE + mBlockedGlobalPid int8; + workerProcessId integer := 
current_setting('citus.isolation_test_session_remote_process_id'); + coordinatorProcessId integer := current_setting('citus.isolation_test_session_process_id'); + BEGIN + IF pg_catalog.old_pg_isolation_test_session_is_blocked(pBlockedPid, pInterestingPids) THEN + RETURN true; + END IF; + + -- pg says we're not blocked locally; check whether we're blocked globally. + -- Note that the worker process may be blocked or waiting for a lock, so we need + -- to get the transaction number in both cases. The following IF provides the + -- transaction number when the worker process is waiting for another session. + IF EXISTS (SELECT 1 FROM get_global_active_transactions() + WHERE process_id = workerProcessId AND pBlockedPid = coordinatorProcessId) THEN + SELECT global_pid INTO mBlockedGlobalPid FROM get_global_active_transactions() + WHERE process_id = workerProcessId AND pBlockedPid = coordinatorProcessId; + ELSE + -- Check whether transactions initiated from the coordinator get locked + SELECT global_pid INTO mBlockedGlobalPid + FROM get_all_active_transactions() WHERE process_id = pBlockedPid; + END IF; + + -- We convert the blocking_global_pid to a regular pid and only look at + -- blocks caused by the interesting pids, or the workerProcessId. If we + -- don't do that we might find unrelated blocks caused by some random + -- other processes that are not involved in this isolation test. Because we + -- run our isolation tests on a single physical machine, the PID part of + -- the GPID is known to be unique within the whole cluster. + RETURN EXISTS ( + SELECT 1 FROM citus_internal.global_blocked_processes() + WHERE waiting_global_pid = mBlockedGlobalPid + AND ( + citus_pid_for_gpid(blocking_global_pid) in ( + select * from unnest(pInterestingPids) + ) + OR citus_pid_for_gpid(blocking_global_pid) = workerProcessId + ) + ); + END; +$$ LANGUAGE plpgsql; + +REVOKE ALL ON FUNCTION citus_isolation_test_session_is_blocked(integer,integer[]) FROM PUBLIC; diff --git a/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/latest.sql b/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/latest.sql index ff098391002..6f494fa74a5 100644 --- a/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/latest.sql @@ -30,7 +30,7 @@ RETURNS boolean AS $$ -- run our isolation tests on a single physical machine, the PID part of -- the GPID is known to be unique within the whole cluster. 
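-- For illustration (hypothetical values, assuming Citus's usual encoding of
-- gpid = nodeId * 10^10 + pid): a backend with pid 12345 on the node with id 2
-- gets citus_calculate_gpid(2, 12345) = 20000012345, and
-- citus_pid_for_gpid(20000012345) recovers the plain pid 12345.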
RETURN EXISTS ( - SELECT 1 FROM citus_internal_global_blocked_processes() + SELECT 1 FROM citus_internal.global_blocked_processes() WHERE waiting_global_pid = mBlockedGlobalPid AND ( citus_pid_for_gpid(blocking_global_pid) in ( diff --git a/src/backend/distributed/sql/udfs/citus_lock_waits/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_lock_waits/12.2-1.sql new file mode 100644 index 00000000000..880306b992f --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_lock_waits/12.2-1.sql @@ -0,0 +1,47 @@ +SET search_path = 'pg_catalog'; + +CREATE VIEW citus.citus_lock_waits AS +WITH +unique_global_wait_edges_with_calculated_gpids AS ( +SELECT + -- if global_pid is NULL, it is most likely that a backend is blocked on a DDL + -- also for legacy reasons citus_internal.global_blocked_processes() returns groupId, we replace that with nodeIds + case WHEN waiting_global_pid !=0 THEN waiting_global_pid ELSE citus_calculate_gpid(get_nodeid_for_groupid(waiting_node_id), waiting_pid) END waiting_global_pid, + case WHEN blocking_global_pid !=0 THEN blocking_global_pid ELSE citus_calculate_gpid(get_nodeid_for_groupid(blocking_node_id), blocking_pid) END blocking_global_pid, + + -- citus_internal.global_blocked_processes returns groupId, we replace it here with actual + -- nodeId to be consistent with the other views + get_nodeid_for_groupid(blocking_node_id) as blocking_node_id, + get_nodeid_for_groupid(waiting_node_id) as waiting_node_id, + + blocking_transaction_waiting + + FROM citus_internal.global_blocked_processes() +), +unique_global_wait_edges AS +( + SELECT DISTINCT ON(waiting_global_pid, blocking_global_pid) * FROM unique_global_wait_edges_with_calculated_gpids +), +citus_dist_stat_activity_with_calculated_gpids AS +( + -- if global_pid is NULL, it is most likely that a backend is blocked on a DDL + SELECT CASE WHEN global_pid != 0 THEN global_pid ELSE citus_calculate_gpid(nodeid, pid) END global_pid, nodeid, pid, query FROM citus_dist_stat_activity +) +SELECT + waiting.global_pid as waiting_gpid, + blocking.global_pid as blocking_gpid, + waiting.query AS blocked_statement, + blocking.query AS current_statement_in_blocking_process, + waiting.nodeid AS waiting_nodeid, + blocking.nodeid AS blocking_nodeid +FROM + unique_global_wait_edges + JOIN + citus_dist_stat_activity_with_calculated_gpids waiting ON (unique_global_wait_edges.waiting_global_pid = waiting.global_pid) + JOIN + citus_dist_stat_activity_with_calculated_gpids blocking ON (unique_global_wait_edges.blocking_global_pid = blocking.global_pid); + +ALTER VIEW citus.citus_lock_waits SET SCHEMA pg_catalog; +GRANT SELECT ON pg_catalog.citus_lock_waits TO PUBLIC; + +RESET search_path; diff --git a/src/backend/distributed/sql/udfs/citus_lock_waits/latest.sql b/src/backend/distributed/sql/udfs/citus_lock_waits/latest.sql index b3de12632b5..880306b992f 100644 --- a/src/backend/distributed/sql/udfs/citus_lock_waits/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_lock_waits/latest.sql @@ -5,18 +5,18 @@ WITH unique_global_wait_edges_with_calculated_gpids AS ( SELECT -- if global_pid is NULL, it is most likely that a backend is blocked on a DDL - -- also for legacy reasons citus_internal_global_blocked_processes() returns groupId, we replace that with nodeIds + -- also for legacy reasons citus_internal.global_blocked_processes() returns groupId, we replace that with nodeIds case WHEN waiting_global_pid !=0 THEN waiting_global_pid ELSE citus_calculate_gpid(get_nodeid_for_groupid(waiting_node_id), waiting_pid) END waiting_global_pid, 
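-- (an untracked backend surfaces here with a global_pid of 0 rather than NULL,
-- which is presumably why these guards test != 0 instead of IS NULL)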
case WHEN blocking_global_pid !=0 THEN blocking_global_pid ELSE citus_calculate_gpid(get_nodeid_for_groupid(blocking_node_id), blocking_pid) END blocking_global_pid, - -- citus_internal_global_blocked_processes returns groupId, we replace it here with actual + -- citus_internal.global_blocked_processes returns groupId, we replace it here with actual -- nodeId to be consistent with the other views get_nodeid_for_groupid(blocking_node_id) as blocking_node_id, get_nodeid_for_groupid(waiting_node_id) as waiting_node_id, blocking_transaction_waiting - FROM citus_internal_global_blocked_processes() + FROM citus_internal.global_blocked_processes() ), unique_global_wait_edges AS ( diff --git a/src/backend/distributed/sql/udfs/repl_origin_helper/12.2-1.sql b/src/backend/distributed/sql/udfs/repl_origin_helper/12.2-1.sql new file mode 100644 index 00000000000..8c6d175d074 --- /dev/null +++ b/src/backend/distributed/sql/udfs/repl_origin_helper/12.2-1.sql @@ -0,0 +1,41 @@ +CREATE OR REPLACE FUNCTION citus_internal.start_replication_origin_tracking() +RETURNS void +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_start_replication_origin_tracking$$; +COMMENT ON FUNCTION citus_internal.start_replication_origin_tracking() + IS 'To start replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION citus_internal.stop_replication_origin_tracking() +RETURNS void +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_stop_replication_origin_tracking$$; +COMMENT ON FUNCTION citus_internal.stop_replication_origin_tracking() + IS 'To stop replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION citus_internal.is_replication_origin_tracking_active() +RETURNS boolean +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_is_replication_origin_tracking_active$$; +COMMENT ON FUNCTION citus_internal.is_replication_origin_tracking_active() + IS 'To check if replication origin tracking is active for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_start_replication_origin_tracking() +RETURNS void +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_start_replication_origin_tracking$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_start_replication_origin_tracking() + IS 'To start replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_stop_replication_origin_tracking() +RETURNS void +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_stop_replication_origin_tracking$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_stop_replication_origin_tracking() + IS 'To stop replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_is_replication_origin_tracking_active() +RETURNS boolean +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_is_replication_origin_tracking_active$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_is_replication_origin_tracking_active() + IS 'To check if replication origin tracking is active for skipping publishing of duplicated events during internal data movements for CDC'; diff --git a/src/backend/distributed/sql/udfs/repl_origin_helper/latest.sql 
b/src/backend/distributed/sql/udfs/repl_origin_helper/latest.sql index 5fe5a3bb92d..8c6d175d074 100644 --- a/src/backend/distributed/sql/udfs/repl_origin_helper/latest.sql +++ b/src/backend/distributed/sql/udfs/repl_origin_helper/latest.sql @@ -1,3 +1,24 @@ +CREATE OR REPLACE FUNCTION citus_internal.start_replication_origin_tracking() +RETURNS void +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_start_replication_origin_tracking$$; +COMMENT ON FUNCTION citus_internal.start_replication_origin_tracking() + IS 'To start replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION citus_internal.stop_replication_origin_tracking() +RETURNS void +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_stop_replication_origin_tracking$$; +COMMENT ON FUNCTION citus_internal.stop_replication_origin_tracking() + IS 'To stop replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION citus_internal.is_replication_origin_tracking_active() +RETURNS boolean +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_is_replication_origin_tracking_active$$; +COMMENT ON FUNCTION citus_internal.is_replication_origin_tracking_active() + IS 'To check if replication origin tracking is active for skipping publishing of duplicated events during internal data movements for CDC'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_start_replication_origin_tracking() RETURNS void LANGUAGE C STRICT diff --git a/src/backend/distributed/transaction/lock_graph.c b/src/backend/distributed/transaction/lock_graph.c index 82f936243f0..b55a72843bf 100644 --- a/src/backend/distributed/transaction/lock_graph.c +++ b/src/backend/distributed/transaction/lock_graph.c @@ -192,7 +192,7 @@ BuildGlobalWaitGraph(bool onlyDistributedTx) "waiting_node_id, waiting_transaction_num, waiting_transaction_stamp, " "blocking_global_pid,blocking_pid, blocking_node_id, " "blocking_transaction_num, blocking_transaction_stamp, blocking_transaction_waiting " - "FROM citus_internal_local_blocked_processes()"); + "FROM citus_internal.local_blocked_processes()"); } int querySent = SendRemoteCommand(connection, queryString->data); @@ -226,7 +226,7 @@ BuildGlobalWaitGraph(bool onlyDistributedTx) else if (!onlyDistributedTx && colCount != 11) { ereport(WARNING, (errmsg("unexpected number of columns from " - "citus_internal_local_blocked_processes"))); + "citus_internal.local_blocked_processes"))); continue; } diff --git a/src/backend/distributed/transaction/remote_transaction.c b/src/backend/distributed/transaction/remote_transaction.c index 71b6a78dd70..4c26e2478ca 100644 --- a/src/backend/distributed/transaction/remote_transaction.c +++ b/src/backend/distributed/transaction/remote_transaction.c @@ -107,6 +107,12 @@ bool IsMainDB = true; */ char *SuperuserRole = NULL; +/* + * IsMainDBCommandInXact shows if the query sent to the main database requires + * a transaction + */ +bool IsMainDBCommandInXact = true; + /* * start_management_transaction starts a management transaction @@ -190,7 +196,11 @@ RunCitusMainDBQuery(char *query) PostPortNumber, SuperuserRole, MainDb); - RemoteTransactionBegin(MainDBConnection); + + if (IsMainDBCommandInXact) + { + RemoteTransactionBegin(MainDBConnection); + } } SendRemoteCommand(MainDBConnection, query); diff --git a/src/backend/distributed/transaction/transaction_management.c b/src/backend/distributed/transaction/transaction_management.c 
index 29f5b367e97..9c7b456807e 100644 --- a/src/backend/distributed/transaction/transaction_management.c +++ b/src/backend/distributed/transaction/transaction_management.c @@ -333,7 +333,7 @@ CoordinatedTransactionCallback(XactEvent event, void *arg) * If this is a non-Citus main database we should try to commit the prepared * transactions created by the Citus main database on the worker nodes. */ - if (!IsMainDB && MainDBConnection != NULL) + if (!IsMainDB && MainDBConnection != NULL && IsMainDBCommandInXact) { RunCitusMainDBQuery(COMMIT_MANAGEMENT_COMMAND_2PC); CleanCitusMainDBConnection(); @@ -533,7 +533,7 @@ CoordinatedTransactionCallback(XactEvent event, void *arg) * main database query. So if some error happens on the distributed main * database query we wouldn't have committed the current query. */ - if (!IsMainDB && MainDBConnection != NULL) + if (!IsMainDB && MainDBConnection != NULL && IsMainDBCommandInXact) { RunCitusMainDBQuery("COMMIT"); } diff --git a/src/backend/distributed/utils/directory.c b/src/backend/distributed/utils/directory.c index bad585809ef..6701bf8fb13 100644 --- a/src/backend/distributed/utils/directory.c +++ b/src/backend/distributed/utils/directory.c @@ -29,7 +29,7 @@ static bool FileIsLink(const char *filename, struct stat filestat); void CitusCreateDirectory(StringInfo directoryName) { - int makeOK = mkdir(directoryName->data, S_IRWXU); + int makeOK = MakePGDirectory(directoryName->data); if (makeOK != 0) { ereport(ERROR, (errcode_for_file_access(), diff --git a/src/backend/distributed/utils/replication_origin_session_utils.c b/src/backend/distributed/utils/replication_origin_session_utils.c index 370b061be6c..f96e23f8f26 100644 --- a/src/backend/distributed/utils/replication_origin_session_utils.c +++ b/src/backend/distributed/utils/replication_origin_session_utils.c @@ -186,7 +186,7 @@ SetupReplicationOriginRemoteSession(MultiConnection *connection) { StringInfo replicationOriginSessionSetupQuery = makeStringInfo(); appendStringInfo(replicationOriginSessionSetupQuery, - "select pg_catalog.citus_internal_start_replication_origin_tracking();"); + "select citus_internal.start_replication_origin_tracking();"); ExecuteCriticalRemoteCommand(connection, replicationOriginSessionSetupQuery->data); connection->isReplicationOriginSessionSetup = true; @@ -205,7 +205,7 @@ ResetReplicationOriginRemoteSession(MultiConnection *connection) { StringInfo replicationOriginSessionResetQuery = makeStringInfo(); appendStringInfo(replicationOriginSessionResetQuery, - "select pg_catalog.citus_internal_stop_replication_origin_tracking();"); + "select citus_internal.stop_replication_origin_tracking();"); ExecuteCriticalRemoteCommand(connection, replicationOriginSessionResetQuery->data); connection->isReplicationOriginSessionSetup = false; @@ -229,7 +229,7 @@ IsRemoteReplicationOriginSessionSetup(MultiConnection *connection) StringInfo isReplicationOriginSessionSetupQuery = makeStringInfo(); appendStringInfo(isReplicationOriginSessionSetupQuery, - "SELECT pg_catalog.citus_internal_is_replication_origin_tracking_active()"); + "SELECT citus_internal.is_replication_origin_tracking_active()"); bool result = ExecuteRemoteCommandAndCheckResult(connection, isReplicationOriginSessionSetupQuery->data, diff --git a/src/backend/distributed/worker/worker_sql_task_protocol.c b/src/backend/distributed/worker/worker_sql_task_protocol.c index 2cf48fc6ffd..708fee15d02 100644 --- a/src/backend/distributed/worker/worker_sql_task_protocol.c +++ 
b/src/backend/distributed/worker/worker_sql_task_protocol.c @@ -126,7 +126,6 @@ TaskFileDestReceiverStartup(DestReceiver *dest, int operation, const char *nullPrintCharacter = "\\N"; const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY); - const int fileMode = (S_IRUSR | S_IWUSR); /* use the memory context that was in place when the DestReceiver was created */ MemoryContext oldContext = MemoryContextSwitchTo(taskFileDest->memoryContext); @@ -148,8 +147,7 @@ TaskFileDestReceiverStartup(DestReceiver *dest, int operation, taskFileDest->fileCompat = FileCompatFromFileStart(FileOpenForTransmit( taskFileDest->filePath, - fileFlags, - fileMode)); + fileFlags)); if (copyOutState->binary) { diff --git a/src/include/distributed/metadata_sync.h b/src/include/distributed/metadata_sync.h index 9f4c0a24b86..cb111e16e03 100644 --- a/src/include/distributed/metadata_sync.h +++ b/src/include/distributed/metadata_sync.h @@ -189,7 +189,7 @@ extern void SendInterTableRelationshipCommands(MetadataSyncContext *context); #define WORKER_DROP_ALL_SHELL_TABLES \ "CALL pg_catalog.worker_drop_all_shell_tables(%s)" #define CITUS_INTERNAL_MARK_NODE_NOT_SYNCED \ - "SELECT citus_internal_mark_node_not_synced(%d, %d)" + "SELECT citus_internal.mark_node_not_synced(%d, %d)" #define REMOVE_ALL_CITUS_TABLES_COMMAND \ "SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition" diff --git a/src/include/distributed/remote_transaction.h b/src/include/distributed/remote_transaction.h index 2b61c25bdd0..45e2eba70f5 100644 --- a/src/include/distributed/remote_transaction.h +++ b/src/include/distributed/remote_transaction.h @@ -152,5 +152,6 @@ extern bool IsMainDB; extern char *SuperuserRole; extern char *MainDb; extern struct MultiConnection *MainDBConnection; +extern bool IsMainDBCommandInXact; #endif /* REMOTE_TRANSACTION_H */ diff --git a/src/include/distributed/shard_cleaner.h b/src/include/distributed/shard_cleaner.h index e7d3dea1bf7..4967846b2ba 100644 --- a/src/include/distributed/shard_cleaner.h +++ b/src/include/distributed/shard_cleaner.h @@ -81,16 +81,16 @@ typedef enum CleanupPolicy extern OperationId RegisterOperationNeedingCleanup(void); /* - * InsertCleanupRecordInCurrentTransaction inserts a new pg_dist_cleanup entry + * InsertCleanupOnSuccessRecordInCurrentTransaction inserts a new pg_dist_cleanup entry * as part of the current transaction. * * This is primarily useful for deferred cleanup (CLEANUP_DEFERRED_ON_SUCCESS) - * scenarios, since the records would roll back in case of failure. + * scenarios, since the records would roll back in case of failure. For the + * same reason, it always sets the policy type to CLEANUP_DEFERRED_ON_SUCCESS. */ -extern void InsertCleanupRecordInCurrentTransaction(CleanupObject objectType, - char *objectName, - int nodeGroupId, - CleanupPolicy policy); +extern void InsertCleanupOnSuccessRecordInCurrentTransaction(CleanupObject objectType, + char *objectName, + int nodeGroupId); /* * InsertCleanupRecordInSeparateTransaction inserts a new pg_dist_cleanup entry @@ -99,10 +99,10 @@ extern void InsertCleanupRecordInCurrentTransaction(CleanupObject objectType, * This is used in scenarios where we need to clean up resources on operation * completion (CLEANUP_ALWAYS) or on failure (CLEANUP_ON_FAILURE). 
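 * For illustration only (a hypothetical call; CLEANUP_OBJECT_SHARD_PLACEMENT and
 * the variable names are assumptions, not taken from this patch), a shard move
 * might register a placement for cleanup on failure with:
 *
 *   InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
 *                                         qualifiedShardName, nodeGroupId,
 *                                         CLEANUP_ON_FAILURE);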
*/ -extern void InsertCleanupRecordInSubtransaction(CleanupObject objectType, - char *objectName, - int nodeGroupId, - CleanupPolicy policy); +extern void InsertCleanupRecordOutsideTransaction(CleanupObject objectType, + char *objectName, + int nodeGroupId, + CleanupPolicy policy); /* * FinalizeOperationNeedingCleanupOnSuccess is called by an operation to signal diff --git a/src/include/distributed/transmit.h b/src/include/distributed/transmit.h index b86fd91506b..9c2ab87ab65 100644 --- a/src/include/distributed/transmit.h +++ b/src/include/distributed/transmit.h @@ -21,7 +21,8 @@ /* Function declarations for transmitting files between two nodes */ extern void RedirectCopyDataToRegularFile(const char *filename); extern void SendRegularFile(const char *filename); -extern File FileOpenForTransmit(const char *filename, int fileFlags, int fileMode); +extern File FileOpenForTransmit(const char *filename, int fileFlags); +extern File FileOpenForTransmitPerm(const char *filename, int fileFlags, int fileMode); #endif /* TRANSMIT_H */ diff --git a/src/test/regress/Pipfile b/src/test/regress/Pipfile index d4b2cc39f07..a863d795ec3 100644 --- a/src/test/regress/Pipfile +++ b/src/test/regress/Pipfile @@ -16,6 +16,7 @@ pytest-timeout = "*" pytest-xdist = "*" pytest-repeat = "*" pyyaml = "*" +werkzeug = "==2.3.7" [dev-packages] black = "*" diff --git a/src/test/regress/Pipfile.lock b/src/test/regress/Pipfile.lock index bdb42a1c319..c0f8734a02e 100644 --- a/src/test/regress/Pipfile.lock +++ b/src/test/regress/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256": "b92bf682aeeea1a66a16beaf78584a5318fd0ae908ce85c7e2a4807aa2bee532" + "sha256": "bf20354a2d9c93d46041ac4c6fa427588ebfe29343ea0b02138b9079f2d82f18" }, "pipfile-spec": 6, "requires": { @@ -119,11 +119,11 @@ }, "certifi": { "hashes": [ - "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082", - "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9" + "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f", + "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1" ], "markers": "python_version >= '3.6'", - "version": "==2023.7.22" + "version": "==2024.2.2" }, "cffi": { "hashes": [ @@ -180,7 +180,7 @@ "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956", "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357" ], - "markers": "python_version >= '3.8'", + "markers": "platform_python_implementation != 'PyPy'", "version": "==1.16.0" }, "click": { "hashes": [ @@ -200,33 +200,42 @@ }, "cryptography": { "hashes": [ - "sha256:004b6ccc95943f6a9ad3142cfabcc769d7ee38a3f60fb0dddbfb431f818c3a67", - "sha256:047c4603aeb4bbd8db2756e38f5b8bd7e94318c047cfe4efeb5d715e08b49311", - "sha256:0d9409894f495d465fe6fda92cb70e8323e9648af912d5b9141d616df40a87b8", - "sha256:23a25c09dfd0d9f28da2352503b23e086f8e78096b9fd585d1d14eca01613e13", - "sha256:2ed09183922d66c4ec5fdaa59b4d14e105c084dd0febd27452de8f6f74704143", - "sha256:35c00f637cd0b9d5b6c6bd11b6c3359194a8eba9c46d4e875a3660e3b400005f", - "sha256:37480760ae08065437e6573d14be973112c9e6dcaf5f11d00147ee74f37a3829", - "sha256:3b224890962a2d7b57cf5eeb16ccaafba6083f7b811829f00476309bce2fe0fd", - "sha256:5a0f09cefded00e648a127048119f77bc2b2ec61e736660b5789e638f43cc397", - "sha256:5b72205a360f3b6176485a333256b9bcd48700fc755fef51c8e7e67c4b63e3ac", - "sha256:7e53db173370dea832190870e975a1e09c86a879b613948f09eb49324218c14d", - "sha256:7febc3094125fc126a7f6fb1f420d0da639f3f32cb15c8ff0dc3997c4549f51a", - 
"sha256:80907d3faa55dc5434a16579952ac6da800935cd98d14dbd62f6f042c7f5e839", - "sha256:86defa8d248c3fa029da68ce61fe735432b047e32179883bdb1e79ed9bb8195e", - "sha256:8ac4f9ead4bbd0bc8ab2d318f97d85147167a488be0e08814a37eb2f439d5cf6", - "sha256:93530900d14c37a46ce3d6c9e6fd35dbe5f5601bf6b3a5c325c7bffc030344d9", - "sha256:9eeb77214afae972a00dee47382d2591abe77bdae166bda672fb1e24702a3860", - "sha256:b5f4dfe950ff0479f1f00eda09c18798d4f49b98f4e2006d644b3301682ebdca", - "sha256:c3391bd8e6de35f6f1140e50aaeb3e2b3d6a9012536ca23ab0d9c35ec18c8a91", - "sha256:c880eba5175f4307129784eca96f4e70b88e57aa3f680aeba3bab0e980b0f37d", - "sha256:cecfefa17042941f94ab54f769c8ce0fe14beff2694e9ac684176a2535bf9714", - "sha256:e40211b4923ba5a6dc9769eab704bdb3fbb58d56c5b336d30996c24fcf12aadb", - "sha256:efc8ad4e6fc4f1752ebfb58aefece8b4e3c4cae940b0994d43649bdfce8d0d4f" + "sha256:04859aa7f12c2b5f7e22d25198ddd537391f1695df7057c8700f71f26f47a129", + "sha256:069d2ce9be5526a44093a0991c450fe9906cdf069e0e7cd67d9dee49a62b9ebe", + "sha256:0d3ec384058b642f7fb7e7bff9664030011ed1af8f852540c76a1317a9dd0d20", + "sha256:0fab2a5c479b360e5e0ea9f654bcebb535e3aa1e493a715b13244f4e07ea8eec", + "sha256:0fea01527d4fb22ffe38cd98951c9044400f6eff4788cf52ae116e27d30a1ba3", + "sha256:1b797099d221df7cce5ff2a1d272761d1554ddf9a987d3e11f6459b38cd300fd", + "sha256:1e935c2900fb53d31f491c0de04f41110351377be19d83d908c1fd502ae8daa5", + "sha256:20100c22b298c9eaebe4f0b9032ea97186ac2555f426c3e70670f2517989543b", + "sha256:20180da1b508f4aefc101cebc14c57043a02b355d1a652b6e8e537967f1e1b46", + "sha256:25b09b73db78facdfd7dd0fa77a3f19e94896197c86e9f6dc16bce7b37a96504", + "sha256:2619487f37da18d6826e27854a7f9d4d013c51eafb066c80d09c63cf24505306", + "sha256:2eb6368d5327d6455f20327fb6159b97538820355ec00f8cc9464d617caecead", + "sha256:35772a6cffd1f59b85cb670f12faba05513446f80352fe811689b4e439b5d89e", + "sha256:39d5c93e95bcbc4c06313fc6a500cee414ee39b616b55320c1904760ad686938", + "sha256:3d96ea47ce6d0055d5b97e761d37b4e84195485cb5a38401be341fabf23bc32a", + "sha256:4dcab7c25e48fc09a73c3e463d09ac902a932a0f8d0c568238b3696d06bf377b", + "sha256:5fbf0f3f0fac7c089308bd771d2c6c7b7d53ae909dce1db52d8e921f6c19bb3a", + "sha256:6c25e1e9c2ce682d01fc5e2dde6598f7313027343bd14f4049b82ad0402e52cd", + "sha256:762f3771ae40e111d78d77cbe9c1035e886ac04a234d3ee0856bf4ecb3749d54", + "sha256:90147dad8c22d64b2ff7331f8d4cddfdc3ee93e4879796f837bdbb2a0b141e0c", + "sha256:935cca25d35dda9e7bd46a24831dfd255307c55a07ff38fd1a92119cffc34857", + "sha256:93fbee08c48e63d5d1b39ab56fd3fdd02e6c2431c3da0f4edaf54954744c718f", + "sha256:9541c69c62d7446539f2c1c06d7046aef822940d248fa4b8962ff0302862cc1f", + "sha256:c23f03cfd7d9826cdcbad7850de67e18b4654179e01fe9bc623d37c2638eb4ef", + "sha256:c3d1f5a1d403a8e640fa0887e9f7087331abb3f33b0f2207d2cc7f213e4a864c", + "sha256:d1998e545081da0ab276bcb4b33cce85f775adb86a516e8f55b3dac87f469548", + "sha256:d5cf11bc7f0b71fb71af26af396c83dfd3f6eed56d4b6ef95d57867bf1e4ba65", + "sha256:db0480ffbfb1193ac4e1e88239f31314fe4c6cdcf9c0b8712b55414afbf80db4", + "sha256:de4ae486041878dc46e571a4c70ba337ed5233a1344c14a0790c4c4be4bbb8b4", + "sha256:de5086cd475d67113ccb6f9fae6d8fe3ac54a4f9238fd08bfdb07b03d791ff0a", + "sha256:df34312149b495d9d03492ce97471234fd9037aa5ba217c2a6ea890e9166f151", + "sha256:ead69ba488f806fe1b1b4050febafdbf206b81fa476126f3e16110c818bac396" ], "index": "pypi", "markers": "python_version >= '3.7'", - "version": "==41.0.4" + "version": "==42.0.3" }, "docopt": { "hashes": [ @@ -237,11 +246,11 @@ }, "exceptiongroup": { "hashes": [ - 
"sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9", - "sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3" + "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14", + "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68" ], "markers": "python_version < '3.11'", - "version": "==1.1.3" + "version": "==1.2.0" }, "execnet": { "hashes": [ @@ -253,12 +262,12 @@ }, "filelock": { "hashes": [ - "sha256:08c21d87ded6e2b9da6728c3dff51baf1dcecf973b768ef35bcbc3447edb9ad4", - "sha256:2e6f249f1f3654291606e046b09f1fd5eac39b360664c27f5aad072012f8bcbd" + "sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e", + "sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c" ], "index": "pypi", "markers": "python_version >= '3.8'", - "version": "==3.12.4" + "version": "==3.13.1" }, "flask": { "hashes": [ @@ -318,11 +327,11 @@ }, "jinja2": { "hashes": [ - "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852", - "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61" + "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa", + "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90" ], "markers": "python_version >= '3.7'", - "version": "==3.1.2" + "version": "==3.1.3" }, "kaitaistruct": { "hashes": [ @@ -342,69 +351,69 @@ }, "markupsafe": { "hashes": [ - "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e", - "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e", - "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431", - "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686", - "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c", - "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559", - "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc", - "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb", - "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939", - "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c", - "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0", - "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4", - "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9", - "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575", - "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba", - "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d", - "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd", - "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3", - "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00", - "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155", - "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac", - "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52", - "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f", - "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8", - "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b", - "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007", - "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24", - 
"sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea", - "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198", - "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0", - "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee", - "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be", - "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2", - "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1", - "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707", - "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6", - "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c", - "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58", - "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823", - "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779", - "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636", - "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c", - "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad", - "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee", - "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc", - "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2", - "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48", - "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7", - "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e", - "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b", - "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa", - "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5", - "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e", - "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb", - "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9", - "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57", - "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc", - "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc", - "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2", - "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11" + "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf", + "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff", + "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f", + "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3", + "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532", + "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f", + "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617", + "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df", + "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4", + "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906", + "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f", + "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4", + "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8", + 
"sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371", + "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2", + "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465", + "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52", + "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6", + "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169", + "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad", + "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2", + "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0", + "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029", + "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f", + "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a", + "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced", + "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5", + "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c", + "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf", + "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9", + "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb", + "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad", + "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3", + "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1", + "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46", + "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc", + "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a", + "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee", + "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900", + "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5", + "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea", + "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f", + "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5", + "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e", + "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a", + "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f", + "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50", + "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a", + "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b", + "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4", + "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff", + "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2", + "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46", + "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b", + "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf", + "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5", + "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5", + "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab", + "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd", + 
"sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68" ], "markers": "python_version >= '3.7'", - "version": "==2.1.3" + "version": "==2.1.5" }, "mitmproxy": { "editable": true, @@ -491,11 +500,11 @@ }, "pluggy": { "hashes": [ - "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12", - "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7" + "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981", + "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be" ], "markers": "python_version >= '3.8'", - "version": "==1.3.0" + "version": "==1.4.0" }, "protobuf": { "hashes": [ @@ -526,12 +535,12 @@ }, "psycopg": { "hashes": [ - "sha256:7542c45810ea16356e5126c9b4291cbc3802aa326fcbba09ff154fe380de29be", - "sha256:cd711edb64b07d7f8a233c365806caf7e55bbe7cbbd8d5c680f672bb5353c8d5" + "sha256:31144d3fb4c17d78094d9e579826f047d4af1da6a10427d91dfcfb6ecdf6f12b", + "sha256:4d5a0a5a8590906daa58ebd5f3cfc34091377354a1acced269dd10faf55da60e" ], "index": "pypi", "markers": "python_version >= '3.7'", - "version": "==3.1.11" + "version": "==3.1.18" }, "publicsuffix2": { "hashes": [ @@ -542,11 +551,11 @@ }, "pyasn1": { "hashes": [ - "sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57", - "sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde" + "sha256:4439847c58d40b1d0a573d07e3856e95333f1976294494c325775aeca506eb58", + "sha256:6d391a96e59b23130a5cfa74d6fd7f388dbbe26cc8f1edf39fdddf08d9d6676c" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'", - "version": "==0.5.0" + "version": "==0.5.1" }, "pycparser": { "hashes": [ @@ -557,11 +566,11 @@ }, "pyopenssl": { "hashes": [ - "sha256:24f0dc5227396b3e831f4c7f602b950a5e9833d292c8e4a2e06b709292806ae2", - "sha256:276f931f55a452e7dea69c7173e984eb2a4407ce413c918aa34b55f82f9b8bac" + "sha256:6aa33039a93fffa4563e655b61d11364d01264be8ccb49906101e02a334530bf", + "sha256:ba07553fb6fd6a7a2259adb9b84e12302a9a8a75c44046e8bb5d3e5ee887e3c3" ], - "markers": "python_version >= '3.6'", - "version": "==23.2.0" + "markers": "python_version >= '3.7'", + "version": "==24.0.0" }, "pyparsing": { "hashes": [ @@ -579,48 +588,48 @@ }, "pytest": { "hashes": [ - "sha256:1d881c6124e08ff0a1bb75ba3ec0bfd8b5354a01c194ddd5a0a870a48d99b002", - "sha256:a766259cfab564a2ad52cb1aae1b881a75c3eb7e34ca3779697c23ed47c47069" + "sha256:249b1b0864530ba251b7438274c4d251c58d868edaaec8762893ad4a0d71c36c", + "sha256:50fb9cbe836c3f20f0dfa99c565201fb75dc54c8d76373cd1bde06b06657bdb6" ], "index": "pypi", - "markers": "python_version >= '3.7'", - "version": "==7.4.2" + "markers": "python_version >= '3.8'", + "version": "==8.0.0" }, "pytest-asyncio": { "hashes": [ - "sha256:40a7eae6dded22c7b604986855ea48400ab15b069ae38116e8c01238e9eeb64d", - "sha256:8666c1c8ac02631d7c51ba282e0c69a8a452b211ffedf2599099845da5c5c37b" + "sha256:3a048872a9c4ba14c3e90cc1aa20cbc2def7d01c7c8db3777ec281ba9c057675", + "sha256:4e7093259ba018d58ede7d5315131d21923a60f8a6e9ee266ce1589685c89eac" ], "index": "pypi", - "markers": "python_version >= '3.7'", - "version": "==0.21.1" + "markers": "python_version >= '3.8'", + "version": "==0.23.5" }, "pytest-repeat": { "hashes": [ - "sha256:4474a7d9e9137f6d8cc8ae297f8c4168d33c56dd740aa78cfffe562557e6b96e", - "sha256:5cd3289745ab3156d43eb9c8e7f7d00a926f3ae5c9cf425bec649b2fe15bad5b" + "sha256:26ab2df18226af9d5ce441c858f273121e92ff55f5bb311d25755b8d7abdd8ed", + "sha256:ffd3836dfcd67bb270bec648b330e20be37d2966448c4148c4092d1e8aba8185" ], "index": 
"pypi", - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", - "version": "==0.9.1" + "markers": "python_version >= '3.7'", + "version": "==0.9.3" }, "pytest-timeout": { "hashes": [ - "sha256:c07ca07404c612f8abbe22294b23c368e2e5104b521c1790195561f37e1ac3d9", - "sha256:f6f50101443ce70ad325ceb4473c4255e9d74e3c7cd0ef827309dfa4c0d975c6" + "sha256:3b0b95dabf3cb50bac9ef5ca912fa0cfc286526af17afc806824df20c2f72c90", + "sha256:bde531e096466f49398a59f2dde76fa78429a09a12411466f88a07213e220de2" ], "index": "pypi", - "markers": "python_version >= '3.6'", - "version": "==2.1.0" + "markers": "python_version >= '3.7'", + "version": "==2.2.0" }, "pytest-xdist": { "hashes": [ - "sha256:d5ee0520eb1b7bcca50a60a518ab7a7707992812c578198f8b44fdfac78e8c93", - "sha256:ff9daa7793569e6a68544850fd3927cd257cc03a7ef76c95e86915355e82b5f2" + "sha256:cbb36f3d67e0c478baa57fa4edc8843887e0f6cfc42d677530a36d7472b32d8a", + "sha256:d075629c7e00b611df89f490a5063944bee7a4362a5ff11c7cc7824a03dfce24" ], "index": "pypi", "markers": "python_version >= '3.7'", - "version": "==3.3.1" + "version": "==3.5.0" }, "pyyaml": { "hashes": [ @@ -653,6 +662,7 @@ "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4", "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba", "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8", + "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef", "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5", "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd", "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3", @@ -693,36 +703,37 @@ "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001", "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462", "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9", + "sha256:1707814f0d9791df063f8c19bb51b0d1278b8e9a2353abbb676c2f685dee6afe", "sha256:1758ce7d8e1a29d23de54a16ae867abd370f01b5a69e1a3ba75223eaa3ca1a1b", "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b", "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615", + "sha256:1dc67314e7e1086c9fdf2680b7b6c2be1c0d8e3a8279f2e993ca2a7545fecf62", "sha256:25ac8c08322002b06fa1d49d1646181f0b2c72f5cbc15a85e80b4c30a544bb15", "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b", + "sha256:305889baa4043a09e5b76f8e2a51d4ffba44259f6b4c72dec8ca56207d9c6fe1", "sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9", "sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675", - "sha256:3fcc54cb0c8b811ff66082de1680b4b14cf8a81dce0d4fbf665c2265a81e07a1", "sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899", "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7", "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7", "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312", "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa", - "sha256:665f58bfd29b167039f714c6998178d27ccd83984084c286110ef26b230f259f", "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91", - "sha256:7048c338b6c86627afb27faecf418768acb6331fc24cfa56c93e8c9780f815fa", "sha256:75e1ed13e1f9de23c5607fe6bd1aeaae21e523b32d83bb33918245361e9cc51b", + "sha256:77159f5d5b5c14f7c34073862a6b7d34944075d9f93e681638f6d753606c6ce6", 
"sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3", "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334", "sha256:84b554931e932c46f94ab306913ad7e11bba988104c5cff26d90d03f68258cd5", "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3", "sha256:955eae71ac26c1ab35924203fda6220f84dce57d6d7884f189743e2abe3a9fbe", - "sha256:9eb5dee2772b0f704ca2e45b1713e4e5198c18f515b52743576d196348f374d3", + "sha256:a1a45e0bb052edf6a1d3a93baef85319733a888363938e1fc9924cb00c8df24c", "sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed", "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337", "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880", + "sha256:aa2267c6a303eb483de8d02db2871afb5c5fc15618d894300b88958f729ad74f", "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d", "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248", "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d", - "sha256:b5edda50e5e9e15e54a6a8a0070302b00c518a9d32accc2346ad6c984aacd279", "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf", "sha256:beb2e0404003de9a4cab9753a8805a8fe9320ee6673136ed7f04255fe60bb512", "sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069", @@ -731,7 +742,6 @@ "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d", "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31", "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92", - "sha256:d92f81886165cb14d7b067ef37e142256f1c6a90a65cd156b063a43da1708cfd", "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5", "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28", "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d", @@ -760,28 +770,28 @@ }, "tornado": { "hashes": [ - "sha256:1bd19ca6c16882e4d37368e0152f99c099bad93e0950ce55e71daed74045908f", - "sha256:22d3c2fa10b5793da13c807e6fc38ff49a4f6e1e3868b0a6f4164768bb8e20f5", - "sha256:502fba735c84450974fec147340016ad928d29f1e91f49be168c0a4c18181e1d", - "sha256:65ceca9500383fbdf33a98c0087cb975b2ef3bfb874cb35b8de8740cf7f41bd3", - "sha256:71a8db65160a3c55d61839b7302a9a400074c9c753040455494e2af74e2501f2", - "sha256:7ac51f42808cca9b3613f51ffe2a965c8525cb1b00b7b2d56828b8045354f76a", - "sha256:7d01abc57ea0dbb51ddfed477dfe22719d376119844e33c661d873bf9c0e4a16", - "sha256:805d507b1f588320c26f7f097108eb4023bbaa984d63176d1652e184ba24270a", - "sha256:9dc4444c0defcd3929d5c1eb5706cbe1b116e762ff3e0deca8b715d14bf6ec17", - "sha256:ceb917a50cd35882b57600709dd5421a418c29ddc852da8bcdab1f0db33406b0", - "sha256:e7d8db41c0181c80d76c982aacc442c0783a2c54d6400fe028954201a2e032fe" + "sha256:02ccefc7d8211e5a7f9e8bc3f9e5b0ad6262ba2fbb683a6443ecc804e5224ce0", + "sha256:10aeaa8006333433da48dec9fe417877f8bcc21f48dda8d661ae79da357b2a63", + "sha256:27787de946a9cffd63ce5814c33f734c627a87072ec7eed71f7fc4417bb16263", + "sha256:6f8a6c77900f5ae93d8b4ae1196472d0ccc2775cc1dfdc9e7727889145c45052", + "sha256:71ddfc23a0e03ef2df1c1397d859868d158c8276a0603b96cf86892bff58149f", + "sha256:72291fa6e6bc84e626589f1c29d90a5a6d593ef5ae68052ee2ef000dfd273dee", + "sha256:88b84956273fbd73420e6d4b8d5ccbe913c65d31351b4c004ae362eba06e1f78", + "sha256:e43bc2e5370a6a8e413e1e1cd0c91bedc5bd62a74a532371042a18ef19e10579", + "sha256:f0251554cdd50b4b44362f73ad5ba7126fc5b2c2895cc62b14a1c2d7ea32f212", + 
"sha256:f7894c581ecdcf91666a0912f18ce5e757213999e183ebfc2c3fdbf4d5bd764e", + "sha256:fd03192e287fbd0899dd8f81c6fb9cbbc69194d2074b38f384cb6fa72b80e9c2" ], "markers": "python_version >= '3.8'", - "version": "==6.3.3" + "version": "==6.4" }, "typing-extensions": { "hashes": [ - "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0", - "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef" + "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783", + "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd" ], "markers": "python_version >= '3.8'", - "version": "==4.8.0" + "version": "==4.9.0" }, "urwid": { "hashes": [ @@ -791,12 +801,12 @@ }, "werkzeug": { "hashes": [ - "sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc", - "sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10" + "sha256:2b8c0e447b4b9dbcc85dd97b6eeb4dcbaf6c8b6c3be0bd654e25553e0a2157d8", + "sha256:effc12dba7f3bd72e605ce49807bbe692bd729c3bb122a3b91747a6ae77df528" ], "index": "pypi", "markers": "python_version >= '3.8'", - "version": "==3.0.1" + "version": "==2.3.7" }, "wsproto": { "hashes": [ @@ -864,40 +874,40 @@ "develop": { "attrs": { "hashes": [ - "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04", - "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015" + "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30", + "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1" ], "markers": "python_version >= '3.7'", - "version": "==23.1.0" + "version": "==23.2.0" }, "black": { "hashes": [ - "sha256:031e8c69f3d3b09e1aa471a926a1eeb0b9071f80b17689a655f7885ac9325a6f", - "sha256:13a2e4a93bb8ca74a749b6974925c27219bb3df4d42fc45e948a5d9feb5122b7", - "sha256:13ef033794029b85dfea8032c9d3b92b42b526f1ff4bf13b2182ce4e917f5100", - "sha256:14f04c990259576acd093871e7e9b14918eb28f1866f91968ff5524293f9c573", - "sha256:24b6b3ff5c6d9ea08a8888f6977eae858e1f340d7260cf56d70a49823236b62d", - "sha256:403397c033adbc45c2bd41747da1f7fc7eaa44efbee256b53842470d4ac5a70f", - "sha256:50254ebfa56aa46a9fdd5d651f9637485068a1adf42270148cd101cdf56e0ad9", - "sha256:538efb451cd50f43aba394e9ec7ad55a37598faae3348d723b59ea8e91616300", - "sha256:638619a559280de0c2aa4d76f504891c9860bb8fa214267358f0a20f27c12948", - "sha256:6a3b50e4b93f43b34a9d3ef00d9b6728b4a722c997c99ab09102fd5efdb88325", - "sha256:6ccd59584cc834b6d127628713e4b6b968e5f79572da66284532525a042549f9", - "sha256:75a2dc41b183d4872d3a500d2b9c9016e67ed95738a3624f4751a0cb4818fe71", - "sha256:7d30ec46de88091e4316b17ae58bbbfc12b2de05e069030f6b747dfc649ad186", - "sha256:8431445bf62d2a914b541da7ab3e2b4f3bc052d2ccbf157ebad18ea126efb91f", - "sha256:8fc1ddcf83f996247505db6b715294eba56ea9372e107fd54963c7553f2b6dfe", - "sha256:a732b82747235e0542c03bf352c126052c0fbc458d8a239a94701175b17d4855", - "sha256:adc3e4442eef57f99b5590b245a328aad19c99552e0bdc7f0b04db6656debd80", - "sha256:c46767e8df1b7beefb0899c4a95fb43058fa8500b6db144f4ff3ca38eb2f6393", - "sha256:c619f063c2d68f19b2d7270f4cf3192cb81c9ec5bc5ba02df91471d0b88c4c5c", - "sha256:cf3a4d00e4cdb6734b64bf23cd4341421e8953615cba6b3670453737a72ec204", - "sha256:cf99f3de8b3273a8317681d8194ea222f10e0133a24a7548c73ce44ea1679377", - "sha256:d6bc09188020c9ac2555a498949401ab35bb6bf76d4e0f8ee251694664df6301" + "sha256:057c3dc602eaa6fdc451069bd027a1b2635028b575a6c3acfd63193ced20d9c8", + "sha256:08654d0797e65f2423f850fc8e16a0ce50925f9337fb4a4a176a7aa4026e63f8", + 
"sha256:163baf4ef40e6897a2a9b83890e59141cc8c2a98f2dda5080dc15c00ee1e62cd", + "sha256:1e08fb9a15c914b81dd734ddd7fb10513016e5ce7e6704bdd5e1251ceee51ac9", + "sha256:4dd76e9468d5536abd40ffbc7a247f83b2324f0c050556d9c371c2b9a9a95e31", + "sha256:4f9de21bafcba9683853f6c96c2d515e364aee631b178eaa5145fc1c61a3cc92", + "sha256:61a0391772490ddfb8a693c067df1ef5227257e72b0e4108482b8d41b5aee13f", + "sha256:6981eae48b3b33399c8757036c7f5d48a535b962a7c2310d19361edeef64ce29", + "sha256:7e53a8c630f71db01b28cd9602a1ada68c937cbf2c333e6ed041390d6968faf4", + "sha256:810d445ae6069ce64030c78ff6127cd9cd178a9ac3361435708b907d8a04c693", + "sha256:93601c2deb321b4bad8f95df408e3fb3943d85012dddb6121336b8e24a0d1218", + "sha256:992e451b04667116680cb88f63449267c13e1ad134f30087dec8527242e9862a", + "sha256:9db528bccb9e8e20c08e716b3b09c6bdd64da0dd129b11e160bf082d4642ac23", + "sha256:a0057f800de6acc4407fe75bb147b0c2b5cbb7c3ed110d3e5999cd01184d53b0", + "sha256:ba15742a13de85e9b8f3239c8f807723991fbfae24bad92d34a2b12e81904982", + "sha256:bce4f25c27c3435e4dace4815bcb2008b87e167e3bf4ee47ccdc5ce906eb4894", + "sha256:ca610d29415ee1a30a3f30fab7a8f4144e9d34c89a235d81292a1edb2b55f540", + "sha256:d533d5e3259720fdbc1b37444491b024003e012c5173f7d06825a77508085430", + "sha256:d84f29eb3ee44859052073b7636533ec995bd0f64e2fb43aeceefc70090e752b", + "sha256:e37c99f89929af50ffaf912454b3e3b47fd64109659026b678c091a4cd450fb2", + "sha256:e8a6ae970537e67830776488bca52000eaa37fa63b9988e8c487458d9cd5ace6", + "sha256:faf2ee02e6612577ba0181f4347bcbcf591eb122f7841ae5ba233d12c39dcb4d" ], "index": "pypi", "markers": "python_version >= '3.8'", - "version": "==23.9.1" + "version": "==24.2.0" }, "click": { "hashes": [ @@ -909,30 +919,30 @@ }, "flake8": { "hashes": [ - "sha256:d5b3857f07c030bdb5bf41c7f53799571d75c4491748a3adcd47de929e34cd23", - "sha256:ffdfce58ea94c6580c77888a86506937f9a1a227dfcd15f245d694ae20a6b6e5" + "sha256:33f96621059e65eec474169085dc92bf26e7b2d47366b70be2f67ab80dc25132", + "sha256:a6dfbb75e03252917f2473ea9653f7cd799c3064e54d4c8140044c5c065f53c3" ], "index": "pypi", "markers": "python_full_version >= '3.8.1'", - "version": "==6.1.0" + "version": "==7.0.0" }, "flake8-bugbear": { "hashes": [ - "sha256:90cf04b19ca02a682feb5aac67cae8de742af70538590509941ab10ae8351f71", - "sha256:b182cf96ea8f7a8595b2f87321d7d9b28728f4d9c3318012d896543d19742cb5" + "sha256:663ef5de80cd32aacd39d362212983bc4636435a6f83700b4ed35acbd0b7d1b8", + "sha256:f9cb5f2a9e792dd80ff68e89a14c12eed8620af8b41a49d823b7a33064ac9658" ], "index": "pypi", "markers": "python_full_version >= '3.8.1'", - "version": "==23.9.16" + "version": "==24.2.6" }, "isort": { "hashes": [ - "sha256:8bef7dde241278824a6d83f44a544709b065191b95b6e50894bdc722fcba0504", - "sha256:f84c2818376e66cf843d497486ea8fed8700b340f308f076c6fb1229dff318b6" + "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109", + "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6" ], "index": "pypi", "markers": "python_full_version >= '3.8.0'", - "version": "==5.12.0" + "version": "==5.13.2" }, "mccabe": { "hashes": [ @@ -960,19 +970,19 @@ }, "pathspec": { "hashes": [ - "sha256:1d6ed233af05e679efb96b1851550ea95bbb64b7c490b0f5aa52996c11e92a20", - "sha256:e0d8d0ac2f12da61956eb2306b69f9469b42f4deb0f3cb6ed47b9cce9996ced3" + "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", + "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712" ], - "markers": "python_version >= '3.7'", - "version": "==0.11.2" + "markers": "python_version >= '3.8'", + "version": "==0.12.1" }, 
"platformdirs": { "hashes": [ - "sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3", - "sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e" + "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068", + "sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768" ], - "markers": "python_version >= '3.7'", - "version": "==3.11.0" + "markers": "python_version >= '3.8'", + "version": "==4.2.0" }, "pycodestyle": { "hashes": [ @@ -984,11 +994,11 @@ }, "pyflakes": { "hashes": [ - "sha256:4132f6d49cb4dae6819e5379898f2b8cce3c5f23994194c24b77d5da2e36f774", - "sha256:a0aae034c444db0071aa077972ba4768d40c830d9539fd45bf4cd3f8f6992efc" + "sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f", + "sha256:84b5be138a2dfbb40689ca07e2152deb896a65c3a3e24c251c5c62489568074a" ], "markers": "python_version >= '3.8'", - "version": "==3.1.0" + "version": "==3.2.0" }, "tomli": { "hashes": [ @@ -1000,11 +1010,11 @@ }, "typing-extensions": { "hashes": [ - "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0", - "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef" + "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783", + "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd" ], "markers": "python_version >= '3.8'", - "version": "==4.8.0" + "version": "==4.9.0" } } } diff --git a/src/test/regress/citus_tests/run_test.py b/src/test/regress/citus_tests/run_test.py index d844622218f..9a648c0ab8a 100755 --- a/src/test/regress/citus_tests/run_test.py +++ b/src/test/regress/citus_tests/run_test.py @@ -212,6 +212,18 @@ def extra_tests(self): ["columnar_create", "columnar_load"], repeatable=False, ), + "multi_metadata_sync": TestDeps( + None, + [ + "multi_sequence_default", + "alter_database_propagation", + "alter_role_propagation", + "grant_on_schema_propagation", + "multi_test_catalog_views", + "multi_drop_extension", + ], + repeatable=False, + ), } diff --git a/src/test/regress/expected/create_drop_database_propagation.out b/src/test/regress/expected/create_drop_database_propagation.out index b9788e3b64c..da4ec4eb711 100644 --- a/src/test/regress/expected/create_drop_database_propagation.out +++ b/src/test/regress/expected/create_drop_database_propagation.out @@ -3,7 +3,7 @@ -- For versions >= 15, pg15_create_drop_database_propagation.sql is used. -- For versions >= 16, pg16_create_drop_database_propagation.sql is used. -- Test the UDF that we use to issue database command during metadata sync. -SELECT pg_catalog.citus_internal_database_command(null); +SELECT citus_internal.database_command(null); ERROR: This is an internal Citus function can only be used in a distributed transaction CREATE ROLE test_db_commands WITH LOGIN; ALTER SYSTEM SET citus.enable_manual_metadata_changes_for_user TO 'test_db_commands'; @@ -21,22 +21,22 @@ SELECT pg_sleep(0.1); SET ROLE test_db_commands; -- fails on null input -SELECT pg_catalog.citus_internal_database_command(null); +SELECT citus_internal.database_command(null); ERROR: command cannot be NULL -- fails on non create / drop db command -SELECT pg_catalog.citus_internal_database_command('CREATE TABLE foo_bar(a int)'); -ERROR: citus_internal_database_command() can only be used for CREATE DATABASE command by Citus. -SELECT pg_catalog.citus_internal_database_command('SELECT 1'); -ERROR: citus_internal_database_command() can only be used for CREATE DATABASE command by Citus. 
-SELECT pg_catalog.citus_internal_database_command('asfsfdsg');
+SELECT citus_internal.database_command('CREATE TABLE foo_bar(a int)');
+ERROR: citus_internal.database_command() can only be used for CREATE DATABASE command by Citus.
+SELECT citus_internal.database_command('SELECT 1');
+ERROR: citus_internal.database_command() can only be used for CREATE DATABASE command by Citus.
+SELECT citus_internal.database_command('asfsfdsg');
 ERROR: syntax error at or near "asfsfdsg"
-SELECT pg_catalog.citus_internal_database_command('');
+SELECT citus_internal.database_command('');
 ERROR: cannot execute multiple utility events
 RESET ROLE;
 ALTER ROLE test_db_commands nocreatedb;
 SET ROLE test_db_commands;
--- make sure that pg_catalog.citus_internal_database_command doesn't cause privilege escalation
-SELECT pg_catalog.citus_internal_database_command('CREATE DATABASE no_permissions');
+-- make sure that citus_internal.database_command doesn't cause privilege escalation
+SELECT citus_internal.database_command('CREATE DATABASE no_permissions');
 ERROR: permission denied to create database
 RESET ROLE;
 DROP USER test_db_commands;
diff --git a/src/test/regress/expected/create_ref_dist_from_citus_local.out b/src/test/regress/expected/create_ref_dist_from_citus_local.out
index dc67400e0ba..cce7081b079 100644
--- a/src/test/regress/expected/create_ref_dist_from_citus_local.out
+++ b/src/test/regress/expected/create_ref_dist_from_citus_local.out
@@ -369,9 +369,9 @@ ROLLBACK;
 \set VERBOSITY DEFAULT
 -- Test the UDFs that we use to convert Citus local tables to single-shard tables and
 -- reference tables.
-SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', 1, true);
+SELECT citus_internal.update_none_dist_table_metadata(1, 't', 1, true);
 ERROR: This is an internal Citus function can only be used in a distributed transaction
-SELECT pg_catalog.citus_internal_delete_placement_metadata(1);
+SELECT citus_internal.delete_placement_metadata(1);
 ERROR: This is an internal Citus function can only be used in a distributed transaction
 CREATE ROLE test_user_create_ref_dist WITH LOGIN;
 GRANT ALL ON SCHEMA create_ref_dist_from_citus_local TO test_user_create_ref_dist;
@@ -393,15 +393,15 @@ SET citus.next_shard_id TO 1850000;
 SET citus.next_placement_id TO 8510000;
 SET citus.shard_replication_factor TO 1;
 SET search_path TO create_ref_dist_from_citus_local;
-SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(null, 't', 1, true);
+SELECT citus_internal.update_none_dist_table_metadata(null, 't', 1, true);
 ERROR: relation_id cannot be NULL
-SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, null, 1, true);
+SELECT citus_internal.update_none_dist_table_metadata(1, null, 1, true);
 ERROR: replication_model cannot be NULL
-SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', null, true);
+SELECT citus_internal.update_none_dist_table_metadata(1, 't', null, true);
 ERROR: colocation_id cannot be NULL
-SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', 1, null);
+SELECT citus_internal.update_none_dist_table_metadata(1, 't', 1, null);
 ERROR: auto_converted cannot be NULL
-SELECT pg_catalog.citus_internal_delete_placement_metadata(null);
+SELECT citus_internal.delete_placement_metadata(null);
 ERROR: placement_id cannot be NULL
 CREATE TABLE udf_test (col_1 int);
 SELECT citus_add_local_table_to_metadata('udf_test');
@@ -411,8 +411,8 @@ SELECT citus_add_local_table_to_metadata('udf_test');
 (1 row)

 BEGIN;
- SELECT pg_catalog.citus_internal_update_none_dist_table_metadata('create_ref_dist_from_citus_local.udf_test'::regclass, 'k', 99999, true);
- citus_internal_update_none_dist_table_metadata
+ SELECT citus_internal.update_none_dist_table_metadata('create_ref_dist_from_citus_local.udf_test'::regclass, 'k', 99999, true);
+ update_none_dist_table_metadata
 ---------------------------------------------------------------------

 (1 row)

@@ -426,8 +426,8 @@ BEGIN;
 SELECT placementid AS udf_test_placementid FROM pg_dist_shard_placement
 WHERE shardid = get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.udf_test') \gset
- SELECT pg_catalog.citus_internal_delete_placement_metadata(:udf_test_placementid);
- citus_internal_delete_placement_metadata
+ SELECT citus_internal.delete_placement_metadata(:udf_test_placementid);
+ delete_placement_metadata
 ---------------------------------------------------------------------

 (1 row)

diff --git a/src/test/regress/expected/create_role_propagation.out b/src/test/regress/expected/create_role_propagation.out
index 48310bdc338..90f2690ce91 100644
--- a/src/test/regress/expected/create_role_propagation.out
+++ b/src/test/regress/expected/create_role_propagation.out
@@ -196,6 +196,7 @@ SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::t
 (1 row)

 \c - - - :master_port
+create role test_admin_role;
 -- test grants with distributed and non-distributed roles
 SELECT master_remove_node('localhost', :worker_2_port);
 master_remove_node
@@ -221,29 +222,72 @@ CREATE ROLE non_dist_role_4;
 NOTICE: not propagating CREATE ROLE/USER commands to other nodes
 HINT: Connect to other nodes directly to manually create all necessary users and roles.
 SET citus.enable_create_role_propagation TO ON;
+grant dist_role_3,dist_role_1 to test_admin_role with admin option;
 SET ROLE dist_role_1;
 GRANT non_dist_role_1 TO non_dist_role_2;
 SET citus.enable_create_role_propagation TO OFF;
+grant dist_role_1 to non_dist_role_1 with admin option;
 SET ROLE non_dist_role_1;
-GRANT dist_role_1 TO dist_role_2;
+GRANT dist_role_1 TO dist_role_2 granted by non_dist_role_1;
 RESET ROLE;
 SET citus.enable_create_role_propagation TO ON;
-GRANT dist_role_3 TO non_dist_role_3;
+GRANT dist_role_3 TO non_dist_role_3 granted by test_admin_role;
 GRANT non_dist_role_4 TO dist_role_4;
+GRANT dist_role_3 TO dist_role_4 granted by test_admin_role;
 SELECT 1 FROM master_add_node('localhost', :worker_2_port);
 ?column?
---------------------------------------------------------------------
 1
(1 row)

-SELECT roleid::regrole::text AS role, member::regrole::text, (grantor::regrole::text IN ('postgres', 'non_dist_role_1', 'dist_role_1')) AS grantor, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2;
+SELECT result FROM run_command_on_all_nodes(
+    $$
+    SELECT json_agg(q.* ORDER BY member) FROM (
+        SELECT member::regrole::text, roleid::regrole::text AS role, grantor::regrole::text, admin_option
+        FROM pg_auth_members WHERE roleid::regrole::text = 'dist_role_3'
+    ) q;
+    $$
+);
+ result
+---------------------------------------------------------------------
+ [{"member":"dist_role_4","role":"dist_role_3","grantor":"test_admin_role","admin_option":false}, +
+  {"member":"non_dist_role_3","role":"dist_role_3","grantor":"test_admin_role","admin_option":false}, +
+  {"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}]
+ [{"member":"dist_role_4","role":"dist_role_3","grantor":"test_admin_role","admin_option":false}, +
+  {"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}]
+ [{"member":"dist_role_4","role":"dist_role_3","grantor":"test_admin_role","admin_option":false}, +
+  {"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}]
+(3 rows)
+
+REVOKE dist_role_3 from dist_role_4 granted by test_admin_role cascade;
+SELECT result FROM run_command_on_all_nodes(
+    $$
+    SELECT json_agg(q.* ORDER BY member) FROM (
+        SELECT member::regrole::text, roleid::regrole::text AS role, grantor::regrole::text, admin_option
+        FROM pg_auth_members WHERE roleid::regrole::text = 'dist_role_3'
+        order by member::regrole::text
+    ) q;
+    $$
+);
+ result
+---------------------------------------------------------------------
+ [{"member":"non_dist_role_3","role":"dist_role_3","grantor":"test_admin_role","admin_option":false}, +
+  {"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}]
+ [{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}]
+ [{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}]
+(3 rows)
+
+SELECT roleid::regrole::text AS role, member::regrole::text, (grantor::regrole::text IN ('postgres', 'non_dist_role_1', 'dist_role_1','test_admin_role')) AS grantor, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2;
 role | member | grantor | admin_option
---------------------------------------------------------------------
 dist_role_1 | dist_role_2 | t | f
+ dist_role_1 | non_dist_role_1 | t | t
+ dist_role_1 | test_admin_role | t | t
 dist_role_3 | non_dist_role_3 | t | f
+ dist_role_3 | test_admin_role | t | t
 non_dist_role_1 | non_dist_role_2 | t | f
 non_dist_role_4 | dist_role_4 | t | f
-(4 rows)
+(7 rows)

 SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%dist\_%' ORDER BY 1;
 objid
@@ -255,6 +299,25 @@ SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::
 non_dist_role_4
 (5 rows)

+REVOKE dist_role_3 from non_dist_role_3 granted by test_admin_role cascade;
+SELECT result FROM run_command_on_all_nodes(
+    $$
+    SELECT json_agg(q.* ORDER BY member) FROM (
+        SELECT member::regrole::text, roleid::regrole::text AS role, grantor::regrole::text, admin_option
+        FROM pg_auth_members WHERE roleid::regrole::text = 'dist_role_3'
+        order by member::regrole::text
+    ) q;
+    $$
+);
+ result
+---------------------------------------------------------------------
+ [{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}]
+ [{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}]
+ [{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}]
+(3 rows)
+
+revoke dist_role_3,dist_role_1 from test_admin_role cascade;
+drop role test_admin_role;
 \c - - - :worker_1_port
 SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2;
 role | member | grantor | admin_option
@@ -276,9 +339,8 @@ SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist\_%' ORDER BY 1;
 SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2;
 role | member | grantor | admin_option
 ---------------------------------------------------------------------
- dist_role_1 | dist_role_2 | postgres | f
 non_dist_role_4 | dist_role_4 | postgres | f
-(2 rows)
+(1 row)

 SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist\_%' ORDER BY 1;
 rolname
diff --git a/src/test/regress/expected/drop_partitioned_table.out b/src/test/regress/expected/drop_partitioned_table.out
index 660adb89cd8..a92dee71112 100644
--- a/src/test/regress/expected/drop_partitioned_table.out
+++ b/src/test/regress/expected/drop_partitioned_table.out
@@ -354,8 +354,8 @@ NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.pa
 NOTICE: issuing DROP TABLE IF EXISTS drop_partitioned_table.parent_xxxxx CASCADE
 NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1')
 NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1')
-NOTICE: issuing SELECT pg_catalog.citus_internal_delete_colocation_metadata(1344400)
-NOTICE: issuing SELECT pg_catalog.citus_internal_delete_colocation_metadata(1344400)
+NOTICE: issuing SELECT citus_internal.delete_colocation_metadata(1344400)
+NOTICE: issuing SELECT citus_internal.delete_colocation_metadata(1344400)
 ROLLBACK;
 NOTICE: issuing ROLLBACK
 NOTICE: issuing ROLLBACK
@@ -377,8 +377,8 @@ NOTICE: issuing DROP TABLE IF EXISTS drop_partitioned_table.parent_xxxxx CASCAD
 NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1')
 NOTICE: issuing SELECT worker_drop_distributed_table('drop_partitioned_table.child1')
 NOTICE: issuing DROP TABLE IF EXISTS drop_partitioned_table.child1_xxxxx CASCADE
-NOTICE: issuing SELECT pg_catalog.citus_internal_delete_colocation_metadata(1344400)
-NOTICE: issuing SELECT pg_catalog.citus_internal_delete_colocation_metadata(1344400)
+NOTICE: issuing SELECT citus_internal.delete_colocation_metadata(1344400)
+NOTICE: issuing SELECT citus_internal.delete_colocation_metadata(1344400)
 ROLLBACK;
 NOTICE: issuing ROLLBACK
 NOTICE: issuing ROLLBACK
diff --git a/src/test/regress/expected/failure_mx_metadata_sync.out b/src/test/regress/expected/failure_mx_metadata_sync.out
index 7b4c04ff8c7..c2418e9ab22 100644
--- a/src/test/regress/expected/failure_mx_metadata_sync.out
+++ b/src/test/regress/expected/failure_mx_metadata_sync.out
@@ -132,7 +132,7 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port;
 -- Check failures on DDL command propagation
 CREATE TABLE t2 (id int PRIMARY KEY);
-SELECT citus.mitmproxy('conn.onParse(query="citus_internal_add_placement_metadata").kill()');
+SELECT citus.mitmproxy('conn.onParse(query="citus_internal.add_placement_metadata").kill()');
 mitmproxy
---------------------------------------------------------------------

@@ -140,7 +140,7 @@ SELECT citus.mitmproxy('conn.onParse(query="citus_internal_add_placement_metadat
 SELECT create_distributed_table('t2', 'id');
 ERROR: connection not open
-SELECT citus.mitmproxy('conn.onParse(query="citus_internal_add_shard_metadata").cancel(' || :pid || ')');
+SELECT citus.mitmproxy('conn.onParse(query="citus_internal.add_shard_metadata").cancel(' || :pid || ')');
 mitmproxy
---------------------------------------------------------------------

diff --git a/src/test/regress/expected/failure_mx_metadata_sync_multi_trans.out b/src/test/regress/expected/failure_mx_metadata_sync_multi_trans.out
index 6b820cf3398..e71f092c345 100644
--- a/src/test/regress/expected/failure_mx_metadata_sync_multi_trans.out
+++ b/src/test/regress/expected/failure_mx_metadata_sync_multi_trans.out
@@ -650,7 +650,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE mx_metadata_sync_multi_t
 SELECT citus_activate_node('localhost', :worker_2_proxy_port);
 ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open
 -- Failure to add partition metadata
-SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_partition_metadata").cancel(' || :pid || ')');
+SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_partition_metadata").cancel(' || :pid || ')');
 mitmproxy
---------------------------------------------------------------------

@@ -658,7 +658,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_partition_
 SELECT citus_activate_node('localhost', :worker_2_proxy_port);
 ERROR: canceling statement due to user request
-SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_partition_metadata").kill()');
+SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_partition_metadata").kill()');
 mitmproxy
---------------------------------------------------------------------

@@ -667,7 +667,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_partition_
 SELECT citus_activate_node('localhost', :worker_2_proxy_port);
 ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open
 -- Failure to add shard metadata
-SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_shard_metadata").cancel(' || :pid || ')');
+SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_shard_metadata").cancel(' || :pid || ')');
 mitmproxy
---------------------------------------------------------------------

@@ -675,7 +675,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_shard_meta
 SELECT citus_activate_node('localhost', :worker_2_proxy_port);
 ERROR: canceling statement due to user request
-SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_shard_metadata").kill()');
+SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_shard_metadata").kill()');
 mitmproxy
---------------------------------------------------------------------

@@ -684,7 +684,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_shard_meta
 SELECT citus_activate_node('localhost', :worker_2_proxy_port);
 ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open
 -- Failure to add placement metadata
-SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_placement_metadata").cancel(' || :pid || ')');
+SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_placement_metadata").cancel(' || :pid || ')');
 mitmproxy
---------------------------------------------------------------------

@@ -692,7 +692,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_placement_
 SELECT citus_activate_node('localhost', :worker_2_proxy_port);
 ERROR: canceling statement due to user request
-SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_placement_metadata").kill()');
+SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_placement_metadata").kill()');
 mitmproxy
---------------------------------------------------------------------

@@ -718,7 +718,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_colocation
 SELECT citus_activate_node('localhost', :worker_2_proxy_port);
 ERROR: connection to the remote node postgres@localhost:xxxxx failed with the following error: connection not open
 -- Failure to add distributed object metadata
-SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_object_metadata").cancel(' || :pid || ')');
+SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_object_metadata").cancel(' || :pid || ')');
 mitmproxy
---------------------------------------------------------------------

@@ -726,7 +726,7 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_object_met
 SELECT citus_activate_node('localhost', :worker_2_proxy_port);
 ERROR: canceling statement due to user request
-SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_object_metadata").kill()');
+SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_object_metadata").kill()');
 mitmproxy
---------------------------------------------------------------------

diff --git a/src/test/regress/expected/grant_on_database_propagation_from_non_maindb.out b/src/test/regress/expected/grant_on_database_propagation_from_non_maindb.out
new file mode 100644
index 00000000000..594e3b74eb4
--- /dev/null
+++ b/src/test/regress/expected/grant_on_database_propagation_from_non_maindb.out
@@ -0,0 +1,471 @@
+-- Public role has connect,temp,temporary privileges on database
+-- To test these scenarios, we need to revoke these privileges from public role
+-- since public role privileges are inherited by new roles/users
+set citus.enable_create_database_propagation to on;
+create database test_2pc_db;
+show citus.main_db;
+ citus.main_db
+---------------------------------------------------------------------
+ regression
+(1 row)
+
+revoke connect,temp,temporary on database test_2pc_db from public;
+CREATE SCHEMA grant_on_database_propagation_non_maindb;
+SET search_path TO grant_on_database_propagation_non_maindb;
+-- test grant/revoke CREATE privilege propagation on database
+create user "myuser'_test";
+\c test_2pc_db - - :master_port
+grant create on database test_2pc_db to "myuser'_test";
+\c regression - - :master_port;
+select check_database_privileges('myuser''_test','test_2pc_db',ARRAY['CREATE']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,t)
+ (CREATE,t)
+ (CREATE,t)
+(3 rows)
+
+\c test_2pc_db - - :master_port
+revoke create on database test_2pc_db from "myuser'_test";
+\c regression - - :master_port;
+select check_database_privileges('myuser''_test','test_2pc_db',ARRAY['CREATE']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,f)
+ (CREATE,f)
+ (CREATE,f)
+(3 rows)
+
+drop user "myuser'_test";
+---------------------------------------------------------------------
+-- test grant/revoke CONNECT privilege propagation on database
+\c regression - - :master_port
+create user myuser2;
+\c test_2pc_db - - :master_port
+grant CONNECT on database test_2pc_db to myuser2;
+\c regression - - :master_port;
+select check_database_privileges('myuser2','test_2pc_db',ARRAY['CONNECT']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CONNECT,t)
+ (CONNECT,t)
+ (CONNECT,t)
+(3 rows)
+
+\c test_2pc_db - - :master_port
+revoke connect on database test_2pc_db from myuser2;
+\c regression - - :master_port
+select check_database_privileges('myuser2','test_2pc_db',ARRAY['CONNECT']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CONNECT,f)
+ (CONNECT,f)
+ (CONNECT,f)
+(3 rows)
+
+drop user myuser2;
+---------------------------------------------------------------------
+-- test grant/revoke TEMP privilege propagation on database
+\c regression - - :master_port
+create user myuser3;
+-- test grant/revoke temp on database
+\c test_2pc_db - - :master_port
+grant TEMP on database test_2pc_db to myuser3;
+\c regression - - :master_port;
+select check_database_privileges('myuser3','test_2pc_db',ARRAY['TEMP']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (TEMP,t)
+ (TEMP,t)
+ (TEMP,t)
+(3 rows)
+
+\c test_2pc_db - - :worker_1_port
+revoke TEMP on database test_2pc_db from myuser3;
+\c regression - - :master_port;
+select check_database_privileges('myuser3','test_2pc_db',ARRAY['TEMP']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (TEMP,f)
+ (TEMP,f)
+ (TEMP,f)
+(3 rows)
+
+drop user myuser3;
+---------------------------------------------------------------------
+\c regression - - :master_port
+-- test temporary privilege on database
+create user myuser4;
+-- test grant/revoke temporary on database
+\c test_2pc_db - - :worker_1_port
+grant TEMPORARY on database test_2pc_db to myuser4;
+\c regression - - :master_port
+select check_database_privileges('myuser4','test_2pc_db',ARRAY['TEMPORARY']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+(3 rows)
+
+\c test_2pc_db - - :master_port
+revoke TEMPORARY on database test_2pc_db from myuser4;
+\c regression - - :master_port;
+select check_database_privileges('myuser4','test_2pc_db',ARRAY['TEMPORARY']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (TEMPORARY,f)
+ (TEMPORARY,f)
+ (TEMPORARY,f)
+(3 rows)
+
+drop user myuser4;
+---------------------------------------------------------------------
+-- test ALL privileges with ALL statement on database
+create user myuser5;
+grant ALL on database test_2pc_db to myuser5;
+\c regression - - :master_port
+select check_database_privileges('myuser5','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,t)
+ (CREATE,t)
+ (CREATE,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+(12 rows)
+
+\c test_2pc_db - - :master_port
+revoke ALL on database test_2pc_db from myuser5;
+\c regression - - :master_port
+select check_database_privileges('myuser5','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,f)
+ (CREATE,f)
+ (CREATE,f)
+ (CONNECT,f)
+ (CONNECT,f)
+ (CONNECT,f)
+ (TEMP,f)
+ (TEMP,f)
+ (TEMP,f)
+ (TEMPORARY,f)
+ (TEMPORARY,f)
+ (TEMPORARY,f)
+(12 rows)
+
+drop user myuser5;
+---------------------------------------------------------------------
+-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database
+create user myuser6;
+\c test_2pc_db - - :master_port
+grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser6;
+\c regression - - :master_port
+select check_database_privileges('myuser6','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,t)
+ (CREATE,t)
+ (CREATE,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+(12 rows)
+
+\c test_2pc_db - - :master_port
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser6;
+\c regression - - :master_port
+select check_database_privileges('myuser6','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,f)
+ (CREATE,f)
+ (CREATE,f)
+ (CONNECT,f)
+ (CONNECT,f)
+ (CONNECT,f)
+ (TEMP,f)
+ (TEMP,f)
+ (TEMP,f)
+ (TEMPORARY,f)
+ (TEMPORARY,f)
+ (TEMPORARY,f)
+(12 rows)
+
+drop user myuser6;
+---------------------------------------------------------------------
+-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database with grant option
+create user myuser7;
+create user myuser_1;
+\c test_2pc_db - - :master_port
+grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser7;
+set role myuser7;
+--here since myuser7 does not have the grant option, it should fail
+grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser_1;
+WARNING: no privileges were granted for "test_2pc_db"
+\c regression - - :master_port
+select check_database_privileges('myuser_1','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,f)
+ (CREATE,f)
+ (CREATE,f)
+ (CONNECT,f)
+ (CONNECT,f)
+ (CONNECT,f)
+ (TEMP,f)
+ (TEMP,f)
+ (TEMP,f)
+ (TEMPORARY,f)
+ (TEMPORARY,f)
+ (TEMPORARY,f)
+(12 rows)
+
+\c test_2pc_db - - :master_port
+RESET ROLE;
+grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser7 with grant option;
+set role myuser7;
+--here since myuser7 has the grant option, it should succeed
+grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser_1 granted by myuser7;
+\c regression - - :master_port
+select check_database_privileges('myuser_1','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,t)
+ (CREATE,t)
+ (CREATE,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+(12 rows)
+
+\c test_2pc_db - - :master_port
+RESET ROLE;
+--below test should fail and should throw an error since myuser_1 still has the dependent privileges
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 restrict;
+ERROR: dependent privileges exist
+HINT: Use CASCADE to revoke them too.
+--below test should fail and should throw an error since myuser_1 still has the dependent privileges
+revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 restrict ;
+ERROR: dependent privileges exist
+HINT: Use CASCADE to revoke them too.
+--below test should succeed and should not throw any error since myuser_1 privileges are revoked with cascade
+revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 cascade ;
+--here we test if myuser7 still has the privileges after revoke grant option for
+\c regression - - :master_port
+select check_database_privileges('myuser7','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,t)
+ (CREATE,t)
+ (CREATE,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+(12 rows)
+
+\c test_2pc_db - - :master_port
+reset role;
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7;
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser_1;
+\c regression - - :master_port
+drop user myuser_1;
+drop user myuser7;
+---------------------------------------------------------------------
+-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on multiple databases
+-- and multiple users
+\c regression - - :master_port
+create user myuser8;
+create user myuser_2;
+set citus.enable_create_database_propagation to on;
+create database test_db;
+revoke connect,temp,temporary on database test_db from public;
+\c test_2pc_db - - :master_port
+grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db to myuser8,myuser_2;
+\c regression - - :master_port
+select check_database_privileges('myuser8','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,t)
+ (CREATE,t)
+ (CREATE,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+(12 rows)
+
+select check_database_privileges('myuser8','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,t)
+ (CREATE,t)
+ (CREATE,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+(12 rows)
+
+select check_database_privileges('myuser_2','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,t)
+ (CREATE,t)
+ (CREATE,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+(12 rows)
+
+select check_database_privileges('myuser_2','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,t)
+ (CREATE,t)
+ (CREATE,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+(12 rows)
+
+\c test_2pc_db - - :master_port
+RESET ROLE;
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser8 ;
+--below test should succeed and should not throw any error
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser_2;
+--below test should succeed and should not throw any error
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser8 cascade;
+\c regression - - :master_port
+select check_database_privileges('myuser8','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,f)
+ (CREATE,f)
+ (CREATE,f)
+ (CONNECT,f)
+ (CONNECT,f)
+ (CONNECT,f)
+ (TEMP,f)
+ (TEMP,f)
+ (TEMP,f)
+ (TEMPORARY,f)
+ (TEMPORARY,f)
+ (TEMPORARY,f)
+(12 rows)
+
+select check_database_privileges('myuser8','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,f)
+ (CREATE,f)
+ (CREATE,f)
+ (CONNECT,f)
+ (CONNECT,f)
+ (CONNECT,f)
+ (TEMP,f)
+ (TEMP,f)
+ (TEMP,f)
+ (TEMPORARY,f)
+ (TEMPORARY,f)
+ (TEMPORARY,f)
+(12 rows)
+
+select check_database_privileges('myuser_2','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,f)
+ (CREATE,f)
+ (CREATE,f)
+ (CONNECT,f)
+ (CONNECT,f)
+ (CONNECT,f)
+ (TEMP,f)
+ (TEMP,f)
+ (TEMP,f)
+ (TEMPORARY,f)
+ (TEMPORARY,f)
+ (TEMPORARY,f)
+(12 rows)
+
+select check_database_privileges('myuser_2','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,f)
+ (CREATE,f)
+ (CREATE,f)
+ (CONNECT,f)
+ (CONNECT,f)
+ (CONNECT,f)
+ (TEMP,f)
+ (TEMP,f)
+ (TEMP,f)
+ (TEMPORARY,f)
+ (TEMPORARY,f)
+ (TEMPORARY,f)
+(12 rows)
+
+\c test_2pc_db - - :master_port
+reset role;
+\c regression - - :master_port
+drop user myuser_2;
+drop user myuser8;
+set citus.enable_create_database_propagation to on;
+drop database test_db;
+---------------------------------------------------------------------
+-- rolls back public role database privileges to original state
+grant connect,temp,temporary on database test_2pc_db to public;
+drop database test_2pc_db;
+set citus.enable_create_database_propagation to off;
+DROP SCHEMA grant_on_database_propagation_non_maindb CASCADE;
+reset citus.enable_create_database_propagation;
+reset search_path;
+---------------------------------------------------------------------
diff --git a/src/test/regress/expected/grant_role_from_non_maindb.out b/src/test/regress/expected/grant_role_from_non_maindb.out
new file mode 100644
index 00000000000..6dc0b6c60ff
--- /dev/null
+++ b/src/test/regress/expected/grant_role_from_non_maindb.out
@@ -0,0 +1,160 @@
+CREATE SCHEMA grant_role2pc;
+SET search_path TO grant_role2pc;
+set citus.enable_create_database_propagation to on;
+CREATE DATABASE grant_role2pc_db;
+\c grant_role2pc_db
+SHOW citus.main_db;
+ citus.main_db
+---------------------------------------------------------------------
+ regression
+(1 row)
+
+SET citus.superuser TO 'postgres';
+CREATE USER grant_role2pc_user1;
+CREATE USER grant_role2pc_user2;
+CREATE USER grant_role2pc_user3;
+CREATE USER grant_role2pc_user4;
+CREATE USER grant_role2pc_user5;
+CREATE USER grant_role2pc_user6;
+CREATE USER grant_role2pc_user7;
+\c grant_role2pc_db
+--test with empty superuser
+SET citus.superuser TO '';
+grant grant_role2pc_user1 to grant_role2pc_user2;
+ERROR: No superuser role is given for Citus main database connection
+HINT: Set citus.superuser to a superuser role name
+SET citus.superuser TO 'postgres';
+grant grant_role2pc_user1 to grant_role2pc_user2 with admin option granted by CURRENT_USER;
+\c regression
+select result FROM run_command_on_all_nodes(
+    $$
+    SELECT array_to_json(array_agg(row_to_json(t)))
+    FROM (
+        SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option
+        FROM pg_auth_members
+        WHERE member::regrole::text = 'grant_role2pc_user2'
+        order by member::regrole::text, roleid::regrole::text
+    ) t
+    $$
+);
+ result
+---------------------------------------------------------------------
+ [{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true}]
+ [{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true}]
+ [{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true}]
+(3 rows)
+
+\c grant_role2pc_db
+--test grant in a transactional context with multiple operations
+BEGIN;
+grant grant_role2pc_user1,grant_role2pc_user2 to grant_role2pc_user3 WITH ADMIN OPTION;
+grant grant_role2pc_user1 to grant_role2pc_user4 granted by grant_role2pc_user3 ;
+COMMIT;
+BEGIN;
+grant grant_role2pc_user1 to grant_role2pc_user5 WITH ADMIN OPTION granted by grant_role2pc_user3;
+grant grant_role2pc_user1 to grant_role2pc_user6;
+ROLLBACK;
+BEGIN;
+grant grant_role2pc_user1 to grant_role2pc_user7;
+SELECT 1/0;
+ERROR: division by zero
+commit;
+\c regression
+select result FROM run_command_on_all_nodes($$
+SELECT array_to_json(array_agg(row_to_json(t)))
+FROM (
+    SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option
+    FROM pg_auth_members
+    WHERE member::regrole::text in
+    ('grant_role2pc_user3','grant_role2pc_user4','grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7')
+    order by member::regrole::text, roleid::regrole::text
+) t
+$$);
+ result
+---------------------------------------------------------------------
+ [{"member":"grant_role2pc_user3","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false}]
+ [{"member":"grant_role2pc_user3","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false}]
+ [{"member":"grant_role2pc_user3","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false}]
+(3 rows)
+
+\c grant_role2pc_db
+grant grant_role2pc_user1,grant_role2pc_user2 to grant_role2pc_user5,grant_role2pc_user6,grant_role2pc_user7 granted by grant_role2pc_user3;
+\c regression
+select result FROM run_command_on_all_nodes($$
+SELECT array_to_json(array_agg(row_to_json(t)))
+FROM (
+    SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option
+    FROM pg_auth_members
+    WHERE member::regrole::text in
+    ('grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7')
+    order by member::regrole::text, roleid::regrole::text
+) t
+$$);
+ result
+---------------------------------------------------------------------
+ [{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}]
+ [{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}]
+ [{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}]
+(3 rows)
+
+\c grant_role2pc_db
+revoke admin option for grant_role2pc_user1 from grant_role2pc_user5 granted by grant_role2pc_user3;
+--test revoke in a transactional context with multiple operations
+BEGIN;
+revoke grant_role2pc_user1 from grant_role2pc_user5 granted by grant_role2pc_user3 ;
+revoke grant_role2pc_user1 from grant_role2pc_user4 granted by grant_role2pc_user3;
+COMMIT;
+\c grant_role2pc_db - - :worker_1_port
+BEGIN;
+revoke grant_role2pc_user1 from grant_role2pc_user6,grant_role2pc_user7 granted by grant_role2pc_user3;
+revoke grant_role2pc_user1 from grant_role2pc_user3 cascade;
+COMMIT;
+\c regression
+select result FROM run_command_on_all_nodes($$
+SELECT array_to_json(array_agg(row_to_json(t)))
+FROM (
+    SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option
+    FROM pg_auth_members
+    WHERE member::regrole::text in
+    ('grant_role2pc_user2','grant_role2pc_user3','grant_role2pc_user4','grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7')
+    order by member::regrole::text, roleid::regrole::text
+) t
+$$);
+ result
+---------------------------------------------------------------------
+ [{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}]
+ [{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}]
+ [{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}]
+(3 rows)
+
+\c grant_role2pc_db - - :worker_1_port
+BEGIN;
+grant grant_role2pc_user1 to grant_role2pc_user5 WITH ADMIN OPTION;
+grant grant_role2pc_user1 to grant_role2pc_user6;
+COMMIT;
+\c regression - - :master_port
+select result FROM run_command_on_all_nodes($$
+SELECT array_to_json(array_agg(row_to_json(t)))
+FROM (
+    SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option
+    FROM pg_auth_members
+    WHERE member::regrole::text in
+    ('grant_role2pc_user5','grant_role2pc_user6')
+    order by member::regrole::text, roleid::regrole::text
+) t
+$$);
+ result
+---------------------------------------------------------------------
+ [{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}]
+ [{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}]
+ [{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}]
+(3 rows)
+
+revoke grant_role2pc_user1 from grant_role2pc_user5,grant_role2pc_user6;
+--clean up resources
+DROP SCHEMA grant_role2pc;
+set citus.enable_create_database_propagation to on;
+DROP DATABASE grant_role2pc_db;
+drop user grant_role2pc_user2,grant_role2pc_user3,grant_role2pc_user4,grant_role2pc_user5,grant_role2pc_user6,grant_role2pc_user7;
+drop user grant_role2pc_user1;
+reset citus.enable_create_database_propagation;
diff --git a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out
index e37724e4b8b..1aa7cbcc1e1 100644
--- a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out
+++ b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out
@@ -138,7 +138,7 @@ step s2-view-worker:
 ('%pg_prepared_xacts%'),
 ('%COMMIT%'),
 ('%dump_local_%'),
-    ('%citus_internal_local_blocked_processes%'),
+    ('%citus_internal.local_blocked_processes%'),
 ('%add_node%'),
 ('%csa_from_one_node%'),
 ('%pg_locks%'))
diff --git a/src/test/regress/expected/metadata_sync_from_non_maindb.out b/src/test/regress/expected/metadata_sync_from_non_maindb.out
new file mode 100644
index 00000000000..f1fdcd93d77
--- /dev/null
+++ b/src/test/regress/expected/metadata_sync_from_non_maindb.out
@@ -0,0 +1,132 @@
+CREATE SCHEMA metadata_sync_2pc_schema;
+SET search_path TO metadata_sync_2pc_schema;
+set citus.enable_create_database_propagation to on;
+CREATE DATABASE metadata_sync_2pc_db;
+revoke connect,temp,temporary on database metadata_sync_2pc_db from public;
+\c metadata_sync_2pc_db
+SHOW citus.main_db;
+ citus.main_db
+---------------------------------------------------------------------
+ regression
+(1 row)
+
+CREATE USER "grant_role2pc'_user1";
+CREATE USER "grant_role2pc'_user2";
+CREATE USER "grant_role2pc'_user3";
+CREATE USER grant_role2pc_user4;
+CREATE USER grant_role2pc_user5;
+\c regression
+select 1 from citus_remove_node('localhost', :worker_2_port);
+ ?column?
+--------------------------------------------------------------------- + 1 +(1 row) + +\c metadata_sync_2pc_db +grant "grant_role2pc'_user1","grant_role2pc'_user2" to "grant_role2pc'_user3" WITH ADMIN OPTION; +grant "grant_role2pc'_user1","grant_role2pc'_user2" to grant_role2pc_user4,grant_role2pc_user5 granted by "grant_role2pc'_user3"; +--test for grant on database +\c metadata_sync_2pc_db - - :master_port +grant create on database metadata_sync_2pc_db to "grant_role2pc'_user1"; +grant connect on database metadata_sync_2pc_db to "grant_role2pc'_user2"; +grant ALL on database metadata_sync_2pc_db to "grant_role2pc'_user3"; +\c regression +select check_database_privileges('grant_role2pc''_user1','metadata_sync_2pc_db',ARRAY['CREATE']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) +(2 rows) + +select check_database_privileges('grant_role2pc''_user2','metadata_sync_2pc_db',ARRAY['CONNECT']); + check_database_privileges +--------------------------------------------------------------------- + (CONNECT,t) + (CONNECT,t) +(2 rows) + +select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db',ARRAY['CREATE','CONNECT','TEMP','TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) +(8 rows) + +\c regression +set citus.enable_create_database_propagation to on; +select 1 from citus_add_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('"grant_role2pc''_user2"','"grant_role2pc''_user3"','grant_role2pc_user4','grant_role2pc_user5') + order by member::regrole::text +) t +$$); + result +--------------------------------------------------------------------- + [{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":true},{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user1\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user2\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user1\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user2\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false}] + 
[{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":true},{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user1\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user2\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user1\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user2\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false}] + [{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":true},{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user1\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user2\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user1\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user2\"","grantor":"\"grant_role2pc'_user3\"","admin_option":false}] +(3 rows) + +select check_database_privileges('grant_role2pc''_user1','metadata_sync_2pc_db',ARRAY['CREATE']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) +(3 rows) + +select check_database_privileges('grant_role2pc''_user2','metadata_sync_2pc_db',ARRAY['CONNECT']); + check_database_privileges +--------------------------------------------------------------------- + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) +(3 rows) + +select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db',ARRAY['CREATE','CONNECT','TEMP','TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +\c metadata_sync_2pc_db +revoke "grant_role2pc'_user1","grant_role2pc'_user2" from grant_role2pc_user4,grant_role2pc_user5 granted by "grant_role2pc'_user3"; +revoke admin option for "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3"; +revoke "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3"; +revoke ALL on database metadata_sync_2pc_db from "grant_role2pc'_user3"; +revoke CONNECT on database metadata_sync_2pc_db from "grant_role2pc'_user2"; +revoke CREATE on database metadata_sync_2pc_db from "grant_role2pc'_user1"; +\c regression +drop user "grant_role2pc'_user1","grant_role2pc'_user2","grant_role2pc'_user3",grant_role2pc_user4,grant_role2pc_user5; +set citus.enable_create_database_propagation to on; +drop database metadata_sync_2pc_db; +drop schema metadata_sync_2pc_schema; +reset citus.enable_create_database_propagation; +reset search_path; diff --git a/src/test/regress/expected/metadata_sync_helpers.out b/src/test/regress/expected/metadata_sync_helpers.out index a41ac9d5f4e..9db68eaf569 100644 --- a/src/test/regress/expected/metadata_sync_helpers.out +++ b/src/test/regress/expected/metadata_sync_helpers.out @@ 
-12,9 +12,9 @@ RESET client_min_messages; SET search_path TO metadata_sync_helpers; CREATE TABLE test(col_1 int); -- not in a distributed transaction -SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); +SELECT citus_internal.add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ERROR: This is an internal Citus function can only be used in a distributed transaction -SELECT citus_internal_update_relation_colocation ('test'::regclass, 1); +SELECT citus_internal.update_relation_colocation ('test'::regclass, 1); ERROR: This is an internal Citus function can only be used in a distributed transaction -- in a distributed transaction, but the application name is not Citus BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -24,7 +24,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) - SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ERROR: This is an internal Citus function can only be used in a distributed transaction ROLLBACK; -- in a distributed transaction and the application name is Citus, allowed. @@ -36,8 +36,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); - citus_internal_add_partition_metadata + SELECT citus_internal.add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); + add_partition_metadata --------------------------------------------------------------------- (1 row) @@ -61,7 +61,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ERROR: must be owner of table test ROLLBACK; -- we do not own the relation @@ -73,7 +73,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation ('test'::regclass, 10); + SELECT citus_internal.update_relation_colocation ('test'::regclass, 10); ERROR: must be owner of table test ROLLBACK; -- finally, a user can only add its own tables to the metadata @@ -87,8 +87,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); - citus_internal_add_partition_metadata + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + add_partition_metadata --------------------------------------------------------------------- (1 row) @@ -109,8 +109,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_rebalancer gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); - citus_internal_add_partition_metadata + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + add_partition_metadata --------------------------------------------------------------------- (1 row) @@ -125,7 +125,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=not a correct gpid'; - SELECT citus_internal_add_partition_metadata 
('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); ERROR: This is an internal Citus function can only be used in a distributed transaction ROLLBACK; -- also fails if done by the rebalancer @@ -137,7 +137,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_rebalancer gpid=not a correct gpid'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); ERROR: This is an internal Citus function can only be used in a distributed transaction ROLLBACK; -- application_name with suffix is ok (e.g. pgbouncer might add this) @@ -149,8 +149,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001 - from 10.12.14.16:10370'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); - citus_internal_add_partition_metadata + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + add_partition_metadata --------------------------------------------------------------------- (1 row) @@ -165,7 +165,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid='; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); ERROR: This is an internal Citus function can only be used in a distributed transaction ROLLBACK; -- empty application_name @@ -177,7 +177,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to ''; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); ERROR: This is an internal Citus function can only be used in a distributed transaction ROLLBACK; -- application_name with incorrect prefix @@ -189,7 +189,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); ERROR: This is an internal Citus function can only be used in a distributed transaction ROLLBACK; -- fails because there is no X distribution method @@ -201,7 +201,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); ERROR: Metadata syncing is only allowed for hash, reference and local tables:X ROLLBACK; -- fails because the column does not exist @@ -213,7 +213,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'non_existing_col', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'non_existing_col', 0, 's'); ERROR: column "non_existing_col" of relation "test_2" does not exist ROLLBACK; --- fails because we do not allow NULL parameters @@ -225,7 +225,7 @@ BEGIN TRANSACTION ISOLATION
LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata (NULL, 'h', 'non_existing_col', 0, 's'); + SELECT citus_internal.add_partition_metadata (NULL, 'h', 'non_existing_col', 0, 's'); ERROR: relation cannot be NULL ROLLBACK; -- fails because colocationId cannot be negative @@ -237,7 +237,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', -1, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', -1, 's'); ERROR: Metadata syncing is only allowed for valid colocation id values. ROLLBACK; -- fails because there is no X replication model @@ -249,7 +249,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 'X'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 'X'); ERROR: Metadata syncing is only allowed for hash, reference and local tables:X ROLLBACK; -- the same table cannot be added twice, that is enforced by a primary key @@ -262,13 +262,13 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); - citus_internal_add_partition_metadata + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + add_partition_metadata --------------------------------------------------------------------- (1 row) - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); ERROR: duplicate key value violates unique constraint "pg_dist_partition_logical_relid_index" ROLLBACK; -- the same table cannot be added twice, that is enforced by a primary key even if distribution key changes @@ -281,13 +281,13 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); - citus_internal_add_partition_metadata + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + add_partition_metadata --------------------------------------------------------------------- (1 row) - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_2', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_2', 0, 's'); ERROR: duplicate key value violates unique constraint "pg_dist_partition_logical_relid_index" ROLLBACK; -- hash distributed table cannot have NULL distribution key @@ -300,7 +300,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', NULL, 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', NULL, 0, 's'); ERROR: Distribution column cannot be NULL for relation "test_2" ROLLBACK; -- even if metadata_sync_helper_role is not owner of the table test @@ -332,8 +332,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 
'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); - citus_internal_add_partition_metadata + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); + add_partition_metadata --------------------------------------------------------------------- (1 row) @@ -349,7 +349,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420007, 10000, 11111); + SELECT citus_internal.update_placement_metadata(1420007, 10000, 11111); ERROR: could not find valid entry for shard xxxxx ROLLBACK; -- non-existing users should fail to pass the checks @@ -378,7 +378,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); ERROR: role "non_existing_user" does not exist ROLLBACK; \c - postgres - :worker_1_port @@ -409,7 +409,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_ref'::regclass, 'n', 'col_1', 0, 's'); ERROR: Reference or local tables cannot have distribution columns ROLLBACK; -- non-valid replication model @@ -421,7 +421,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'A'); + SELECT citus_internal.add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'A'); ERROR: Metadata syncing is only allowed for known replication models. ROLLBACK; -- not-matching replication model for reference table @@ -433,7 +433,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'c'); + SELECT citus_internal.add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'c'); ERROR: Local or references tables can only have 's' or 't' as the replication model. 
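-- The checks above all gate on the same two session properties: the backend
-- must carry a distributed transaction id, and application_name must carry a
-- parseable gpid with a Citus-internal prefix. A minimal sketch of the happy
-- path for the renamed citus_internal.add_partition_metadata (the table name
-- t1 below is illustrative, not taken from this diff):
BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
SET application_name to 'citus_internal gpid=10000000001';
SELECT citus_internal.add_partition_metadata('t1'::regclass, 'h', 'col_1', 0, 's');
ROLLBACK;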
ROLLBACK; -- add entry for super user table @@ -448,8 +448,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('super_user_table'::regclass, 'h', 'col_1', 0, 's'); - citus_internal_add_partition_metadata + SELECT citus_internal.add_partition_metadata ('super_user_table'::regclass, 'h', 'col_1', 0, 's'); + add_partition_metadata --------------------------------------------------------------------- (1 row) @@ -470,7 +470,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('super_user_table'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: must be owner of table super_user_table ROLLBACK; -- the user is only allowed to add a shard for a table which is in pg_dist_partition @@ -485,7 +485,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: The relation "test_2" does not have a valid entry in pg_dist_partition.
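-- Call order matters here: citus_internal.add_shard_metadata rejects any
-- relation that has no pg_dist_partition row yet, so a metadata sync issues
-- the partition call before the shard call. A sketch using the same values
-- as the surrounding tests (colocation id 250 and shard id 1420000 mirror
-- the later hunks in this file):
SELECT citus_internal.add_partition_metadata('test_2'::regclass, 'h', 'col_1', 250, 's');
WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
  AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text))
SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;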
ROLLBACK; -- ok, now add the table to the pg_dist_partition @@ -497,20 +497,20 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 250, 's'); - citus_internal_add_partition_metadata + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 250, 's'); + add_partition_metadata --------------------------------------------------------------------- (1 row) - SELECT citus_internal_add_partition_metadata ('test_3'::regclass, 'h', 'col_1', 251, 's'); - citus_internal_add_partition_metadata + SELECT citus_internal.add_partition_metadata ('test_3'::regclass, 'h', 'col_1', 251, 's'); + add_partition_metadata --------------------------------------------------------------------- (1 row) - SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 't'); - citus_internal_add_partition_metadata + SELECT citus_internal.add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 't'); + add_partition_metadata --------------------------------------------------------------------- (1 row) @@ -525,8 +525,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation ('test_2'::regclass, 1231231232); - citus_internal_update_relation_colocation + SELECT citus_internal.update_relation_colocation ('test_2'::regclass, 1231231232); + update_relation_colocation --------------------------------------------------------------------- (1 row) @@ -544,7 +544,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, -1, 't'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: Invalid shard id: -1 ROLLBACK; -- invalid storage types are not allowed @@ -559,7 +559,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000, 'X'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: Invalid shard storage type: X ROLLBACK; -- NULL shard ranges are not allowed for hash distributed tables @@ -574,7 +574,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000, 't'::"char", NULL, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: Shards of has distributed table "test_2" cannot have NULL shard ranges ROLLBACK; -- non-integer shard ranges are not allowed @@ -589,7 +589,7 @@ BEGIN TRANSACTION ISOLATION 
LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", 'non-int'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: invalid input syntax for type integer: "non-int" ROLLBACK; -- shardMinValue should be smaller than shardMaxValue @@ -604,7 +604,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-1610612737'::text, '-2147483648'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: shardMinValue=-1610612737 is greater than shardMaxValue=-2147483648 for table "test_2", which is not allowed ROLLBACK; -- we do not allow overlapping shards for the same table @@ -621,7 +621,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text), ('test_2'::regclass, 1420001::bigint, 't'::"char", '20'::text, '30'::text), ('test_2'::regclass, 1420002::bigint, 't'::"char", '10'::text, '50'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: Shard intervals overlap for table "test_2": 1420001 and 1420000 ROLLBACK; -- Now let's check valid pg_dist_object updates @@ -637,7 +637,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('non_existing_type', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false)) - SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; + SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; ERROR: unrecognized object type "non_existing_type" ROLLBACK; -- check the sanity of distributionArgumentIndex and colocationId @@ -652,7 +652,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -100, 0, false)) - SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; + SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; ERROR: distribution_argument_index must be between 0 and 100 ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -666,7 +666,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY 
terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -1, -1, false)) - SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; + SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; ERROR: colocationId must be a positive number ROLLBACK; -- check with non-existing object @@ -681,10 +681,10 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false)) - SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; + SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; ERROR: role "non_existing_user" does not exist ROLLBACK; --- since citus_internal_add_object_metadata is a strict function, it returns NULL +-- since citus_internal.add_object_metadata is a strict function, it returns NULL -- if any parameter is NULL BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); @@ -697,15 +697,15 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], 0, NULL::int, false)) - SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; - citus_internal_add_object_metadata + SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; + add_object_metadata --------------------------------------------------------------------- (1 row) ROLLBACK; \c - postgres - :worker_1_port --- Show that citus_internal_add_object_metadata only works for object types +-- Show that citus_internal.add_object_metadata only works for object types -- which Citus knows how to distribute BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); @@ -724,10 +724,10 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET ROLE metadata_sync_helper_role; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('operator', ARRAY['===']::text[], ARRAY['int','int']::text[], -1, 0, false)) - SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; + SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; ERROR: operator object can not be distributed by Citus ROLLBACK; --- Show that citus_internal_add_object_metadata checks the privileges +-- Show that citus_internal.add_object_metadata checks the privileges BEGIN TRANSACTION ISOLATION LEVEL READ
COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); assign_distributed_transaction_id @@ -744,7 +744,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET ROLE metadata_sync_helper_role; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('function', ARRAY['distribution_test_function']::text[], ARRAY['integer']::text[], -1, 0, false)) - SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; + SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; ERROR: must be owner of function distribution_test_function ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -761,7 +761,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET ROLE metadata_sync_helper_role; WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('type', ARRAY['distributed_test_type']::text[], ARRAY[]::text[], -1, 0, false)) - SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; + SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data; ERROR: must be owner of type distributed_test_type ROLLBACK; -- we do not allow wrong partmethod @@ -780,7 +780,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text), ('test_2'::regclass, 1420001::bigint, 't'::"char", '20'::text, '30'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: Metadata syncing is only allowed for hash, reference and local tables: X ROLLBACK; -- we do not allow NULL shardMinMax values @@ -797,8 +797,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - citus_internal_add_shard_metadata + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + add_shard_metadata --------------------------------------------------------------------- (1 row) @@ -807,7 +807,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 1420000; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420001::bigint, 't'::"char", '20'::text, '30'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: Shards of has distributed table "test_2" cannot 
have NULL shard ranges ROLLBACK; \c - metadata_sync_helper_role - :worker_1_port @@ -830,8 +830,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; ('test_2'::regclass, 1420004::bigint, 't'::"char", '51'::text, '60'::text), ('test_2'::regclass, 1420005::bigint, 't'::"char", '61'::text, '70'::text), ('test_3'::regclass, 1420008::bigint, 't'::"char", '11'::text, '20'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - citus_internal_add_shard_metadata + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + add_shard_metadata --------------------------------------------------------------------- @@ -852,7 +852,7 @@ BEGIN; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ERROR: cannot colocate tables test_2 and test_3 ROLLBACK; -- now, add a few more shards for test_3 to make it colocated with test_2 @@ -871,8 +871,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; ('test_3'::regclass, 1420011::bigint, 't'::"char", '41'::text, '50'::text), ('test_3'::regclass, 1420012::bigint, 't'::"char", '51'::text, '60'::text), ('test_3'::regclass, 1420013::bigint, 't'::"char", '61'::text, '70'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - citus_internal_add_shard_metadata + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + add_shard_metadata --------------------------------------------------------------------- @@ -894,7 +894,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420003::bigint, 't'::"char", '-1610612737'::text, NULL)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: Shards of reference or local table "test_ref" should have NULL shard ranges ROLLBACK; -- reference tables cannot have multiple shards @@ -910,7 +910,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL), ('test_ref'::regclass, 1420007::bigint, 't'::"char", NULL, NULL)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ERROR: relation "test_ref" has already at least one shard, adding more is not allowed ROLLBACK; -- finally, add a shard for reference tables @@ -925,8 +925,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; -
citus_internal_add_shard_metadata + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + add_shard_metadata --------------------------------------------------------------------- (1 row) @@ -946,8 +946,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('super_user_table'::regclass, 1420007::bigint, 't'::"char", '11'::text, '20'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - citus_internal_add_shard_metadata + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + add_shard_metadata --------------------------------------------------------------------- (1 row) @@ -966,9 +966,9 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS - (VALUES (-10, 1, 0::bigint, 1::int, 1500000::bigint)) - SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS + (VALUES (-10, 0::bigint, 1::int, 1500000::bigint)) + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ERROR: could not find valid entry for shard xxxxx ROLLBACK; -- invalid placementid @@ -983,7 +983,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1420000, 0::bigint, 1::int, -10)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ERROR: Shard placement has invalid placement id (-10) for shard(1420000) ROLLBACK; -- non-existing shard @@ -998,7 +998,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1430100, 0::bigint, 1::int, 10)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ERROR: could not find valid entry for shard xxxxx ROLLBACK; -- non-existing node with non-existing node-id 123123123 @@ -1013,7 +1013,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES ( 1420000, 0::bigint, 123123123::int, 1500000)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ERROR: Node with group id 123123123 for shard placement xxxxx does not exist ROLLBACK; -- create a volatile function that returns the local node id @@ -1044,7 +1044,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1420000, 0::bigint, get_node_id(), 1500000), (1420000, 0::bigint, get_node_id(), 1500001)) - SELECT 
citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ERROR: duplicate key value violates unique constraint "placement_shardid_groupid_unique_index" ROLLBACK; -- shard is not owned by us @@ -1059,7 +1059,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1420007, 0::bigint, get_node_id(), 1500000)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; ERROR: must be owner of table super_user_table ROLLBACK; -- successfully add placements @@ -1085,8 +1085,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1420011, 0::bigint, get_node_id(), 1500009), (1420012, 0::bigint, get_node_id(), 1500010), (1420013, 0::bigint, get_node_id(), 1500011)) - SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - citus_internal_add_placement_metadata + SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + add_placement_metadata --------------------------------------------------------------------- @@ -1112,8 +1112,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); - citus_internal_update_relation_colocation + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); + update_relation_colocation --------------------------------------------------------------------- (1 row) @@ -1130,7 +1130,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420000, get_node_id(), get_node_id()+1000); + SELECT citus_internal.update_placement_metadata(1420000, get_node_id(), get_node_id()+1000); ERROR: Node with group id 1014 for shard placement xxxxx does not exist COMMIT; -- fails because the source node doesn't contain the shard @@ -1143,7 +1143,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420000, get_node_id()+10000, get_node_id()); + SELECT citus_internal.update_placement_metadata(1420000, get_node_id()+10000, get_node_id()); ERROR: Active placement for shard xxxxx is not found on group:14 COMMIT; -- fails because shard does not exist @@ -1156,7 +1156,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(0, get_node_id(), get_node_id()+1); + SELECT citus_internal.update_placement_metadata(0, get_node_id(), get_node_id()+1); ERROR: Shard id does not exists: 0 COMMIT; -- fails because non-existing shard @@ -1169,7 +1169,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(213123123123, get_node_id(), get_node_id()+1); + SELECT citus_internal.update_placement_metadata(213123123123, get_node_id(),
get_node_id()+1); ERROR: Shard id does not exists: 213123123123 COMMIT; -- fails because we do not own the shard @@ -1182,7 +1182,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420007, get_node_id(), get_node_id()+1); + SELECT citus_internal.update_placement_metadata(1420007, get_node_id(), get_node_id()+1); ERROR: must be owner of table super_user_table COMMIT; -- the user is only allowed to delete their own shards @@ -1197,7 +1197,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420007)) - SELECT citus_internal_delete_shard_metadata(shardid) FROM shard_data; + SELECT citus_internal.delete_shard_metadata(shardid) FROM shard_data; ERROR: must be owner of table super_user_table ROLLBACK; -- the user cannot delete non-existing shards @@ -1212,7 +1212,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420100)) - SELECT citus_internal_delete_shard_metadata(shardid) FROM shard_data; + SELECT citus_internal.delete_shard_metadata(shardid) FROM shard_data; ERROR: Shard id does not exists: 1420100 ROLLBACK; -- successfully delete shards @@ -1239,8 +1239,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(shardid) AS (VALUES (1420000)) - SELECT citus_internal_delete_shard_metadata(shardid) FROM shard_data; - citus_internal_delete_shard_metadata + SELECT citus_internal.delete_shard_metadata(shardid) FROM shard_data; + delete_shard_metadata --------------------------------------------------------------------- (1 row) @@ -1274,7 +1274,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; -- so that making two tables colocated fails UPDATE pg_dist_partition SET repmodel = 't' WHERE logicalrelid = 'test_2'::regclass; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ERROR: cannot colocate tables test_2 and test_3 ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -1298,7 +1298,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 1 :varnoold 1 :varoattno 1 :location -1}' WHERE logicalrelid = 'test_2'::regclass; \endif - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ERROR: cannot colocate tables test_2 and test_3 ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -1313,7 +1313,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; -- so that making two tables colocated fails UPDATE pg_dist_partition SET partmethod = '' WHERE logicalrelid = 'test_2'::regclass; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ERROR: The relation "test_2" does not have a valid entry in pg_dist_partition.
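-- The corruption tests around this hunk (repmodel 't', a mangled partkey,
-- and an emptied partmethod) all trip the same validation inside
-- citus_internal.update_relation_colocation: colocation updates are only
-- accepted for relations whose pg_dist_partition row is intact and hash
-- ('h') or single shard distributed. The accepted form, as exercised
-- earlier in this file:
SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251);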
ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -1328,7 +1328,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; -- so that making two tables colocated fails UPDATE pg_dist_partition SET partmethod = 'a' WHERE logicalrelid = 'test_2'::regclass; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ERROR: Updating colocation ids are only allowed for hash and single shard distributed tables: a ROLLBACK; -- colocated hash distributed table should have the same dist key columns @@ -1343,13 +1343,13 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_add_partition_metadata ('test_5'::regclass, 'h', 'int_col', 500, 's'); - citus_internal_add_partition_metadata + SELECT citus_internal.add_partition_metadata ('test_5'::regclass, 'h', 'int_col', 500, 's'); + add_partition_metadata --------------------------------------------------------------------- (1 row) - SELECT citus_internal_add_partition_metadata ('test_6'::regclass, 'h', 'text_col', 500, 's'); + SELECT citus_internal.add_partition_metadata ('test_6'::regclass, 'h', 'text_col', 500, 's'); ERROR: cannot colocate tables test_6 and test_5 ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -1367,13 +1367,13 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_add_partition_metadata ('test_7'::regclass, 'h', 'text_col', 500, 's'); - citus_internal_add_partition_metadata + SELECT citus_internal.add_partition_metadata ('test_7'::regclass, 'h', 'text_col', 500, 's'); + add_partition_metadata --------------------------------------------------------------------- (1 row) - SELECT citus_internal_add_partition_metadata ('test_8'::regclass, 'h', 'text_col', 500, 's'); + SELECT citus_internal.add_partition_metadata ('test_8'::regclass, 'h', 'text_col', 500, 's'); ERROR: cannot colocate tables test_8 and test_7 ROLLBACK; -- we don't need the table/schema anymore diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index 35fd9fc7476..0aecd652fbc 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -1420,17 +1420,37 @@ SELECT * FROM multi_extension.print_extension_changes(); -- Snapshot of state at 12.2-1 ALTER EXTENSION citus UPDATE TO '12.2-1'; SELECT * FROM multi_extension.print_extension_changes(); - previous_object | current_object ---------------------------------------------------------------------- - | function citus_internal.acquire_citus_advisory_object_class_lock(integer,cstring) void - | function citus_internal.add_colocation_metadata(integer,integer,integer,regtype,oid) void - | function citus_internal.commit_management_command_2pc() void - | function citus_internal.execute_command_on_remote_nodes_as_user(text,text) void - | function citus_internal.mark_object_distributed(oid,text,oid,text) void - | function citus_internal.start_management_transaction(xid8) void - | function citus_internal_database_command(text) void - | function citus_unmark_object_distributed(oid,oid,integer,boolean) void -(8 rows) + previous_object | current_object +--------------------------------------------------------------------- + | function citus_internal.acquire_citus_advisory_object_class_lock(integer,cstring) void + | 
function citus_internal.add_colocation_metadata(integer,integer,integer,regtype,oid) void + | function citus_internal.add_object_metadata(text,text[],text[],integer,integer,boolean) void + | function citus_internal.add_partition_metadata(regclass,"char",text,integer,"char") void + | function citus_internal.add_placement_metadata(bigint,bigint,integer,bigint) void + | function citus_internal.add_shard_metadata(regclass,bigint,"char",text,text) void + | function citus_internal.add_tenant_schema(oid,integer) void + | function citus_internal.adjust_local_clock_to_remote(cluster_clock) void + | function citus_internal.commit_management_command_2pc() void + | function citus_internal.database_command(text) void + | function citus_internal.delete_colocation_metadata(integer) void + | function citus_internal.delete_partition_metadata(regclass) void + | function citus_internal.delete_placement_metadata(bigint) void + | function citus_internal.delete_shard_metadata(bigint) void + | function citus_internal.delete_tenant_schema(oid) void + | function citus_internal.execute_command_on_remote_nodes_as_user(text,text) void + | function citus_internal.global_blocked_processes() SETOF record + | function citus_internal.is_replication_origin_tracking_active() boolean + | function citus_internal.local_blocked_processes() SETOF record + | function citus_internal.mark_node_not_synced(integer,integer) void + | function citus_internal.mark_object_distributed(oid,text,oid,text) void + | function citus_internal.start_management_transaction(xid8) void + | function citus_internal.start_replication_origin_tracking() void + | function citus_internal.stop_replication_origin_tracking() void + | function citus_internal.unregister_tenant_schema_globally(oid,text) void + | function citus_internal.update_none_dist_table_metadata(oid,"char",bigint,boolean) void + | function citus_internal.update_placement_metadata(bigint,integer,integer) void + | function citus_internal.update_relation_colocation(oid,integer) void +(28 rows) DROP TABLE multi_extension.prev_objects, multi_extension.extension_diff; -- show running version diff --git a/src/test/regress/expected/multi_fix_partition_shard_index_names.out b/src/test/regress/expected/multi_fix_partition_shard_index_names.out index e6795317c85..975a4935149 100644 --- a/src/test/regress/expected/multi_fix_partition_shard_index_names.out +++ b/src/test/regress/expected/multi_fix_partition_shard_index_names.out @@ -658,9 +658,9 @@ NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;SELECT assign_distributed_transaction_id(xx, xx, 'xxxxxxx'); DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('extension', ARRAY['citus_columnar']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; +NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('extension', ARRAY['citus_columnar']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) 
FROM distributed_object_data; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('extension', ARRAY['citus_columnar']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; +NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('extension', ARRAY['citus_columnar']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SELECT worker_apply_shard_ddl_command (915002, 'fix_idx_names', 'CREATE TABLE fix_idx_names.p2 (dist_col integer NOT NULL, another_col integer, partition_col timestamp without time zone NOT NULL, name text) USING columnar') DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx @@ -692,25 +692,25 @@ NOTICE: issuing SET citus.enable_ddl_propagation TO 'off' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SET citus.enable_ddl_propagation TO 'off' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing SELECT citus_internal_add_partition_metadata ('fix_idx_names.p2'::regclass, 'h', 'dist_col', 1370001, 's') +NOTICE: issuing SELECT citus_internal.add_partition_metadata ('fix_idx_names.p2'::regclass, 'h', 'dist_col', 1370001, 's') DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing SELECT citus_internal_add_partition_metadata ('fix_idx_names.p2'::regclass, 'h', 'dist_col', 1370001, 's') +NOTICE: issuing SELECT citus_internal.add_partition_metadata ('fix_idx_names.p2'::regclass, 'h', 'dist_col', 1370001, 's') DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('fix_idx_names.p2'::regclass, 915002, 't'::"char", '-2147483648', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; +NOTICE: issuing WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('fix_idx_names.p2'::regclass, 915002, 't'::"char", '-2147483648', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('fix_idx_names.p2'::regclass, 915002, 't'::"char", '-2147483648', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; +NOTICE: issuing WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('fix_idx_names.p2'::regclass, 915002, 't'::"char", '-2147483648', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -NOTICE: issuing WITH 
placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (xxxxxx, xxxxxx, xxxxxx, xxxxxx)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
+NOTICE: issuing WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (xxxxxx, xxxxxx, xxxxxx, xxxxxx)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (xxxxxx, xxxxxx, xxxxxx, xxxxxx)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
+NOTICE: issuing WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (xxxxxx, xxxxxx, xxxxxx, xxxxxx)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['fix_idx_names', 'p2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['fix_idx_names', 'p2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
-NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['fix_idx_names', 'p2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+NOTICE: issuing WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['fix_idx_names', 'p2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
 NOTICE: issuing SET citus.enable_ddl_propagation TO 'off'
 DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx
diff --git a/src/test/regress/expected/multi_metadata_sync.out b/src/test/regress/expected/multi_metadata_sync.out
index 4432432bbd3..d15e7516c21 100644
--- a/src/test/regress/expected/multi_metadata_sync.out
+++ b/src/test/regress/expected/multi_metadata_sync.out
@@ -101,9 +101,9 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2
 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
 (33 rows)
 -- Create a test table with constraints and SERIAL and default from user defined sequence
@@ -162,8 +162,8 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 RESET ROLE
 RESET ROLE
 SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
- SELECT citus_internal_add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
- SELECT citus_internal_add_partition_metadata ('public.single_shard_tbl'::regclass, 'n', NULL, 3, 's')
+ SELECT citus_internal.add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
+ SELECT citus_internal.add_partition_metadata ('public.single_shard_tbl'::regclass, 'n', NULL, 3, 's')
 SELECT pg_catalog.worker_drop_sequence_dependency('public.mx_test_table');
 SELECT pg_catalog.worker_drop_sequence_dependency('public.single_shard_tbl');
 SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
@@ -185,17 +185,17 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
 WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
 WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (3, 1, 1, 0, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'single_shard_tbl']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310008, 0, 2, 100008)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.single_shard_tbl'::regclass, 1310008, 't'::"char", NULL, NULL)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'single_shard_tbl']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
+ WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310008, 0, 2, 100008)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
+ WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
+ WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.single_shard_tbl'::regclass, 1310008, 't'::"char", NULL, NULL)) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
 (61 rows)
 -- Drop single shard table
@@ -230,7 +230,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 RESET ROLE
 RESET ROLE
 SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
- SELECT citus_internal_add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
+ SELECT citus_internal.add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
 SELECT pg_catalog.worker_drop_sequence_dependency('public.mx_test_table');
 SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
 SELECT pg_catalog.worker_record_sequence_dependency('public.mx_test_table_col_3_seq'::regclass,'public.mx_test_table'::regclass,'col_3')
@@ -249,14 +249,14 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
 WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
+ WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
 (52 rows)
 -- Show that schema changes are included in the activate node snapshot
@@ -291,7 +291,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 RESET ROLE
 RESET ROLE
 SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
- SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
+ SELECT citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
 SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table');
 SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
 SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3')
@@ -310,15 +310,15 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
 WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
+ WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
 (54 rows)
 -- Show that append distributed tables are not included in the activate node snapshot
@@ -359,7 +359,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 RESET ROLE
 RESET ROLE
 SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
- SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
+ SELECT citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
 SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table');
 SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
 SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3')
@@ -378,15 +378,15 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
 WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
+ WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
 (54 rows)
 -- Show that range distributed tables are not included in the activate node snapshot
@@ -420,7 +420,7 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 RESET ROLE
 RESET ROLE
 SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
- SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
+ SELECT citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
 SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table');
 SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition
 SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3')
@@ -439,15 +439,15 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2
 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
 WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
+ WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
 (54 rows)
 -- Test start_metadata_sync_to_node and citus_activate_node UDFs
@@ -1996,12 +1996,12 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 RESET ROLE
 RESET ROLE
 SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''')
- SELECT citus_internal_add_partition_metadata ('mx_test_schema_1.mx_table_1'::regclass, 'h', 'col1', 7, 's')
- SELECT citus_internal_add_partition_metadata ('mx_test_schema_2.mx_table_2'::regclass, 'h', 'col1', 7, 's')
- SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
- SELECT citus_internal_add_partition_metadata ('public.dist_table_1'::regclass, 'h', 'a', 10010, 's')
- SELECT citus_internal_add_partition_metadata ('public.mx_ref'::regclass, 'n', NULL, 10009, 't')
- SELECT citus_internal_add_partition_metadata ('public.test_table'::regclass, 'h', 'id', 10010, 's')
+ SELECT citus_internal.add_partition_metadata ('mx_test_schema_1.mx_table_1'::regclass, 'h', 'col1', 7, 's')
+ SELECT citus_internal.add_partition_metadata ('mx_test_schema_2.mx_table_2'::regclass, 'h', 'col1', 7, 's')
+ SELECT citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's')
+ SELECT citus_internal.add_partition_metadata ('public.dist_table_1'::regclass, 'h', 'a', 10010, 's')
+ SELECT citus_internal.add_partition_metadata ('public.mx_ref'::regclass, 'n', NULL, 10009, 't')
+ SELECT citus_internal.add_partition_metadata ('public.test_table'::regclass, 'h', 'id', 10010, 's')
 SELECT pg_catalog.worker_drop_sequence_dependency('mx_test_schema_1.mx_table_1');
 SELECT pg_catalog.worker_drop_sequence_dependency('mx_test_schema_2.mx_table_2');
 SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table');
@@ -2033,35 +2033,35 @@ SELECT unnest(activate_node_snapshot()) order by 1;
 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2
 WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10009, 1, -1, 0, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
 WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10010, 4, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace)
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_sequence_0']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_sequence_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_test_schema_1', 'mx_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_test_schema_2', 'mx_table_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'dist_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_ref']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 5, 100001), (1310002, 0, 1, 100002), (1310003, 0, 5, 100003), (1310004, 0, 1, 100004), (1310005, 0, 5, 100005), (1310006, 0, 1, 100006), (1310007, 0, 5, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310022, 0, 1, 100022), (1310023, 0, 5, 100023), (1310024, 0, 1, 100024), (1310025, 0, 5, 100025), (1310026, 0, 1, 100026)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310027, 0, 1, 100027), (1310028, 0, 5, 100028), (1310029, 0, 1, 100029), (1310030, 0, 5, 100030), (1310031, 0, 1, 100031)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310075, 0, 0, 100077), (1310075, 0, 1, 100078), (1310075, 0, 5, 100079)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310076, 0, 1, 100080), (1310077, 0, 5, 100081), (1310078, 0, 1, 100082), (1310079, 0, 5, 100083)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310085, 0, 1, 100091), (1310086, 0, 5, 100092), (1310087, 0, 1, 100093), (1310088, 0, 5, 100094)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_1.mx_table_1'::regclass, 1310022, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_1.mx_table_1'::regclass, 1310023, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_1.mx_table_1'::regclass, 1310024, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_1.mx_table_1'::regclass, 1310025, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_1.mx_table_1'::regclass, 1310026, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_2.mx_table_2'::regclass, 1310027, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_2.mx_table_2'::regclass, 1310028, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_2.mx_table_2'::regclass, 1310029, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_2.mx_table_2'::regclass, 1310030, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_2.mx_table_2'::regclass, 1310031, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.dist_table_1'::regclass, 1310076, 't'::"char", '-2147483648', '-1073741825'), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '-1073741824', '-1'), ('public.dist_table_1'::regclass, 1310078, 't'::"char", '0', '1073741823'), ('public.dist_table_1'::regclass, 1310079, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_ref'::regclass, 1310075, 't'::"char", NULL, NULL)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
- WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.test_table'::regclass, 1310085, 't'::"char", '-2147483648', '-1073741825'), ('public.test_table'::regclass, 1310086, 't'::"char", '-1073741824', '-1'), ('public.test_table'::regclass, 1310087, 't'::"char", '0', '1073741823'), ('public.test_table'::regclass, 1310088, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data;
+ WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int,
force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_sequence_0']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_sequence_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_test_schema_1', 'mx_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_test_schema_2', 'mx_table_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'dist_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, 
colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_ref']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 5, 100001), (1310002, 0, 1, 100002), (1310003, 0, 5, 100003), (1310004, 0, 1, 100004), (1310005, 0, 5, 100005), (1310006, 0, 1, 100006), (1310007, 0, 5, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310022, 0, 1, 100022), (1310023, 0, 5, 100023), (1310024, 0, 1, 100024), (1310025, 0, 5, 100025), (1310026, 0, 1, 100026)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310027, 0, 1, 100027), (1310028, 0, 5, 100028), (1310029, 0, 1, 100029), (1310030, 0, 5, 100030), (1310031, 0, 1, 100031)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310075, 0, 0, 100077), (1310075, 0, 1, 100078), (1310075, 0, 5, 100079)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310076, 0, 1, 100080), (1310077, 0, 5, 100081), (1310078, 0, 1, 100082), (1310079, 0, 5, 100083)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310085, 0, 1, 100091), (1310086, 0, 5, 100092), (1310087, 0, 1, 100093), (1310088, 0, 5, 100094)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_1.mx_table_1'::regclass, 1310022, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_1.mx_table_1'::regclass, 1310023, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_1.mx_table_1'::regclass, 1310024, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_1.mx_table_1'::regclass, 1310025, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_1.mx_table_1'::regclass, 1310026, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_2.mx_table_2'::regclass, 1310027, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_2.mx_table_2'::regclass, 1310028, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_2.mx_table_2'::regclass, 1310029, 't'::"char", '-429496730', 
'429496728'), ('mx_test_schema_2.mx_table_2'::regclass, 1310030, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_2.mx_table_2'::regclass, 1310031, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.dist_table_1'::regclass, 1310076, 't'::"char", '-2147483648', '-1073741825'), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '-1073741824', '-1'), ('public.dist_table_1'::regclass, 1310078, 't'::"char", '0', '1073741823'), ('public.dist_table_1'::regclass, 1310079, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_ref'::regclass, 1310075, 't'::"char", NULL, NULL)) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.test_table'::regclass, 1310085, 't'::"char", '-2147483648', '-1073741825'), ('public.test_table'::regclass, 1310086, 't'::"char", '-1073741824', '-1'), ('public.test_table'::regclass, 1310087, 't'::"char", '0', '1073741823'), ('public.test_table'::regclass, 1310088, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (118 rows) -- shouldn't work since test_table is MX diff --git a/src/test/regress/expected/multi_metadata_sync_0.out b/src/test/regress/expected/multi_metadata_sync_0.out index af69bde12a3..bc1775ada37 100644 --- a/src/test/regress/expected/multi_metadata_sync_0.out +++ b/src/test/regress/expected/multi_metadata_sync_0.out @@ -101,9 +101,9 @@ SELECT unnest(activate_node_snapshot()) order by 1; UPDATE pg_dist_node SET hasmetadata = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, 
force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; (33 rows) -- Create a test table with constraints and SERIAL and default from user defined sequence @@ -162,8 +162,8 @@ SELECT unnest(activate_node_snapshot()) order by 1; RESET ROLE RESET ROLE SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT citus_internal_add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's') - SELECT citus_internal_add_partition_metadata ('public.single_shard_tbl'::regclass, 'n', NULL, 3, 's') + SELECT citus_internal.add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's') + SELECT citus_internal.add_partition_metadata ('public.single_shard_tbl'::regclass, 'n', NULL, 3, 's') SELECT pg_catalog.worker_drop_sequence_dependency('public.mx_test_table'); SELECT pg_catalog.worker_drop_sequence_dependency('public.single_shard_tbl'); SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition @@ -185,17 +185,17 @@ SELECT unnest(activate_node_snapshot()) order by 1; UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) WITH colocation_group_data (colocationid, shardcount, replicationfactor, 
distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (3, 1, 1, 0, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'single_shard_tbl']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310008, 0, 2, 100008)) SELECT 
citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.single_shard_tbl'::regclass, 1310008, 't'::"char", NULL, NULL)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, 
objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'single_shard_tbl']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310008, 0, 2, 100008)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.single_shard_tbl'::regclass, 1310008, 't'::"char", NULL, NULL)) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (61 rows) -- Drop single shard table @@ -230,7 +230,7 @@ SELECT unnest(activate_node_snapshot()) order by 1; RESET ROLE RESET ROLE SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT citus_internal_add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's') + SELECT citus_internal.add_partition_metadata ('public.mx_test_table'::regclass, 'h', 'col_1', 2, 's') SELECT pg_catalog.worker_drop_sequence_dependency('public.mx_test_table'); SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition SELECT pg_catalog.worker_record_sequence_dependency('public.mx_test_table_col_3_seq'::regclass,'public.mx_test_table'::regclass,'col_3') @@ -249,14 +249,14 @@ SELECT unnest(activate_node_snapshot()) order by 1; UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND 
d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) 
FROM shard_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('public.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('public.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('public.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('public.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('public.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('public.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('public.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (52 rows) -- Show that schema 
changes are included in the activate node snapshot @@ -291,7 +291,7 @@ SELECT unnest(activate_node_snapshot()) order by 1; RESET ROLE RESET ROLE SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') + SELECT citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table'); SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3') @@ -310,15 +310,15 @@ SELECT unnest(activate_node_snapshot()) order by 1; UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 
'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES 
('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (54 rows) -- Show that append distributed tables are not included in the activate node snapshot @@ -359,7 +359,7 @@ SELECT unnest(activate_node_snapshot()) order by 1; RESET ROLE RESET ROLE SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') + SELECT citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table'); SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3') @@ -378,15 +378,15 @@ SELECT unnest(activate_node_snapshot()) order by 1; UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, 
distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES 
('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH placement_data(shardid, shardlength, groupid, 
placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (54 rows) -- Show that range distributed tables are not included in the activate node snapshot @@ -420,7 +420,7 @@ SELECT unnest(activate_node_snapshot()) order by 1; RESET ROLE RESET ROLE SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') + SELECT citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table'); SELECT pg_catalog.worker_drop_sequence_dependency(logicalrelid::regclass::text) FROM pg_dist_partition SELECT pg_catalog.worker_record_sequence_dependency('mx_testing_schema.mx_test_table_col_3_seq'::regclass,'mx_testing_schema.mx_test_table'::regclass,'col_3') @@ -439,15 +439,15 @@ SELECT unnest(activate_node_snapshot()) order by 1; UPDATE pg_dist_node SET isactive = TRUE WHERE nodeid = 2 UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (2, 8, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, 
colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT 
citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 2, 100001), (1310002, 0, 1, 100002), (1310003, 0, 2, 100003), (1310004, 0, 1, 100004), (1310005, 0, 2, 100005), (1310006, 0, 1, 100006), (1310007, 0, 2, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', 
'1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (54 rows) -- Test start_metadata_sync_to_node and citus_activate_node UDFs @@ -1996,12 +1996,12 @@ SELECT unnest(activate_node_snapshot()) order by 1; RESET ROLE RESET ROLE SELECT alter_role_if_exists('postgres', 'ALTER ROLE postgres SET lc_messages = ''C''') - SELECT citus_internal_add_partition_metadata ('mx_test_schema_1.mx_table_1'::regclass, 'h', 'col1', 7, 's') - SELECT citus_internal_add_partition_metadata ('mx_test_schema_2.mx_table_2'::regclass, 'h', 'col1', 7, 's') - SELECT citus_internal_add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') - SELECT citus_internal_add_partition_metadata ('public.dist_table_1'::regclass, 'h', 'a', 10010, 's') - SELECT citus_internal_add_partition_metadata ('public.mx_ref'::regclass, 'n', NULL, 10009, 't') - SELECT citus_internal_add_partition_metadata ('public.test_table'::regclass, 'h', 'id', 10010, 's') + SELECT citus_internal.add_partition_metadata ('mx_test_schema_1.mx_table_1'::regclass, 'h', 'col1', 7, 's') + SELECT citus_internal.add_partition_metadata ('mx_test_schema_2.mx_table_2'::regclass, 'h', 'col1', 7, 's') + SELECT citus_internal.add_partition_metadata ('mx_testing_schema.mx_test_table'::regclass, 'h', 'col_1', 2, 's') + SELECT citus_internal.add_partition_metadata ('public.dist_table_1'::regclass, 'h', 'a', 10010, 's') + SELECT citus_internal.add_partition_metadata ('public.mx_ref'::regclass, 'n', NULL, 10009, 't') + SELECT citus_internal.add_partition_metadata ('public.test_table'::regclass, 'h', 'id', 10010, 's') SELECT pg_catalog.worker_drop_sequence_dependency('mx_test_schema_1.mx_table_1'); SELECT pg_catalog.worker_drop_sequence_dependency('mx_test_schema_2.mx_table_2'); SELECT pg_catalog.worker_drop_sequence_dependency('mx_testing_schema.mx_test_table'); @@ -2033,35 +2033,35 @@ SELECT unnest(activate_node_snapshot()) order by 1; UPDATE pg_dist_node SET metadatasynced = TRUE WHERE nodeid = 2 WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10009, 1, -1, 0, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) WITH colocation_group_data (colocationid, shardcount, replicationfactor, distributioncolumntype, distributioncolumncollationname, distributioncolumncollationschema) AS (VALUES (10010, 4, 1, 'integer'::regtype, NULL, NULL)) SELECT citus_internal.add_colocation_metadata(colocationid, shardcount, replicationfactor, distributioncolumntype, coalesce(c.oid, 0)) FROM colocation_group_data d LEFT JOIN pg_collation c ON (d.distributioncolumncollationname = c.collname AND d.distributioncolumncollationschema::regnamespace = c.collnamespace) - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) 
FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_sequence_0']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_sequence_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 
'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_test_schema_1', 'mx_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_test_schema_2', 'mx_table_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'dist_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_ref']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 5, 100001), (1310002, 0, 1, 100002), (1310003, 0, 5, 100003), (1310004, 0, 1, 100004), (1310005, 0, 5, 100005), (1310006, 0, 1, 100006), (1310007, 0, 5, 100007)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310022, 0, 1, 100022), (1310023, 0, 5, 100023), (1310024, 0, 1, 100024), (1310025, 0, 5, 100025), (1310026, 0, 1, 100026)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310027, 0, 1, 100027), (1310028, 0, 5, 100028), (1310029, 0, 1, 100029), (1310030, 0, 5, 100030), (1310031, 0, 1, 100031)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, 
placementid) AS (VALUES (1310075, 0, 0, 100077), (1310075, 0, 1, 100078), (1310075, 0, 5, 100079)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310076, 0, 1, 100080), (1310077, 0, 5, 100081), (1310078, 0, 1, 100082), (1310079, 0, 5, 100083)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310085, 0, 1, 100091), (1310086, 0, 5, 100092), (1310087, 0, 1, 100093), (1310088, 0, 5, 100094)) SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_1.mx_table_1'::regclass, 1310022, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_1.mx_table_1'::regclass, 1310023, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_1.mx_table_1'::regclass, 1310024, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_1.mx_table_1'::regclass, 1310025, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_1.mx_table_1'::regclass, 1310026, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_2.mx_table_2'::regclass, 1310027, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_2.mx_table_2'::regclass, 1310028, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_2.mx_table_2'::regclass, 1310029, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_2.mx_table_2'::regclass, 1310030, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_2.mx_table_2'::regclass, 1310031, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.dist_table_1'::regclass, 1310076, 't'::"char", '-2147483648', '-1073741825'), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '-1073741824', '-1'), ('public.dist_table_1'::regclass, 1310078, 't'::"char", '0', '1073741823'), ('public.dist_table_1'::regclass, 1310079, 't'::"char", '1073741824', '2147483647')) 
SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_ref'::regclass, 1310075, 't'::"char", NULL, NULL)) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; - WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.test_table'::regclass, 1310085, 't'::"char", '-2147483648', '-1073741825'), ('public.test_table'::regclass, 1310086, 't'::"char", '-1073741824', '-1'), ('public.test_table'::regclass, 1310087, 't'::"char", '0', '1073741823'), ('public.test_table'::regclass, 1310088, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('database', ARRAY['regression']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('role', ARRAY['postgres']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_test_schema_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_test_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['mx_testing_schema_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('schema', ARRAY['public']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, 
objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['mx_testing_schema', 'mx_test_table_col_3_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_sequence_0']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'mx_test_sequence_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('sequence', ARRAY['public', 'user_defined_seq']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_test_schema_1', 'mx_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_test_schema_2', 'mx_table_2']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['mx_testing_schema', 'mx_test_table']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'dist_table_1']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'mx_ref']::text[], ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) AS (VALUES ('table', ARRAY['public', 'test_table']::text[], 
ARRAY[]::text[], -1, 0, false)) SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex::int, colocationid::int, force_delegation::bool) FROM distributed_object_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310000, 0, 1, 100000), (1310001, 0, 5, 100001), (1310002, 0, 1, 100002), (1310003, 0, 5, 100003), (1310004, 0, 1, 100004), (1310005, 0, 5, 100005), (1310006, 0, 1, 100006), (1310007, 0, 5, 100007)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310022, 0, 1, 100022), (1310023, 0, 5, 100023), (1310024, 0, 1, 100024), (1310025, 0, 5, 100025), (1310026, 0, 1, 100026)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310027, 0, 1, 100027), (1310028, 0, 5, 100028), (1310029, 0, 1, 100029), (1310030, 0, 5, 100030), (1310031, 0, 1, 100031)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310075, 0, 0, 100077), (1310075, 0, 1, 100078), (1310075, 0, 5, 100079)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310076, 0, 1, 100080), (1310077, 0, 5, 100081), (1310078, 0, 1, 100082), (1310079, 0, 5, 100083)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH placement_data(shardid, shardlength, groupid, placementid) AS (VALUES (1310085, 0, 1, 100091), (1310086, 0, 5, 100092), (1310087, 0, 1, 100093), (1310088, 0, 5, 100094)) SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_1.mx_table_1'::regclass, 1310022, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_1.mx_table_1'::regclass, 1310023, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_1.mx_table_1'::regclass, 1310024, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_1.mx_table_1'::regclass, 1310025, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_1.mx_table_1'::regclass, 1310026, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('mx_test_schema_2.mx_table_2'::regclass, 1310027, 't'::"char", '-2147483648', '-1288490190'), ('mx_test_schema_2.mx_table_2'::regclass, 1310028, 't'::"char", '-1288490189', '-429496731'), ('mx_test_schema_2.mx_table_2'::regclass, 1310029, 't'::"char", '-429496730', '429496728'), ('mx_test_schema_2.mx_table_2'::regclass, 1310030, 't'::"char", '429496729', '1288490187'), ('mx_test_schema_2.mx_table_2'::regclass, 1310031, 't'::"char", '1288490188', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES 
('mx_testing_schema.mx_test_table'::regclass, 1310000, 't'::"char", '-2147483648', '-1610612737'), ('mx_testing_schema.mx_test_table'::regclass, 1310001, 't'::"char", '-1610612736', '-1073741825'), ('mx_testing_schema.mx_test_table'::regclass, 1310002, 't'::"char", '-1073741824', '-536870913'), ('mx_testing_schema.mx_test_table'::regclass, 1310003, 't'::"char", '-536870912', '-1'), ('mx_testing_schema.mx_test_table'::regclass, 1310004, 't'::"char", '0', '536870911'), ('mx_testing_schema.mx_test_table'::regclass, 1310005, 't'::"char", '536870912', '1073741823'), ('mx_testing_schema.mx_test_table'::regclass, 1310006, 't'::"char", '1073741824', '1610612735'), ('mx_testing_schema.mx_test_table'::regclass, 1310007, 't'::"char", '1610612736', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.dist_table_1'::regclass, 1310076, 't'::"char", '-2147483648', '-1073741825'), ('public.dist_table_1'::regclass, 1310077, 't'::"char", '-1073741824', '-1'), ('public.dist_table_1'::regclass, 1310078, 't'::"char", '0', '1073741823'), ('public.dist_table_1'::regclass, 1310079, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.mx_ref'::regclass, 1310075, 't'::"char", NULL, NULL)) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('public.test_table'::regclass, 1310085, 't'::"char", '-2147483648', '-1073741825'), ('public.test_table'::regclass, 1310086, 't'::"char", '-1073741824', '-1'), ('public.test_table'::regclass, 1310087, 't'::"char", '0', '1073741823'), ('public.test_table'::regclass, 1310088, 't'::"char", '1073741824', '2147483647')) SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; (118 rows) -- shouldn't work since test_table is MX diff --git a/src/test/regress/expected/multi_mx_router_planner.out b/src/test/regress/expected/multi_mx_router_planner.out index e7855a898f9..5ac6093cb99 100644 --- a/src/test/regress/expected/multi_mx_router_planner.out +++ b/src/test/regress/expected/multi_mx_router_planner.out @@ -370,7 +370,7 @@ WITH RECURSIVE hierarchy as ( h.company_id = ce.company_id)) SELECT * FROM hierarchy WHERE LEVEL <= 2; DEBUG: Router planner cannot handle multi-shard select queries -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column -- logically wrong query, query involves different shards -- from the same table, but still router plannable due to -- shard being placed on the same worker. 
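The hunks above track the migration of Citus' metadata-sync helpers from bare pg_catalog names (citus_internal_add_partition_metadata and friends) to schema-qualified functions under the citus_internal schema, so activate_node_snapshot() now emits the qualified form. These are internal bookkeeping UDFs rather than a public API; a minimal sketch of the calling convention, with argument values echoing the snapshot above:

-- register distribution metadata for a hash-distributed table
SELECT citus_internal.add_partition_metadata(
         'mx_testing_schema.mx_test_table'::regclass, -- relation
         'h',      -- 'h' = hash-distributed ('n' appears above for reference tables)
         'col_1',  -- distribution column
         2,        -- colocation group id
         's');     -- replication model ('s' = streaming)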
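The error-message change in this router-planner test encodes the actual rule: a recursive CTE executes only when the whole query can be router-planned to a single shard, which in practice means every distributed-table reference carries an equality filter on the distribution column. A hedged sketch against a company_employees table assumed to be hash-distributed on company_id:

WITH RECURSIVE hierarchy AS (
    SELECT employee_id, manager_id, 1 AS level
    FROM company_employees
    WHERE company_id = 2 AND manager_id = 0
  UNION ALL
    SELECT ce.employee_id, ce.manager_id, h.level + 1
    FROM company_employees ce, hierarchy h
    WHERE ce.company_id = 2              -- distribution-column filter keeps it on one shard
      AND ce.manager_id = h.employee_id
)
SELECT * FROM hierarchy WHERE level <= 2;
-- without the company_id filters the query becomes multi-shard and fails with the
-- "recursive CTEs are only supported when they contain a filter on the distribution
-- column" error shown in the expected output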
@@ -386,7 +386,7 @@ WITH RECURSIVE hierarchy as ( ce.company_id = 2)) SELECT * FROM hierarchy WHERE LEVEL <= 2; DEBUG: router planner does not support queries that reference non-colocated distributed tables -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column -- grouping sets are supported on single shard SELECT id, substring(title, 2, 1) AS subtitle, count(*) diff --git a/src/test/regress/expected/multi_router_planner.out b/src/test/regress/expected/multi_router_planner.out index 702b115dadb..fee821a7d24 100644 --- a/src/test/regress/expected/multi_router_planner.out +++ b/src/test/regress/expected/multi_router_planner.out @@ -436,7 +436,7 @@ WITH RECURSIVE hierarchy as MATERIALIZED ( h.company_id = ce.company_id)) SELECT * FROM hierarchy WHERE LEVEL <= 2; DEBUG: Router planner cannot handle multi-shard select queries -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column -- logically wrong query, query involves different shards -- from the same table WITH RECURSIVE hierarchy as MATERIALIZED ( @@ -451,7 +451,7 @@ WITH RECURSIVE hierarchy as MATERIALIZED ( ce.company_id = 2)) SELECT * FROM hierarchy WHERE LEVEL <= 2; DEBUG: router planner does not support queries that reference non-colocated distributed tables -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column -- Test router modifying CTEs WITH new_article AS MATERIALIZED( INSERT INTO articles_hash VALUES (1, 1, 'arsenous', 9) RETURNING * diff --git a/src/test/regress/expected/multi_test_helpers.out b/src/test/regress/expected/multi_test_helpers.out index 5fc694d1304..0f31f2354eb 100644 --- a/src/test/regress/expected/multi_test_helpers.out +++ b/src/test/regress/expected/multi_test_helpers.out @@ -634,7 +634,8 @@ DECLARE BEGIN FOREACH permission IN ARRAY permissions LOOP - RETURN QUERY EXECUTE format($inner$SELECT '%s', result FROM run_command_on_all_nodes($$select has_database_privilege('%s','%s', '%s'); $$)$inner$, permission, role_name, db_name, permission); + RETURN QUERY EXECUTE format($inner$SELECT %s, result FROM run_command_on_all_nodes($$select has_database_privilege(%s,%s,%s); $$)$inner$, + quote_literal(permission), quote_literal(role_name), quote_literal(db_name), quote_literal(permission)); END LOOP; END; $func$ LANGUAGE plpgsql; diff --git a/src/test/regress/expected/other_databases.out b/src/test/regress/expected/other_databases.out index a15c4bb50d8..c67746055a5 100644 --- a/src/test/regress/expected/other_databases.out +++ b/src/test/regress/expected/other_databases.out @@ -98,11 +98,11 @@ REVOKE ALL ON SCHEMA citus_internal FROM nonsuperuser; DROP USER other_db_user9, nonsuperuser; -- test from a worker \c - - - :worker_1_port -CREATE DATABASE other_db2; +CREATE DATABASE worker_other_db; NOTICE: Citus partially supports CREATE DATABASE for distributed databases DETAIL: Citus does not propagate CREATE DATABASE command to other nodes HINT: You can manually create a database and its extensions on other nodes. 
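A note on the multi_test_helpers change above: building the probe query with quote_literal() instead of hand-written '%s' placeholders makes the helper safe for values that themselves contain quotes. A standalone illustration of the difference (made-up values):

SELECT format(
         $q$SELECT has_database_privilege(%s, %s, %s)$q$,
         quote_literal('role with ''quotes'''),
         quote_literal('regression'),
         quote_literal('CONNECT'));
-- quote_literal doubles the embedded quotes; the old '%s' interpolation would
-- have produced a syntactically broken (or injectable) statement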
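The other_databases additions that follow exercise citus.enable_create_database_propagation. When the GUC is on, CREATE DATABASE and DROP DATABASE run from any node — including from a non-main database — are propagated across the cluster; when it is off, the command stays local and Citus only prints the partial-support notice seen above. The test flips the setting with this pattern (the pg_sleep is a small grace period for backends to pick up the reloaded config):

SELECT result FROM run_command_on_all_nodes(
         $$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$);
SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$);
SELECT pg_sleep(0.1);
CREATE DATABASE other_db3;  -- with the GUC on, created on the coordinator and every worker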
-\c other_db2 +\c worker_other_db CREATE USER worker_user1; BEGIN; CREATE USER worker_user2; @@ -129,8 +129,211 @@ SELECT usename FROM pg_user WHERE usename LIKE 'worker\_user%' ORDER BY 1; -- some user creation commands will fail but let's make sure we try to drop them just in case DROP USER IF EXISTS worker_user1, worker_user2, worker_user3; NOTICE: role "worker_user3" does not exist, skipping -\c - - - :worker_1_port -DROP DATABASE other_db2; +-- test creating and dropping a database from a Citus non-main database +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +\c other_db1 +CREATE DATABASE other_db3; +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db3') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "other_db3", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "other_db3", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "other_db3", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +\c other_db1 +DROP DATABASE other_db3; +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db3') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +\c worker_other_db - - :worker_1_port +CREATE DATABASE other_db4; +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db4') ORDER 
BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"database_properties": {"datacl": null, "datname": "other_db4", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (local) | {"database_properties": {"datacl": null, "datname": "other_db4", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "other_db4", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +\c worker_other_db +DROP DATABASE other_db4; +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db4') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +DROP DATABASE worker_other_db; +CREATE DATABASE other_db5; +-- disable create database propagation for the next test +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +\c other_db5 - - :worker_2_port +-- locally create a database +CREATE DATABASE local_db; +\c regression - - - +-- re-enable create database propagation +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep 
+--------------------------------------------------------------------- + +(1 row) + +\c other_db5 - - :master_port +-- Test a scenario where create database fails because the database +-- already exists on another node and we don't crash etc. +CREATE DATABASE local_db; +ERROR: database "local_db" already exists +CONTEXT: while executing command on localhost:xxxxx +while executing command on localhost:xxxxx +\c regression - - - +SELECT * FROM public.check_database_on_all_nodes('local_db') ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "local_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +\c - - - :worker_2_port +-- locally drop the database for cleanup purposes +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +DROP DATABASE local_db; +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + \c - - - :master_port +DROP DATABASE other_db5; +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + DROP SCHEMA other_databases; DROP DATABASE other_db1; diff --git a/src/test/regress/expected/pg14.out b/src/test/regress/expected/pg14.out index badd232407e..bbfd5dafa08 100644 --- a/src/test/regress/expected/pg14.out +++ b/src/test/regress/expected/pg14.out @@ -1142,7 +1142,7 @@ WITH RECURSIVE search_graph(f, t, label) AS ( WHERE g.f = sg.t and g.f = 1 
) SEARCH DEPTH FIRST BY f, t SET seq SELECT * FROM search_graph ORDER BY seq; -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column WITH RECURSIVE search_graph(f, t, label) AS ( SELECT * FROM graph0 g WHERE f = 1 UNION ALL @@ -1151,7 +1151,7 @@ WITH RECURSIVE search_graph(f, t, label) AS ( WHERE g.f = sg.t and g.f = 1 ) SEARCH DEPTH FIRST BY f, t SET seq DELETE FROM graph0 WHERE t IN (SELECT t FROM search_graph ORDER BY seq); -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column CREATE TABLE graph1(f INT, t INT, label TEXT); SELECT create_reference_table('graph1'); create_reference_table @@ -1170,7 +1170,7 @@ WITH RECURSIVE search_graph(f, t, label) AS ( WHERE g.f = sg.t and g.f = 1 ) SEARCH DEPTH FIRST BY f, t SET seq SELECT * FROM search_graph ORDER BY seq; -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column WITH RECURSIVE search_graph(f, t, label) AS ( SELECT * FROM graph1 g WHERE f = 1 UNION ALL @@ -1179,7 +1179,7 @@ WITH RECURSIVE search_graph(f, t, label) AS ( WHERE g.f = sg.t and g.f = 1 ) SEARCH DEPTH FIRST BY f, t SET seq DELETE FROM graph1 WHERE t IN (SELECT t FROM search_graph ORDER BY seq); -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column SELECT * FROM ( WITH RECURSIVE search_graph(f, t, label) AS ( SELECT * @@ -1191,7 +1191,7 @@ SELECT * FROM ( ) SEARCH DEPTH FIRST BY f, t SET seq SELECT * FROM search_graph ORDER BY seq ) as foo; -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column -- -- https://github.com/citusdata/citus/issues/5258 -- diff --git a/src/test/regress/expected/query_single_shard_table.out b/src/test/regress/expected/query_single_shard_table.out index ad6037b65b5..5f551a9881d 100644 --- a/src/test/regress/expected/query_single_shard_table.out +++ b/src/test/regress/expected/query_single_shard_table.out @@ -1529,7 +1529,7 @@ DEBUG: Router planner cannot handle multi-shard select queries DEBUG: Router planner cannot handle multi-shard select queries DEBUG: generating subplan XXX_1 for CTE level_1: WITH RECURSIVE level_2_recursive(x) AS (VALUES (1) UNION ALL SELECT (nullkey_c1_t1.a OPERATOR(pg_catalog.+) 1) FROM (query_single_shard_table.nullkey_c1_t1 JOIN level_2_recursive level_2_recursive_1 ON ((nullkey_c1_t1.a OPERATOR(pg_catalog.=) level_2_recursive_1.x))) WHERE (nullkey_c1_t1.a OPERATOR(pg_catalog.<) 100)) SELECT level_2_recursive.x, distributed_table.a, distributed_table.b FROM (level_2_recursive JOIN query_single_shard_table.distributed_table ON ((level_2_recursive.x OPERATOR(pg_catalog.=) distributed_table.a))) DEBUG: Router planner cannot handle multi-shard select queries -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column -- grouping set SELECT id, substring(title, 2, 1) AS subtitle, count(*) diff --git a/src/test/regress/expected/schema_based_sharding.out b/src/test/regress/expected/schema_based_sharding.out index 28cb45688f9..711c3914137 100644 --- a/src/test/regress/expected/schema_based_sharding.out +++ 
b/src/test/regress/expected/schema_based_sharding.out @@ -13,19 +13,19 @@ SELECT 1 FROM citus_add_node('localhost', :master_port, groupid => 0); SET client_min_messages TO NOTICE; -- Verify that the UDFs used to sync tenant schema metadata to workers -- fail on NULL input. -SELECT citus_internal_add_tenant_schema(NULL, 1); +SELECT citus_internal.add_tenant_schema(NULL, 1); ERROR: schema_id cannot be NULL -SELECT citus_internal_add_tenant_schema(1, NULL); +SELECT citus_internal.add_tenant_schema(1, NULL); ERROR: colocation_id cannot be NULL -SELECT citus_internal_delete_tenant_schema(NULL); +SELECT citus_internal.delete_tenant_schema(NULL); ERROR: schema_id cannot be NULL -SELECT citus_internal_unregister_tenant_schema_globally(1, NULL); +SELECT citus_internal.unregister_tenant_schema_globally(1, NULL); ERROR: schema_name cannot be NULL -SELECT citus_internal_unregister_tenant_schema_globally(NULL, 'text'); +SELECT citus_internal.unregister_tenant_schema_globally(NULL, 'text'); ERROR: schema_id cannot be NULL --- Verify that citus_internal_unregister_tenant_schema_globally can only +-- Verify that citus_internal.unregister_tenant_schema_globally can only -- be called on schemas that are dropped already. -SELECT citus_internal_unregister_tenant_schema_globally('regular_schema'::regnamespace, 'regular_schema'); +SELECT citus_internal.unregister_tenant_schema_globally('regular_schema'::regnamespace, 'regular_schema'); ERROR: schema is expected to be already dropped because this function is only expected to be called from Citus drop hook SELECT 1 FROM citus_remove_node('localhost', :worker_2_port); ?column? @@ -1511,10 +1511,10 @@ SELECT pg_reload_conf(); t (1 row) --- Verify that citus_internal_unregister_tenant_schema_globally is a no-op +-- Verify that citus_internal.unregister_tenant_schema_globally is a no-op -- on workers. -SELECT citus_internal_unregister_tenant_schema_globally('tenant_3'::regnamespace, 'tenant_3'); - citus_internal_unregister_tenant_schema_globally +SELECT citus_internal.unregister_tenant_schema_globally('tenant_3'::regnamespace, 'tenant_3'); + unregister_tenant_schema_globally --------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/seclabel.out b/src/test/regress/expected/seclabel.out index f826de44b66..ae658973408 100644 --- a/src/test/regress/expected/seclabel.out +++ b/src/test/regress/expected/seclabel.out @@ -115,16 +115,13 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx SECURITY LABEL ON ROLE user1 IS 'citus_unclassified'; NOTICE: issuing SECURITY LABEL ON ROLE user1 IS 'citus_unclassified' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified'; -NOTICE: issuing SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified' +SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_classified'; +NOTICE: issuing SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_classified' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx \c - - - :worker_1_port --- command not allowed from worker node -SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus ''!unclassified'; -ERROR: operation is not allowed on this node -HINT: Connect to the coordinator and run it again. 
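The seclabel hunks below replace the old behavior — rejecting SECURITY LABEL ON ROLE from a worker with "operation is not allowed on this node" — with full propagation: the command is now accepted on any node and forwarded to the others. A condensed sketch of the new flow from a worker, reusing the test's label provider:

\c - - - :worker_1_port
SET citus.log_remote_commands TO on;
SET citus.grep_remote_commands = '%SECURITY LABEL%';
SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified';
-- the NOTICE shows the command being issued to the remote nodes, and the
-- coordinator and all workers end up reporting the same label for user1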
-\c - - - :master_port -RESET citus.log_remote_commands; +SET citus.log_remote_commands TO on; +SET citus.grep_remote_commands = '%SECURITY LABEL%'; +-- command from the worker node should be propagated to the coordinator SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; node_type | result --------------------------------------------------------------------- @@ -132,6 +129,33 @@ SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORD worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} (2 rows) +SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified'; +NOTICE: issuing SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(2 rows) + +RESET citus.log_remote_commands; +SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified'; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(2 rows) + +\c - - - :master_port +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(2 rows) + SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; node_type | result --------------------------------------------------------------------- @@ -143,7 +167,7 @@ SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') SET citus.log_remote_commands TO on; SET citus.grep_remote_commands = '%SECURITY LABEL%'; SELECT 1 FROM citus_add_node('localhost', :worker_2_port); -NOTICE: issuing SELECT worker_create_or_alter_role('user1', 'CREATE ROLE user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''', 'ALTER ROLE user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''');SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_unclassified' +NOTICE: issuing SELECT worker_create_or_alter_role('user1', 'CREATE ROLE user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''', 'ALTER ROLE user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID 
UNTIL ''infinity''');SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SELECT worker_create_or_alter_role('user 2', 'CREATE ROLE "user 2" NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''', 'ALTER ROLE "user 2" NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''');SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx @@ -155,9 +179,9 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; node_type | result --------------------------------------------------------------------- - coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} - worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} - worker_2 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + coordinator | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} (3 rows) SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; @@ -168,6 +192,35 @@ SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') worker_2 | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} (3 rows) +-- disable the GUC and check that the command is not propagated +SET citus.enable_alter_role_propagation TO off; +SECURITY LABEL ON ROLE user1 IS 'citus_unclassified'; +NOTICE: not propagating SECURITY LABEL commands to other nodes +HINT: Connect to other nodes directly to manually assign necessary labels. +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +\c - - - :worker_2_port +SET citus.log_remote_commands TO on; +SET citus.grep_remote_commands = '%SECURITY LABEL%'; +SET citus.enable_alter_role_propagation TO off; +SECURITY LABEL ON ROLE user1 IS 'citus ''!unclassified'; +NOTICE: not propagating SECURITY LABEL commands to other nodes +HINT: Connect to other nodes directly to manually assign necessary labels. 
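As the surrounding hunks show, SECURITY LABEL ON ROLE also honors citus.enable_alter_role_propagation: with the GUC off, the label is applied only on the local node and Citus emits a notice instead of forwarding the command. Sketch:

SET citus.enable_alter_role_propagation TO off;
SECURITY LABEL ON ROLE user1 IS 'citus_unclassified';
-- NOTICE:  not propagating SECURITY LABEL commands to other nodes
-- HINT:  Connect to other nodes directly to manually assign necessary labels.
RESET citus.enable_alter_role_propagation;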
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +RESET citus.enable_alter_role_propagation; -- cleanup RESET citus.log_remote_commands; DROP ROLE user1, "user 2"; diff --git a/src/test/regress/expected/shard_rebalancer.out b/src/test/regress/expected/shard_rebalancer.out index a7cd6b38cb9..988fa68be88 100644 --- a/src/test/regress/expected/shard_rebalancer.out +++ b/src/test/regress/expected/shard_rebalancer.out @@ -2395,6 +2395,74 @@ SELECT count(*) FROM pg_dist_partition; 0 (1 row) +-- verify a system with a new node won't copy distributed table shards without reference tables +SELECT 1 from master_remove_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +SELECT public.wait_until_metadata_sync(30000); + wait_until_metadata_sync +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE r1 (a int PRIMARY KEY, b int); +SELECT create_reference_table('r1'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE d1 (a int PRIMARY KEY, b int); +SELECT create_distributed_table('d1', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +ALTER SEQUENCE pg_dist_groupid_seq RESTART WITH 15; +SELECT 1 from master_add_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +-- count the number of placements for the reference table to verify it is not available on +-- all nodes +SELECT count(*) +FROM pg_dist_shard +JOIN pg_dist_shard_placement USING (shardid) +WHERE logicalrelid = 'r1'::regclass; + count +--------------------------------------------------------------------- + 1 +(1 row) + +-- #7426 We can't move shards to the fresh node before we copy reference tables there. +-- rebalance_table_shards() will do the copy, but the low-level +-- citus_move_shard_placement() should raise an error +SELECT citus_move_shard_placement(pg_dist_shard.shardid, nodename, nodeport, 'localhost', :worker_2_port) + FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) + WHERE logicalrelid = 'd1'::regclass AND nodename = 'localhost' AND nodeport = :worker_1_port LIMIT 1; +ERROR: there are missing reference tables on some nodes +SELECT replicate_reference_tables(); + replicate_reference_tables +--------------------------------------------------------------------- + +(1 row) + +-- After replication, the move should succeed. 
+SELECT citus_move_shard_placement(pg_dist_shard.shardid, nodename, nodeport, 'localhost', :worker_2_port) + FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) + WHERE logicalrelid = 'd1'::regclass AND nodename = 'localhost' AND nodeport = :worker_1_port LIMIT 1; + citus_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +DROP TABLE d1, r1; -- verify a system having only reference tables will copy the reference tables when -- executing the rebalancer SELECT 1 from master_remove_node('localhost', :worker_2_port); diff --git a/src/test/regress/expected/subquery_and_cte.out b/src/test/regress/expected/subquery_and_cte.out index c15e9b9d78b..89686086560 100644 --- a/src/test/regress/expected/subquery_and_cte.out +++ b/src/test/regress/expected/subquery_and_cte.out @@ -527,7 +527,7 @@ FROM ) as bar WHERE foo.user_id = bar.user_id ORDER BY 1 DESC; -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column CREATE TABLE ref_table_1 (a int); SELECT create_reference_table('ref_table_1'); create_reference_table diff --git a/src/test/regress/expected/upgrade_list_citus_objects.out b/src/test/regress/expected/upgrade_list_citus_objects.out index 5b3f9a7a8ff..cab9b6736fb 100644 --- a/src/test/regress/expected/upgrade_list_citus_objects.out +++ b/src/test/regress/expected/upgrade_list_citus_objects.out @@ -58,9 +58,25 @@ ORDER BY 1; function citus_get_transaction_clock() function citus_internal.acquire_citus_advisory_object_class_lock(integer,cstring) function citus_internal.add_colocation_metadata(integer,integer,integer,regtype,oid) + function citus_internal.add_object_metadata(text,text[],text[],integer,integer,boolean) + function citus_internal.add_partition_metadata(regclass,"char",text,integer,"char") + function citus_internal.add_placement_metadata(bigint,bigint,integer,bigint) + function citus_internal.add_shard_metadata(regclass,bigint,"char",text,text) + function citus_internal.add_tenant_schema(oid,integer) + function citus_internal.adjust_local_clock_to_remote(cluster_clock) function citus_internal.commit_management_command_2pc() + function citus_internal.database_command(text) + function citus_internal.delete_colocation_metadata(integer) + function citus_internal.delete_partition_metadata(regclass) + function citus_internal.delete_placement_metadata(bigint) + function citus_internal.delete_shard_metadata(bigint) + function citus_internal.delete_tenant_schema(oid) function citus_internal.execute_command_on_remote_nodes_as_user(text,text) function citus_internal.find_groupid_for_node(text,integer) + function citus_internal.global_blocked_processes() + function citus_internal.is_replication_origin_tracking_active() + function citus_internal.local_blocked_processes() + function citus_internal.mark_node_not_synced(integer,integer) function citus_internal.mark_object_distributed(oid,text,oid,text) function citus_internal.pg_dist_node_trigger_func() function citus_internal.pg_dist_rebalance_strategy_trigger_func() @@ -69,6 +85,12 @@ ORDER BY 1; function citus_internal.replace_isolation_tester_func() function citus_internal.restore_isolation_tester_func() function citus_internal.start_management_transaction(xid8) + function citus_internal.start_replication_origin_tracking() + function citus_internal.stop_replication_origin_tracking() + function citus_internal.unregister_tenant_schema_globally(oid,text) + function 
citus_internal.update_none_dist_table_metadata(oid,"char",bigint,boolean) + function citus_internal.update_placement_metadata(bigint,integer,integer) + function citus_internal.update_relation_colocation(oid,integer) function citus_internal_add_colocation_metadata(integer,integer,integer,regtype,oid) function citus_internal_add_object_metadata(text,text[],text[],integer,integer,boolean) function citus_internal_add_partition_metadata(regclass,"char",text,integer,"char") @@ -77,7 +99,6 @@ ORDER BY 1; function citus_internal_add_shard_metadata(regclass,bigint,"char",text,text) function citus_internal_add_tenant_schema(oid,integer) function citus_internal_adjust_local_clock_to_remote(cluster_clock) - function citus_internal_database_command(text) function citus_internal_delete_colocation_metadata(integer) function citus_internal_delete_partition_metadata(regclass) function citus_internal_delete_placement_metadata(bigint) @@ -351,5 +372,5 @@ ORDER BY 1; view citus_stat_tenants_local view pg_dist_shard_placement view time_partitions -(341 rows) +(361 rows) diff --git a/src/test/regress/expected/with_basics.out b/src/test/regress/expected/with_basics.out index 4eefb883755..78ed17317da 100644 --- a/src/test/regress/expected/with_basics.out +++ b/src/test/regress/expected/with_basics.out @@ -664,14 +664,14 @@ WITH RECURSIVE basic_recursive(x) AS ( SELECT user_id + 1 FROM users_table JOIN basic_recursive ON (user_id = x) WHERE user_id < 100 ) SELECT sum(x) FROM basic_recursive; -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column WITH RECURSIVE basic_recursive AS ( SELECT -1 as user_id, '2017-11-22 20:16:16.614779'::timestamp, -1, -1, -1, -1 UNION ALL SELECT basic_recursive.* FROM users_table JOIN basic_recursive USING (user_id) WHERE user_id>1 ) SELECT * FROM basic_recursive ORDER BY user_id LIMIT 1; -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column -- basic_recursive in FROM should error out SELECT * @@ -682,7 +682,7 @@ FROM SELECT basic_recursive.* FROM users_table JOIN basic_recursive USING (user_id) WHERE user_id>1 ) SELECT * FROM basic_recursive ORDER BY user_id LIMIT 1) cte_rec; -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column -- basic_recursive in WHERE with UNION ALL SELECT * @@ -696,7 +696,7 @@ WHERE SELECT basic_recursive.* FROM users_table JOIN basic_recursive USING (user_id) WHERE user_id>1 ) SELECT * FROM basic_recursive ORDER BY user_id LIMIT 1); -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column -- one recursive one regular CTE should error out WITH RECURSIVE basic_recursive(x) AS( VALUES (1) @@ -707,7 +707,7 @@ basic AS ( SELECT count(user_id) FROM users_table ) SELECT x FROM basic, basic_recursive; -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column -- one recursive one regular which SELECTs from the recursive CTE under a simple SELECT WITH RECURSIVE basic_recursive(x) AS( VALUES (1) @@ -718,7 +718,7 @@ basic AS ( SELECT count(x) FROM basic_recursive ) SELECT * FROM basic; -ERROR: recursive CTEs are not supported in distributed queries +ERROR: 
recursive CTEs are only supported when they contain a filter on the distribution column -- recursive CTE in a NESTED manner WITH regular_cte AS ( WITH regular_2 AS ( @@ -732,7 +732,7 @@ WITH regular_cte AS ( SELECT * FROM regular_2 ) SELECT * FROM regular_cte; -ERROR: recursive CTEs are not supported in distributed queries +ERROR: recursive CTEs are only supported when they contain a filter on the distribution column -- CTEs should work with VIEWs as well CREATE VIEW basic_view AS SELECT * FROM users_table; diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index e7c3f7d981a..015f7497316 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -40,7 +40,7 @@ test: create_drop_database_propagation_pg15 test: create_drop_database_propagation_pg16 test: comment_on_database test: comment_on_role -test: metadata_sync_2pc +test: metadata_sync_from_non_maindb # don't parallelize single_shard_table_udfs to make sure colocation ids are sequential test: single_shard_table_udfs test: schema_based_sharding @@ -58,8 +58,7 @@ test: multi_metadata_attributes test: multi_read_from_secondaries -test: grant_on_database_propagation -test: grant_on_database_propagation_2pc +test: grant_on_database_propagation grant_on_database_propagation_from_non_maindb test: alter_database_propagation test: citus_shards diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule index 1bd939d7254..793bb800684 100644 --- a/src/test/regress/multi_schedule +++ b/src/test/regress/multi_schedule @@ -108,8 +108,8 @@ test: object_propagation_debug test: undistribute_table test: run_command_on_all_nodes test: background_task_queue_monitor +test: other_databases grant_role_from_non_maindb test: other_databases -test: grant_role_2pc test: role_operations_2pc test: citus_internal_access diff --git a/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec b/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec index fce3794274a..9683935bed0 100644 --- a/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec +++ b/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec @@ -90,7 +90,7 @@ step "s2-view-worker" ('%pg_prepared_xacts%'), ('%COMMIT%'), ('%dump_local_%'), - ('%citus_internal_local_blocked_processes%'), + ('%citus_internal.local_blocked_processes%'), ('%add_node%'), ('%csa_from_one_node%'), ('%pg_locks%')) diff --git a/src/test/regress/sql/create_drop_database_propagation.sql b/src/test/regress/sql/create_drop_database_propagation.sql index 2a63194c16b..329f4861203 100644 --- a/src/test/regress/sql/create_drop_database_propagation.sql +++ b/src/test/regress/sql/create_drop_database_propagation.sql @@ -5,7 +5,7 @@ -- For versions >= 16, pg16_create_drop_database_propagation.sql is used. -- Test the UDF that we use to issue database command during metadata sync. 
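-- (citus_internal.database_command(text), per the function list above, takes a single command string; the calls below exercise its NULL handling, its CREATE/DROP DATABASE-only restriction, and its privilege checks.)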
-SELECT pg_catalog.citus_internal_database_command(null); +SELECT citus_internal.database_command(null); CREATE ROLE test_db_commands WITH LOGIN; ALTER SYSTEM SET citus.enable_manual_metadata_changes_for_user TO 'test_db_commands'; @@ -14,20 +14,20 @@ SELECT pg_sleep(0.1); SET ROLE test_db_commands; -- fails on null input -SELECT pg_catalog.citus_internal_database_command(null); +SELECT citus_internal.database_command(null); -- fails on non create / drop db command -SELECT pg_catalog.citus_internal_database_command('CREATE TABLE foo_bar(a int)'); -SELECT pg_catalog.citus_internal_database_command('SELECT 1'); -SELECT pg_catalog.citus_internal_database_command('asfsfdsg'); -SELECT pg_catalog.citus_internal_database_command(''); +SELECT citus_internal.database_command('CREATE TABLE foo_bar(a int)'); +SELECT citus_internal.database_command('SELECT 1'); +SELECT citus_internal.database_command('asfsfdsg'); +SELECT citus_internal.database_command(''); RESET ROLE; ALTER ROLE test_db_commands nocreatedb; SET ROLE test_db_commands; --- make sure that pg_catalog.citus_internal_database_command doesn't cause privilege escalation -SELECT pg_catalog.citus_internal_database_command('CREATE DATABASE no_permissions'); +-- make sure that citus_internal.database_command doesn't cause privilege escalation +SELECT citus_internal.database_command('CREATE DATABASE no_permissions'); RESET ROLE; DROP USER test_db_commands; diff --git a/src/test/regress/sql/create_ref_dist_from_citus_local.sql b/src/test/regress/sql/create_ref_dist_from_citus_local.sql index 7c10abce6ab..e9610d65fbe 100644 --- a/src/test/regress/sql/create_ref_dist_from_citus_local.sql +++ b/src/test/regress/sql/create_ref_dist_from_citus_local.sql @@ -219,8 +219,8 @@ ROLLBACK; -- Test the UDFs that we use to convert Citus local tables to single-shard tables and -- reference tables. 
-SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', 1, true); -SELECT pg_catalog.citus_internal_delete_placement_metadata(1); +SELECT citus_internal.update_none_dist_table_metadata(1, 't', 1, true); +SELECT citus_internal.delete_placement_metadata(1); CREATE ROLE test_user_create_ref_dist WITH LOGIN; GRANT ALL ON SCHEMA create_ref_dist_from_citus_local TO test_user_create_ref_dist; @@ -234,18 +234,18 @@ SET citus.next_placement_id TO 8510000; SET citus.shard_replication_factor TO 1; SET search_path TO create_ref_dist_from_citus_local; -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(null, 't', 1, true); -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, null, 1, true); -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', null, true); -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', 1, null); +SELECT citus_internal.update_none_dist_table_metadata(null, 't', 1, true); +SELECT citus_internal.update_none_dist_table_metadata(1, null, 1, true); +SELECT citus_internal.update_none_dist_table_metadata(1, 't', null, true); +SELECT citus_internal.update_none_dist_table_metadata(1, 't', 1, null); -SELECT pg_catalog.citus_internal_delete_placement_metadata(null); +SELECT citus_internal.delete_placement_metadata(null); CREATE TABLE udf_test (col_1 int); SELECT citus_add_local_table_to_metadata('udf_test'); BEGIN; - SELECT pg_catalog.citus_internal_update_none_dist_table_metadata('create_ref_dist_from_citus_local.udf_test'::regclass, 'k', 99999, true); + SELECT citus_internal.update_none_dist_table_metadata('create_ref_dist_from_citus_local.udf_test'::regclass, 'k', 99999, true); SELECT COUNT(*)=1 FROM pg_dist_partition WHERE logicalrelid = 'create_ref_dist_from_citus_local.udf_test'::regclass AND repmodel = 'k' AND colocationid = 99999 AND autoconverted = true; @@ -253,7 +253,7 @@ BEGIN; SELECT placementid AS udf_test_placementid FROM pg_dist_shard_placement WHERE shardid = get_shard_id_for_distribution_column('create_ref_dist_from_citus_local.udf_test') \gset - SELECT pg_catalog.citus_internal_delete_placement_metadata(:udf_test_placementid); + SELECT citus_internal.delete_placement_metadata(:udf_test_placementid); SELECT COUNT(*)=0 FROM pg_dist_placement WHERE placementid = :udf_test_placementid; ROLLBACK; diff --git a/src/test/regress/sql/create_role_propagation.sql b/src/test/regress/sql/create_role_propagation.sql index fa32cf2d25b..bd2951b175c 100644 --- a/src/test/regress/sql/create_role_propagation.sql +++ b/src/test/regress/sql/create_role_propagation.sql @@ -75,6 +75,8 @@ SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::t \c - - - :master_port +create role test_admin_role; + -- test grants with distributed and non-distributed roles SELECT master_remove_node('localhost', :worker_2_port); @@ -84,6 +86,8 @@ CREATE ROLE dist_role_2; CREATE ROLE dist_role_3; CREATE ROLE dist_role_4; + + SET citus.enable_create_role_propagation TO OFF; CREATE ROLE non_dist_role_1 SUPERUSER; @@ -93,28 +97,71 @@ CREATE ROLE non_dist_role_4; SET citus.enable_create_role_propagation TO ON; + +grant dist_role_3,dist_role_1 to test_admin_role with admin option; + SET ROLE dist_role_1; GRANT non_dist_role_1 TO non_dist_role_2; SET citus.enable_create_role_propagation TO OFF; +grant dist_role_1 to non_dist_role_1 with admin option; SET ROLE non_dist_role_1; -GRANT dist_role_1 TO dist_role_2; +GRANT dist_role_1 TO dist_role_2 granted by non_dist_role_1; RESET ROLE; SET 
citus.enable_create_role_propagation TO ON; -GRANT dist_role_3 TO non_dist_role_3; + +GRANT dist_role_3 TO non_dist_role_3 granted by test_admin_role; GRANT non_dist_role_4 TO dist_role_4; +GRANT dist_role_3 TO dist_role_4 granted by test_admin_role; + SELECT 1 FROM master_add_node('localhost', :worker_2_port); -SELECT roleid::regrole::text AS role, member::regrole::text, (grantor::regrole::text IN ('postgres', 'non_dist_role_1', 'dist_role_1')) AS grantor, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT json_agg(q.* ORDER BY member) FROM ( + SELECT member::regrole::text, roleid::regrole::text AS role, grantor::regrole::text, admin_option + FROM pg_auth_members WHERE roleid::regrole::text = 'dist_role_3' + ) q; + $$ +); + +REVOKE dist_role_3 from dist_role_4 granted by test_admin_role cascade; + +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT json_agg(q.* ORDER BY member) FROM ( + SELECT member::regrole::text, roleid::regrole::text AS role, grantor::regrole::text, admin_option + FROM pg_auth_members WHERE roleid::regrole::text = 'dist_role_3' + order by member::regrole::text + ) q; + $$ +); + +SELECT roleid::regrole::text AS role, member::regrole::text, (grantor::regrole::text IN ('postgres', 'non_dist_role_1', 'dist_role_1','test_admin_role')) AS grantor, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%dist\_%' ORDER BY 1; +REVOKE dist_role_3 from non_dist_role_3 granted by test_admin_role cascade; + +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT json_agg(q.* ORDER BY member) FROM ( + SELECT member::regrole::text, roleid::regrole::text AS role, grantor::regrole::text, admin_option + FROM pg_auth_members WHERE roleid::regrole::text = 'dist_role_3' + order by member::regrole::text + ) q; + $$ +); + +revoke dist_role_3,dist_role_1 from test_admin_role cascade; +drop role test_admin_role; + \c - - - :worker_1_port SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist\_%' ORDER BY 1; diff --git a/src/test/regress/sql/failure_mx_metadata_sync.sql b/src/test/regress/sql/failure_mx_metadata_sync.sql index 90e882fe519..d8f82296f18 100644 --- a/src/test/regress/sql/failure_mx_metadata_sync.sql +++ b/src/test/regress/sql/failure_mx_metadata_sync.sql @@ -56,10 +56,10 @@ SELECT hasmetadata FROM pg_dist_node WHERE nodeport=:worker_2_proxy_port; -- Check failures on DDL command propagation CREATE TABLE t2 (id int PRIMARY KEY); -SELECT citus.mitmproxy('conn.onParse(query="citus_internal_add_placement_metadata").kill()'); +SELECT citus.mitmproxy('conn.onParse(query="citus_internal.add_placement_metadata").kill()'); SELECT create_distributed_table('t2', 'id'); -SELECT citus.mitmproxy('conn.onParse(query="citus_internal_add_shard_metadata").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onParse(query="citus_internal.add_shard_metadata").cancel(' || :pid || ')'); SELECT create_distributed_table('t2', 'id'); -- Verify that the table was not distributed diff --git a/src/test/regress/sql/failure_mx_metadata_sync_multi_trans.sql b/src/test/regress/sql/failure_mx_metadata_sync_multi_trans.sql index 011bae037df..afe6a64e905 100644 
--- a/src/test/regress/sql/failure_mx_metadata_sync_multi_trans.sql +++ b/src/test/regress/sql/failure_mx_metadata_sync_multi_trans.sql @@ -279,21 +279,21 @@ SELECT citus.mitmproxy('conn.onQuery(query="ALTER TABLE mx_metadata_sync_multi_t SELECT citus_activate_node('localhost', :worker_2_proxy_port); -- Failure to add partition metadata -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_partition_metadata").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_partition_metadata").cancel(' || :pid || ')'); SELECT citus_activate_node('localhost', :worker_2_proxy_port); -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_partition_metadata").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_partition_metadata").kill()'); SELECT citus_activate_node('localhost', :worker_2_proxy_port); -- Failure to add shard metadata -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_shard_metadata").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_shard_metadata").cancel(' || :pid || ')'); SELECT citus_activate_node('localhost', :worker_2_proxy_port); -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_shard_metadata").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_shard_metadata").kill()'); SELECT citus_activate_node('localhost', :worker_2_proxy_port); -- Failure to add placement metadata -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_placement_metadata").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_placement_metadata").cancel(' || :pid || ')'); SELECT citus_activate_node('localhost', :worker_2_proxy_port); -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_placement_metadata").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_placement_metadata").kill()'); SELECT citus_activate_node('localhost', :worker_2_proxy_port); -- Failure to add colocation metadata @@ -303,9 +303,9 @@ SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_colocation SELECT citus_activate_node('localhost', :worker_2_proxy_port); -- Failure to add distributed object metadata -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_object_metadata").cancel(' || :pid || ')'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_object_metadata").cancel(' || :pid || ')'); SELECT citus_activate_node('localhost', :worker_2_proxy_port); -SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal_add_object_metadata").kill()'); +SELECT citus.mitmproxy('conn.onQuery(query="SELECT citus_internal.add_object_metadata").kill()'); SELECT citus_activate_node('localhost', :worker_2_proxy_port); -- Failure to mark function as distributed diff --git a/src/test/regress/sql/grant_on_database_propagation_from_non_maindb.sql b/src/test/regress/sql/grant_on_database_propagation_from_non_maindb.sql new file mode 100644 index 00000000000..f83472b3655 --- /dev/null +++ b/src/test/regress/sql/grant_on_database_propagation_from_non_maindb.sql @@ -0,0 +1,246 @@ +-- Public role has connect,temp,temporary privileges on database +-- To test these scenarios, we need to revoke these privileges from public role +-- since public role privileges are inherited by new roles/users +set citus.enable_create_database_propagation to on; +create database test_2pc_db; 
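-- check_database_privileges, used throughout this file, is a helper defined elsewhere in the suite; a minimal sketch of the idea (the probe below is illustrative, not the helper's actual body) is a has_database_privilege() check fanned out to every node: SELECT result FROM run_command_on_all_nodes($$ SELECT has_database_privilege('postgres', 'test_2pc_db', 'CREATE') $$);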
+show citus.main_db; +revoke connect,temp,temporary on database test_2pc_db from public; + +CREATE SCHEMA grant_on_database_propagation_non_maindb; +SET search_path TO grant_on_database_propagation_non_maindb; + +-- test grant/revoke CREATE privilege propagation on database +create user "myuser'_test"; + +\c test_2pc_db - - :master_port +grant create on database test_2pc_db to "myuser'_test"; + +\c regression - - :master_port; +select check_database_privileges('myuser''_test','test_2pc_db',ARRAY['CREATE']); + +\c test_2pc_db - - :master_port +revoke create on database test_2pc_db from "myuser'_test"; + +\c regression - - :master_port; +select check_database_privileges('myuser''_test','test_2pc_db',ARRAY['CREATE']); + +drop user "myuser'_test"; +----------------------------------------------------------------------- + +-- test grant/revoke CONNECT privilege propagation on database +\c regression - - :master_port +create user myuser2; + +\c test_2pc_db - - :master_port +grant CONNECT on database test_2pc_db to myuser2; + +\c regression - - :master_port; +select check_database_privileges('myuser2','test_2pc_db',ARRAY['CONNECT']); + +\c test_2pc_db - - :master_port +revoke connect on database test_2pc_db from myuser2; + +\c regression - - :master_port +select check_database_privileges('myuser2','test_2pc_db',ARRAY['CONNECT']); + +drop user myuser2; + +----------------------------------------------------------------------- + +-- test grant/revoke TEMP privilege propagation on database +\c regression - - :master_port +create user myuser3; + +-- test grant/revoke temp on database +\c test_2pc_db - - :master_port +grant TEMP on database test_2pc_db to myuser3; + +\c regression - - :master_port; +select check_database_privileges('myuser3','test_2pc_db',ARRAY['TEMP']); + + +\c test_2pc_db - - :worker_1_port +revoke TEMP on database test_2pc_db from myuser3; + +\c regression - - :master_port; +select check_database_privileges('myuser3','test_2pc_db',ARRAY['TEMP']); + +drop user myuser3; + +----------------------------------------------------------------------- + +\c regression - - :master_port +-- test temporary privilege on database +create user myuser4; + +-- test grant/revoke temporary on database +\c test_2pc_db - - :worker_1_port +grant TEMPORARY on database test_2pc_db to myuser4; + +\c regression - - :master_port +select check_database_privileges('myuser4','test_2pc_db',ARRAY['TEMPORARY']); + +\c test_2pc_db - - :master_port +revoke TEMPORARY on database test_2pc_db from myuser4; + +\c regression - - :master_port; +select check_database_privileges('myuser4','test_2pc_db',ARRAY['TEMPORARY']); + +drop user myuser4; +----------------------------------------------------------------------- + +-- test ALL privileges with ALL statement on database +create user myuser5; + +grant ALL on database test_2pc_db to myuser5; + +\c regression - - :master_port +select check_database_privileges('myuser5','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + + +\c test_2pc_db - - :master_port +revoke ALL on database test_2pc_db from myuser5; + +\c regression - - :master_port +select check_database_privileges('myuser5','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + +drop user myuser5; +----------------------------------------------------------------------- + +-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database +create user myuser6; + +\c test_2pc_db - - :master_port +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser6; + +\c regression - - 
:master_port +select check_database_privileges('myuser6','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + +\c test_2pc_db - - :master_port +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser6; + +\c regression - - :master_port +select check_database_privileges('myuser6','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + + +drop user myuser6; +----------------------------------------------------------------------- + +-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database with grant option +create user myuser7; +create user myuser_1; + +\c test_2pc_db - - :master_port +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser7; + +set role myuser7; +--here since myuser7 does not have the grant option, it should fail +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser_1; + +\c regression - - :master_port +select check_database_privileges('myuser_1','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + +\c test_2pc_db - - :master_port + +RESET ROLE; + +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser7 with grant option; +set role myuser7; + +--here since myuser7 has the grant option, it should succeed +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser_1 granted by myuser7; + +\c regression - - :master_port +select check_database_privileges('myuser_1','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + +\c test_2pc_db - - :master_port + +RESET ROLE; + +--below test should fail and should throw an error since myuser_1 still has the dependent privileges +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 restrict; +--below test should fail and should throw an error since myuser_1 still has the dependent privileges +revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 restrict; + +--below test should succeed and should not throw any error since myuser_1's privileges are revoked with cascade +revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 cascade; + +--here we test whether myuser7 still has the privileges after REVOKE GRANT OPTION FOR + +\c regression - - :master_port +select check_database_privileges('myuser7','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + + +\c test_2pc_db - - :master_port + +reset role; + +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7; +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser_1; + +\c regression - - :master_port +drop user myuser_1; +drop user myuser7; + +----------------------------------------------------------------------- + +-- test CREATE,CONNECT,TEMP,TEMPORARY privileges on multiple databases +-- and multiple users +\c regression - - :master_port +create user myuser8; +create user myuser_2; + +set citus.enable_create_database_propagation to on; +create database test_db; + +revoke connect,temp,temporary on database test_db from public; + +\c test_2pc_db - - :master_port +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db to myuser8,myuser_2; + +\c regression - - :master_port +select check_database_privileges('myuser8','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); +select check_database_privileges('myuser8','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); +select check_database_privileges('myuser_2','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); +select 
check_database_privileges('myuser_2','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + + +\c test_2pc_db - - :master_port + +RESET ROLE; +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser8; + +--below test should succeed and should not throw any error +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser_2; + +--below test should succeed and should not throw any error +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser8 cascade; + +\c regression - - :master_port +select check_database_privileges('myuser8','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); +select check_database_privileges('myuser8','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); +select check_database_privileges('myuser_2','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); +select check_database_privileges('myuser_2','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + + +\c test_2pc_db - - :master_port + +reset role; + +\c regression - - :master_port +drop user myuser_2; +drop user myuser8; + +set citus.enable_create_database_propagation to on; +drop database test_db; + +--------------------------------------------------------------------------- +-- roll back the public role's database privileges to their original state +grant connect,temp,temporary on database test_2pc_db to public; +drop database test_2pc_db; +set citus.enable_create_database_propagation to off; +DROP SCHEMA grant_on_database_propagation_non_maindb CASCADE; + +reset citus.enable_create_database_propagation; +reset search_path; +--------------------------------------------------------------------------- diff --git a/src/test/regress/sql/grant_role_from_non_maindb.sql b/src/test/regress/sql/grant_role_from_non_maindb.sql new file mode 100644 index 00000000000..b74b5092d20 --- /dev/null +++ b/src/test/regress/sql/grant_role_from_non_maindb.sql @@ -0,0 +1,147 @@ +CREATE SCHEMA grant_role2pc; +SET search_path TO grant_role2pc; +set citus.enable_create_database_propagation to on; + +CREATE DATABASE grant_role2pc_db; + +\c grant_role2pc_db +SHOW citus.main_db; + +SET citus.superuser TO 'postgres'; +CREATE USER grant_role2pc_user1; +CREATE USER grant_role2pc_user2; +CREATE USER grant_role2pc_user3; +CREATE USER grant_role2pc_user4; +CREATE USER grant_role2pc_user5; +CREATE USER grant_role2pc_user6; +CREATE USER grant_role2pc_user7; + +\c grant_role2pc_db + +--test with empty superuser +SET citus.superuser TO ''; +grant grant_role2pc_user1 to grant_role2pc_user2; + +SET citus.superuser TO 'postgres'; +grant grant_role2pc_user1 to grant_role2pc_user2 with admin option granted by CURRENT_USER; + +\c regression + +select result FROM run_command_on_all_nodes( + $$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text = 'grant_role2pc_user2' + order by member::regrole::text, roleid::regrole::text + ) t + $$ +); + +\c grant_role2pc_db +--test grant under transactional context with multiple operations +BEGIN; +grant grant_role2pc_user1,grant_role2pc_user2 to grant_role2pc_user3 WITH ADMIN OPTION; +grant grant_role2pc_user1 to grant_role2pc_user4 granted by grant_role2pc_user3; +COMMIT; + +BEGIN; +grant grant_role2pc_user1 to grant_role2pc_user5 WITH ADMIN OPTION granted by grant_role2pc_user3; +grant grant_role2pc_user1 to grant_role2pc_user6; +ROLLBACK; + + + +BEGIN; +grant grant_role2pc_user1 
to grant_role2pc_user7; +SELECT 1/0; +commit; + + +\c regression + +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user3','grant_role2pc_user4','grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7') + order by member::regrole::text, roleid::regrole::text +) t +$$); + + +\c grant_role2pc_db + +grant grant_role2pc_user1,grant_role2pc_user2 to grant_role2pc_user5,grant_role2pc_user6,grant_role2pc_user7 granted by grant_role2pc_user3; + +\c regression + +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7') + order by member::regrole::text, roleid::regrole::text +) t +$$); + +\c grant_role2pc_db +revoke admin option for grant_role2pc_user1 from grant_role2pc_user5 granted by grant_role2pc_user3; + +--test revoke under transactional context with multiple operations +BEGIN; +revoke grant_role2pc_user1 from grant_role2pc_user5 granted by grant_role2pc_user3 ; +revoke grant_role2pc_user1 from grant_role2pc_user4 granted by grant_role2pc_user3; +COMMIT; +\c grant_role2pc_db - - :worker_1_port +BEGIN; +revoke grant_role2pc_user1 from grant_role2pc_user6,grant_role2pc_user7 granted by grant_role2pc_user3; +revoke grant_role2pc_user1 from grant_role2pc_user3 cascade; +COMMIT; + +\c regression + +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user2','grant_role2pc_user3','grant_role2pc_user4','grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7') + order by member::regrole::text, roleid::regrole::text +) t +$$); + +\c grant_role2pc_db - - :worker_1_port +BEGIN; +grant grant_role2pc_user1 to grant_role2pc_user5 WITH ADMIN OPTION; +grant grant_role2pc_user1 to grant_role2pc_user6; +COMMIT; + +\c regression - - :master_port + +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user5','grant_role2pc_user6') + order by member::regrole::text, roleid::regrole::text +) t +$$); + +revoke grant_role2pc_user1 from grant_role2pc_user5,grant_role2pc_user6; + +--clean resources +DROP SCHEMA grant_role2pc; +set citus.enable_create_database_propagation to on; +DROP DATABASE grant_role2pc_db; +drop user grant_role2pc_user2,grant_role2pc_user3,grant_role2pc_user4,grant_role2pc_user5,grant_role2pc_user6,grant_role2pc_user7; +drop user grant_role2pc_user1; +reset citus.enable_create_database_propagation; diff --git a/src/test/regress/sql/metadata_sync_from_non_maindb.sql b/src/test/regress/sql/metadata_sync_from_non_maindb.sql new file mode 100644 index 00000000000..43f52518994 --- /dev/null +++ b/src/test/regress/sql/metadata_sync_from_non_maindb.sql @@ -0,0 +1,72 @@ +CREATE SCHEMA metadata_sync_2pc_schema; +SET search_path TO metadata_sync_2pc_schema; +set citus.enable_create_database_propagation to on; +CREATE DATABASE 
metadata_sync_2pc_db; + +revoke connect,temp,temporary on database metadata_sync_2pc_db from public; + +\c metadata_sync_2pc_db +SHOW citus.main_db; + +CREATE USER "grant_role2pc'_user1"; +CREATE USER "grant_role2pc'_user2"; +CREATE USER "grant_role2pc'_user3"; +CREATE USER grant_role2pc_user4; +CREATE USER grant_role2pc_user5; + +\c regression +select 1 from citus_remove_node('localhost', :worker_2_port); + +\c metadata_sync_2pc_db +grant "grant_role2pc'_user1","grant_role2pc'_user2" to "grant_role2pc'_user3" WITH ADMIN OPTION; +grant "grant_role2pc'_user1","grant_role2pc'_user2" to grant_role2pc_user4,grant_role2pc_user5 granted by "grant_role2pc'_user3"; + +--test for grant on database +\c metadata_sync_2pc_db - - :master_port +grant create on database metadata_sync_2pc_db to "grant_role2pc'_user1"; +grant connect on database metadata_sync_2pc_db to "grant_role2pc'_user2"; +grant ALL on database metadata_sync_2pc_db to "grant_role2pc'_user3"; + +\c regression +select check_database_privileges('grant_role2pc''_user1','metadata_sync_2pc_db',ARRAY['CREATE']); +select check_database_privileges('grant_role2pc''_user2','metadata_sync_2pc_db',ARRAY['CONNECT']); +select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db',ARRAY['CREATE','CONNECT','TEMP','TEMPORARY']); + +\c regression +set citus.enable_create_database_propagation to on; +select 1 from citus_add_node('localhost', :worker_2_port); + +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('"grant_role2pc''_user2"','"grant_role2pc''_user3"','grant_role2pc_user4','grant_role2pc_user5') + order by member::regrole::text +) t +$$); + +select check_database_privileges('grant_role2pc''_user1','metadata_sync_2pc_db',ARRAY['CREATE']); +select check_database_privileges('grant_role2pc''_user2','metadata_sync_2pc_db',ARRAY['CONNECT']); +select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db',ARRAY['CREATE','CONNECT','TEMP','TEMPORARY']); + +\c metadata_sync_2pc_db +revoke "grant_role2pc'_user1","grant_role2pc'_user2" from grant_role2pc_user4,grant_role2pc_user5 granted by "grant_role2pc'_user3"; + +revoke admin option for "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3"; + +revoke "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3"; +revoke ALL on database metadata_sync_2pc_db from "grant_role2pc'_user3"; +revoke CONNECT on database metadata_sync_2pc_db from "grant_role2pc'_user2"; +revoke CREATE on database metadata_sync_2pc_db from "grant_role2pc'_user1"; + +\c regression + +drop user "grant_role2pc'_user1","grant_role2pc'_user2","grant_role2pc'_user3",grant_role2pc_user4,grant_role2pc_user5; +set citus.enable_create_database_propagation to on; +drop database metadata_sync_2pc_db; +drop schema metadata_sync_2pc_schema; + +reset citus.enable_create_database_propagation; +reset search_path; diff --git a/src/test/regress/sql/metadata_sync_helpers.sql b/src/test/regress/sql/metadata_sync_helpers.sql index 642b2f708fb..dae331d258b 100644 --- a/src/test/regress/sql/metadata_sync_helpers.sql +++ b/src/test/regress/sql/metadata_sync_helpers.sql @@ -15,20 +15,20 @@ SET search_path TO metadata_sync_helpers; CREATE TABLE test(col_1 int); -- not in a distributed transaction -SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); -SELECT 
citus_internal_update_relation_colocation ('test'::regclass, 1); +SELECT citus_internal.add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); +SELECT citus_internal.update_relation_colocation ('test'::regclass, 1); -- in a distributed transaction, but the application name is not Citus BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); - SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; -- in a distributed transaction and the application name is Citus, allowed. BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; \c - postgres - \c - - - :worker_1_port @@ -47,14 +47,14 @@ SET search_path TO metadata_sync_helpers; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; -- we do not own the relation BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation ('test'::regclass, 10); + SELECT citus_internal.update_relation_colocation ('test'::regclass, 10); ROLLBACK; -- finally, a user can only add their own tables to the metadata @@ -63,7 +63,7 @@ CREATE TABLE test_3(col_1 int, col_2 int); BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); SELECT count(*) FROM pg_dist_partition WHERE logicalrelid = 'metadata_sync_helpers.test_2'::regclass; ROLLBACK; @@ -71,84 +71,84 @@ ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_rebalancer gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; -- application_name with incorrect gpid BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=not a correct gpid'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; -- also fails if done by the rebalancer BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
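  -- (assign_distributed_transaction_id marks this backend as participating in a distributed transaction; the citus_internal.* helpers additionally validate application_name, so the rebalancer prefix with a malformed gpid set on the next line is still rejected)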
SET application_name to 'citus_rebalancer gpid=not a correct gpid'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; -- application_name with suffix is ok (e.g. pgbouncer might add this) BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001 - from 10.12.14.16:10370'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; -- application_name with empty gpid BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid='; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; -- empty application_name BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to ''; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; -- application_name with incorrect prefix BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; -- fails because there is no X distribution method BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); ROLLBACK; -- fails because the column does not exist BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'non_existing_col', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'non_existing_col', 0, 's'); ROLLBACK; -- fails because we do not allow NULL parameters BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata (NULL, 'h', 'non_existing_col', 0, 's'); + SELECT citus_internal.add_partition_metadata (NULL, 'h', 'non_existing_col', 0, 's'); ROLLBACK; -- fails because colocationId cannot be negative BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata 
('test_2'::regclass, 'h', 'col_1', -1, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', -1, 's'); ROLLBACK; -- fails because there is no X replication model BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 'X'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 'X'); ROLLBACK; -- the same table cannot be added twice, that is enforced by a primary key @@ -156,8 +156,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); ROLLBACK; -- the same table cannot be added twice, that is enforced by a primary key even if distribution key changes @@ -165,8 +165,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_2', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_2', 0, 's'); ROLLBACK; -- hash distributed table cannot have NULL distribution key @@ -174,7 +174,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', NULL, 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', NULL, 0, 's'); ROLLBACK; -- even if metadata_sync_helper_role is not owner of the table test @@ -194,7 +194,7 @@ SET search_path TO metadata_sync_helpers; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); ROLLBACK; -- should throw error even if we skip the checks, there are no such nodes @@ -202,7 +202,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420007, 10000, 11111); + SELECT citus_internal.update_placement_metadata(1420007, 10000, 11111); ROLLBACK; -- non-existing users should fail to pass the checks @@ -218,7 +218,7 @@ SET search_path TO metadata_sync_helpers; BEGIN 
TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'X', 'col_1', 0, 's'); ROLLBACK; \c - postgres - :worker_1_port @@ -236,21 +236,21 @@ CREATE TABLE test_ref(col_1 int, col_2 int); BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('test_ref'::regclass, 'n', 'col_1', 0, 's'); ROLLBACK; -- non-valid replication model BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'A'); + SELECT citus_internal.add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'A'); ROLLBACK; -- not-matching replication model for reference table BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'c'); + SELECT citus_internal.add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 'c'); ROLLBACK; -- add entry for super user table @@ -260,7 +260,7 @@ CREATE TABLE super_user_table(col_1 int); BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_add_partition_metadata ('super_user_table'::regclass, 'h', 'col_1', 0, 's'); + SELECT citus_internal.add_partition_metadata ('super_user_table'::regclass, 'h', 'col_1', 0, 's'); COMMIT; -- now, let's check shard metadata @@ -276,7 +276,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('super_user_table'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ROLLBACK; -- the user is only allowed to add a shard for a table which is in pg_dist_partition @@ -286,23 +286,23 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; \set VERBOSITY terse WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-2147483648'::text, '-1610612737'::text)) - SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; + SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data; ROLLBACK; -- ok, now add the table to the pg_dist_partition BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT 
 BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
 SET application_name to 'citus_internal gpid=10000000001';
- SELECT citus_internal_add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 250, 's');
- SELECT citus_internal_add_partition_metadata ('test_3'::regclass, 'h', 'col_1', 251, 's');
- SELECT citus_internal_add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 't');
+ SELECT citus_internal.add_partition_metadata ('test_2'::regclass, 'h', 'col_1', 250, 's');
+ SELECT citus_internal.add_partition_metadata ('test_3'::regclass, 'h', 'col_1', 251, 's');
+ SELECT citus_internal.add_partition_metadata ('test_ref'::regclass, 'n', NULL, 0, 't');
 COMMIT;

 -- we can update to a non-existing colocation group (e.g., colocate_with:=none)
 BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
 SET application_name to 'citus_internal gpid=10000000001';
- SELECT citus_internal_update_relation_colocation ('test_2'::regclass, 1231231232);
+ SELECT citus_internal.update_relation_colocation ('test_2'::regclass, 1231231232);
 ROLLBACK;

 -- invalid shard ids are not allowed
@@ -312,7 +312,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 \set VERBOSITY terse
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
  AS (VALUES ('test_2'::regclass, -1, 't'::"char", '-2147483648'::text, '-1610612737'::text))
- SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
+ SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
 ROLLBACK;

 -- invalid storage types are not allowed
@@ -322,7 +322,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 \set VERBOSITY terse
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
  AS (VALUES ('test_2'::regclass, 1420000, 'X'::"char", '-2147483648'::text, '-1610612737'::text))
- SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
+ SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
 ROLLBACK;

 -- NULL shard ranges are not allowed for hash distributed tables
@@ -332,7 +332,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 \set VERBOSITY terse
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
  AS (VALUES ('test_2'::regclass, 1420000, 't'::"char", NULL, '-1610612737'::text))
- SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
+ SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
 ROLLBACK;

 -- non-integer shard ranges are not allowed
@@ -342,7 +342,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 \set VERBOSITY terse
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
  AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", 'non-int'::text, '-1610612737'::text))
- SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
+ SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
 ROLLBACK;

 -- shardMinValue should be smaller than shardMaxValue
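The shardminvalue/shardmaxvalue arguments in the hunks above are int32 hash-token bounds stored as text: '-2147483648' is the bottom of the token space, and '-1610612737' equals -2147483648 + 2^32/8 - 1, i.e. the top of the first of eight equal ranges. A sketch of the arithmetic (illustrative only, not part of the test):

    -- first of 8 equal hash ranges over the int32 token space
    SELECT (-2147483648)::bigint                             AS shardminvalue,
           (-2147483648)::bigint + (1::bigint << 32) / 8 - 1 AS shardmaxvalue;
    -- yields -2147483648 and -1610612737, matching the values used above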
@@ -352,7 +352,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 \set VERBOSITY terse
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
  AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '-1610612737'::text, '-2147483648'::text))
- SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
+ SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
 ROLLBACK;

 -- we do not allow overlapping shards for the same table
@@ -364,7 +364,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
  AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text),
             ('test_2'::regclass, 1420001::bigint, 't'::"char", '20'::text, '30'::text),
             ('test_2'::regclass, 1420002::bigint, 't'::"char", '10'::text, '50'::text))
- SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
+ SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
 ROLLBACK;

 -- Now let's check valid pg_dist_object updates
@@ -376,7 +376,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 \set VERBOSITY terse
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
  AS (VALUES ('non_existing_type', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false))
- SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
+ SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
 ROLLBACK;

 -- check the sanity of distributionArgumentIndex and colocationId
@@ -386,7 +386,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 \set VERBOSITY terse
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
  AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -100, 0, false))
- SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
+ SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
 ROLLBACK;

 BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
@@ -395,7 +395,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 \set VERBOSITY terse
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
  AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], -1, -1, false))
- SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
+ SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
 ROLLBACK;

 -- check with non-existing object
@@ -405,10 +405,10 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 \set VERBOSITY terse
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
  AS (VALUES ('role', ARRAY['non_existing_user']::text[], ARRAY[]::text[], -1, 0, false))
- SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
+ SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
 ROLLBACK;

--- since citus_internal_add_object_metadata is strict function returns NULL
+-- since citus_internal.add_object_metadata is a strict function, it returns NULL
 -- if any parameter is NULL
 BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
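The STRICT behavior the comment above refers to is stock PostgreSQL semantics rather than anything Citus-specific: a function declared STRICT is never executed when any argument is NULL, and the call simply evaluates to NULL. A standalone illustration with a throwaway function (hypothetical, not part of the suite):

    CREATE FUNCTION strict_demo(i int) RETURNS int
        STRICT LANGUAGE sql AS $$ SELECT i + 1 $$;
    SELECT strict_demo(NULL) IS NULL;  -- true; the function body never runs
    DROP FUNCTION strict_demo(int);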
@@ -416,12 +416,12 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 \set VERBOSITY terse
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
  AS (VALUES ('role', ARRAY['metadata_sync_helper_role']::text[], ARRAY[]::text[], 0, NULL::int, false))
- SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
+ SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
 ROLLBACK;

 \c - postgres - :worker_1_port

--- Show that citus_internal_add_object_metadata only works for object types
+-- Show that citus_internal.add_object_metadata only works for object types
 -- which is known how to distribute
 BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
@@ -437,10 +437,10 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 SET ROLE metadata_sync_helper_role;
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
  AS (VALUES ('operator', ARRAY['===']::text[], ARRAY['int','int']::text[], -1, 0, false))
- SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
+ SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
 ROLLBACK;

--- Show that citus_internal_add_object_metadata checks the priviliges
+-- Show that citus_internal.add_object_metadata checks the privileges
 BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
 SET application_name to 'citus_internal gpid=10000000001';
@@ -454,7 +454,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 SET ROLE metadata_sync_helper_role;
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
  AS (VALUES ('function', ARRAY['distribution_test_function']::text[], ARRAY['integer']::text[], -1, 0, false))
- SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
+ SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
 ROLLBACK;

 BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
@@ -468,7 +468,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 SET ROLE metadata_sync_helper_role;
 WITH distributed_object_data(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation)
  AS (VALUES ('type', ARRAY['distributed_test_type']::text[], ARRAY[]::text[], -1, 0, false))
- SELECT citus_internal_add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
+ SELECT citus_internal.add_object_metadata(typetext, objnames, objargs, distargumentindex, colocationid, force_delegation) FROM distributed_object_data;
 ROLLBACK;

 -- we do not allow wrong partmethod
@@ -482,7 +482,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
  AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text),
             ('test_2'::regclass, 1420001::bigint, 't'::"char", '20'::text, '30'::text))
- SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
+ SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
 ROLLBACK;

 -- we do not allow NULL shardMinMax values
@@ -494,12 +494,12 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 \set VERBOSITY terse
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
  AS (VALUES ('test_2'::regclass, 1420000::bigint, 't'::"char", '10'::text, '20'::text))
- SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
+ SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
 -- manually ingest NULL values, otherwise not likely unless metadata is corrupted
 UPDATE pg_dist_shard SET shardminvalue = NULL WHERE shardid = 1420000;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
  AS (VALUES ('test_2'::regclass, 1420001::bigint, 't'::"char", '20'::text, '30'::text))
- SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
+ SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
 ROLLBACK;

 \c - metadata_sync_helper_role - :worker_1_port
@@ -518,14 +518,14 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
             ('test_2'::regclass, 1420004::bigint, 't'::"char", '51'::text, '60'::text),
             ('test_2'::regclass, 1420005::bigint, 't'::"char", '61'::text, '70'::text),
             ('test_3'::regclass, 1420008::bigint, 't'::"char", '11'::text, '20'::text))
- SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
+ SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
 COMMIT;

 -- we cannot mark these two tables colocated because they are not colocated
 BEGIN;
 SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
 SET application_name to 'citus_internal gpid=10000000001';
- SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251);
+ SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251);
 ROLLBACK;

 -- now, add few more shards for test_3 to make it colocated with test_2
@@ -539,7 +539,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
             ('test_3'::regclass, 1420011::bigint, 't'::"char", '41'::text, '50'::text),
             ('test_3'::regclass, 1420012::bigint, 't'::"char", '51'::text, '60'::text),
             ('test_3'::regclass, 1420013::bigint, 't'::"char", '61'::text, '70'::text))
- SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
+ SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
 COMMIT;

 -- shardMin/MaxValues should be NULL for reference tables
@@ -549,7 +549,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 \set VERBOSITY terse
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
  AS (VALUES ('test_ref'::regclass, 1420003::bigint, 't'::"char", '-1610612737'::text, NULL))
- SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
+ SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
 ROLLBACK;

 -- reference tables cannot have multiple shards
@@ -560,7 +560,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
  AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL),
             ('test_ref'::regclass, 1420007::bigint, 't'::"char", NULL, NULL))
- SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
+ SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
 ROLLBACK;

 -- finally, add a shard for reference tables
@@ -570,7 +570,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 \set VERBOSITY terse
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
  AS (VALUES ('test_ref'::regclass, 1420006::bigint, 't'::"char", NULL, NULL))
- SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
+ SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
 COMMIT;

 \c - postgres - :worker_1_port
@@ -583,7 +583,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 \set VERBOSITY terse
 WITH shard_data(relationname, shardid, storagetype, shardminvalue, shardmaxvalue)
  AS (VALUES ('super_user_table'::regclass, 1420007::bigint, 't'::"char", '11'::text, '20'::text))
- SELECT citus_internal_add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
+ SELECT citus_internal.add_shard_metadata(relationname, shardid, storagetype, shardminvalue, shardmaxvalue) FROM shard_data;
 COMMIT;

 \c - metadata_sync_helper_role - :worker_1_port
@@ -596,9 +596,9 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
 SET application_name to 'citus_internal gpid=10000000001';
 \set VERBOSITY terse
- WITH placement_data(shardid, shardstate, shardlength, groupid, placementid) AS
- (VALUES (-10, 1, 0::bigint, 1::int, 1500000::bigint))
- SELECT citus_internal_add_placement_metadata(shardid, shardstate, shardlength, groupid, placementid) FROM placement_data;
+ WITH placement_data(shardid, shardlength, groupid, placementid) AS
+ (VALUES (-10, 0::bigint, 1::int, 1500000::bigint))
+ SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
 ROLLBACK;

 -- invalid placementid
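The hunk above also captures a small signature change: the old citus_internal_add_placement_metadata took a shardstate argument, while citus_internal.add_placement_metadata is invoked with only (shardid, shardlength, groupid, placementid). Every later placement block in this file follows the new calling shape:

    WITH placement_data(shardid, shardlength, groupid, placementid) AS
        (VALUES (1420000, 0::bigint, 1::int, 1500000::bigint))
    SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid)
    FROM placement_data;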
@@ -608,7 +608,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 \set VERBOSITY terse
 WITH placement_data(shardid, shardlength, groupid, placementid) AS
 (VALUES (1420000, 0::bigint, 1::int, -10))
- SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
+ SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
 ROLLBACK;

 -- non-existing shard
@@ -618,7 +618,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 \set VERBOSITY terse
 WITH placement_data(shardid, shardlength, groupid, placementid) AS
 (VALUES (1430100, 0::bigint, 1::int, 10))
- SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
+ SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
 ROLLBACK;

 -- non-existing node with non-existing node-id 123123123
@@ -628,7 +628,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 \set VERBOSITY terse
 WITH placement_data(shardid, shardlength, groupid, placementid) AS
 (VALUES ( 1420000, 0::bigint, 123123123::int, 1500000))
- SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
+ SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
 ROLLBACK;

 -- create a volatile function that returns the local node id
@@ -655,7 +655,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 WITH placement_data(shardid, shardlength, groupid, placementid) AS
 (VALUES (1420000, 0::bigint, get_node_id(), 1500000),
         (1420000, 0::bigint, get_node_id(), 1500001))
- SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
+ SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
 ROLLBACK;

 -- shard is not owned by us
@@ -665,7 +665,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 \set VERBOSITY terse
 WITH placement_data(shardid, shardlength, groupid, placementid) AS
 (VALUES (1420007, 0::bigint, get_node_id(), 1500000))
- SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
+ SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
 ROLLBACK;

 -- sucessfully add placements
@@ -686,14 +686,14 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
         (1420011, 0::bigint, get_node_id(), 1500009),
         (1420012, 0::bigint, get_node_id(), 1500010),
         (1420013, 0::bigint, get_node_id(), 1500011))
- SELECT citus_internal_add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
+ SELECT citus_internal.add_placement_metadata(shardid, shardlength, groupid, placementid) FROM placement_data;
 COMMIT;

 -- we should be able to colocate both tables now
 BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
 SET application_name to 'citus_internal gpid=10000000001';
- SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251);
+ SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251);
 ROLLBACK;

 -- try to update placements
@@ -703,7 +703,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
 SET application_name to 'citus_internal gpid=10000000001';
 \set VERBOSITY terse
- SELECT citus_internal_update_placement_metadata(1420000, get_node_id(), get_node_id()+1000);
+ SELECT citus_internal.update_placement_metadata(1420000, get_node_id(), get_node_id()+1000);
 COMMIT;

 -- fails because the source node doesn't contain the shard
@@ -711,7 +711,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
 SET application_name to 'citus_internal gpid=10000000001';
 \set VERBOSITY terse
- SELECT citus_internal_update_placement_metadata(1420000, get_node_id()+10000, get_node_id());
+ SELECT citus_internal.update_placement_metadata(1420000, get_node_id()+10000, get_node_id());
 COMMIT;

 -- fails because shard does not exist
@@ -719,7 +719,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
 SET application_name to 'citus_internal gpid=10000000001';
 \set VERBOSITY terse
- SELECT citus_internal_update_placement_metadata(0, get_node_id(), get_node_id()+1);
+ SELECT citus_internal.update_placement_metadata(0, get_node_id(), get_node_id()+1);
 COMMIT;

 -- fails because none-existing shard
@@ -727,7 +727,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
 SET application_name to 'citus_internal gpid=10000000001';
 \set VERBOSITY terse
- SELECT citus_internal_update_placement_metadata(213123123123, get_node_id(), get_node_id()+1);
+ SELECT citus_internal.update_placement_metadata(213123123123, get_node_id(), get_node_id()+1);
 COMMIT;

 -- fails because we do not own the shard
@@ -735,7 +735,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
 SET application_name to 'citus_internal gpid=10000000001';
 \set VERBOSITY terse
- SELECT citus_internal_update_placement_metadata(1420007, get_node_id(), get_node_id()+1);
+ SELECT citus_internal.update_placement_metadata(1420007, get_node_id(), get_node_id()+1);
 COMMIT;

 -- the user only allowed to delete their own shards
@@ -745,7 +745,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 \set VERBOSITY terse
 WITH shard_data(shardid)
 AS (VALUES (1420007))
- SELECT citus_internal_delete_shard_metadata(shardid) FROM shard_data;
+ SELECT citus_internal.delete_shard_metadata(shardid) FROM shard_data;
 ROLLBACK;

 -- the user cannot delete non-existing shards
@@ -755,7 +755,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 \set VERBOSITY terse
 WITH shard_data(shardid)
 AS (VALUES (1420100))
- SELECT citus_internal_delete_shard_metadata(shardid) FROM shard_data;
+ SELECT citus_internal.delete_shard_metadata(shardid) FROM shard_data;
 ROLLBACK;

@@ -770,7 +770,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 \set VERBOSITY terse
 WITH shard_data(shardid)
 AS (VALUES (1420000))
- SELECT citus_internal_delete_shard_metadata(shardid) FROM shard_data;
+ SELECT citus_internal.delete_shard_metadata(shardid) FROM shard_data;

 SELECT count(*) FROM pg_dist_shard WHERE shardid = 1420000;
 SELECT count(*) FROM pg_dist_placement WHERE shardid = 1420000;
@@ -788,7 +788,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 -- so that making two tables colocated fails
 UPDATE pg_dist_partition SET repmodel = 't'
 WHERE logicalrelid = 'test_2'::regclass;
- SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251);
+ SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251);
 ROLLBACK;

@@ -810,7 +810,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 WHERE logicalrelid = 'test_2'::regclass;
 \endif
- SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251);
+ SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251);
 ROLLBACK;

 BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
@@ -820,7 +820,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 -- so that making two tables colocated fails
 UPDATE pg_dist_partition SET partmethod = ''
 WHERE logicalrelid = 'test_2'::regclass;
- SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251);
+ SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251);
 ROLLBACK;

 BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
@@ -830,7 +830,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 -- so that making two tables colocated fails
 UPDATE pg_dist_partition SET partmethod = 'a'
 WHERE logicalrelid = 'test_2'::regclass;
- SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251);
+ SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251);
 ROLLBACK;

 -- colocated hash distributed table should have the same dist key columns
@@ -841,8 +841,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
 SET application_name to 'citus_internal gpid=10000000001';
 \set VERBOSITY terse
- SELECT citus_internal_add_partition_metadata ('test_5'::regclass, 'h', 'int_col', 500, 's');
- SELECT citus_internal_add_partition_metadata ('test_6'::regclass, 'h', 'text_col', 500, 's');
+ SELECT citus_internal.add_partition_metadata ('test_5'::regclass, 'h', 'int_col', 500, 's');
+ SELECT citus_internal.add_partition_metadata ('test_6'::regclass, 'h', 'text_col', 500, 's');
 ROLLBACK;

@@ -858,8 +858,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
 SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02');
 SET application_name to 'citus_internal gpid=10000000001';
 \set VERBOSITY terse
- SELECT citus_internal_add_partition_metadata ('test_7'::regclass, 'h', 'text_col', 500, 's');
- SELECT citus_internal_add_partition_metadata ('test_8'::regclass, 'h', 'text_col', 500, 's');
+ SELECT citus_internal.add_partition_metadata ('test_7'::regclass, 'h', 'text_col', 500, 's');
+ SELECT citus_internal.add_partition_metadata ('test_8'::regclass, 'h', 'text_col', 500, 's');
 ROLLBACK;

 -- we don't need the table/schema anymore
diff --git a/src/test/regress/sql/multi_test_helpers.sql b/src/test/regress/sql/multi_test_helpers.sql
index 40bbaaf07b4..7d218361ce7 100644
--- a/src/test/regress/sql/multi_test_helpers.sql
+++ b/src/test/regress/sql/multi_test_helpers.sql
@@ -661,7 +661,8 @@ DECLARE
 BEGIN
   FOREACH permission IN ARRAY permissions
   LOOP
-    RETURN QUERY EXECUTE format($inner$SELECT '%s', result FROM run_command_on_all_nodes($$select has_database_privilege('%s','%s', '%s'); $$)$inner$, permission, role_name, db_name, permission);
+    RETURN QUERY EXECUTE format($inner$SELECT %s, result FROM run_command_on_all_nodes($$select has_database_privilege(%s,%s,%s); $$)$inner$,
+    quote_literal(permission), quote_literal(role_name), quote_literal(db_name), quote_literal(permission));
   END LOOP;
 END;
 $func$ LANGUAGE plpgsql;
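The multi_test_helpers.sql fix above swaps hand-written '%s' quoting for quote_literal(), which emits a correctly escaped literal (embedded single quotes are doubled) instead of trusting the interpolated string to be quote-free. A minimal sketch of the difference, using a hypothetical role name that contains a quote:

    SELECT format($q$SELECT has_database_privilege('%s', 'db', 'CONNECT')$q$, $$o'brien$$);
    -- -> SELECT has_database_privilege('o'brien', 'db', 'CONNECT')    (broken SQL)
    SELECT format('SELECT has_database_privilege(%s, %s, %s)',
                  quote_literal($$o'brien$$), quote_literal('db'), quote_literal('CONNECT'));
    -- -> SELECT has_database_privilege('o''brien', 'db', 'CONNECT')   (valid SQL)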
diff --git a/src/test/regress/sql/other_databases.sql b/src/test/regress/sql/other_databases.sql
index 8cd54f35432..aa936e50753 100644
--- a/src/test/regress/sql/other_databases.sql
+++ b/src/test/regress/sql/other_databases.sql
@@ -75,9 +75,9 @@ DROP USER other_db_user9, nonsuperuser;
 -- test from a worker
 \c - - - :worker_1_port

-CREATE DATABASE other_db2;
+CREATE DATABASE worker_other_db;

-\c other_db2
+\c worker_other_db

 CREATE USER worker_user1;

@@ -98,9 +98,85 @@ SELECT usename FROM pg_user WHERE usename LIKE 'worker\_user%' ORDER BY 1;
 -- some user creation commands will fail but let's make sure we try to drop them just in case
 DROP USER IF EXISTS worker_user1, worker_user2, worker_user3;

-\c - - - :worker_1_port
-DROP DATABASE other_db2;
+-- test creating and dropping a database from a Citus non-main database
+SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$);
+SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$);
+SELECT pg_sleep(0.1);
+\c other_db1
+CREATE DATABASE other_db3;
+
+\c regression
+SELECT * FROM public.check_database_on_all_nodes('other_db3') ORDER BY node_type;
+
+\c other_db1
+DROP DATABASE other_db3;
+
+\c regression
+SELECT * FROM public.check_database_on_all_nodes('other_db3') ORDER BY node_type;
+
+\c worker_other_db - - :worker_1_port
+CREATE DATABASE other_db4;
+
+\c regression
+SELECT * FROM public.check_database_on_all_nodes('other_db4') ORDER BY node_type;
+
+\c worker_other_db
+DROP DATABASE other_db4;
+
+\c regression
+SELECT * FROM public.check_database_on_all_nodes('other_db4') ORDER BY node_type;
+
+DROP DATABASE worker_other_db;
+
+CREATE DATABASE other_db5;
+
+-- disable create database propagation for the next test
+SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$);
+SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$);
+SELECT pg_sleep(0.1);
+
+\c other_db5 - - :worker_2_port
+
+-- locally create a database
+CREATE DATABASE local_db;
+
+\c regression - - -
+
+-- re-enable create database propagation
+SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$);
+SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$);
+SELECT pg_sleep(0.1);
+
+\c other_db5 - - :master_port
+
+-- Test a scenario where create database fails because the database
+-- already exists on another node and we don't crash etc.
+CREATE DATABASE local_db;
+
+\c regression - - -
+
+SELECT * FROM public.check_database_on_all_nodes('local_db') ORDER BY node_type, result;
+
+\c - - - :worker_2_port
+
+-- locally drop the database for cleanup purposes
+SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$);
+SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$);
+SELECT pg_sleep(0.1);
+
+DROP DATABASE local_db;
+
+SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$);
+SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$);
+SELECT pg_sleep(0.1);
+
 \c - - - :master_port

+DROP DATABASE other_db5;
+
+SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$);
+SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$);
+SELECT pg_sleep(0.1);
+
 DROP SCHEMA other_databases;
 DROP DATABASE other_db1;
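The new other_databases.sql tests above toggle citus.enable_create_database_propagation cluster-wide several times with the same three-statement idiom: ALTER SYSTEM writes the setting on every node, pg_reload_conf() makes each postmaster re-read its configuration, and the short sleep gives new connections time to observe the change:

    SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$);
    SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$);
    SELECT pg_sleep(0.1);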
diff --git a/src/test/regress/sql/schema_based_sharding.sql b/src/test/regress/sql/schema_based_sharding.sql
index bd8065ab938..f0b2276df00 100644
--- a/src/test/regress/sql/schema_based_sharding.sql
+++ b/src/test/regress/sql/schema_based_sharding.sql
@@ -12,15 +12,15 @@ SET client_min_messages TO NOTICE;
 -- Verify that the UDFs used to sync tenant schema metadata to workers
 -- fail on NULL input.
-SELECT citus_internal_add_tenant_schema(NULL, 1);
-SELECT citus_internal_add_tenant_schema(1, NULL);
-SELECT citus_internal_delete_tenant_schema(NULL);
-SELECT citus_internal_unregister_tenant_schema_globally(1, NULL);
-SELECT citus_internal_unregister_tenant_schema_globally(NULL, 'text');
+SELECT citus_internal.add_tenant_schema(NULL, 1);
+SELECT citus_internal.add_tenant_schema(1, NULL);
+SELECT citus_internal.delete_tenant_schema(NULL);
+SELECT citus_internal.unregister_tenant_schema_globally(1, NULL);
+SELECT citus_internal.unregister_tenant_schema_globally(NULL, 'text');

--- Verify that citus_internal_unregister_tenant_schema_globally can only
+-- Verify that citus_internal.unregister_tenant_schema_globally can only
 -- be called on schemas that are dropped already.
-SELECT citus_internal_unregister_tenant_schema_globally('regular_schema'::regnamespace, 'regular_schema');
+SELECT citus_internal.unregister_tenant_schema_globally('regular_schema'::regnamespace, 'regular_schema');

 SELECT 1 FROM citus_remove_node('localhost', :worker_2_port);
@@ -1022,9 +1022,9 @@ SELECT pg_reload_conf();
 ALTER SYSTEM SET citus.enable_schema_based_sharding TO ON;
 SELECT pg_reload_conf();

--- Verify that citus_internal_unregister_tenant_schema_globally is a no-op
+-- Verify that citus_internal.unregister_tenant_schema_globally is a no-op
 -- on workers.
-SELECT citus_internal_unregister_tenant_schema_globally('tenant_3'::regnamespace, 'tenant_3');
+SELECT citus_internal.unregister_tenant_schema_globally('tenant_3'::regnamespace, 'tenant_3');

 \c - - - :master_port
diff --git a/src/test/regress/sql/seclabel.sql b/src/test/regress/sql/seclabel.sql
index e523fc1dacd..d39e0118392 100644
--- a/src/test/regress/sql/seclabel.sql
+++ b/src/test/regress/sql/seclabel.sql
@@ -62,14 +62,20 @@ SET citus.grep_remote_commands = '%SECURITY LABEL%';
 SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified';
 SECURITY LABEL ON ROLE user1 IS NULL;
 SECURITY LABEL ON ROLE user1 IS 'citus_unclassified';
-SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified';
+SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_classified';

 \c - - - :worker_1_port
--- command not allowed from worker node
-SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus ''!unclassified';
+SET citus.log_remote_commands TO on;
+SET citus.grep_remote_commands = '%SECURITY LABEL%';
+-- command from the worker node should be propagated to the coordinator
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;
+SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified';
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;

-\c - - - :master_port
 RESET citus.log_remote_commands;
+SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified';
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type;

+\c - - - :master_port
 SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;
 SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type;
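For readers unfamiliar with the command under test here: SECURITY LABEL is PostgreSQL's generic interface for label providers; the FOR clause names the provider (required only when several providers are loaded) and IS NULL clears a label. The statements exercised in this file follow that shape, e.g.:

    SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified';
    SECURITY LABEL ON ROLE user1 IS NULL;  -- removes the label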
@@ -82,6 +88,19 @@ SELECT 1 FROM citus_add_node('localhost', :worker_2_port);
 SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;
 SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type;

+-- disable the GUC and check that the command is not propagated
+SET citus.enable_alter_role_propagation TO off;
+SECURITY LABEL ON ROLE user1 IS 'citus_unclassified';
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;
+
+\c - - - :worker_2_port
+SET citus.log_remote_commands TO on;
+SET citus.grep_remote_commands = '%SECURITY LABEL%';
+SET citus.enable_alter_role_propagation TO off;
+SECURITY LABEL ON ROLE user1 IS 'citus ''!unclassified';
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type;
+RESET citus.enable_alter_role_propagation;
+
 -- cleanup
 RESET citus.log_remote_commands;
 DROP ROLE user1, "user 2";
diff --git a/src/test/regress/sql/shard_rebalancer.sql b/src/test/regress/sql/shard_rebalancer.sql
index 5d8e89b3631..9037f8f7504 100644
--- a/src/test/regress/sql/shard_rebalancer.sql
+++ b/src/test/regress/sql/shard_rebalancer.sql
@@ -1340,6 +1340,43 @@ DROP TABLE t1, r1, r2;
 -- test suites should clean up their distributed tables.
 SELECT count(*) FROM pg_dist_partition;

+-- verify a system with a new node won't copy distributed table shards without reference tables
+
+SELECT 1 from master_remove_node('localhost', :worker_2_port);
+SELECT public.wait_until_metadata_sync(30000);
+
+CREATE TABLE r1 (a int PRIMARY KEY, b int);
+SELECT create_reference_table('r1');
+
+CREATE TABLE d1 (a int PRIMARY KEY, b int);
+SELECT create_distributed_table('d1', 'a');
+
+ALTER SEQUENCE pg_dist_groupid_seq RESTART WITH 15;
+SELECT 1 from master_add_node('localhost', :worker_2_port);
+
+-- count the number of placements for the reference table to verify it is not available on
+-- all nodes
+SELECT count(*)
+FROM pg_dist_shard
+JOIN pg_dist_shard_placement USING (shardid)
+WHERE logicalrelid = 'r1'::regclass;
+
+-- #7426 We can't move shards to the fresh node before we copy reference tables there.
+-- rebalance_table_shards() will do the copy, but the low-level
+-- citus_move_shard_placement() should raise an error
+SELECT citus_move_shard_placement(pg_dist_shard.shardid, nodename, nodeport, 'localhost', :worker_2_port)
+  FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
+  WHERE logicalrelid = 'd1'::regclass AND nodename = 'localhost' AND nodeport = :worker_1_port LIMIT 1;
+
+SELECT replicate_reference_tables();
+
+-- After replication, the move should succeed.
+SELECT citus_move_shard_placement(pg_dist_shard.shardid, nodename, nodeport, 'localhost', :worker_2_port)
+  FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid)
+  WHERE logicalrelid = 'd1'::regclass AND nodename = 'localhost' AND nodeport = :worker_1_port LIMIT 1;
+
+DROP TABLE d1, r1;
+
 -- verify a system having only reference tables will copy the reference tables when
 -- executing the rebalancer