diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 11fb010b703..38055f367e0 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -68,7 +68,7 @@ USER citus # build postgres versions separately for effective parrallelism and caching of already built versions when changing only certain versions FROM base AS pg14 -RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.10 +RUN MAKEFLAGS="-j $(nproc)" pgenv build 14.11 RUN rm .pgenv/src/*.tar* RUN make -C .pgenv/src/postgresql-*/ clean RUN make -C .pgenv/src/postgresql-*/src/include install @@ -80,7 +80,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/ RUN rm .pgenv-staging/config/default.conf FROM base AS pg15 -RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.5 +RUN MAKEFLAGS="-j $(nproc)" pgenv build 15.6 RUN rm .pgenv/src/*.tar* RUN make -C .pgenv/src/postgresql-*/ clean RUN make -C .pgenv/src/postgresql-*/src/include install @@ -92,7 +92,7 @@ RUN cp -r .pgenv/src .pgenv/pgsql-* .pgenv/config .pgenv-staging/ RUN rm .pgenv-staging/config/default.conf FROM base AS pg16 -RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.1 +RUN MAKEFLAGS="-j $(nproc)" pgenv build 16.2 RUN rm .pgenv/src/*.tar* RUN make -C .pgenv/src/postgresql-*/ clean RUN make -C .pgenv/src/postgresql-*/src/include install @@ -210,7 +210,7 @@ COPY --chown=citus:citus .psqlrc . RUN sudo chown --from=root:root citus:citus -R ~ # sets default pg version -RUN pgenv switch 16.1 +RUN pgenv switch 16.2 # make connecting to the coordinator easy ENV PGPORT=9700 diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 16ff091e741..2541296cd83 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -31,11 +31,11 @@ jobs: pgupgrade_image_name: "citus/pgupgradetester" style_checker_image_name: "citus/stylechecker" style_checker_tools_version: "0.8.18" - image_suffix: "-v19b671f" - pg14_version: '{ "major": "14", "full": "14.10" }' - pg15_version: '{ "major": "15", "full": "15.5" }' - pg16_version: '{ "major": "16", "full": "16.1" }' - upgrade_pg_versions: "14.10-15.5-16.1" + image_suffix: "-v390dab3" + pg14_version: '{ "major": "14", "full": "14.11" }' + pg15_version: '{ "major": "15", "full": "15.6" }' + pg16_version: '{ "major": "16", "full": "16.2" }' + upgrade_pg_versions: "14.11-15.6-16.2" steps: # Since GHA jobs needs at least one step we use a noop step here. 
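A quick sanity check after the point-release bumps above (a minimal sketch: `run_command_on_workers` is an existing Citus UDF; the expected versions are the ones pinned in the Dockerfile and workflow):

```sql
-- Every worker should report the new point release
-- (14.11, 15.6 or 16.2, depending on the node's major version).
SELECT nodename, nodeport, result AS server_version
FROM run_command_on_workers($$ SHOW server_version $$);

-- And the coordinator itself:
SHOW server_version;
```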
- name: Set up parameters diff --git a/CHANGELOG.md b/CHANGELOG.md index 8d979c10420..b28788ef954 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,117 @@ +### citus v11.0.10 (February 15, 2024) ### + +* Removes pg_send_cancellation and all references (#7135) + +### citus v12.1.2 (February 12, 2024) ### + +* Fixes the incorrect column count after ALTER TABLE (#7379) + +### citus v12.0.1 (July 11, 2023) ### + +* Fixes incorrect default value assumption for VACUUM(PROCESS_TOAST) (#7122) + +* Fixes a bug that causes an unexpected error when adding a column + with a NULL constraint (#7093) + +* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152) + +* Fixes a bug with deleting colocation groups (#6929) + +* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236) + +* Fixes shard size bug with too many shards (#7018) + +* Fixes the incorrect column count after ALTER TABLE (#7379) + +* Improves citus_tables view performance (#7050) + +* Makes sure to disallow creating a replicated distributed table + concurrently (#7219) + +* Removes pg_send_cancellation and all references (#7135) + +### citus v11.3.1 (February 12, 2024) ### + +* Disallows MERGE when the query prunes down to zero shards (#6946) + +* Fixes a bug related to non-existent objects in DDL commands (#6984) + +* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152) + +* Fixes a bug with deleting colocation groups (#6929) + +* Fixes incorrect results on fetching scrollable with hold cursors (#7014) + +* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236) + +* Fixes a replicate reference tables task failure when the user is a superuser (#6930) + +* Fixes the incorrect column count after ALTER TABLE (#7379) + +* Improves citus_shard_sizes performance (#7050) + +* Makes sure to disallow creating a replicated distributed table + concurrently (#7219) + +* Removes pg_send_cancellation and all references (#7135) + +### citus v11.2.2 (February 12, 2024) ### + +* Fixes a bug in background shard rebalancer where the replicate + reference tables task fails if the current user is not a superuser (#6930) + +* Fixes a bug related to non-existent objects in DDL commands (#6984) + +* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152) + +* Fixes a bug with deleting colocation groups (#6929) + +* Fixes incorrect results on fetching scrollable with hold cursors (#7014) + +* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236) + +* Fixes the incorrect column count after ALTER TABLE (#7379) + +* Improves failure handling of distributed execution (#7090) + +* Makes sure to disallow creating a replicated distributed table + concurrently (#7219) + +* Removes pg_send_cancellation (#7135) + +### citus v11.1.7 (February 12, 2024) ### + +* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236) + +* Fixes a bug related to non-existent objects in DDL commands (#6984) + +* Fixes a bug that could cause COPY logic to skip data in case of OOM (#7152) + +* Fixes a bug with deleting colocation groups (#6929) + +* Fixes incorrect results on fetching scrollable with hold cursors (#7014) + +* Fixes the incorrect column count after ALTER TABLE (#7379) + +* Improves failure handling of distributed execution (#7090) + +* Makes sure to disallow creating a replicated distributed table + concurrently (#7219) + +* Removes pg_send_cancellation and all references (#7135) + +### citus v11.0.9 (February 12, 2024) ### + +* Fixes a bug that
could cause COPY logic to skip data in case of OOM (#7152) + +* Fixes a bug with deleting colocation groups (#6929) + +* Fixes memory and memory context leaks in Foreign Constraint Graphs (#7236) + +* Fixes the incorrect column count after ALTER TABLE (#7462) + +* Improves failure handling of distributed execution (#7090) + ### citus v12.1.1 (November 9, 2023) ### * Fixes leaking of memory and memory contexts in Citus foreign key cache diff --git a/ci/check_gucs_are_alphabetically_sorted.sh b/ci/check_gucs_are_alphabetically_sorted.sh index 214a5c9cf6c..018fc7d35d4 100755 --- a/ci/check_gucs_are_alphabetically_sorted.sh +++ b/ci/check_gucs_are_alphabetically_sorted.sh @@ -20,6 +20,6 @@ tail -n +$RegisterCitusConfigVariables_begin_linenumber src/backend/distributed/ # extract citus gucs in the form of "citus.X" grep -P "^[\t][\t]\"citus\.[a-zA-Z_0-9]+\"" RegisterCitusConfigVariables_func_def.out > gucs.out -sort -c gucs.out +LC_COLLATE=C sort -c gucs.out rm gucs.out rm RegisterCitusConfigVariables_func_def.out diff --git a/src/backend/distributed/README.md b/src/backend/distributed/README.md index 6e3d8cf1c42..e3a9a7a33dd 100644 --- a/src/backend/distributed/README.md +++ b/src/backend/distributed/README.md @@ -2429,7 +2429,7 @@ Cleanup records always need to be committed before creating the actual object. I PostgreSQL supports change data capture (CDC) via the logical decoding interface. The basic idea behind logical decoding is that you make a replication connection (a special type of postgres connection), start replication, and then the backend process reads through the WAL and decodes the WAL records and emits it over the wire in a format defined by the output plugin. If we were to use regular logical decoding on the nodes of a Citus cluster, we would see the name of the shard in each write, and internal data transfers such as shard moves would result in inserts being emitted. We use several techniques to avoid this. -All writes in PostgreSQL are marked with a replication origin (0 by default) and the decoder can make decisions on whether to emit the change based on the replication origin. We use this to filter out internal data transfers. If `citus.enable_change_data_capture` is enabled, all internal data transfers are marked with the special DoNotReplicateId replication origin by calling the `citus_internal_start_replication_origin_tracking()` UDF before writing the data. This replication origin ID is special in the sense that it does not need to be created (which prevents locking issues, especially when dropping replication origins). It is still up to output plugin to decide what to do with changes marked as DoNotReplicateId. +All writes in PostgreSQL are marked with a replication origin (0 by default) and the decoder can make decisions on whether to emit the change based on the replication origin. We use this to filter out internal data transfers. If `citus.enable_change_data_capture` is enabled, all internal data transfers are marked with the special DoNotReplicateId replication origin by calling the `citus_internal.start_replication_origin_tracking()` UDF before writing the data. This replication origin ID is special in the sense that it does not need to be created (which prevents locking issues, especially when dropping replication origins). It is still up to the output plugin to decide what to do with changes marked as DoNotReplicateId. We have very minimal control over replication commands like `CREATE_REPLICATION_SLOT`, since there are no direct hooks, and decoder names (e.g.
“pgoutput”) are typically hard-coded in the client. The only method we found of overriding logical decoding behaviour is to overload the output plugin name in the dynamic library path. diff --git a/src/backend/distributed/commands/database.c b/src/backend/distributed/commands/database.c index 55cd9e13087..33223f41686 100644 --- a/src/backend/distributed/commands/database.c +++ b/src/backend/distributed/commands/database.c @@ -481,9 +481,7 @@ PreprocessCreateDatabaseStmt(Node *node, const char *queryString, /* * PostprocessCreateDatabaseStmt is executed after the statement is applied to the local * postgres instance. In this stage we prepare the commands that need to be run on - * all workers to create the database. Since the CREATE DATABASE statement gives error - * in a transaction block, we need to use NontransactionalNodeDDLTaskList to send the - * CREATE DATABASE statement to the workers. + * all workers to create the database. * */ List * @@ -508,20 +506,25 @@ PostprocessCreateDatabaseStmt(Node *node, const char *queryString) char *createDatabaseCommand = DeparseTreeNode(node); - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) createDatabaseCommand, - ENABLE_DDL_PROPAGATION); + List *createDatabaseCommands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) createDatabaseCommand, + ENABLE_DDL_PROPAGATION); - return NontransactionalNodeDDLTaskList(REMOTE_NODES, commands); + /* + * Since the CREATE DATABASE statement cannot be executed in a transaction + * block, we need to use NontransactionalNodeDDLTaskList() to send the CREATE + * DATABASE statement to the workers. + */ + List *createDatabaseDDLJobList = + NontransactionalNodeDDLTaskList(REMOTE_NODES, createDatabaseCommands); + return createDatabaseDDLJobList; } /* * PreprocessDropDatabaseStmt is executed before the statement is applied to the local * postgres instance. In this stage we can prepare the commands that need to be run on - * all workers to drop the database. Since the DROP DATABASE statement gives error in - * transaction context, we need to use NontransactionalNodeDDLTaskList to send the - * DROP DATABASE statement to the workers. + * all workers to drop the database. * * We also serialize database commands globally by acquiring a Citus specific advisory * lock based on OCLASS_DATABASE on the first primary worker node. @@ -559,11 +562,18 @@ PreprocessDropDatabaseStmt(Node *node, const char *queryString, char *dropDatabaseCommand = DeparseTreeNode(node); - List *commands = list_make3(DISABLE_DDL_PROPAGATION, - (void *) dropDatabaseCommand, - ENABLE_DDL_PROPAGATION); + List *dropDatabaseCommands = list_make3(DISABLE_DDL_PROPAGATION, + (void *) dropDatabaseCommand, + ENABLE_DDL_PROPAGATION); - return NontransactionalNodeDDLTaskList(REMOTE_NODES, commands); + /* + * Due to the same reason stated in PostprocessCreateDatabaseStmt(), we need to + * use NontransactionalNodeDDLTaskList() to send the DROP DATABASE statement + * to the workers.
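The relocated comments hinge on a firm PostgreSQL rule: CREATE/DROP DATABASE refuse to run inside a transaction block, which is why the deparsed command must travel via NontransactionalNodeDDLTaskList(). A minimal sketch of the behavior (the database name is illustrative, and whether Citus propagates the statement is assumed to depend on the database-propagation GUC in this release):

```sql
BEGIN;
CREATE DATABASE appdb;  -- ERROR:  CREATE DATABASE cannot run inside a transaction block
ROLLBACK;

-- Outside a transaction block the statement succeeds; with propagation enabled,
-- Citus deparses it and replays it on the remote nodes non-transactionally,
-- bracketed by DISABLE/ENABLE DDL propagation.
CREATE DATABASE appdb;
```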
+ */ + List *dropDatabaseDDLJobList = + NontransactionalNodeDDLTaskList(REMOTE_NODES, dropDatabaseCommands); + return dropDatabaseDDLJobList; } diff --git a/src/backend/distributed/commands/multi_copy.c b/src/backend/distributed/commands/multi_copy.c index 0284ea64dbe..23847ac0150 100644 --- a/src/backend/distributed/commands/multi_copy.c +++ b/src/backend/distributed/commands/multi_copy.c @@ -2663,7 +2663,6 @@ CreateLocalColocatedIntermediateFile(CitusCopyDestReceiver *copyDest, CreateIntermediateResultsDirectory(); const int fileFlags = (O_CREAT | O_RDWR | O_TRUNC); - const int fileMode = (S_IRUSR | S_IWUSR); StringInfo filePath = makeStringInfo(); appendStringInfo(filePath, "%s_%ld", copyDest->colocatedIntermediateResultIdPrefix, @@ -2671,7 +2670,7 @@ CreateLocalColocatedIntermediateFile(CitusCopyDestReceiver *copyDest, const char *fileName = QueryResultFileName(filePath->data); shardState->fileDest = - FileCompatFromFileStart(FileOpenForTransmit(fileName, fileFlags, fileMode)); + FileCompatFromFileStart(FileOpenForTransmit(fileName, fileFlags)); CopyOutState localFileCopyOutState = shardState->copyOutState; bool isBinaryCopy = localFileCopyOutState->binary; diff --git a/src/backend/distributed/commands/role.c b/src/backend/distributed/commands/role.c index d0b33ccb9b2..f2b567e6e70 100644 --- a/src/backend/distributed/commands/role.c +++ b/src/backend/distributed/commands/role.c @@ -886,6 +886,14 @@ GenerateGrantRoleStmtsOfRole(Oid roleid) { Form_pg_auth_members membership = (Form_pg_auth_members) GETSTRUCT(tuple); + ObjectAddress *roleAddress = palloc0(sizeof(ObjectAddress)); + ObjectAddressSet(*roleAddress, AuthIdRelationId, membership->grantor); + if (!IsAnyObjectDistributed(list_make1(roleAddress))) + { + /* we only need to propagate the grant if the grantor is distributed */ + continue; + } + GrantRoleStmt *grantRoleStmt = makeNode(GrantRoleStmt); grantRoleStmt->is_grant = true; @@ -901,7 +909,11 @@ GenerateGrantRoleStmtsOfRole(Oid roleid) granteeRole->rolename = GetUserNameFromId(membership->member, true); grantRoleStmt->grantee_roles = list_make1(granteeRole); - grantRoleStmt->grantor = NULL; + RoleSpec *grantorRole = makeNode(RoleSpec); + grantorRole->roletype = ROLESPEC_CSTRING; + grantorRole->location = -1; + grantorRole->rolename = GetUserNameFromId(membership->grantor, false); + grantRoleStmt->grantor = grantorRole; #if PG_VERSION_NUM >= PG_VERSION_16 @@ -1241,12 +1253,6 @@ PreprocessGrantRoleStmt(Node *node, const char *queryString, return NIL; } - /* - * Postgres don't seem to use the grantor. Even dropping the grantor doesn't - * seem to affect the membership. If this changes, we might need to add grantors - * to the dependency resolution too. For now we just don't propagate it. 
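With the shortcut above removed, GenerateGrantRoleStmtsOfRole reconstructs the grantor from pg_auth_members, so the deparsed GRANT a worker receives keeps its GRANTED BY clause. A hedged sketch (all role names invented):

```sql
-- On the coordinator; the recorded grantor is now preserved when Citus
-- regenerates this statement for the workers:
GRANT admin_role TO app_user GRANTED BY security_admin;

-- Verify the recorded grantor (expected to match on all nodes after propagation):
SELECT roleid::regrole, member::regrole, grantor::regrole
FROM pg_auth_members
WHERE member = 'app_user'::regrole;
```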
- */ - stmt->grantor = NULL; stmt->grantee_roles = distributedGranteeRoles; char *sql = DeparseTreeNode((Node *) stmt); stmt->grantee_roles = allGranteeRoles; diff --git a/src/backend/distributed/commands/seclabel.c b/src/backend/distributed/commands/seclabel.c index 3e1847dc9ee..1d274a05627 100644 --- a/src/backend/distributed/commands/seclabel.c +++ b/src/backend/distributed/commands/seclabel.c @@ -29,7 +29,7 @@ List * PostprocessSecLabelStmt(Node *node, const char *queryString) { - if (!ShouldPropagate()) + if (!EnableAlterRolePropagation || !ShouldPropagate()) { return NIL; } @@ -59,21 +59,17 @@ PostprocessSecLabelStmt(Node *node, const char *queryString) return NIL; } - if (!EnableCreateRolePropagation) - { - return NIL; - } - EnsureCoordinator(); + EnsurePropagationToCoordinator(); EnsureAllObjectDependenciesExistOnAllNodes(objectAddresses); - const char *sql = DeparseTreeNode((Node *) secLabelStmt); + const char *secLabelCommands = DeparseTreeNode((Node *) secLabelStmt); List *commandList = list_make3(DISABLE_DDL_PROPAGATION, - (void *) sql, + (void *) secLabelCommands, ENABLE_DDL_PROPAGATION); - return NodeDDLTaskList(NON_COORDINATOR_NODES, commandList); + return NodeDDLTaskList(REMOTE_NODES, commandList); } diff --git a/src/backend/distributed/commands/utility_hook.c b/src/backend/distributed/commands/utility_hook.c index 68af4b7b558..a1a23331076 100644 --- a/src/backend/distributed/commands/utility_hook.c +++ b/src/backend/distributed/commands/utility_hook.c @@ -94,6 +94,40 @@ #define MARK_OBJECT_DISTRIBUTED \ "SELECT citus_internal.mark_object_distributed(%d, %s, %d, %s)" +/* + * NonMainDbDistributedStatementInfo is used to determine whether a statement is + * supported from non-main databases and whether it should be marked as + * distributed explicitly (*). + * + * (*) We always have to mark such objects as "distributed" but while for some + * object types we can delegate this to main database, for some others we have + * to explicitly send a command to all nodes in this code-path to achieve this. + */ +typedef struct NonMainDbDistributedStatementInfo +{ + int statementType; + bool explicitlyMarkAsDistributed; + + /* + * checkSupportedObjectTypes is a callback function that checks whether + * type of the object referred to by given statement is supported. + * + * Can be NULL if not applicable for the statement type. + */ + bool (*checkSupportedObjectTypes)(Node *node); +} NonMainDbDistributedStatementInfo; + +/* + * MarkObjectDistributedParams is used to pass parameters to the + * MarkObjectDistributedFromNonMainDb function. + */ +typedef struct MarkObjectDistributedParams +{ + char *name; + Oid id; + uint16 catalogRelId; +} MarkObjectDistributedParams; + bool EnableDDLPropagation = true; /* ddl propagation is enabled */ int CreateObjectPropagationMode = CREATE_OBJECT_PROPAGATION_IMMEDIATE; @@ -122,8 +156,42 @@ static void PostStandardProcessUtility(Node *parsetree); static void DecrementUtilityHookCountersIfNecessary(Node *parsetree); static bool IsDropSchemaOrDB(Node *parsetree); static bool ShouldCheckUndistributeCitusLocalTables(void); -static void RunPreprocessMainDBCommand(Node *parsetree, const char *queryString); + + +/* + * Functions to support commands used to manage node-wide objects from non-main + * databases. 
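The new SECURITY LABEL gating mirrors the existing role-propagation escape hatch: when `citus.enable_alter_role_propagation` is off, the command stays local and the new NOTICE fires. A sketch under assumptions (SECURITY LABEL requires a loaded label provider; `my_provider`, the role, and the label value are illustrative):

```sql
SET citus.enable_alter_role_propagation TO off;
SECURITY LABEL FOR my_provider ON ROLE app_user IS 'critical';
-- NOTICE:  not propagating SECURITY LABEL commands to other nodes
-- HINT:  Connect to other nodes directly to manually assign necessary labels.
RESET citus.enable_alter_role_propagation;
```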
+ */ +static bool IsCommandToCreateOrDropMainDB(Node *parsetree); +static void RunPreprocessMainDBCommand(Node *parsetree); static void RunPostprocessMainDBCommand(Node *parsetree); +static bool IsStatementSupportedFromNonMainDb(Node *parsetree); +static bool StatementRequiresMarkDistributedFromNonMainDb(Node *parsetree); +static void MarkObjectDistributedFromNonMainDb(Node *parsetree); +static MarkObjectDistributedParams GetMarkObjectDistributedParams(Node *parsetree); + +/* + * checkSupportedObjectTypes callbacks for + * NonMainDbDistributedStatementInfo objects. + */ +static bool NonMainDbCheckSupportedObjectTypeForGrant(Node *node); +static bool NonMainDbCheckSupportedObjectTypeForSecLabel(Node *node); + + +/* + * NonMainDbSupportedStatements is an array of statements that are supported + * from non-main databases. + */ +ObjectType supportedObjectTypesForGrantStmt[] = { OBJECT_DATABASE }; +static const NonMainDbDistributedStatementInfo NonMainDbSupportedStatements[] = { + { T_GrantRoleStmt, false, NULL }, + { T_CreateRoleStmt, true, NULL }, + { T_GrantStmt, false, NonMainDbCheckSupportedObjectTypeForGrant }, + { T_CreatedbStmt, false, NULL }, + { T_DropdbStmt, false, NULL }, + { T_SecLabelStmt, false, NonMainDbCheckSupportedObjectTypeForSecLabel }, +}; + /* * ProcessUtilityParseTree is a convenience method to create a PlannedStmt out of @@ -255,9 +323,24 @@ citus_ProcessUtility(PlannedStmt *pstmt, if (!CitusHasBeenLoaded()) { - if (!IsMainDB) + /* + * We always execute CREATE/DROP DATABASE from the main database. There are no + * transactional visibility issues, since these commands are non-transactional. + * And this way we only have to consider one codepath when creating databases. + * We don't try to send the query to the main database if the CREATE/DROP DATABASE + * command is for the main database itself, this is a very rare case but it's + * exercised by our test suite. + */ + if (!IsMainDB && + !IsCommandToCreateOrDropMainDB(parsetree)) { - RunPreprocessMainDBCommand(parsetree, queryString); + RunPreprocessMainDBCommand(parsetree); + + if (IsA(parsetree, CreatedbStmt) || + IsA(parsetree, DropdbStmt)) + { + return; + } } /* @@ -738,6 +821,13 @@ citus_ProcessUtilityInternal(PlannedStmt *pstmt, errhint("Connect to other nodes directly to manually create all" " necessary users and roles."))); } + else if (IsA(parsetree, SecLabelStmt) && !EnableAlterRolePropagation) + { + ereport(NOTICE, (errmsg("not propagating SECURITY LABEL commands to other" + " nodes"), + errhint("Connect to other nodes directly to manually assign" + " necessary labels."))); + } /* * Make sure that on DROP EXTENSION we terminate the background daemon @@ -1596,27 +1686,64 @@ DropSchemaOrDBInProgress(void) } +/* + * IsCommandToCreateOrDropMainDB checks if this query creates or drops the + * main database, so we can make an exception and not send this query to + * the main database. 
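The NonMainDbSupportedStatements table above is the whitelist for the new code path; in practice it means a short list of node-wide commands now also works from databases other than the designated main database. An illustrative sketch (object names invented; assumes the session is connected to a non-main database on a Citus node):

```sql
-- Each of these is deparsed and forwarded to the main database, which then
-- propagates it to all nodes:
CREATE ROLE analytics_reader LOGIN;          -- T_CreateRoleStmt (also marked distributed)
GRANT analytics_reader TO app_user;          -- T_GrantRoleStmt
GRANT CONNECT ON DATABASE appdb TO app_user; -- T_GrantStmt, OBJECT_DATABASE only
CREATE DATABASE scratchdb;                   -- T_CreatedbStmt, runs non-transactionally
```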
+ */ +static bool +IsCommandToCreateOrDropMainDB(Node *parsetree) +{ + if (IsA(parsetree, CreatedbStmt)) + { + CreatedbStmt *createdbStmt = castNode(CreatedbStmt, parsetree); + return strcmp(createdbStmt->dbname, MainDb) == 0; + } + else if (IsA(parsetree, DropdbStmt)) + { + DropdbStmt *dropdbStmt = castNode(DropdbStmt, parsetree); + return strcmp(dropdbStmt->dbname, MainDb) == 0; + } + + return false; +} + + /* * RunPreprocessMainDBCommand runs the necessary commands for a query, in main * database before query is run on the local node with PrevProcessUtility */ static void -RunPreprocessMainDBCommand(Node *parsetree, const char *queryString) +RunPreprocessMainDBCommand(Node *parsetree) { - if (IsA(parsetree, CreateRoleStmt)) + if (!IsStatementSupportedFromNonMainDb(parsetree)) + { + return; + } + + char *queryString = DeparseTreeNode(parsetree); + + if (IsA(parsetree, CreatedbStmt) || + IsA(parsetree, DropdbStmt)) { - StringInfo mainDBQuery = makeStringInfo(); - appendStringInfo(mainDBQuery, - START_MANAGEMENT_TRANSACTION, - GetCurrentFullTransactionId().value); - RunCitusMainDBQuery(mainDBQuery->data); - mainDBQuery = makeStringInfo(); - appendStringInfo(mainDBQuery, - EXECUTE_COMMAND_ON_REMOTE_NODES_AS_USER, - quote_literal_cstr(queryString), - quote_literal_cstr(CurrentUserName())); - RunCitusMainDBQuery(mainDBQuery->data); + IsMainDBCommandInXact = false; + RunCitusMainDBQuery((char *) queryString); + return; } + + IsMainDBCommandInXact = true; + + StringInfo mainDBQuery = makeStringInfo(); + appendStringInfo(mainDBQuery, + START_MANAGEMENT_TRANSACTION, + GetCurrentFullTransactionId().value); + RunCitusMainDBQuery(mainDBQuery->data); + mainDBQuery = makeStringInfo(); + appendStringInfo(mainDBQuery, + EXECUTE_COMMAND_ON_REMOTE_NODES_AS_USER, + quote_literal_cstr(queryString), + quote_literal_cstr(CurrentUserName())); + RunCitusMainDBQuery(mainDBQuery->data); } @@ -1626,18 +1753,126 @@ RunPreprocessMainDBCommand(Node *parsetree, const char *queryString) */ static void RunPostprocessMainDBCommand(Node *parsetree) +{ + if (IsStatementSupportedFromNonMainDb(parsetree) && + StatementRequiresMarkDistributedFromNonMainDb(parsetree)) + { + MarkObjectDistributedFromNonMainDb(parsetree); + } +} + + +/* + * IsStatementSupportedFromNonMainDb returns true if the statement is supported from a + * non-main database. + */ +static bool +IsStatementSupportedFromNonMainDb(Node *parsetree) +{ + NodeTag type = nodeTag(parsetree); + + for (int i = 0; i < sizeof(NonMainDbSupportedStatements) / + sizeof(NonMainDbSupportedStatements[0]); i++) + { + if (type != NonMainDbSupportedStatements[i].statementType) + { + continue; + } + + return !NonMainDbSupportedStatements[i].checkSupportedObjectTypes || + NonMainDbSupportedStatements[i].checkSupportedObjectTypes(parsetree); + } + + return false; +} + + +/* + * StatementRequiresMarkDistributedFromNonMainDb returns true if the statement should be marked + * as distributed when executed from a non-main database. + */ +static bool +StatementRequiresMarkDistributedFromNonMainDb(Node *parsetree) +{ + NodeTag type = nodeTag(parsetree); + + for (int i = 0; i < sizeof(NonMainDbSupportedStatements) / + sizeof(NonMainDbSupportedStatements[0]); i++) + { + if (type == NonMainDbSupportedStatements[i].statementType) + { + return NonMainDbSupportedStatements[i].explicitlyMarkAsDistributed; + } + } + + return false; +} + + +/* + * MarkObjectDistributedFromNonMainDb marks the given object as distributed on the + * non-main database. 
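For statement types flagged with explicitlyMarkAsDistributed (currently CREATE ROLE), the postprocess step sends a query of the shape below to the main database, per the MARK_OBJECT_DISTRIBUTED format string. A hedged illustration; the role OID and user name are invented, while 1260 is pg_authid's catalog relation OID (AuthIdRelationId):

```sql
SELECT citus_internal.mark_object_distributed(
    1260,                -- catalog relation: pg_authid (AuthIdRelationId)
    'analytics_reader',  -- object name
    98765,               -- the role's OID on this node (illustrative)
    'postgres');         -- user to run as (CurrentUserName())
```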
+ */ +static void +MarkObjectDistributedFromNonMainDb(Node *parsetree) +{ + MarkObjectDistributedParams markObjectDistributedParams = + GetMarkObjectDistributedParams(parsetree); + StringInfo mainDBQuery = makeStringInfo(); + appendStringInfo(mainDBQuery, + MARK_OBJECT_DISTRIBUTED, + markObjectDistributedParams.catalogRelId, + quote_literal_cstr(markObjectDistributedParams.name), + markObjectDistributedParams.id, + quote_literal_cstr(CurrentUserName())); + RunCitusMainDBQuery(mainDBQuery->data); +} + + +/* + * GetMarkObjectDistributedParams returns MarkObjectDistributedParams for the target + * object of given parsetree. + */ +static MarkObjectDistributedParams +GetMarkObjectDistributedParams(Node *parsetree) { if (IsA(parsetree, CreateRoleStmt)) { - StringInfo mainDBQuery = makeStringInfo(); - CreateRoleStmt *createRoleStmt = castNode(CreateRoleStmt, parsetree); - Oid roleOid = get_role_oid(createRoleStmt->role, false); - appendStringInfo(mainDBQuery, - MARK_OBJECT_DISTRIBUTED, - AuthIdRelationId, - quote_literal_cstr(createRoleStmt->role), - roleOid, - quote_literal_cstr(CurrentUserName())); - RunCitusMainDBQuery(mainDBQuery->data); + CreateRoleStmt *stmt = castNode(CreateRoleStmt, parsetree); + MarkObjectDistributedParams info = { + .name = stmt->role, + .catalogRelId = AuthIdRelationId, + .id = get_role_oid(stmt->role, false) + }; + + return info; } + + /* Add else if branches for other statement types */ + + elog(ERROR, "unsupported statement type"); +} + + +/* + * NonMainDbCheckSupportedObjectTypeForGrant implements checkSupportedObjectTypes + * callback for GrantStmt. + */ +static bool +NonMainDbCheckSupportedObjectTypeForGrant(Node *node) +{ + GrantStmt *stmt = castNode(GrantStmt, node); + return stmt->objtype == OBJECT_DATABASE; +} + + +/* + * NonMainDbCheckSupportedObjectTypeForSecLabel implements checkSupportedObjectTypes + * callback for SecLabel. + */ +static bool +NonMainDbCheckSupportedObjectTypeForSecLabel(Node *node) +{ + SecLabelStmt *stmt = castNode(SecLabelStmt, node); + return stmt->objtype == OBJECT_ROLE; } diff --git a/src/backend/distributed/deparser/citus_grantutils.c b/src/backend/distributed/deparser/citus_grantutils.c index c944013f6d2..8354e047944 100644 --- a/src/backend/distributed/deparser/citus_grantutils.c +++ b/src/backend/distributed/deparser/citus_grantutils.c @@ -74,7 +74,7 @@ AppendGrantRestrictAndCascade(StringInfo buf, GrantStmt *stmt) void AppendGrantedByInGrantForRoleSpec(StringInfo buf, RoleSpec *grantor, bool isGrant) { - if (isGrant && grantor) + if (grantor) { appendStringInfo(buf, " GRANTED BY %s", RoleSpecString(grantor, true)); } diff --git a/src/backend/distributed/deparser/deparse_database_stmts.c b/src/backend/distributed/deparser/deparse_database_stmts.c index 30ac3f32c55..66df5361e50 100644 --- a/src/backend/distributed/deparser/deparse_database_stmts.c +++ b/src/backend/distributed/deparser/deparse_database_stmts.c @@ -277,6 +277,11 @@ AppendCreateDatabaseStmt(StringInfo buf, CreatedbStmt *stmt) /* * Make sure that we don't try to deparse something that this * function doesn't expect. + * + * This is also useful to throw an error for unsupported CREATE + * DATABASE options when the command is issued from non-main dbs + * because we use the same function to deparse CREATE DATABASE + * commands there too. 
*/ EnsureSupportedCreateDatabaseCommand(stmt); diff --git a/src/backend/distributed/deparser/deparse_role_stmts.c b/src/backend/distributed/deparser/deparse_role_stmts.c index b868413457f..a4a085026c9 100644 --- a/src/backend/distributed/deparser/deparse_role_stmts.c +++ b/src/backend/distributed/deparser/deparse_role_stmts.c @@ -488,7 +488,6 @@ AppendGrantRoleStmt(StringInfo buf, GrantRoleStmt *stmt) AppendGrantWithAdminOption(buf, stmt); AppendGrantedByInGrantForRoleSpec(buf, stmt->grantor, stmt->is_grant); AppendGrantRestrictAndCascadeForRoleSpec(buf, stmt->behavior, stmt->is_grant); - AppendGrantedByInGrantForRoleSpec(buf, stmt->grantor, stmt->is_grant); appendStringInfo(buf, ";"); } diff --git a/src/backend/distributed/executor/intermediate_results.c b/src/backend/distributed/executor/intermediate_results.c index 0e18d4416f7..daf707b2462 100644 --- a/src/backend/distributed/executor/intermediate_results.c +++ b/src/backend/distributed/executor/intermediate_results.c @@ -295,7 +295,6 @@ PrepareIntermediateResultBroadcast(RemoteFileDestReceiver *resultDest) if (resultDest->writeLocalFile) { const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY); - const int fileMode = (S_IRUSR | S_IWUSR); /* make sure the directory exists */ CreateIntermediateResultsDirectory(); @@ -303,8 +302,7 @@ PrepareIntermediateResultBroadcast(RemoteFileDestReceiver *resultDest) const char *fileName = QueryResultFileName(resultId); resultDest->fileCompat = FileCompatFromFileStart(FileOpenForTransmit(fileName, - fileFlags, - fileMode)); + fileFlags)); } WorkerNode *workerNode = NULL; @@ -606,7 +604,7 @@ CreateIntermediateResultsDirectory(void) { char *resultDirectory = IntermediateResultsDirectory(); - int makeOK = mkdir(resultDirectory, S_IRWXU); + int makeOK = MakePGDirectory(resultDirectory); if (makeOK != 0) { if (errno == EEXIST) @@ -976,7 +974,6 @@ FetchRemoteIntermediateResult(MultiConnection *connection, char *resultId) StringInfo copyCommand = makeStringInfo(); const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY); - const int fileMode = (S_IRUSR | S_IWUSR); PGconn *pgConn = connection->pgConn; int socket = PQsocket(pgConn); @@ -998,7 +995,7 @@ FetchRemoteIntermediateResult(MultiConnection *connection, char *resultId) PQclear(result); - File fileDesc = FileOpenForTransmit(localPath, fileFlags, fileMode); + File fileDesc = FileOpenForTransmit(localPath, fileFlags); FileCompat fileCompat = FileCompatFromFileStart(fileDesc); while (true) diff --git a/src/backend/distributed/executor/transmit.c b/src/backend/distributed/executor/transmit.c index a10ae4fbf0b..224d8e5892d 100644 --- a/src/backend/distributed/executor/transmit.c +++ b/src/backend/distributed/executor/transmit.c @@ -17,6 +17,7 @@ #include "pgstat.h" #include "commands/defrem.h" +#include "common/file_perm.h" #include "libpq/libpq.h" #include "libpq/pqformat.h" #include "storage/fd.h" @@ -48,8 +49,7 @@ RedirectCopyDataToRegularFile(const char *filename) { StringInfo copyData = makeStringInfo(); const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY); - const int fileMode = (S_IRUSR | S_IWUSR); - File fileDesc = FileOpenForTransmit(filename, fileFlags, fileMode); + File fileDesc = FileOpenForTransmit(filename, fileFlags); FileCompat fileCompat = FileCompatFromFileStart(fileDesc); SendCopyInStart(); @@ -92,7 +92,7 @@ SendRegularFile(const char *filename) const int fileMode = 0; /* we currently do not check if the caller has permissions for this file */ - File fileDesc = 
FileOpenForTransmit(filename, fileFlags, fileMode); + File fileDesc = FileOpenForTransmitPerm(filename, fileFlags, fileMode); FileCompat fileCompat = FileCompatFromFileStart(fileDesc); /* @@ -136,12 +136,23 @@ FreeStringInfo(StringInfo stringInfo) /* - * FileOpenForTransmit opens file with the given filename and flags. On success, - * the function returns the internal file handle for the opened file. On failure - * the function errors out. + * Open a file with FileOpenForTransmitPerm() and pass default file mode for + * the fileMode parameter. */ File -FileOpenForTransmit(const char *filename, int fileFlags, int fileMode) +FileOpenForTransmit(const char *filename, int fileFlags) +{ + return FileOpenForTransmitPerm(filename, fileFlags, pg_file_create_mode); +} + + +/* + * FileOpenForTransmitPerm opens file with the given filename and flags. On + * success, the function returns the internal file handle for the opened file. + * On failure the function errors out. + */ +File +FileOpenForTransmitPerm(const char *filename, int fileFlags, int fileMode) { struct stat fileStat; diff --git a/src/backend/distributed/metadata/metadata_sync.c b/src/backend/distributed/metadata/metadata_sync.c index 518f13eb73f..e2305689662 100644 --- a/src/backend/distributed/metadata/metadata_sync.c +++ b/src/backend/distributed/metadata/metadata_sync.c @@ -1419,7 +1419,7 @@ ColocationIdUpdateCommand(Oid relationId, uint32 colocationId) StringInfo command = makeStringInfo(); char *qualifiedRelationName = generate_qualified_relation_name(relationId); appendStringInfo(command, - "SELECT citus_internal_update_relation_colocation(%s::regclass, %d)", + "SELECT citus_internal.update_relation_colocation(%s::regclass, %d)", quote_literal_cstr(qualifiedRelationName), colocationId); return command->data; @@ -4301,7 +4301,7 @@ UpdateNoneDistTableMetadataCommand(Oid relationId, char replicationModel, { StringInfo command = makeStringInfo(); appendStringInfo(command, - "SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(%s, '%c', %u, %s)", + "SELECT citus_internal.update_none_dist_table_metadata(%s, '%c', %u, %s)", RemoteTableIdExpression(relationId), replicationModel, colocationId, autoConverted ? "true" : "false"); diff --git a/src/backend/distributed/metadata/node_metadata.c b/src/backend/distributed/metadata/node_metadata.c index 094986c855e..d93b133eaed 100644 --- a/src/backend/distributed/metadata/node_metadata.c +++ b/src/backend/distributed/metadata/node_metadata.c @@ -507,7 +507,13 @@ citus_disable_node(PG_FUNCTION_ARGS) { text *nodeNameText = PG_GETARG_TEXT_P(0); int32 nodePort = PG_GETARG_INT32(1); - bool synchronousDisableNode = PG_GETARG_BOOL(2); + + bool synchronousDisableNode = 1; + Assert(PG_NARGS() == 2 || PG_NARGS() == 3); + if (PG_NARGS() == 3) + { + synchronousDisableNode = PG_GETARG_BOOL(2); + } char *nodeName = text_to_cstring(nodeNameText); WorkerNode *workerNode = ModifiableWorkerNode(nodeName, nodePort); @@ -1692,7 +1698,7 @@ EnsureParentSessionHasExclusiveLockOnPgDistNode(pid_t parentSessionPid) if (!parentHasExclusiveLock) { ereport(ERROR, (errmsg("lock is not held by the caller. 
Unexpected caller " - "for citus_internal_mark_node_not_synced"))); + "for citus_internal.mark_node_not_synced"))); } } diff --git a/src/backend/distributed/operations/delete_protocol.c b/src/backend/distributed/operations/delete_protocol.c index c36121b00ba..39651715853 100644 --- a/src/backend/distributed/operations/delete_protocol.c +++ b/src/backend/distributed/operations/delete_protocol.c @@ -426,10 +426,9 @@ ExecuteDropShardPlacementCommandRemotely(ShardPlacement *shardPlacement, errdetail("Marking this shard placement for " "deletion"))); - InsertCleanupRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - shardRelationName, - shardPlacement->groupId, - CLEANUP_DEFERRED_ON_SUCCESS); + InsertCleanupOnSuccessRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, + shardRelationName, + shardPlacement->groupId); return; } diff --git a/src/backend/distributed/operations/shard_cleaner.c b/src/backend/distributed/operations/shard_cleaner.c index 79041453022..db1cad6bcae 100644 --- a/src/backend/distributed/operations/shard_cleaner.c +++ b/src/backend/distributed/operations/shard_cleaner.c @@ -452,15 +452,15 @@ CompareCleanupRecordsByObjectType(const void *leftElement, const void *rightElem /* - * InsertCleanupRecordInCurrentTransaction inserts a new pg_dist_cleanup entry + * InsertCleanupOnSuccessRecordInCurrentTransaction inserts a new pg_dist_cleanup entry * as part of the current transaction. This is primarily useful for deferred drop scenarios, - * since these records would roll back in case of operation failure. + * since these records would roll back in case of operation failure. And for the same reason, + * always sets the policy type to CLEANUP_DEFERRED_ON_SUCCESS. */ void -InsertCleanupRecordInCurrentTransaction(CleanupObject objectType, - char *objectName, - int nodeGroupId, - CleanupPolicy policy) +InsertCleanupOnSuccessRecordInCurrentTransaction(CleanupObject objectType, + char *objectName, + int nodeGroupId) { /* We must have a valid OperationId. Any operation requring cleanup * will call RegisterOperationNeedingCleanup. @@ -482,7 +482,8 @@ InsertCleanupRecordInCurrentTransaction(CleanupObject objectType, values[Anum_pg_dist_cleanup_object_type - 1] = Int32GetDatum(objectType); values[Anum_pg_dist_cleanup_object_name - 1] = CStringGetTextDatum(objectName); values[Anum_pg_dist_cleanup_node_group_id - 1] = Int32GetDatum(nodeGroupId); - values[Anum_pg_dist_cleanup_policy_type - 1] = Int32GetDatum(policy); + values[Anum_pg_dist_cleanup_policy_type - 1] = + Int32GetDatum(CLEANUP_DEFERRED_ON_SUCCESS); /* open cleanup relation and insert new tuple */ Oid relationId = DistCleanupRelationId(); @@ -499,23 +500,27 @@ InsertCleanupRecordInCurrentTransaction(CleanupObject objectType, /* - * InsertCleanupRecordInSubtransaction inserts a new pg_dist_cleanup entry in a + * InsertCleanupRecordOutsideTransaction inserts a new pg_dist_cleanup entry in a * separate transaction to ensure the record persists after rollback. We should * delete these records if the operation completes successfully. * - * For failure scenarios, use a subtransaction (direct insert via localhost). + * This is used in scenarios where we need to cleanup resources on operation + * completion (CLEANUP_ALWAYS) or on failure (CLEANUP_ON_FAILURE). 
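The rename splits the cleanup API by policy: records that must survive a rollback go through InsertCleanupRecordOutsideTransaction (CLEANUP_ALWAYS / CLEANUP_ON_FAILURE), while deferred drops ride the current transaction. Either way the records land in pg_dist_cleanup, which can be inspected and drained manually; a sketch assuming a shard move or rebalance just ran (column list per the Anum_pg_dist_cleanup_* fields referenced above):

```sql
-- Pending cleanup work, e.g. old placements awaiting deferred drop:
SELECT record_id, operation_id, object_type, object_name, node_group_id, policy_type
FROM pg_catalog.pg_dist_cleanup;

-- Process the records now instead of waiting for the maintenance daemon:
CALL citus_cleanup_orphaned_resources();
```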
*/ void -InsertCleanupRecordInSubtransaction(CleanupObject objectType, - char *objectName, - int nodeGroupId, - CleanupPolicy policy) +InsertCleanupRecordOutsideTransaction(CleanupObject objectType, + char *objectName, + int nodeGroupId, + CleanupPolicy policy) { /* We must have a valid OperationId. Any operation requring cleanup * will call RegisterOperationNeedingCleanup. */ Assert(CurrentOperationId != INVALID_OPERATION_ID); + /* assert the circumstance noted in function comment */ + Assert(policy == CLEANUP_ALWAYS || policy == CLEANUP_ON_FAILURE); + StringInfo sequenceName = makeStringInfo(); appendStringInfo(sequenceName, "%s.%s", PG_CATALOG, diff --git a/src/backend/distributed/operations/shard_split.c b/src/backend/distributed/operations/shard_split.c index ac7ed6bf365..4baf0fb241b 100644 --- a/src/backend/distributed/operations/shard_split.c +++ b/src/backend/distributed/operations/shard_split.c @@ -733,11 +733,11 @@ CreateSplitShardsForShardGroup(List *shardGroupSplitIntervalListList, workerPlacementNode->workerPort))); } - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - ConstructQualifiedShardName( - shardInterval), - workerPlacementNode->groupId, - CLEANUP_ON_FAILURE); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, + ConstructQualifiedShardName( + shardInterval), + workerPlacementNode->groupId, + CLEANUP_ON_FAILURE); /* Create new split child shard on the specified placement list */ CreateObjectOnPlacement(splitShardCreationCommandList, @@ -1717,11 +1717,11 @@ CreateDummyShardsForShardGroup(HTAB *mapOfPlacementToDummyShardList, /* Log shard in pg_dist_cleanup. Given dummy shards are transient resources, * we want to cleanup irrespective of operation success or failure. */ - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - ConstructQualifiedShardName( - shardInterval), - workerPlacementNode->groupId, - CLEANUP_ALWAYS); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, + ConstructQualifiedShardName( + shardInterval), + workerPlacementNode->groupId, + CLEANUP_ALWAYS); /* Create dummy source shard on the specified placement list */ CreateObjectOnPlacement(splitShardCreationCommandList, @@ -1780,11 +1780,11 @@ CreateDummyShardsForShardGroup(HTAB *mapOfPlacementToDummyShardList, /* Log shard in pg_dist_cleanup. Given dummy shards are transient resources, * we want to cleanup irrespective of operation success or failure. 
*/ - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - ConstructQualifiedShardName( - shardInterval), - sourceWorkerNode->groupId, - CLEANUP_ALWAYS); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, + ConstructQualifiedShardName( + shardInterval), + sourceWorkerNode->groupId, + CLEANUP_ALWAYS); /* Create dummy split child shard on source worker node */ CreateObjectOnPlacement(splitShardCreationCommandList, sourceWorkerNode); diff --git a/src/backend/distributed/operations/shard_transfer.c b/src/backend/distributed/operations/shard_transfer.c index 805ef39d706..737086752d6 100644 --- a/src/backend/distributed/operations/shard_transfer.c +++ b/src/backend/distributed/operations/shard_transfer.c @@ -294,6 +294,17 @@ citus_move_shard_placement(PG_FUNCTION_ARGS) CheckCitusVersion(ERROR); EnsureCoordinator(); + List *referenceTableIdList = NIL; + + if (HasNodesWithMissingReferenceTables(&referenceTableIdList)) + { + ereport(ERROR, (errmsg("there are missing reference tables on some nodes"), + errhint("Copy reference tables first with " + "replicate_reference_tables() or use " + "citus_rebalance_start() that will do it automatically." + ))); + } + int64 shardId = PG_GETARG_INT64(0); char *sourceNodeName = text_to_cstring(PG_GETARG_TEXT_P(1)); int32 sourceNodePort = PG_GETARG_INT32(2); @@ -593,10 +604,10 @@ InsertDeferredDropCleanupRecordsForShards(List *shardIntervalList) * We also log cleanup record in the current transaction. If the current transaction rolls back, * we do not generate a record at all. */ - InsertCleanupRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - qualifiedShardName, - placement->groupId, - CLEANUP_DEFERRED_ON_SUCCESS); + InsertCleanupOnSuccessRecordInCurrentTransaction( + CLEANUP_OBJECT_SHARD_PLACEMENT, + qualifiedShardName, + placement->groupId); } } } @@ -623,10 +634,9 @@ InsertCleanupRecordsForShardPlacementsOnNode(List *shardIntervalList, * We also log cleanup record in the current transaction. If the current transaction rolls back, * we do not generate a record at all. 
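The new guard in citus_move_shard_placement turns a previously confusing failure mode into a fast error whose hint names the fix. A usage sketch (shard ID and hostnames invented; the two helper UDFs are the ones named in the error hint above):

```sql
-- If some node lacks reference table placements, replicate them first:
SELECT replicate_reference_tables();

-- ...then the move proceeds:
SELECT citus_move_shard_placement(102008, 'worker-1', 5432, 'worker-2', 5432);

-- or let the rebalancer replicate reference tables and move shards itself:
SELECT citus_rebalance_start();
```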
*/ - InsertCleanupRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - qualifiedShardName, - groupId, - CLEANUP_DEFERRED_ON_SUCCESS); + InsertCleanupOnSuccessRecordInCurrentTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, + qualifiedShardName, + groupId); } } @@ -1382,10 +1392,11 @@ CopyShardTablesViaLogicalReplication(List *shardIntervalList, char *sourceNodeNa char *tableOwner = TableOwner(shardInterval->relationId); /* drop the shard we created on the target, in case of failure */ - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - ConstructQualifiedShardName(shardInterval), - GroupForNode(targetNodeName, targetNodePort), - CLEANUP_ON_FAILURE); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, + ConstructQualifiedShardName(shardInterval), + GroupForNode(targetNodeName, + targetNodePort), + CLEANUP_ON_FAILURE); SendCommandListToWorkerOutsideTransaction(targetNodeName, targetNodePort, tableOwner, @@ -1455,10 +1466,11 @@ CopyShardTablesViaBlockWrites(List *shardIntervalList, char *sourceNodeName, char *tableOwner = TableOwner(shardInterval->relationId); /* drop the shard we created on the target, in case of failure */ - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, - ConstructQualifiedShardName(shardInterval), - GroupForNode(targetNodeName, targetNodePort), - CLEANUP_ON_FAILURE); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT, + ConstructQualifiedShardName(shardInterval), + GroupForNode(targetNodeName, + targetNodePort), + CLEANUP_ON_FAILURE); SendCommandListToWorkerOutsideTransaction(targetNodeName, targetNodePort, tableOwner, ddlCommandList); @@ -2035,7 +2047,7 @@ UpdateColocatedShardPlacementMetadataOnWorkers(int64 shardId, StringInfo updateCommand = makeStringInfo(); appendStringInfo(updateCommand, - "SELECT citus_internal_update_placement_metadata(%ld, %d, %d)", + "SELECT citus_internal.update_placement_metadata(%ld, %d, %d)", colocatedShard->shardId, sourceGroupId, targetGroupId); SendCommandToWorkersWithMetadata(updateCommand->data); diff --git a/src/backend/distributed/replication/multi_logical_replication.c b/src/backend/distributed/replication/multi_logical_replication.c index 056bc9a455f..08e6c557308 100644 --- a/src/backend/distributed/replication/multi_logical_replication.c +++ b/src/backend/distributed/replication/multi_logical_replication.c @@ -1335,10 +1335,10 @@ CreatePublications(MultiConnection *connection, WorkerNode *worker = FindWorkerNode(connection->hostname, connection->port); - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_PUBLICATION, - entry->name, - worker->groupId, - CLEANUP_ALWAYS); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_PUBLICATION, + entry->name, + worker->groupId, + CLEANUP_ALWAYS); ExecuteCriticalRemoteCommand(connection, DISABLE_DDL_PROPAGATION); ExecuteCriticalRemoteCommand(connection, createPublicationCommand->data); @@ -1435,10 +1435,10 @@ CreateReplicationSlots(MultiConnection *sourceConnection, WorkerNode *worker = FindWorkerNode(sourceConnection->hostname, sourceConnection->port); - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_REPLICATION_SLOT, - replicationSlot->name, - worker->groupId, - CLEANUP_ALWAYS); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_REPLICATION_SLOT, + replicationSlot->name, + worker->groupId, + CLEANUP_ALWAYS); if (!firstReplicationSlot) { @@ -1506,10 +1506,10 @@ CreateSubscriptions(MultiConnection *sourceConnection, quote_identifier(GetUserNameFromId(ownerId, false)) ))); - 
InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_USER, - target->subscriptionOwnerName, - worker->groupId, - CLEANUP_ALWAYS); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_USER, + target->subscriptionOwnerName, + worker->groupId, + CLEANUP_ALWAYS); StringInfo conninfo = makeStringInfo(); appendStringInfo(conninfo, "host='%s' port=%d user='%s' dbname='%s' " @@ -1567,10 +1567,10 @@ CreateSubscriptions(MultiConnection *sourceConnection, pfree(createSubscriptionCommand->data); pfree(createSubscriptionCommand); - InsertCleanupRecordInSubtransaction(CLEANUP_OBJECT_SUBSCRIPTION, - target->subscriptionName, - worker->groupId, - CLEANUP_ALWAYS); + InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SUBSCRIPTION, + target->subscriptionName, + worker->groupId, + CLEANUP_ALWAYS); ExecuteCriticalRemoteCommand(target->superuserConnection, psprintf( "ALTER SUBSCRIPTION %s OWNER TO %s", diff --git a/src/backend/distributed/shared_library_init.c b/src/backend/distributed/shared_library_init.c index ad5a14a2555..45e212e8bd6 100644 --- a/src/backend/distributed/shared_library_init.c +++ b/src/backend/distributed/shared_library_init.c @@ -895,22 +895,13 @@ DecrementExternalClientBackendCounterAtExit(int code, Datum arg) static void CreateRequiredDirectories(void) { - const char *subdirs[] = { - "pg_foreign_file", - "pg_foreign_file/cached", - ("base/" PG_JOB_CACHE_DIR) - }; + const char *subdir = ("base/" PG_JOB_CACHE_DIR); - for (int dirNo = 0; dirNo < lengthof(subdirs); dirNo++) + if (MakePGDirectory(subdir) != 0 && errno != EEXIST) { - int ret = mkdir(subdirs[dirNo], S_IRWXU); - - if (ret != 0 && errno != EEXIST) - { - ereport(ERROR, (errcode_for_file_access(), - errmsg("could not create directory \"%s\": %m", - subdirs[dirNo]))); - } + ereport(ERROR, (errcode_for_file_access(), + errmsg("could not create directory \"%s\": %m", + subdir))); } } diff --git a/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql b/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql index 0042fdaa1aa..68823b3be3a 100644 --- a/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql +++ b/src/backend/distributed/sql/citus--12.1-1--12.2-1.sql @@ -38,3 +38,17 @@ REVOKE ALL ON FUNCTION citus_internal.start_management_transaction FROM PUBLIC; #include "udfs/citus_internal_delete_placement_metadata/12.2-1.sql" #include "udfs/citus_internal_delete_shard_metadata/12.2-1.sql" #include "udfs/citus_internal_delete_tenant_schema/12.2-1.sql" +#include "udfs/citus_internal_local_blocked_processes/12.2-1.sql" +#include "udfs/citus_internal_global_blocked_processes/12.2-1.sql" +#include "udfs/citus_blocking_pids/12.2-1.sql" +#include "udfs/citus_isolation_test_session_is_blocked/12.2-1.sql" +DROP VIEW IF EXISTS pg_catalog.citus_lock_waits; +#include "udfs/citus_lock_waits/12.2-1.sql" + +#include "udfs/citus_internal_mark_node_not_synced/12.2-1.sql" +#include "udfs/citus_internal_unregister_tenant_schema_globally/12.2-1.sql" +#include "udfs/citus_drop_trigger/12.2-1.sql" +#include "udfs/citus_internal_update_none_dist_table_metadata/12.2-1.sql" +#include "udfs/citus_internal_update_placement_metadata/12.2-1.sql" +#include "udfs/citus_internal_update_relation_colocation/12.2-1.sql" +#include "udfs/repl_origin_helper/12.2-1.sql" diff --git a/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql b/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql index 337e93b9844..5b2828cfe2c 100644 --- a/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql +++ 
b/src/backend/distributed/sql/downgrades/citus--12.2-1--12.1-1.sql @@ -35,3 +35,19 @@ DROP FUNCTION citus_internal.delete_partition_metadata(regclass); DROP FUNCTION citus_internal.delete_placement_metadata(bigint); DROP FUNCTION citus_internal.delete_shard_metadata(bigint); DROP FUNCTION citus_internal.delete_tenant_schema(oid); +DROP FUNCTION citus_internal.local_blocked_processes(); +#include "../udfs/citus_blocking_pids/11.0-1.sql" +#include "../udfs/citus_isolation_test_session_is_blocked/11.1-1.sql" +DROP VIEW IF EXISTS pg_catalog.citus_lock_waits; +#include "../udfs/citus_lock_waits/11.0-1.sql" +DROP FUNCTION citus_internal.global_blocked_processes(); + +DROP FUNCTION citus_internal.mark_node_not_synced(int, int); +DROP FUNCTION citus_internal.unregister_tenant_schema_globally(oid, text); +#include "../udfs/citus_drop_trigger/12.0-1.sql" +DROP FUNCTION citus_internal.update_none_dist_table_metadata(oid, "char", bigint, boolean); +DROP FUNCTION citus_internal.update_placement_metadata(bigint, integer, integer); +DROP FUNCTION citus_internal.update_relation_colocation(oid, int); +DROP FUNCTION citus_internal.start_replication_origin_tracking(); +DROP FUNCTION citus_internal.stop_replication_origin_tracking(); +DROP FUNCTION citus_internal.is_replication_origin_tracking_active(); diff --git a/src/backend/distributed/sql/udfs/citus_blocking_pids/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_blocking_pids/12.2-1.sql new file mode 100644 index 00000000000..4e747ff4fc9 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_blocking_pids/12.2-1.sql @@ -0,0 +1,34 @@ +DROP FUNCTION pg_catalog.citus_blocking_pids; +CREATE FUNCTION pg_catalog.citus_blocking_pids(pBlockedPid integer) +RETURNS int4[] AS $$ + DECLARE + mLocalBlockingPids int4[]; + mRemoteBlockingPids int4[]; + mLocalGlobalPid int8; + BEGIN + SELECT pg_catalog.old_pg_blocking_pids(pBlockedPid) INTO mLocalBlockingPids; + + IF (array_length(mLocalBlockingPids, 1) > 0) THEN + RETURN mLocalBlockingPids; + END IF; + + -- pg says we're not blocked locally; check whether we're blocked globally. 
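-- (Annotation kept in SQL comment form so the function body above stays valid:
-- on the coordinator this UDF is typically consumed as, for example,
--   SELECT pid, pg_catalog.citus_blocking_pids(pid)
--   FROM pg_stat_activity
--   WHERE wait_event_type = 'Lock';
-- the query is an illustrative usage sketch, not part of the upgrade script.)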
+ SELECT global_pid INTO mLocalGlobalPid + FROM get_all_active_transactions() WHERE process_id = pBlockedPid; + + SELECT array_agg(global_pid) INTO mRemoteBlockingPids FROM ( + WITH activeTransactions AS ( + SELECT global_pid FROM get_all_active_transactions() + ), blockingTransactions AS ( + SELECT blocking_global_pid FROM citus_internal.global_blocked_processes() + WHERE waiting_global_pid = mLocalGlobalPid + ) + SELECT activeTransactions.global_pid FROM activeTransactions, blockingTransactions + WHERE activeTransactions.global_pid = blockingTransactions.blocking_global_pid + ) AS sub; + + RETURN mRemoteBlockingPids; + END; +$$ LANGUAGE plpgsql; + +REVOKE ALL ON FUNCTION citus_blocking_pids(integer) FROM PUBLIC; diff --git a/src/backend/distributed/sql/udfs/citus_blocking_pids/latest.sql b/src/backend/distributed/sql/udfs/citus_blocking_pids/latest.sql index c7e607c1c34..4e747ff4fc9 100644 --- a/src/backend/distributed/sql/udfs/citus_blocking_pids/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_blocking_pids/latest.sql @@ -20,7 +20,7 @@ RETURNS int4[] AS $$ WITH activeTransactions AS ( SELECT global_pid FROM get_all_active_transactions() ), blockingTransactions AS ( - SELECT blocking_global_pid FROM citus_internal_global_blocked_processes() + SELECT blocking_global_pid FROM citus_internal.global_blocked_processes() WHERE waiting_global_pid = mLocalGlobalPid ) SELECT activeTransactions.global_pid FROM activeTransactions, blockingTransactions diff --git a/src/backend/distributed/sql/udfs/citus_drop_trigger/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_drop_trigger/12.2-1.sql new file mode 100644 index 00000000000..6e4c52209a6 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_drop_trigger/12.2-1.sql @@ -0,0 +1,68 @@ +CREATE OR REPLACE FUNCTION pg_catalog.citus_drop_trigger() + RETURNS event_trigger + LANGUAGE plpgsql + SET search_path = pg_catalog + AS $cdbdt$ +DECLARE + constraint_event_count INTEGER; + v_obj record; + dropped_table_is_a_partition boolean := false; +BEGIN + FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects() + WHERE object_type IN ('table', 'foreign table') + LOOP + -- first drop the table and metadata on the workers + -- then drop all the shards on the workers + -- finally remove the pg_dist_partition entry on the coordinator + PERFORM master_remove_distributed_table_metadata_from_workers(v_obj.objid, v_obj.schema_name, v_obj.object_name); + + -- If both original and normal values are false, the dropped table was a partition + -- that was dropped as a result of its parent being dropped + -- NOTE: the other way around is not true: + -- the table being a partition doesn't imply both original and normal values are false + SELECT (v_obj.original = false AND v_obj.normal = false) INTO dropped_table_is_a_partition; + + -- The partition's shards will be dropped when dropping the parent's shards, so we can skip: + -- i.e. 
we call citus_drop_all_shards with drop_shards_metadata_only parameter set to true + IF dropped_table_is_a_partition + THEN + PERFORM citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := true); + ELSE + PERFORM citus_drop_all_shards(v_obj.objid, v_obj.schema_name, v_obj.object_name, drop_shards_metadata_only := false); + END IF; + + PERFORM master_remove_partition_metadata(v_obj.objid, v_obj.schema_name, v_obj.object_name); + END LOOP; + + FOR v_obj IN SELECT * FROM pg_event_trigger_dropped_objects() + LOOP + -- Remove entries from pg_catalog.pg_dist_schema for all dropped tenant schemas. + -- Also delete the corresponding colocation group from pg_catalog.pg_dist_colocation. + -- + -- Although normally we automatically delete the colocation groups when they become empty, + -- we don't do so for the colocation groups that are created for tenant schemas. For this + -- reason, here we need to delete the colocation group when the tenant schema is dropped. + IF v_obj.object_type = 'schema' AND EXISTS (SELECT 1 FROM pg_catalog.pg_dist_schema WHERE schemaid = v_obj.objid) + THEN + PERFORM citus_internal.unregister_tenant_schema_globally(v_obj.objid, v_obj.object_name); + END IF; + + -- remove entries from citus.pg_dist_object for all dropped root (objsubid = 0) objects + PERFORM master_unmark_object_distributed(v_obj.classid, v_obj.objid, v_obj.objsubid); + END LOOP; + + SELECT COUNT(*) INTO constraint_event_count + FROM pg_event_trigger_dropped_objects() + WHERE object_type IN ('table constraint'); + + IF constraint_event_count > 0 + THEN + -- Tell utility hook that a table constraint is dropped so we might + -- need to undistribute some of the citus local tables that are not + -- connected to any reference tables. + PERFORM notify_constraint_dropped(); + END IF; +END; +$cdbdt$; +COMMENT ON FUNCTION pg_catalog.citus_drop_trigger() + IS 'perform checks and actions at the end of DROP actions'; diff --git a/src/backend/distributed/sql/udfs/citus_drop_trigger/latest.sql b/src/backend/distributed/sql/udfs/citus_drop_trigger/latest.sql index 312099aeb54..6e4c52209a6 100644 --- a/src/backend/distributed/sql/udfs/citus_drop_trigger/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_drop_trigger/latest.sql @@ -44,7 +44,7 @@ BEGIN -- reason, here we need to delete the colocation group when the tenant schema is dropped. 
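-- (Annotation in SQL comment form: with schema-based sharding, a flow like
--   SELECT citus_schema_distribute('tenant_1');
--   DROP SCHEMA tenant_1 CASCADE;
-- ends up in this branch, and the trigger now calls
-- citus_internal.unregister_tenant_schema_globally() to drop the schema's
-- colocation group on all nodes; the schema name is illustrative.)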
IF v_obj.object_type = 'schema' AND EXISTS (SELECT 1 FROM pg_catalog.pg_dist_schema WHERE schemaid = v_obj.objid) THEN - PERFORM pg_catalog.citus_internal_unregister_tenant_schema_globally(v_obj.objid, v_obj.object_name); + PERFORM citus_internal.unregister_tenant_schema_globally(v_obj.objid, v_obj.object_name); END IF; -- remove entries from citus.pg_dist_object for all dropped root (objsubid = 0) objects diff --git a/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/12.2-1.sql new file mode 100644 index 00000000000..da8e98c20cf --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/12.2-1.sql @@ -0,0 +1,35 @@ +CREATE OR REPLACE FUNCTION citus_internal.global_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_global_blocked_processes$$; +COMMENT ON FUNCTION citus_internal.global_blocked_processes() +IS 'returns a global list of blocked backends originating from this node'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_global_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_global_blocked_processes$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_global_blocked_processes() +IS 'returns a global list of blocked backends originating from this node'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/latest.sql index 510cdf93d4d..da8e98c20cf 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_global_blocked_processes/latest.sql @@ -1,3 +1,21 @@ +CREATE OR REPLACE FUNCTION citus_internal.global_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_global_blocked_processes$$; +COMMENT ON FUNCTION citus_internal.global_blocked_processes() +IS 'returns a global list of blocked backends originating from this node'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_global_blocked_processes( OUT waiting_global_pid int8, OUT waiting_pid int4, diff --git a/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/12.2-1.sql 
b/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/12.2-1.sql new file mode 100644 index 00000000000..b27f16d5316 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/12.2-1.sql @@ -0,0 +1,35 @@ +CREATE OR REPLACE FUNCTION citus_internal.local_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_local_blocked_processes$$; +COMMENT ON FUNCTION citus_internal.local_blocked_processes() +IS 'returns all local lock wait chains that start from any citus backend'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_local_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_local_blocked_processes$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_local_blocked_processes() +IS 'returns all local lock wait chains that start from any citus backend'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/latest.sql index 3157a9aad83..b27f16d5316 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_local_blocked_processes/latest.sql @@ -1,3 +1,21 @@ +CREATE OR REPLACE FUNCTION citus_internal.local_blocked_processes( + OUT waiting_global_pid int8, + OUT waiting_pid int4, + OUT waiting_node_id int4, + OUT waiting_transaction_num int8, + OUT waiting_transaction_stamp timestamptz, + OUT blocking_global_pid int8, + OUT blocking_pid int4, + OUT blocking_node_id int4, + OUT blocking_transaction_num int8, + OUT blocking_transaction_stamp timestamptz, + OUT blocking_transaction_waiting bool) +RETURNS SETOF RECORD +LANGUAGE C STRICT +AS $$MODULE_PATHNAME$$, $$citus_internal_local_blocked_processes$$; +COMMENT ON FUNCTION citus_internal.local_blocked_processes() +IS 'returns all local lock wait chains that start from any citus backend'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_local_blocked_processes( OUT waiting_global_pid int8, OUT waiting_pid int4, diff --git a/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/12.2-1.sql new file mode 100644 index 00000000000..8635b969946 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/12.2-1.sql @@ -0,0 +1,13 @@ +CREATE OR REPLACE FUNCTION citus_internal.mark_node_not_synced(parent_pid int, nodeid int) + RETURNS VOID + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_mark_node_not_synced$$; +COMMENT ON FUNCTION citus_internal.mark_node_not_synced(int, int) + IS 'marks the given node as not synced by unsetting the metadatasynced column at the start of the nontransactional sync.'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_mark_node_not_synced(parent_pid int, nodeid int) + RETURNS VOID + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_mark_node_not_synced$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_mark_node_not_synced(int, int) + IS 'marks the given node as not synced by unsetting the metadatasynced column at the start of the nontransactional sync.'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/latest.sql index 0d90c8f1afe..8635b969946 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_mark_node_not_synced/latest.sql @@ -1,3 +1,10 @@ +CREATE OR REPLACE FUNCTION citus_internal.mark_node_not_synced(parent_pid int, nodeid int) + RETURNS VOID + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_mark_node_not_synced$$; +COMMENT ON FUNCTION citus_internal.mark_node_not_synced(int, int) + IS 'marks the given node as not synced by unsetting the metadatasynced column at the start of the nontransactional sync.'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_mark_node_not_synced(parent_pid int, nodeid int) RETURNS VOID LANGUAGE C STRICT diff --git a/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/12.2-1.sql new file mode 100644 index 00000000000..b07eb425d9e --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/12.2-1.sql @@ -0,0 +1,15 @@ +CREATE OR REPLACE FUNCTION citus_internal.unregister_tenant_schema_globally(schema_id Oid, schema_name text) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME', $$citus_internal_unregister_tenant_schema_globally$$; +COMMENT ON FUNCTION citus_internal.unregister_tenant_schema_globally(schema_id Oid, schema_name text) IS + 'Delete a tenant schema and the corresponding colocation group from metadata tables.'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_unregister_tenant_schema_globally(schema_id Oid, schema_name text) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME'; +COMMENT ON FUNCTION pg_catalog.citus_internal_unregister_tenant_schema_globally(schema_id Oid, schema_name text) IS + 'Delete a tenant schema and the corresponding colocation group from metadata tables.'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/latest.sql index 1863f1ddf6f..b07eb425d9e 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_unregister_tenant_schema_globally/latest.sql @@ -1,3 +1,11 @@ +CREATE OR REPLACE FUNCTION citus_internal.unregister_tenant_schema_globally(schema_id Oid, schema_name text) + RETURNS void + LANGUAGE C + VOLATILE + AS 'MODULE_PATHNAME', $$citus_internal_unregister_tenant_schema_globally$$; +COMMENT ON FUNCTION citus_internal.unregister_tenant_schema_globally(schema_id Oid, schema_name text) IS + 'Delete a tenant schema and the corresponding colocation group from metadata tables.'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_unregister_tenant_schema_globally(schema_id Oid, schema_name
text) RETURNS void LANGUAGE C diff --git a/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/12.2-1.sql new file mode 100644 index 00000000000..cab96054412 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/12.2-1.sql @@ -0,0 +1,23 @@ +CREATE OR REPLACE FUNCTION citus_internal.update_none_dist_table_metadata( + relation_id oid, + replication_model "char", + colocation_id bigint, + auto_converted boolean) +RETURNS void +LANGUAGE C +VOLATILE +AS 'MODULE_PATHNAME', $$citus_internal_update_none_dist_table_metadata$$; +COMMENT ON FUNCTION citus_internal.update_none_dist_table_metadata(oid, "char", bigint, boolean) + IS 'Update pg_dist_partition metadata table for the given non-distributed table, to convert it to another type of non-distributed table.'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_none_dist_table_metadata( + relation_id oid, + replication_model "char", + colocation_id bigint, + auto_converted boolean) +RETURNS void +LANGUAGE C +VOLATILE +AS 'MODULE_PATHNAME'; +COMMENT ON FUNCTION pg_catalog.citus_internal_update_none_dist_table_metadata(oid, "char", bigint, boolean) + IS 'Update pg_dist_partition metadata table for the given non-distributed table, to convert it to another type of non-distributed table.'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/latest.sql index bcd05d8d014..cab96054412 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_update_none_dist_table_metadata/latest.sql @@ -1,3 +1,15 @@ +CREATE OR REPLACE FUNCTION citus_internal.update_none_dist_table_metadata( + relation_id oid, + replication_model "char", + colocation_id bigint, + auto_converted boolean) +RETURNS void +LANGUAGE C +VOLATILE +AS 'MODULE_PATHNAME', $$citus_internal_update_none_dist_table_metadata$$; +COMMENT ON FUNCTION citus_internal.update_none_dist_table_metadata(oid, "char", bigint, boolean) + IS 'Update pg_dist_partition metadata table for the given non-distributed table, to convert it to another type of non-distributed table.'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_none_dist_table_metadata( relation_id oid, replication_model "char", diff --git a/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/12.2-1.sql new file mode 100644 index 00000000000..b7c47364744 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/12.2-1.sql @@ -0,0 +1,19 @@ +CREATE OR REPLACE FUNCTION citus_internal.update_placement_metadata( + shard_id bigint, source_group_id integer, + target_group_id integer) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_update_placement_metadata$$; + +COMMENT ON FUNCTION citus_internal.update_placement_metadata(bigint, integer, integer) IS + 'Updates pg_dist_placement with user checks'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_placement_metadata( + shard_id bigint, source_group_id integer, + target_group_id integer) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME'; + +COMMENT ON FUNCTION
pg_catalog.citus_internal_update_placement_metadata(bigint, integer, integer) IS + 'Updates pg_dist_placement with user checks'; diff --git a/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/latest.sql index 7cb71774011..b7c47364744 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_update_placement_metadata/latest.sql @@ -1,3 +1,13 @@ +CREATE OR REPLACE FUNCTION citus_internal.update_placement_metadata( + shard_id bigint, source_group_id integer, + target_group_id integer) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_update_placement_metadata$$; + +COMMENT ON FUNCTION citus_internal.update_placement_metadata(bigint, integer, integer) IS + 'Updates pg_dist_placement with user checks'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_placement_metadata( shard_id bigint, source_group_id integer, target_group_id integer) diff --git a/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/12.2-1.sql new file mode 100644 index 00000000000..2266895296f --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/12.2-1.sql @@ -0,0 +1,14 @@ +CREATE OR REPLACE FUNCTION citus_internal.update_relation_colocation(relation_id Oid, target_colocation_id int) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_update_relation_colocation$$; +COMMENT ON FUNCTION citus_internal.update_relation_colocation(oid, int) IS + 'Updates the colocationId field of pg_dist_partition for the relation_id'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_relation_colocation(relation_id Oid, target_colocation_id int) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME'; +COMMENT ON FUNCTION pg_catalog.citus_internal_update_relation_colocation(oid, int) IS + 'Updates the colocationId field of pg_dist_partition for the relation_id'; + diff --git a/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/latest.sql b/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/latest.sql index a7f2ec1c604..2266895296f 100644 --- a/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_internal_update_relation_colocation/latest.sql @@ -1,3 +1,10 @@ +CREATE OR REPLACE FUNCTION citus_internal.update_relation_colocation(relation_id Oid, target_colocation_id int) + RETURNS void + LANGUAGE C STRICT + AS 'MODULE_PATHNAME', $$citus_internal_update_relation_colocation$$; +COMMENT ON FUNCTION citus_internal.update_relation_colocation(oid, int) IS + 'Updates the colocationId field of pg_dist_partition for the relation_id'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_update_relation_colocation(relation_id Oid, target_colocation_id int) RETURNS void LANGUAGE C STRICT diff --git a/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/12.2-1.sql new file mode 100644 index 00000000000..6f494fa74a5 --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/12.2-1.sql @@ -0,0 +1,45 @@ +CREATE OR REPLACE FUNCTION
pg_catalog.citus_isolation_test_session_is_blocked(pBlockedPid integer, pInterestingPids integer[]) +RETURNS boolean AS $$ + DECLARE + mBlockedGlobalPid int8; + workerProcessId integer := current_setting('citus.isolation_test_session_remote_process_id'); + coordinatorProcessId integer := current_setting('citus.isolation_test_session_process_id'); + BEGIN + IF pg_catalog.old_pg_isolation_test_session_is_blocked(pBlockedPid, pInterestingPids) THEN + RETURN true; + END IF; + + -- pg says we're not blocked locally; check whether we're blocked globally. + -- Note that the worker process may be blocked or waiting for a lock, so we need + -- to get the transaction number in both cases. The following IF provides the + -- transaction number when the worker process is waiting for another session. + IF EXISTS (SELECT 1 FROM get_global_active_transactions() + WHERE process_id = workerProcessId AND pBlockedPid = coordinatorProcessId) THEN + SELECT global_pid INTO mBlockedGlobalPid FROM get_global_active_transactions() + WHERE process_id = workerProcessId AND pBlockedPid = coordinatorProcessId; + ELSE + -- Check whether transactions initiated from the coordinator get locked + SELECT global_pid INTO mBlockedGlobalPid + FROM get_all_active_transactions() WHERE process_id = pBlockedPid; + END IF; + + -- We convert the blocking_global_pid to a regular pid and only look at + -- blocks caused by the interesting pids, or the workerProcessId. If we + -- don't do that we might find unrelated blocks caused by some random + -- other processes that are not involved in this isolation test. Because we + -- run our isolation tests on a single physical machine, the PID part of + -- the GPID is known to be unique within the whole cluster. + RETURN EXISTS ( + SELECT 1 FROM citus_internal.global_blocked_processes() + WHERE waiting_global_pid = mBlockedGlobalPid + AND ( + citus_pid_for_gpid(blocking_global_pid) in ( + select * from unnest(pInterestingPids) + ) + OR citus_pid_for_gpid(blocking_global_pid) = workerProcessId + ) + ); + END; +$$ LANGUAGE plpgsql; + +REVOKE ALL ON FUNCTION citus_isolation_test_session_is_blocked(integer,integer[]) FROM PUBLIC; diff --git a/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/latest.sql b/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/latest.sql index ff098391002..6f494fa74a5 100644 --- a/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_isolation_test_session_is_blocked/latest.sql @@ -30,7 +30,7 @@ RETURNS boolean AS $$ -- run our isolation tests on a single physical machine, the PID part of -- the GPID is known to be unique within the whole cluster.
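-- An illustrative sketch of the GPID round trip relied on below, assuming the
-- usual encoding of a global PID as node id * 10^10 + pid:
--
--   SELECT citus_calculate_gpid(2, 12345);   -- => 20000012345
--   SELECT citus_pid_for_gpid(20000012345);  -- => 12345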
RETURN EXISTS ( - SELECT 1 FROM citus_internal_global_blocked_processes() + SELECT 1 FROM citus_internal.global_blocked_processes() WHERE waiting_global_pid = mBlockedGlobalPid AND ( citus_pid_for_gpid(blocking_global_pid) in ( diff --git a/src/backend/distributed/sql/udfs/citus_lock_waits/12.2-1.sql b/src/backend/distributed/sql/udfs/citus_lock_waits/12.2-1.sql new file mode 100644 index 00000000000..880306b992f --- /dev/null +++ b/src/backend/distributed/sql/udfs/citus_lock_waits/12.2-1.sql @@ -0,0 +1,47 @@ +SET search_path = 'pg_catalog'; + +CREATE VIEW citus.citus_lock_waits AS +WITH +unique_global_wait_edges_with_calculated_gpids AS ( +SELECT + -- if global_pid is NULL, it is most likely that a backend is blocked on a DDL + -- also for legacy reasons citus_internal.global_blocked_processes() returns groupId, we replace that with nodeIds + case WHEN waiting_global_pid !=0 THEN waiting_global_pid ELSE citus_calculate_gpid(get_nodeid_for_groupid(waiting_node_id), waiting_pid) END waiting_global_pid, + case WHEN blocking_global_pid !=0 THEN blocking_global_pid ELSE citus_calculate_gpid(get_nodeid_for_groupid(blocking_node_id), blocking_pid) END blocking_global_pid, + + -- citus_internal.global_blocked_processes returns groupId, we replace it here with actual + -- nodeId to be consistent with the other views + get_nodeid_for_groupid(blocking_node_id) as blocking_node_id, + get_nodeid_for_groupid(waiting_node_id) as waiting_node_id, + + blocking_transaction_waiting + + FROM citus_internal.global_blocked_processes() +), +unique_global_wait_edges AS +( + SELECT DISTINCT ON(waiting_global_pid, blocking_global_pid) * FROM unique_global_wait_edges_with_calculated_gpids +), +citus_dist_stat_activity_with_calculated_gpids AS +( + -- if global_pid is NULL, it is most likely that a backend is blocked on a DDL + SELECT CASE WHEN global_pid != 0 THEN global_pid ELSE citus_calculate_gpid(nodeid, pid) END global_pid, nodeid, pid, query FROM citus_dist_stat_activity +) +SELECT + waiting.global_pid as waiting_gpid, + blocking.global_pid as blocking_gpid, + waiting.query AS blocked_statement, + blocking.query AS current_statement_in_blocking_process, + waiting.nodeid AS waiting_nodeid, + blocking.nodeid AS blocking_nodeid +FROM + unique_global_wait_edges + JOIN + citus_dist_stat_activity_with_calculated_gpids waiting ON (unique_global_wait_edges.waiting_global_pid = waiting.global_pid) + JOIN + citus_dist_stat_activity_with_calculated_gpids blocking ON (unique_global_wait_edges.blocking_global_pid = blocking.global_pid); + +ALTER VIEW citus.citus_lock_waits SET SCHEMA pg_catalog; +GRANT SELECT ON pg_catalog.citus_lock_waits TO PUBLIC; + +RESET search_path; diff --git a/src/backend/distributed/sql/udfs/citus_lock_waits/latest.sql b/src/backend/distributed/sql/udfs/citus_lock_waits/latest.sql index b3de12632b5..880306b992f 100644 --- a/src/backend/distributed/sql/udfs/citus_lock_waits/latest.sql +++ b/src/backend/distributed/sql/udfs/citus_lock_waits/latest.sql @@ -5,18 +5,18 @@ WITH unique_global_wait_edges_with_calculated_gpids AS ( SELECT -- if global_pid is NULL, it is most likely that a backend is blocked on a DDL - -- also for legacy reasons citus_internal_global_blocked_processes() returns groupId, we replace that with nodeIds + -- also for legacy reasons citus_internal.global_blocked_processes() returns groupId, we replace that with nodeIds case WHEN waiting_global_pid !=0 THEN waiting_global_pid ELSE citus_calculate_gpid(get_nodeid_for_groupid(waiting_node_id), waiting_pid) END waiting_global_pid,
case WHEN blocking_global_pid !=0 THEN blocking_global_pid ELSE citus_calculate_gpid(get_nodeid_for_groupid(blocking_node_id), blocking_pid) END blocking_global_pid, - -- citus_internal_global_blocked_processes returns groupId, we replace it here with actual + -- citus_internal.global_blocked_processes returns groupId, we replace it here with actual -- nodeId to be consistent with the other views get_nodeid_for_groupid(blocking_node_id) as blocking_node_id, get_nodeid_for_groupid(waiting_node_id) as waiting_node_id, blocking_transaction_waiting - FROM citus_internal_global_blocked_processes() + FROM citus_internal.global_blocked_processes() ), unique_global_wait_edges AS ( diff --git a/src/backend/distributed/sql/udfs/repl_origin_helper/12.2-1.sql b/src/backend/distributed/sql/udfs/repl_origin_helper/12.2-1.sql new file mode 100644 index 00000000000..8c6d175d074 --- /dev/null +++ b/src/backend/distributed/sql/udfs/repl_origin_helper/12.2-1.sql @@ -0,0 +1,41 @@ +CREATE OR REPLACE FUNCTION citus_internal.start_replication_origin_tracking() +RETURNS void +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_start_replication_origin_tracking$$; +COMMENT ON FUNCTION citus_internal.start_replication_origin_tracking() + IS 'To start replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION citus_internal.stop_replication_origin_tracking() +RETURNS void +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_stop_replication_origin_tracking$$; +COMMENT ON FUNCTION citus_internal.stop_replication_origin_tracking() + IS 'To stop replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION citus_internal.is_replication_origin_tracking_active() +RETURNS boolean +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_is_replication_origin_tracking_active$$; +COMMENT ON FUNCTION citus_internal.is_replication_origin_tracking_active() + IS 'To check if replication origin tracking is active for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_start_replication_origin_tracking() +RETURNS void +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_start_replication_origin_tracking$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_start_replication_origin_tracking() + IS 'To start replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_stop_replication_origin_tracking() +RETURNS void +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_stop_replication_origin_tracking$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_stop_replication_origin_tracking() + IS 'To stop replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_is_replication_origin_tracking_active() +RETURNS boolean +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_is_replication_origin_tracking_active$$; +COMMENT ON FUNCTION pg_catalog.citus_internal_is_replication_origin_tracking_active() + IS 'To check if replication origin tracking is active for skipping publishing of duplicated events during internal data movements for CDC'; diff --git a/src/backend/distributed/sql/udfs/repl_origin_helper/latest.sql
b/src/backend/distributed/sql/udfs/repl_origin_helper/latest.sql index 5fe5a3bb92d..8c6d175d074 100644 --- a/src/backend/distributed/sql/udfs/repl_origin_helper/latest.sql +++ b/src/backend/distributed/sql/udfs/repl_origin_helper/latest.sql @@ -1,3 +1,24 @@ +CREATE OR REPLACE FUNCTION citus_internal.start_replication_origin_tracking() +RETURNS void +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_start_replication_origin_tracking$$; +COMMENT ON FUNCTION citus_internal.start_replication_origin_tracking() + IS 'To start replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION citus_internal.stop_replication_origin_tracking() +RETURNS void +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_stop_replication_origin_tracking$$; +COMMENT ON FUNCTION citus_internal.stop_replication_origin_tracking() + IS 'To stop replication origin tracking for skipping publishing of duplicated events during internal data movements for CDC'; + +CREATE OR REPLACE FUNCTION citus_internal.is_replication_origin_tracking_active() +RETURNS boolean +LANGUAGE C STRICT +AS 'MODULE_PATHNAME', $$citus_internal_is_replication_origin_tracking_active$$; +COMMENT ON FUNCTION citus_internal.is_replication_origin_tracking_active() + IS 'To check if replication origin tracking is active for skipping publishing of duplicated events during internal data movements for CDC'; + CREATE OR REPLACE FUNCTION pg_catalog.citus_internal_start_replication_origin_tracking() RETURNS void LANGUAGE C STRICT diff --git a/src/backend/distributed/transaction/lock_graph.c b/src/backend/distributed/transaction/lock_graph.c index 82f936243f0..b55a72843bf 100644 --- a/src/backend/distributed/transaction/lock_graph.c +++ b/src/backend/distributed/transaction/lock_graph.c @@ -192,7 +192,7 @@ BuildGlobalWaitGraph(bool onlyDistributedTx) "waiting_node_id, waiting_transaction_num, waiting_transaction_stamp, " "blocking_global_pid,blocking_pid, blocking_node_id, " "blocking_transaction_num, blocking_transaction_stamp, blocking_transaction_waiting " - "FROM citus_internal_local_blocked_processes()"); + "FROM citus_internal.local_blocked_processes()"); } int querySent = SendRemoteCommand(connection, queryString->data); @@ -226,7 +226,7 @@ BuildGlobalWaitGraph(bool onlyDistributedTx) else if (!onlyDistributedTx && colCount != 11) { ereport(WARNING, (errmsg("unexpected number of columns from " - "citus_internal_local_blocked_processes"))); + "citus_internal.local_blocked_processes"))); continue; } diff --git a/src/backend/distributed/transaction/remote_transaction.c b/src/backend/distributed/transaction/remote_transaction.c index 71b6a78dd70..4c26e2478ca 100644 --- a/src/backend/distributed/transaction/remote_transaction.c +++ b/src/backend/distributed/transaction/remote_transaction.c @@ -107,6 +107,12 @@ bool IsMainDB = true; */ char *SuperuserRole = NULL; +/* + * IsMainDBCommandInXact shows if the query sent to the main database requires + * a transaction + */ +bool IsMainDBCommandInXact = true; + /* * start_management_transaction starts a management transaction @@ -190,7 +196,11 @@ RunCitusMainDBQuery(char *query) PostPortNumber, SuperuserRole, MainDb); - RemoteTransactionBegin(MainDBConnection); + + if (IsMainDBCommandInXact) + { + RemoteTransactionBegin(MainDBConnection); + } } SendRemoteCommand(MainDBConnection, query); diff --git a/src/backend/distributed/transaction/transaction_management.c b/src/backend/distributed/transaction/transaction_management.c 
index 29f5b367e97..9c7b456807e 100644 --- a/src/backend/distributed/transaction/transaction_management.c +++ b/src/backend/distributed/transaction/transaction_management.c @@ -333,7 +333,7 @@ CoordinatedTransactionCallback(XactEvent event, void *arg) * If this is a non-Citus main database we should try to commit the prepared * transactions created by the Citus main database on the worker nodes. */ - if (!IsMainDB && MainDBConnection != NULL) + if (!IsMainDB && MainDBConnection != NULL && IsMainDBCommandInXact) { RunCitusMainDBQuery(COMMIT_MANAGEMENT_COMMAND_2PC); CleanCitusMainDBConnection(); @@ -533,7 +533,7 @@ CoordinatedTransactionCallback(XactEvent event, void *arg) * main database query. So if some error happens on the distributed main * database query we wouldn't have committed the current query. */ - if (!IsMainDB && MainDBConnection != NULL) + if (!IsMainDB && MainDBConnection != NULL && IsMainDBCommandInXact) { RunCitusMainDBQuery("COMMIT"); } diff --git a/src/backend/distributed/utils/directory.c b/src/backend/distributed/utils/directory.c index bad585809ef..6701bf8fb13 100644 --- a/src/backend/distributed/utils/directory.c +++ b/src/backend/distributed/utils/directory.c @@ -29,7 +29,7 @@ static bool FileIsLink(const char *filename, struct stat filestat); void CitusCreateDirectory(StringInfo directoryName) { - int makeOK = mkdir(directoryName->data, S_IRWXU); + int makeOK = MakePGDirectory(directoryName->data); if (makeOK != 0) { ereport(ERROR, (errcode_for_file_access(), diff --git a/src/backend/distributed/utils/replication_origin_session_utils.c b/src/backend/distributed/utils/replication_origin_session_utils.c index 370b061be6c..f96e23f8f26 100644 --- a/src/backend/distributed/utils/replication_origin_session_utils.c +++ b/src/backend/distributed/utils/replication_origin_session_utils.c @@ -186,7 +186,7 @@ SetupReplicationOriginRemoteSession(MultiConnection *connection) { StringInfo replicationOriginSessionSetupQuery = makeStringInfo(); appendStringInfo(replicationOriginSessionSetupQuery, - "select pg_catalog.citus_internal_start_replication_origin_tracking();"); + "select citus_internal.start_replication_origin_tracking();"); ExecuteCriticalRemoteCommand(connection, replicationOriginSessionSetupQuery->data); connection->isReplicationOriginSessionSetup = true; @@ -205,7 +205,7 @@ ResetReplicationOriginRemoteSession(MultiConnection *connection) { StringInfo replicationOriginSessionResetQuery = makeStringInfo(); appendStringInfo(replicationOriginSessionResetQuery, - "select pg_catalog.citus_internal_stop_replication_origin_tracking();"); + "select citus_internal.stop_replication_origin_tracking();"); ExecuteCriticalRemoteCommand(connection, replicationOriginSessionResetQuery->data); connection->isReplicationOriginSessionSetup = false; @@ -229,7 +229,7 @@ IsRemoteReplicationOriginSessionSetup(MultiConnection *connection) StringInfo isReplicationOriginSessionSetupQuery = makeStringInfo(); appendStringInfo(isReplicationOriginSessionSetupQuery, - "SELECT pg_catalog.citus_internal_is_replication_origin_tracking_active()"); + "SELECT citus_internal.is_replication_origin_tracking_active()"); bool result = ExecuteRemoteCommandAndCheckResult(connection, isReplicationOriginSessionSetupQuery->data, diff --git a/src/backend/distributed/worker/worker_sql_task_protocol.c b/src/backend/distributed/worker/worker_sql_task_protocol.c index 2cf48fc6ffd..708fee15d02 100644 --- a/src/backend/distributed/worker/worker_sql_task_protocol.c +++ 
b/src/backend/distributed/worker/worker_sql_task_protocol.c @@ -126,7 +126,6 @@ TaskFileDestReceiverStartup(DestReceiver *dest, int operation, const char *nullPrintCharacter = "\\N"; const int fileFlags = (O_APPEND | O_CREAT | O_RDWR | O_TRUNC | PG_BINARY); - const int fileMode = (S_IRUSR | S_IWUSR); /* use the memory context that was in place when the DestReceiver was created */ MemoryContext oldContext = MemoryContextSwitchTo(taskFileDest->memoryContext); @@ -148,8 +147,7 @@ taskFileDest->fileCompat = FileCompatFromFileStart(FileOpenForTransmit( taskFileDest->filePath, - fileFlags, - fileMode)); + fileFlags)); if (copyOutState->binary) { diff --git a/src/include/distributed/metadata_sync.h b/src/include/distributed/metadata_sync.h index 9f4c0a24b86..cb111e16e03 100644 --- a/src/include/distributed/metadata_sync.h +++ b/src/include/distributed/metadata_sync.h @@ -189,7 +189,7 @@ extern void SendInterTableRelationshipCommands(MetadataSyncContext *context); #define WORKER_DROP_ALL_SHELL_TABLES \ "CALL pg_catalog.worker_drop_all_shell_tables(%s)" #define CITUS_INTERNAL_MARK_NODE_NOT_SYNCED \ - "SELECT citus_internal_mark_node_not_synced(%d, %d)" + "SELECT citus_internal.mark_node_not_synced(%d, %d)" #define REMOVE_ALL_CITUS_TABLES_COMMAND \ "SELECT worker_drop_distributed_table(logicalrelid::regclass::text) FROM pg_dist_partition" diff --git a/src/include/distributed/remote_transaction.h b/src/include/distributed/remote_transaction.h index 2b61c25bdd0..45e2eba70f5 100644 --- a/src/include/distributed/remote_transaction.h +++ b/src/include/distributed/remote_transaction.h @@ -152,5 +152,6 @@ extern bool IsMainDB; extern char *SuperuserRole; extern char *MainDb; extern struct MultiConnection *MainDBConnection; +extern bool IsMainDBCommandInXact; #endif /* REMOTE_TRANSACTION_H */ diff --git a/src/include/distributed/shard_cleaner.h b/src/include/distributed/shard_cleaner.h index e7d3dea1bf7..4967846b2ba 100644 --- a/src/include/distributed/shard_cleaner.h +++ b/src/include/distributed/shard_cleaner.h @@ -81,16 +81,16 @@ typedef enum CleanupPolicy extern OperationId RegisterOperationNeedingCleanup(void); /* - * InsertCleanupRecordInCurrentTransaction inserts a new pg_dist_cleanup entry + * InsertCleanupOnSuccessRecordInCurrentTransaction inserts a new pg_dist_cleanup entry * as part of the current transaction. * * This is primarily useful for deferred cleanup (CLEANUP_DEFERRED_ON_SUCCESS) - * scenarios, since the records would roll back in case of failure. + * scenarios, since the records would roll back in case of failure. And for the + * same reason, always sets the policy type to CLEANUP_DEFERRED_ON_SUCCESS. */ -extern void InsertCleanupRecordInCurrentTransaction(CleanupObject objectType, - char *objectName, - int nodeGroupId, - CleanupPolicy policy); +extern void InsertCleanupOnSuccessRecordInCurrentTransaction(CleanupObject objectType, - char *objectName, - int nodeGroupId); /* * InsertCleanupRecordInSeparateTransaction inserts a new pg_dist_cleanup entry * as part of a separate transaction that is committed right away. @@ -99,10 +99,10 @@ extern void InsertCleanupRecordInCurrentTransaction(CleanupObject objectType, * This is used in scenarios where we need to clean up resources on operation * completion (CLEANUP_ALWAYS) or on failure (CLEANUP_ON_FAILURE).
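 * A minimal usage sketch (hypothetical caller; the object name and group id are
 * made up, and CLEANUP_OBJECT_SHARD_PLACEMENT is assumed to be a member of
 * CleanupObject):
 *
 *   InsertCleanupRecordOutsideTransaction(CLEANUP_OBJECT_SHARD_PLACEMENT,
 *                                         "public.dist_table_102008", 1,
 *                                         CLEANUP_ON_FAILURE);
 *
 * Because the record commits independently of the current transaction, the
 * maintenance daemon can still drop the half-created object if the operation
 * itself aborts.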
*/ -extern void InsertCleanupRecordInSubtransaction(CleanupObject objectType, - char *objectName, - int nodeGroupId, - CleanupPolicy policy); +extern void InsertCleanupRecordOutsideTransaction(CleanupObject objectType, + char *objectName, + int nodeGroupId, + CleanupPolicy policy); /* * FinalizeOperationNeedingCleanupOnSuccess is called by an operation to signal diff --git a/src/include/distributed/transmit.h b/src/include/distributed/transmit.h index b86fd91506b..9c2ab87ab65 100644 --- a/src/include/distributed/transmit.h +++ b/src/include/distributed/transmit.h @@ -21,7 +21,8 @@ /* Function declarations for transmitting files between two nodes */ extern void RedirectCopyDataToRegularFile(const char *filename); extern void SendRegularFile(const char *filename); -extern File FileOpenForTransmit(const char *filename, int fileFlags, int fileMode); +extern File FileOpenForTransmit(const char *filename, int fileFlags); +extern File FileOpenForTransmitPerm(const char *filename, int fileFlags, int fileMode); #endif /* TRANSMIT_H */ diff --git a/src/test/regress/Pipfile b/src/test/regress/Pipfile index d4b2cc39f07..a863d795ec3 100644 --- a/src/test/regress/Pipfile +++ b/src/test/regress/Pipfile @@ -16,6 +16,7 @@ pytest-timeout = "*" pytest-xdist = "*" pytest-repeat = "*" pyyaml = "*" +werkzeug = "==2.3.7" [dev-packages] black = "*" diff --git a/src/test/regress/Pipfile.lock b/src/test/regress/Pipfile.lock index bdb42a1c319..c0f8734a02e 100644 --- a/src/test/regress/Pipfile.lock +++ b/src/test/regress/Pipfile.lock @@ -1,7 +1,7 @@ { "_meta": { "hash": { - "sha256:b92bf682aeeea1a66a16beaf78584a5318fd0ae908ce85c7e2a4807aa2bee532" + "sha256:bf20354a2d9c93d46041ac4c6fa427588ebfe29343ea0b02138b9079f2d82f18" }, "pipfile-spec": 6, "requires": { @@ -119,11 +119,11 @@ }, "certifi": { "hashes": [ - "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082", - "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9" + "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f", + "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1" ], "markers": "python_version >= '3.6'", - "version": "==2023.7.22" + "version": "==2024.2.2" }, "cffi": { "hashes": [ @@ -180,7 +180,7 @@ "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956", "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357" ], - "markers": "python_version >= '3.8'", + "markers": "platform_python_implementation != 'PyPy'", "version": "==1.16.0" }, "click": { "hashes": [ @@ -200,33 +200,42 @@ }, "cryptography": { "hashes": [ - "sha256:004b6ccc95943f6a9ad3142cfabcc769d7ee38a3f60fb0dddbfb431f818c3a67", - "sha256:047c4603aeb4bbd8db2756e38f5b8bd7e94318c047cfe4efeb5d715e08b49311", - "sha256:0d9409894f495d465fe6fda92cb70e8323e9648af912d5b9141d616df40a87b8", - "sha256:23a25c09dfd0d9f28da2352503b23e086f8e78096b9fd585d1d14eca01613e13", - "sha256:2ed09183922d66c4ec5fdaa59b4d14e105c084dd0febd27452de8f6f74704143", - "sha256:35c00f637cd0b9d5b6c6bd11b6c3359194a8eba9c46d4e875a3660e3b400005f", - "sha256:37480760ae08065437e6573d14be973112c9e6dcaf5f11d00147ee74f37a3829", - "sha256:3b224890962a2d7b57cf5eeb16ccaafba6083f7b811829f00476309bce2fe0fd", - "sha256:5a0f09cefded00e648a127048119f77bc2b2ec61e736660b5789e638f43cc397", - "sha256:5b72205a360f3b6176485a333256b9bcd48700fc755fef51c8e7e67c4b63e3ac", - "sha256:7e53db173370dea832190870e975a1e09c86a879b613948f09eb49324218c14d", - "sha256:7febc3094125fc126a7f6fb1f420d0da639f3f32cb15c8ff0dc3997c4549f51a", -
"sha256:80907d3faa55dc5434a16579952ac6da800935cd98d14dbd62f6f042c7f5e839", - "sha256:86defa8d248c3fa029da68ce61fe735432b047e32179883bdb1e79ed9bb8195e", - "sha256:8ac4f9ead4bbd0bc8ab2d318f97d85147167a488be0e08814a37eb2f439d5cf6", - "sha256:93530900d14c37a46ce3d6c9e6fd35dbe5f5601bf6b3a5c325c7bffc030344d9", - "sha256:9eeb77214afae972a00dee47382d2591abe77bdae166bda672fb1e24702a3860", - "sha256:b5f4dfe950ff0479f1f00eda09c18798d4f49b98f4e2006d644b3301682ebdca", - "sha256:c3391bd8e6de35f6f1140e50aaeb3e2b3d6a9012536ca23ab0d9c35ec18c8a91", - "sha256:c880eba5175f4307129784eca96f4e70b88e57aa3f680aeba3bab0e980b0f37d", - "sha256:cecfefa17042941f94ab54f769c8ce0fe14beff2694e9ac684176a2535bf9714", - "sha256:e40211b4923ba5a6dc9769eab704bdb3fbb58d56c5b336d30996c24fcf12aadb", - "sha256:efc8ad4e6fc4f1752ebfb58aefece8b4e3c4cae940b0994d43649bdfce8d0d4f" + "sha256:04859aa7f12c2b5f7e22d25198ddd537391f1695df7057c8700f71f26f47a129", + "sha256:069d2ce9be5526a44093a0991c450fe9906cdf069e0e7cd67d9dee49a62b9ebe", + "sha256:0d3ec384058b642f7fb7e7bff9664030011ed1af8f852540c76a1317a9dd0d20", + "sha256:0fab2a5c479b360e5e0ea9f654bcebb535e3aa1e493a715b13244f4e07ea8eec", + "sha256:0fea01527d4fb22ffe38cd98951c9044400f6eff4788cf52ae116e27d30a1ba3", + "sha256:1b797099d221df7cce5ff2a1d272761d1554ddf9a987d3e11f6459b38cd300fd", + "sha256:1e935c2900fb53d31f491c0de04f41110351377be19d83d908c1fd502ae8daa5", + "sha256:20100c22b298c9eaebe4f0b9032ea97186ac2555f426c3e70670f2517989543b", + "sha256:20180da1b508f4aefc101cebc14c57043a02b355d1a652b6e8e537967f1e1b46", + "sha256:25b09b73db78facdfd7dd0fa77a3f19e94896197c86e9f6dc16bce7b37a96504", + "sha256:2619487f37da18d6826e27854a7f9d4d013c51eafb066c80d09c63cf24505306", + "sha256:2eb6368d5327d6455f20327fb6159b97538820355ec00f8cc9464d617caecead", + "sha256:35772a6cffd1f59b85cb670f12faba05513446f80352fe811689b4e439b5d89e", + "sha256:39d5c93e95bcbc4c06313fc6a500cee414ee39b616b55320c1904760ad686938", + "sha256:3d96ea47ce6d0055d5b97e761d37b4e84195485cb5a38401be341fabf23bc32a", + "sha256:4dcab7c25e48fc09a73c3e463d09ac902a932a0f8d0c568238b3696d06bf377b", + "sha256:5fbf0f3f0fac7c089308bd771d2c6c7b7d53ae909dce1db52d8e921f6c19bb3a", + "sha256:6c25e1e9c2ce682d01fc5e2dde6598f7313027343bd14f4049b82ad0402e52cd", + "sha256:762f3771ae40e111d78d77cbe9c1035e886ac04a234d3ee0856bf4ecb3749d54", + "sha256:90147dad8c22d64b2ff7331f8d4cddfdc3ee93e4879796f837bdbb2a0b141e0c", + "sha256:935cca25d35dda9e7bd46a24831dfd255307c55a07ff38fd1a92119cffc34857", + "sha256:93fbee08c48e63d5d1b39ab56fd3fdd02e6c2431c3da0f4edaf54954744c718f", + "sha256:9541c69c62d7446539f2c1c06d7046aef822940d248fa4b8962ff0302862cc1f", + "sha256:c23f03cfd7d9826cdcbad7850de67e18b4654179e01fe9bc623d37c2638eb4ef", + "sha256:c3d1f5a1d403a8e640fa0887e9f7087331abb3f33b0f2207d2cc7f213e4a864c", + "sha256:d1998e545081da0ab276bcb4b33cce85f775adb86a516e8f55b3dac87f469548", + "sha256:d5cf11bc7f0b71fb71af26af396c83dfd3f6eed56d4b6ef95d57867bf1e4ba65", + "sha256:db0480ffbfb1193ac4e1e88239f31314fe4c6cdcf9c0b8712b55414afbf80db4", + "sha256:de4ae486041878dc46e571a4c70ba337ed5233a1344c14a0790c4c4be4bbb8b4", + "sha256:de5086cd475d67113ccb6f9fae6d8fe3ac54a4f9238fd08bfdb07b03d791ff0a", + "sha256:df34312149b495d9d03492ce97471234fd9037aa5ba217c2a6ea890e9166f151", + "sha256:ead69ba488f806fe1b1b4050febafdbf206b81fa476126f3e16110c818bac396" ], "index": "pypi", "markers": "python_version >= '3.7'", - "version": "==41.0.4" + "version": "==42.0.3" }, "docopt": { "hashes": [ @@ -237,11 +246,11 @@ }, "exceptiongroup": { "hashes": [ - 
"sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9", - "sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3" + "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14", + "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68" ], "markers": "python_version < '3.11'", - "version": "==1.1.3" + "version": "==1.2.0" }, "execnet": { "hashes": [ @@ -253,12 +262,12 @@ }, "filelock": { "hashes": [ - "sha256:08c21d87ded6e2b9da6728c3dff51baf1dcecf973b768ef35bcbc3447edb9ad4", - "sha256:2e6f249f1f3654291606e046b09f1fd5eac39b360664c27f5aad072012f8bcbd" + "sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e", + "sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c" ], "index": "pypi", "markers": "python_version >= '3.8'", - "version": "==3.12.4" + "version": "==3.13.1" }, "flask": { "hashes": [ @@ -318,11 +327,11 @@ }, "jinja2": { "hashes": [ - "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852", - "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61" + "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa", + "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90" ], "markers": "python_version >= '3.7'", - "version": "==3.1.2" + "version": "==3.1.3" }, "kaitaistruct": { "hashes": [ @@ -342,69 +351,69 @@ }, "markupsafe": { "hashes": [ - "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e", - "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e", - "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431", - "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686", - "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c", - "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559", - "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc", - "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb", - "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939", - "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c", - "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0", - "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4", - "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9", - "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575", - "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba", - "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d", - "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd", - "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3", - "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00", - "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155", - "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac", - "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52", - "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f", - "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8", - "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b", - "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007", - "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24", - 
"sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea", - "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198", - "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0", - "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee", - "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be", - "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2", - "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1", - "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707", - "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6", - "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c", - "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58", - "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823", - "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779", - "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636", - "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c", - "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad", - "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee", - "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc", - "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2", - "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48", - "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7", - "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e", - "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b", - "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa", - "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5", - "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e", - "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb", - "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9", - "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57", - "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc", - "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc", - "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2", - "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11" + "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf", + "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff", + "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f", + "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3", + "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532", + "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f", + "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617", + "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df", + "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4", + "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906", + "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f", + "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4", + "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8", + 
"sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371", + "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2", + "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465", + "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52", + "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6", + "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169", + "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad", + "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2", + "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0", + "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029", + "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f", + "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a", + "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced", + "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5", + "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c", + "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf", + "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9", + "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb", + "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad", + "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3", + "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1", + "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46", + "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc", + "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a", + "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee", + "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900", + "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5", + "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea", + "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f", + "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5", + "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e", + "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a", + "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f", + "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50", + "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a", + "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b", + "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4", + "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff", + "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2", + "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46", + "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b", + "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf", + "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5", + "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5", + "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab", + "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd", + 
"sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68" ], "markers": "python_version >= '3.7'", - "version": "==2.1.3" + "version": "==2.1.5" }, "mitmproxy": { "editable": true, @@ -491,11 +500,11 @@ }, "pluggy": { "hashes": [ - "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12", - "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7" + "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981", + "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be" ], "markers": "python_version >= '3.8'", - "version": "==1.3.0" + "version": "==1.4.0" }, "protobuf": { "hashes": [ @@ -526,12 +535,12 @@ }, "psycopg": { "hashes": [ - "sha256:7542c45810ea16356e5126c9b4291cbc3802aa326fcbba09ff154fe380de29be", - "sha256:cd711edb64b07d7f8a233c365806caf7e55bbe7cbbd8d5c680f672bb5353c8d5" + "sha256:31144d3fb4c17d78094d9e579826f047d4af1da6a10427d91dfcfb6ecdf6f12b", + "sha256:4d5a0a5a8590906daa58ebd5f3cfc34091377354a1acced269dd10faf55da60e" ], "index": "pypi", "markers": "python_version >= '3.7'", - "version": "==3.1.11" + "version": "==3.1.18" }, "publicsuffix2": { "hashes": [ @@ -542,11 +551,11 @@ }, "pyasn1": { "hashes": [ - "sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57", - "sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde" + "sha256:4439847c58d40b1d0a573d07e3856e95333f1976294494c325775aeca506eb58", + "sha256:6d391a96e59b23130a5cfa74d6fd7f388dbbe26cc8f1edf39fdddf08d9d6676c" ], "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'", - "version": "==0.5.0" + "version": "==0.5.1" }, "pycparser": { "hashes": [ @@ -557,11 +566,11 @@ }, "pyopenssl": { "hashes": [ - "sha256:24f0dc5227396b3e831f4c7f602b950a5e9833d292c8e4a2e06b709292806ae2", - "sha256:276f931f55a452e7dea69c7173e984eb2a4407ce413c918aa34b55f82f9b8bac" + "sha256:6aa33039a93fffa4563e655b61d11364d01264be8ccb49906101e02a334530bf", + "sha256:ba07553fb6fd6a7a2259adb9b84e12302a9a8a75c44046e8bb5d3e5ee887e3c3" ], - "markers": "python_version >= '3.6'", - "version": "==23.2.0" + "markers": "python_version >= '3.7'", + "version": "==24.0.0" }, "pyparsing": { "hashes": [ @@ -579,48 +588,48 @@ }, "pytest": { "hashes": [ - "sha256:1d881c6124e08ff0a1bb75ba3ec0bfd8b5354a01c194ddd5a0a870a48d99b002", - "sha256:a766259cfab564a2ad52cb1aae1b881a75c3eb7e34ca3779697c23ed47c47069" + "sha256:249b1b0864530ba251b7438274c4d251c58d868edaaec8762893ad4a0d71c36c", + "sha256:50fb9cbe836c3f20f0dfa99c565201fb75dc54c8d76373cd1bde06b06657bdb6" ], "index": "pypi", - "markers": "python_version >= '3.7'", - "version": "==7.4.2" + "markers": "python_version >= '3.8'", + "version": "==8.0.0" }, "pytest-asyncio": { "hashes": [ - "sha256:40a7eae6dded22c7b604986855ea48400ab15b069ae38116e8c01238e9eeb64d", - "sha256:8666c1c8ac02631d7c51ba282e0c69a8a452b211ffedf2599099845da5c5c37b" + "sha256:3a048872a9c4ba14c3e90cc1aa20cbc2def7d01c7c8db3777ec281ba9c057675", + "sha256:4e7093259ba018d58ede7d5315131d21923a60f8a6e9ee266ce1589685c89eac" ], "index": "pypi", - "markers": "python_version >= '3.7'", - "version": "==0.21.1" + "markers": "python_version >= '3.8'", + "version": "==0.23.5" }, "pytest-repeat": { "hashes": [ - "sha256:4474a7d9e9137f6d8cc8ae297f8c4168d33c56dd740aa78cfffe562557e6b96e", - "sha256:5cd3289745ab3156d43eb9c8e7f7d00a926f3ae5c9cf425bec649b2fe15bad5b" + "sha256:26ab2df18226af9d5ce441c858f273121e92ff55f5bb311d25755b8d7abdd8ed", + "sha256:ffd3836dfcd67bb270bec648b330e20be37d2966448c4148c4092d1e8aba8185" ], "index": 
"pypi", - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", - "version": "==0.9.1" + "markers": "python_version >= '3.7'", + "version": "==0.9.3" }, "pytest-timeout": { "hashes": [ - "sha256:c07ca07404c612f8abbe22294b23c368e2e5104b521c1790195561f37e1ac3d9", - "sha256:f6f50101443ce70ad325ceb4473c4255e9d74e3c7cd0ef827309dfa4c0d975c6" + "sha256:3b0b95dabf3cb50bac9ef5ca912fa0cfc286526af17afc806824df20c2f72c90", + "sha256:bde531e096466f49398a59f2dde76fa78429a09a12411466f88a07213e220de2" ], "index": "pypi", - "markers": "python_version >= '3.6'", - "version": "==2.1.0" + "markers": "python_version >= '3.7'", + "version": "==2.2.0" }, "pytest-xdist": { "hashes": [ - "sha256:d5ee0520eb1b7bcca50a60a518ab7a7707992812c578198f8b44fdfac78e8c93", - "sha256:ff9daa7793569e6a68544850fd3927cd257cc03a7ef76c95e86915355e82b5f2" + "sha256:cbb36f3d67e0c478baa57fa4edc8843887e0f6cfc42d677530a36d7472b32d8a", + "sha256:d075629c7e00b611df89f490a5063944bee7a4362a5ff11c7cc7824a03dfce24" ], "index": "pypi", "markers": "python_version >= '3.7'", - "version": "==3.3.1" + "version": "==3.5.0" }, "pyyaml": { "hashes": [ @@ -653,6 +662,7 @@ "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4", "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba", "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8", + "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef", "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5", "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd", "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3", @@ -693,36 +703,37 @@ "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001", "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462", "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9", + "sha256:1707814f0d9791df063f8c19bb51b0d1278b8e9a2353abbb676c2f685dee6afe", "sha256:1758ce7d8e1a29d23de54a16ae867abd370f01b5a69e1a3ba75223eaa3ca1a1b", "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b", "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615", + "sha256:1dc67314e7e1086c9fdf2680b7b6c2be1c0d8e3a8279f2e993ca2a7545fecf62", "sha256:25ac8c08322002b06fa1d49d1646181f0b2c72f5cbc15a85e80b4c30a544bb15", "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b", + "sha256:305889baa4043a09e5b76f8e2a51d4ffba44259f6b4c72dec8ca56207d9c6fe1", "sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9", "sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675", - "sha256:3fcc54cb0c8b811ff66082de1680b4b14cf8a81dce0d4fbf665c2265a81e07a1", "sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899", "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7", "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7", "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312", "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa", - "sha256:665f58bfd29b167039f714c6998178d27ccd83984084c286110ef26b230f259f", "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91", - "sha256:7048c338b6c86627afb27faecf418768acb6331fc24cfa56c93e8c9780f815fa", "sha256:75e1ed13e1f9de23c5607fe6bd1aeaae21e523b32d83bb33918245361e9cc51b", + "sha256:77159f5d5b5c14f7c34073862a6b7d34944075d9f93e681638f6d753606c6ce6", 
"sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3", "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334", "sha256:84b554931e932c46f94ab306913ad7e11bba988104c5cff26d90d03f68258cd5", "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3", "sha256:955eae71ac26c1ab35924203fda6220f84dce57d6d7884f189743e2abe3a9fbe", - "sha256:9eb5dee2772b0f704ca2e45b1713e4e5198c18f515b52743576d196348f374d3", + "sha256:a1a45e0bb052edf6a1d3a93baef85319733a888363938e1fc9924cb00c8df24c", "sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed", "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337", "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880", + "sha256:aa2267c6a303eb483de8d02db2871afb5c5fc15618d894300b88958f729ad74f", "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d", "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248", "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d", - "sha256:b5edda50e5e9e15e54a6a8a0070302b00c518a9d32accc2346ad6c984aacd279", "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf", "sha256:beb2e0404003de9a4cab9753a8805a8fe9320ee6673136ed7f04255fe60bb512", "sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069", @@ -731,7 +742,6 @@ "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d", "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31", "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92", - "sha256:d92f81886165cb14d7b067ef37e142256f1c6a90a65cd156b063a43da1708cfd", "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5", "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28", "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d", @@ -760,28 +770,28 @@ }, "tornado": { "hashes": [ - "sha256:1bd19ca6c16882e4d37368e0152f99c099bad93e0950ce55e71daed74045908f", - "sha256:22d3c2fa10b5793da13c807e6fc38ff49a4f6e1e3868b0a6f4164768bb8e20f5", - "sha256:502fba735c84450974fec147340016ad928d29f1e91f49be168c0a4c18181e1d", - "sha256:65ceca9500383fbdf33a98c0087cb975b2ef3bfb874cb35b8de8740cf7f41bd3", - "sha256:71a8db65160a3c55d61839b7302a9a400074c9c753040455494e2af74e2501f2", - "sha256:7ac51f42808cca9b3613f51ffe2a965c8525cb1b00b7b2d56828b8045354f76a", - "sha256:7d01abc57ea0dbb51ddfed477dfe22719d376119844e33c661d873bf9c0e4a16", - "sha256:805d507b1f588320c26f7f097108eb4023bbaa984d63176d1652e184ba24270a", - "sha256:9dc4444c0defcd3929d5c1eb5706cbe1b116e762ff3e0deca8b715d14bf6ec17", - "sha256:ceb917a50cd35882b57600709dd5421a418c29ddc852da8bcdab1f0db33406b0", - "sha256:e7d8db41c0181c80d76c982aacc442c0783a2c54d6400fe028954201a2e032fe" + "sha256:02ccefc7d8211e5a7f9e8bc3f9e5b0ad6262ba2fbb683a6443ecc804e5224ce0", + "sha256:10aeaa8006333433da48dec9fe417877f8bcc21f48dda8d661ae79da357b2a63", + "sha256:27787de946a9cffd63ce5814c33f734c627a87072ec7eed71f7fc4417bb16263", + "sha256:6f8a6c77900f5ae93d8b4ae1196472d0ccc2775cc1dfdc9e7727889145c45052", + "sha256:71ddfc23a0e03ef2df1c1397d859868d158c8276a0603b96cf86892bff58149f", + "sha256:72291fa6e6bc84e626589f1c29d90a5a6d593ef5ae68052ee2ef000dfd273dee", + "sha256:88b84956273fbd73420e6d4b8d5ccbe913c65d31351b4c004ae362eba06e1f78", + "sha256:e43bc2e5370a6a8e413e1e1cd0c91bedc5bd62a74a532371042a18ef19e10579", + "sha256:f0251554cdd50b4b44362f73ad5ba7126fc5b2c2895cc62b14a1c2d7ea32f212", + 
"sha256:f7894c581ecdcf91666a0912f18ce5e757213999e183ebfc2c3fdbf4d5bd764e", + "sha256:fd03192e287fbd0899dd8f81c6fb9cbbc69194d2074b38f384cb6fa72b80e9c2" ], "markers": "python_version >= '3.8'", - "version": "==6.3.3" + "version": "==6.4" }, "typing-extensions": { "hashes": [ - "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0", - "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef" + "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783", + "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd" ], "markers": "python_version >= '3.8'", - "version": "==4.8.0" + "version": "==4.9.0" }, "urwid": { "hashes": [ @@ -791,12 +801,12 @@ }, "werkzeug": { "hashes": [ - "sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc", - "sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10" + "sha256:2b8c0e447b4b9dbcc85dd97b6eeb4dcbaf6c8b6c3be0bd654e25553e0a2157d8", + "sha256:effc12dba7f3bd72e605ce49807bbe692bd729c3bb122a3b91747a6ae77df528" ], "index": "pypi", "markers": "python_version >= '3.8'", - "version": "==3.0.1" + "version": "==2.3.7" }, "wsproto": { "hashes": [ @@ -864,40 +874,40 @@ "develop": { "attrs": { "hashes": [ - "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04", - "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015" + "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30", + "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1" ], "markers": "python_version >= '3.7'", - "version": "==23.1.0" + "version": "==23.2.0" }, "black": { "hashes": [ - "sha256:031e8c69f3d3b09e1aa471a926a1eeb0b9071f80b17689a655f7885ac9325a6f", - "sha256:13a2e4a93bb8ca74a749b6974925c27219bb3df4d42fc45e948a5d9feb5122b7", - "sha256:13ef033794029b85dfea8032c9d3b92b42b526f1ff4bf13b2182ce4e917f5100", - "sha256:14f04c990259576acd093871e7e9b14918eb28f1866f91968ff5524293f9c573", - "sha256:24b6b3ff5c6d9ea08a8888f6977eae858e1f340d7260cf56d70a49823236b62d", - "sha256:403397c033adbc45c2bd41747da1f7fc7eaa44efbee256b53842470d4ac5a70f", - "sha256:50254ebfa56aa46a9fdd5d651f9637485068a1adf42270148cd101cdf56e0ad9", - "sha256:538efb451cd50f43aba394e9ec7ad55a37598faae3348d723b59ea8e91616300", - "sha256:638619a559280de0c2aa4d76f504891c9860bb8fa214267358f0a20f27c12948", - "sha256:6a3b50e4b93f43b34a9d3ef00d9b6728b4a722c997c99ab09102fd5efdb88325", - "sha256:6ccd59584cc834b6d127628713e4b6b968e5f79572da66284532525a042549f9", - "sha256:75a2dc41b183d4872d3a500d2b9c9016e67ed95738a3624f4751a0cb4818fe71", - "sha256:7d30ec46de88091e4316b17ae58bbbfc12b2de05e069030f6b747dfc649ad186", - "sha256:8431445bf62d2a914b541da7ab3e2b4f3bc052d2ccbf157ebad18ea126efb91f", - "sha256:8fc1ddcf83f996247505db6b715294eba56ea9372e107fd54963c7553f2b6dfe", - "sha256:a732b82747235e0542c03bf352c126052c0fbc458d8a239a94701175b17d4855", - "sha256:adc3e4442eef57f99b5590b245a328aad19c99552e0bdc7f0b04db6656debd80", - "sha256:c46767e8df1b7beefb0899c4a95fb43058fa8500b6db144f4ff3ca38eb2f6393", - "sha256:c619f063c2d68f19b2d7270f4cf3192cb81c9ec5bc5ba02df91471d0b88c4c5c", - "sha256:cf3a4d00e4cdb6734b64bf23cd4341421e8953615cba6b3670453737a72ec204", - "sha256:cf99f3de8b3273a8317681d8194ea222f10e0133a24a7548c73ce44ea1679377", - "sha256:d6bc09188020c9ac2555a498949401ab35bb6bf76d4e0f8ee251694664df6301" + "sha256:057c3dc602eaa6fdc451069bd027a1b2635028b575a6c3acfd63193ced20d9c8", + "sha256:08654d0797e65f2423f850fc8e16a0ce50925f9337fb4a4a176a7aa4026e63f8", + 
"sha256:163baf4ef40e6897a2a9b83890e59141cc8c2a98f2dda5080dc15c00ee1e62cd", + "sha256:1e08fb9a15c914b81dd734ddd7fb10513016e5ce7e6704bdd5e1251ceee51ac9", + "sha256:4dd76e9468d5536abd40ffbc7a247f83b2324f0c050556d9c371c2b9a9a95e31", + "sha256:4f9de21bafcba9683853f6c96c2d515e364aee631b178eaa5145fc1c61a3cc92", + "sha256:61a0391772490ddfb8a693c067df1ef5227257e72b0e4108482b8d41b5aee13f", + "sha256:6981eae48b3b33399c8757036c7f5d48a535b962a7c2310d19361edeef64ce29", + "sha256:7e53a8c630f71db01b28cd9602a1ada68c937cbf2c333e6ed041390d6968faf4", + "sha256:810d445ae6069ce64030c78ff6127cd9cd178a9ac3361435708b907d8a04c693", + "sha256:93601c2deb321b4bad8f95df408e3fb3943d85012dddb6121336b8e24a0d1218", + "sha256:992e451b04667116680cb88f63449267c13e1ad134f30087dec8527242e9862a", + "sha256:9db528bccb9e8e20c08e716b3b09c6bdd64da0dd129b11e160bf082d4642ac23", + "sha256:a0057f800de6acc4407fe75bb147b0c2b5cbb7c3ed110d3e5999cd01184d53b0", + "sha256:ba15742a13de85e9b8f3239c8f807723991fbfae24bad92d34a2b12e81904982", + "sha256:bce4f25c27c3435e4dace4815bcb2008b87e167e3bf4ee47ccdc5ce906eb4894", + "sha256:ca610d29415ee1a30a3f30fab7a8f4144e9d34c89a235d81292a1edb2b55f540", + "sha256:d533d5e3259720fdbc1b37444491b024003e012c5173f7d06825a77508085430", + "sha256:d84f29eb3ee44859052073b7636533ec995bd0f64e2fb43aeceefc70090e752b", + "sha256:e37c99f89929af50ffaf912454b3e3b47fd64109659026b678c091a4cd450fb2", + "sha256:e8a6ae970537e67830776488bca52000eaa37fa63b9988e8c487458d9cd5ace6", + "sha256:faf2ee02e6612577ba0181f4347bcbcf591eb122f7841ae5ba233d12c39dcb4d" ], "index": "pypi", "markers": "python_version >= '3.8'", - "version": "==23.9.1" + "version": "==24.2.0" }, "click": { "hashes": [ @@ -909,30 +919,30 @@ }, "flake8": { "hashes": [ - "sha256:d5b3857f07c030bdb5bf41c7f53799571d75c4491748a3adcd47de929e34cd23", - "sha256:ffdfce58ea94c6580c77888a86506937f9a1a227dfcd15f245d694ae20a6b6e5" + "sha256:33f96621059e65eec474169085dc92bf26e7b2d47366b70be2f67ab80dc25132", + "sha256:a6dfbb75e03252917f2473ea9653f7cd799c3064e54d4c8140044c5c065f53c3" ], "index": "pypi", "markers": "python_full_version >= '3.8.1'", - "version": "==6.1.0" + "version": "==7.0.0" }, "flake8-bugbear": { "hashes": [ - "sha256:90cf04b19ca02a682feb5aac67cae8de742af70538590509941ab10ae8351f71", - "sha256:b182cf96ea8f7a8595b2f87321d7d9b28728f4d9c3318012d896543d19742cb5" + "sha256:663ef5de80cd32aacd39d362212983bc4636435a6f83700b4ed35acbd0b7d1b8", + "sha256:f9cb5f2a9e792dd80ff68e89a14c12eed8620af8b41a49d823b7a33064ac9658" ], "index": "pypi", "markers": "python_full_version >= '3.8.1'", - "version": "==23.9.16" + "version": "==24.2.6" }, "isort": { "hashes": [ - "sha256:8bef7dde241278824a6d83f44a544709b065191b95b6e50894bdc722fcba0504", - "sha256:f84c2818376e66cf843d497486ea8fed8700b340f308f076c6fb1229dff318b6" + "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109", + "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6" ], "index": "pypi", "markers": "python_full_version >= '3.8.0'", - "version": "==5.12.0" + "version": "==5.13.2" }, "mccabe": { "hashes": [ @@ -960,19 +970,19 @@ }, "pathspec": { "hashes": [ - "sha256:1d6ed233af05e679efb96b1851550ea95bbb64b7c490b0f5aa52996c11e92a20", - "sha256:e0d8d0ac2f12da61956eb2306b69f9469b42f4deb0f3cb6ed47b9cce9996ced3" + "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", + "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712" ], - "markers": "python_version >= '3.7'", - "version": "==0.11.2" + "markers": "python_version >= '3.8'", + "version": "==0.12.1" }, 
"platformdirs": { "hashes": [ - "sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3", - "sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e" + "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068", + "sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768" ], - "markers": "python_version >= '3.7'", - "version": "==3.11.0" + "markers": "python_version >= '3.8'", + "version": "==4.2.0" }, "pycodestyle": { "hashes": [ @@ -984,11 +994,11 @@ }, "pyflakes": { "hashes": [ - "sha256:4132f6d49cb4dae6819e5379898f2b8cce3c5f23994194c24b77d5da2e36f774", - "sha256:a0aae034c444db0071aa077972ba4768d40c830d9539fd45bf4cd3f8f6992efc" + "sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f", + "sha256:84b5be138a2dfbb40689ca07e2152deb896a65c3a3e24c251c5c62489568074a" ], "markers": "python_version >= '3.8'", - "version": "==3.1.0" + "version": "==3.2.0" }, "tomli": { "hashes": [ @@ -1000,11 +1010,11 @@ }, "typing-extensions": { "hashes": [ - "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0", - "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef" + "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783", + "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd" ], "markers": "python_version >= '3.8'", - "version": "==4.8.0" + "version": "==4.9.0" } } } diff --git a/src/test/regress/expected/create_drop_database_propagation_pg15.out b/src/test/regress/expected/create_drop_database_propagation_pg15.out index 9a501558a2b..7e76d87f34d 100644 --- a/src/test/regress/expected/create_drop_database_propagation_pg15.out +++ b/src/test/regress/expected/create_drop_database_propagation_pg15.out @@ -78,5 +78,11 @@ SELECT * FROM public.check_database_on_all_nodes('test_locale_provider') ORDER B worker node (remote) | {"database_properties": {"datacl": null, "datname": "test_locale_provider", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} (3 rows) +\c test_locale_provider - - :worker_2_port +set citus.enable_create_database_propagation to on; +create database unsupported_option_from_non_main_db with oid = 12345; +ERROR: CREATE DATABASE option "oid" is not supported +\c regression - - :master_port +set citus.enable_create_database_propagation to on; drop database test_locale_provider; \c - - - :master_port diff --git a/src/test/regress/expected/create_ref_dist_from_citus_local.out b/src/test/regress/expected/create_ref_dist_from_citus_local.out index f38e5c5a33f..cce7081b079 100644 --- a/src/test/regress/expected/create_ref_dist_from_citus_local.out +++ b/src/test/regress/expected/create_ref_dist_from_citus_local.out @@ -369,7 +369,7 @@ ROLLBACK; \set VERBOSITY DEFAULT -- Test the UDFs that we use to convert Citus local tables to single-shard tables and -- reference tables. 
-SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', 1, true); +SELECT citus_internal.update_none_dist_table_metadata(1, 't', 1, true); ERROR: This is an internal Citus function can only be used in a distributed transaction SELECT citus_internal.delete_placement_metadata(1); ERROR: This is an internal Citus function can only be used in a distributed transaction @@ -393,13 +393,13 @@ SET citus.next_shard_id TO 1850000; SET citus.next_placement_id TO 8510000; SET citus.shard_replication_factor TO 1; SET search_path TO create_ref_dist_from_citus_local; -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(null, 't', 1, true); +SELECT citus_internal.update_none_dist_table_metadata(null, 't', 1, true); ERROR: relation_id cannot be NULL -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, null, 1, true); +SELECT citus_internal.update_none_dist_table_metadata(1, null, 1, true); ERROR: replication_model cannot be NULL -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', null, true); +SELECT citus_internal.update_none_dist_table_metadata(1, 't', null, true); ERROR: colocation_id cannot be NULL -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', 1, null); +SELECT citus_internal.update_none_dist_table_metadata(1, 't', 1, null); ERROR: auto_converted cannot be NULL SELECT citus_internal.delete_placement_metadata(null); ERROR: placement_id cannot be NULL @@ -411,8 +411,8 @@ SELECT citus_add_local_table_to_metadata('udf_test'); (1 row) BEGIN; - SELECT pg_catalog.citus_internal_update_none_dist_table_metadata('create_ref_dist_from_citus_local.udf_test'::regclass, 'k', 99999, true); - citus_internal_update_none_dist_table_metadata + SELECT citus_internal.update_none_dist_table_metadata('create_ref_dist_from_citus_local.udf_test'::regclass, 'k', 99999, true); + update_none_dist_table_metadata --------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/create_role_propagation.out b/src/test/regress/expected/create_role_propagation.out index 48310bdc338..90f2690ce91 100644 --- a/src/test/regress/expected/create_role_propagation.out +++ b/src/test/regress/expected/create_role_propagation.out @@ -196,6 +196,7 @@ SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::t (1 row) \c - - - :master_port +create role test_admin_role; -- test grants with distributed and non-distributed roles SELECT master_remove_node('localhost', :worker_2_port); master_remove_node @@ -221,29 +222,72 @@ CREATE ROLE non_dist_role_4; NOTICE: not propagating CREATE ROLE/USER commands to other nodes HINT: Connect to other nodes directly to manually create all necessary users and roles. SET citus.enable_create_role_propagation TO ON; +grant dist_role_3,dist_role_1 to test_admin_role with admin option; SET ROLE dist_role_1; GRANT non_dist_role_1 TO non_dist_role_2; SET citus.enable_create_role_propagation TO OFF; +grant dist_role_1 to non_dist_role_1 with admin option; SET ROLE non_dist_role_1; -GRANT dist_role_1 TO dist_role_2; +GRANT dist_role_1 TO dist_role_2 granted by non_dist_role_1; RESET ROLE; SET citus.enable_create_role_propagation TO ON; -GRANT dist_role_3 TO non_dist_role_3; +GRANT dist_role_3 TO non_dist_role_3 granted by test_admin_role; GRANT non_dist_role_4 TO dist_role_4; +GRANT dist_role_3 TO dist_role_4 granted by test_admin_role; SELECT 1 FROM master_add_node('localhost', :worker_2_port); ?column? 
--------------------------------------------------------------------- 1 (1 row) -SELECT roleid::regrole::text AS role, member::regrole::text, (grantor::regrole::text IN ('postgres', 'non_dist_role_1', 'dist_role_1')) AS grantor, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT json_agg(q.* ORDER BY member) FROM ( + SELECT member::regrole::text, roleid::regrole::text AS role, grantor::regrole::text, admin_option + FROM pg_auth_members WHERE roleid::regrole::text = 'dist_role_3' + ) q; + $$ +); + result +--------------------------------------------------------------------- + [{"member":"dist_role_4","role":"dist_role_3","grantor":"test_admin_role","admin_option":false}, + + {"member":"non_dist_role_3","role":"dist_role_3","grantor":"test_admin_role","admin_option":false}, + + {"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}] + [{"member":"dist_role_4","role":"dist_role_3","grantor":"test_admin_role","admin_option":false}, + + {"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}] + [{"member":"dist_role_4","role":"dist_role_3","grantor":"test_admin_role","admin_option":false}, + + {"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}] +(3 rows) + +REVOKE dist_role_3 from dist_role_4 granted by test_admin_role cascade; +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT json_agg(q.* ORDER BY member) FROM ( + SELECT member::regrole::text, roleid::regrole::text AS role, grantor::regrole::text, admin_option + FROM pg_auth_members WHERE roleid::regrole::text = 'dist_role_3' + order by member::regrole::text + ) q; + $$ +); + result +--------------------------------------------------------------------- + [{"member":"non_dist_role_3","role":"dist_role_3","grantor":"test_admin_role","admin_option":false}, + + {"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}] + [{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}] + [{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}] +(3 rows) + +SELECT roleid::regrole::text AS role, member::regrole::text, (grantor::regrole::text IN ('postgres', 'non_dist_role_1', 'dist_role_1','test_admin_role')) AS grantor, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; role | member | grantor | admin_option --------------------------------------------------------------------- dist_role_1 | dist_role_2 | t | f + dist_role_1 | non_dist_role_1 | t | t + dist_role_1 | test_admin_role | t | t dist_role_3 | non_dist_role_3 | t | f + dist_role_3 | test_admin_role | t | t non_dist_role_1 | non_dist_role_2 | t | f non_dist_role_4 | dist_role_4 | t | f -(4 rows) +(7 rows) SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%dist\_%' ORDER BY 1; objid @@ -255,6 +299,25 @@ SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid':: non_dist_role_4 (5 rows) +REVOKE dist_role_3 from non_dist_role_3 granted by test_admin_role cascade; +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT json_agg(q.* ORDER BY member) FROM ( + SELECT member::regrole::text, roleid::regrole::text AS role, grantor::regrole::text, admin_option + FROM pg_auth_members WHERE roleid::regrole::text = 'dist_role_3' + order by 
member::regrole::text + ) q; + $$ +); + result +--------------------------------------------------------------------- + [{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}] + [{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}] + [{"member":"test_admin_role","role":"dist_role_3","grantor":"postgres","admin_option":true}] +(3 rows) + +revoke dist_role_3,dist_role_1 from test_admin_role cascade; +drop role test_admin_role; \c - - - :worker_1_port SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; role | member | grantor | admin_option @@ -276,9 +339,8 @@ SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist\_%' ORDER BY 1; SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; role | member | grantor | admin_option --------------------------------------------------------------------- - dist_role_1 | dist_role_2 | postgres | f non_dist_role_4 | dist_role_4 | postgres | f -(2 rows) +(1 row) SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist\_%' ORDER BY 1; rolname diff --git a/src/test/regress/expected/grant_on_database_propagation_from_non_maindb.out b/src/test/regress/expected/grant_on_database_propagation_from_non_maindb.out new file mode 100644 index 00000000000..594e3b74eb4 --- /dev/null +++ b/src/test/regress/expected/grant_on_database_propagation_from_non_maindb.out @@ -0,0 +1,471 @@ +-- Public role has connect,temp,temporary privileges on database +-- To test these scenarios, we need to revoke these privileges from public role +-- since public role privileges are inherited by new roles/users +set citus.enable_create_database_propagation to on; +create database test_2pc_db; +show citus.main_db; + citus.main_db +--------------------------------------------------------------------- + regression +(1 row) + +revoke connect,temp,temporary on database test_2pc_db from public; +CREATE SCHEMA grant_on_database_propagation_non_maindb; +SET search_path TO grant_on_database_propagation_non_maindb; +-- test grant/revoke CREATE privilege propagation on database +create user "myuser'_test"; +\c test_2pc_db - - :master_port +grant create on database test_2pc_db to "myuser'_test"; +\c regression - - :master_port; +select check_database_privileges('myuser''_test','test_2pc_db',ARRAY['CREATE']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) +(3 rows) + +\c test_2pc_db - - :master_port +revoke create on database test_2pc_db from "myuser'_test"; +\c regression - - :master_port; +select check_database_privileges('myuser''_test','test_2pc_db',ARRAY['CREATE']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) +(3 rows) + +drop user "myuser'_test"; +--------------------------------------------------------------------- +-- test grant/revoke CONNECT privilege propagation on database +\c regression - - :master_port +create user myuser2; +\c test_2pc_db - - :master_port +grant CONNECT on database test_2pc_db to myuser2; +\c regression - - :master_port; +select check_database_privileges('myuser2','test_2pc_db',ARRAY['CONNECT']); + check_database_privileges 
+--------------------------------------------------------------------- + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) +(3 rows) + +\c test_2pc_db - - :master_port +revoke connect on database test_2pc_db from myuser2; +\c regression - - :master_port +select check_database_privileges('myuser2','test_2pc_db',ARRAY['CONNECT']); + check_database_privileges +--------------------------------------------------------------------- + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) +(3 rows) + +drop user myuser2; +--------------------------------------------------------------------- +-- test grant/revoke TEMP privilege propagation on database +\c regression - - :master_port +create user myuser3; +-- test grant/revoke temp on database +\c test_2pc_db - - :master_port +grant TEMP on database test_2pc_db to myuser3; +\c regression - - :master_port; +select check_database_privileges('myuser3','test_2pc_db',ARRAY['TEMP']); + check_database_privileges +--------------------------------------------------------------------- + (TEMP,t) + (TEMP,t) + (TEMP,t) +(3 rows) + +\c test_2pc_db - - :worker_1_port +revoke TEMP on database test_2pc_db from myuser3; +\c regression - - :master_port; +select check_database_privileges('myuser3','test_2pc_db',ARRAY['TEMP']); + check_database_privileges +--------------------------------------------------------------------- + (TEMP,f) + (TEMP,f) + (TEMP,f) +(3 rows) + +drop user myuser3; +--------------------------------------------------------------------- +\c regression - - :master_port +-- test temporary privilege on database +create user myuser4; +-- test grant/revoke temporary on database +\c test_2pc_db - - :worker_1_port +grant TEMPORARY on database test_2pc_db to myuser4; +\c regression - - :master_port +select check_database_privileges('myuser4','test_2pc_db',ARRAY['TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(3 rows) + +\c test_2pc_db - - :master_port +revoke TEMPORARY on database test_2pc_db from myuser4; +\c regression - - :master_port; +select check_database_privileges('myuser4','test_2pc_db',ARRAY['TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(3 rows) + +drop user myuser4; +--------------------------------------------------------------------- +-- test ALL privileges with ALL statement on database +create user myuser5; +grant ALL on database test_2pc_db to myuser5; +\c regression - - :master_port +select check_database_privileges('myuser5','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +\c test_2pc_db - - :master_port +revoke ALL on database test_2pc_db from myuser5; +\c regression - - :master_port +select check_database_privileges('myuser5','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) + (TEMP,f) + (TEMP,f) + (TEMP,f) + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(12 rows) + +drop user myuser5; +--------------------------------------------------------------------- 
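For context on the ALL case above: PostgreSQL defines only three distinct database-level privileges (CREATE, CONNECT, and TEMPORARY, with TEMP accepted as an alias for TEMPORARY), so GRANT ALL expands to exactly those three. That is why the TEMP and TEMPORARY flags reported by check_database_privileges always flip together. A minimal sketch of the equivalence, using hypothetical names (some_db, some_user) that are not part of this test suite:

GRANT ALL ON DATABASE some_db TO some_user;
-- behaves the same as:
GRANT CREATE, CONNECT, TEMPORARY ON DATABASE some_db TO some_user;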
+-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database
+create user myuser6;
+\c test_2pc_db - - :master_port
+grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser6;
+\c regression - - :master_port
+select check_database_privileges('myuser6','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,t)
+ (CREATE,t)
+ (CREATE,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+(12 rows)
+
+\c test_2pc_db - - :master_port
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser6;
+\c regression - - :master_port
+select check_database_privileges('myuser6','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,f)
+ (CREATE,f)
+ (CREATE,f)
+ (CONNECT,f)
+ (CONNECT,f)
+ (CONNECT,f)
+ (TEMP,f)
+ (TEMP,f)
+ (TEMP,f)
+ (TEMPORARY,f)
+ (TEMPORARY,f)
+ (TEMPORARY,f)
+(12 rows)
+
+drop user myuser6;
+---------------------------------------------------------------------
+-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database with grant option
+create user myuser7;
+create user myuser_1;
+\c test_2pc_db - - :master_port
+grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser7;
+set role myuser7;
+--here, since myuser7 does not have the grant option, this should fail
+grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser_1;
+WARNING: no privileges were granted for "test_2pc_db"
+\c regression - - :master_port
+select check_database_privileges('myuser_1','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,f)
+ (CREATE,f)
+ (CREATE,f)
+ (CONNECT,f)
+ (CONNECT,f)
+ (CONNECT,f)
+ (TEMP,f)
+ (TEMP,f)
+ (TEMP,f)
+ (TEMPORARY,f)
+ (TEMPORARY,f)
+ (TEMPORARY,f)
+(12 rows)
+
+\c test_2pc_db - - :master_port
+RESET ROLE;
+grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser7 with grant option;
+set role myuser7;
+--here, since myuser7 has the grant option, this should succeed
+grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser_1 granted by myuser7;
+\c regression - - :master_port
+select check_database_privileges('myuser_1','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,t)
+ (CREATE,t)
+ (CREATE,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+(12 rows)
+
+\c test_2pc_db - - :master_port
+RESET ROLE;
+--the test below should fail and throw an error since myuser_1 still has the dependent privileges
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 restrict;
+ERROR: dependent privileges exist
+HINT: Use CASCADE to revoke them too.
+--the test below should fail and throw an error since myuser_1 still has the dependent privileges
+revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 restrict ;
+ERROR: dependent privileges exist
+HINT: Use CASCADE to revoke them too.
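The two RESTRICT failures above exercise PostgreSQL's dependent-privilege rule: myuser7 used its grant option to grant the same privileges to myuser_1, so under the default RESTRICT behavior neither the privileges nor the grant option can be revoked from myuser7 while that dependent grant exists; CASCADE, used in the next step, revokes the dependent grants as well. A minimal standalone sketch of the same chain, with hypothetical names (some_db, role_a, role_b) that are not part of this test suite:

GRANT CREATE ON DATABASE some_db TO role_a WITH GRANT OPTION;
SET ROLE role_a;
GRANT CREATE ON DATABASE some_db TO role_b;  -- role_b's privilege now depends on role_a's
RESET ROLE;
REVOKE CREATE ON DATABASE some_db FROM role_a;          -- fails: dependent privileges exist
REVOKE CREATE ON DATABASE some_db FROM role_a CASCADE;  -- succeeds and also revokes role_b's privilege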
+--the test below should succeed without error since myuser_1's privileges are revoked with cascade
+revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 cascade ;
+--here we test whether myuser7 still has the privileges after the grant option is revoked
+\c regression - - :master_port
+select check_database_privileges('myuser7','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,t)
+ (CREATE,t)
+ (CREATE,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+(12 rows)
+
+\c test_2pc_db - - :master_port
+reset role;
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7;
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser_1;
+\c regression - - :master_port
+drop user myuser_1;
+drop user myuser7;
+---------------------------------------------------------------------
+-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on multiple databases
+-- and with multiple users
+\c regression - - :master_port
+create user myuser8;
+create user myuser_2;
+set citus.enable_create_database_propagation to on;
+create database test_db;
+revoke connect,temp,temporary on database test_db from public;
+\c test_2pc_db - - :master_port
+grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db to myuser8,myuser_2;
+\c regression - - :master_port
+select check_database_privileges('myuser8','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,t)
+ (CREATE,t)
+ (CREATE,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+(12 rows)
+
+select check_database_privileges('myuser8','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,t)
+ (CREATE,t)
+ (CREATE,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+(12 rows)
+
+select check_database_privileges('myuser_2','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,t)
+ (CREATE,t)
+ (CREATE,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+(12 rows)
+
+select check_database_privileges('myuser_2','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']);
+ check_database_privileges
+---------------------------------------------------------------------
+ (CREATE,t)
+ (CREATE,t)
+ (CREATE,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (CONNECT,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMP,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+ (TEMPORARY,t)
+(12 rows)
+
+\c test_2pc_db - - :master_port
+RESET ROLE;
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser8 ;
+--the test below should succeed without error
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser_2;
+--the test below should succeed without error
+revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser8 cascade;
+\c regression - - :master_port
+select
check_database_privileges('myuser8','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) + (TEMP,f) + (TEMP,f) + (TEMP,f) + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(12 rows) + +select check_database_privileges('myuser8','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) + (TEMP,f) + (TEMP,f) + (TEMP,f) + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(12 rows) + +select check_database_privileges('myuser_2','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) + (TEMP,f) + (TEMP,f) + (TEMP,f) + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(12 rows) + +select check_database_privileges('myuser_2','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,f) + (CREATE,f) + (CREATE,f) + (CONNECT,f) + (CONNECT,f) + (CONNECT,f) + (TEMP,f) + (TEMP,f) + (TEMP,f) + (TEMPORARY,f) + (TEMPORARY,f) + (TEMPORARY,f) +(12 rows) + +\c test_2pc_db - - :master_port +reset role; +\c regression - - :master_port +drop user myuser_2; +drop user myuser8; +set citus.enable_create_database_propagation to on; +drop database test_db; +--------------------------------------------------------------------- +-- rollbacks public role database privileges to original state +grant connect,temp,temporary on database test_2pc_db to public; +drop database test_2pc_db; +set citus.enable_create_database_propagation to off; +DROP SCHEMA grant_on_database_propagation_non_maindb CASCADE; +reset citus.enable_create_database_propagation; +reset search_path; +--------------------------------------------------------------------- diff --git a/src/test/regress/expected/grant_role_from_non_maindb.out b/src/test/regress/expected/grant_role_from_non_maindb.out new file mode 100644 index 00000000000..6dc0b6c60ff --- /dev/null +++ b/src/test/regress/expected/grant_role_from_non_maindb.out @@ -0,0 +1,160 @@ +CREATE SCHEMA grant_role2pc; +SET search_path TO grant_role2pc; +set citus.enable_create_database_propagation to on; +CREATE DATABASE grant_role2pc_db; +\c grant_role2pc_db +SHOW citus.main_db; + citus.main_db +--------------------------------------------------------------------- + regression +(1 row) + +SET citus.superuser TO 'postgres'; +CREATE USER grant_role2pc_user1; +CREATE USER grant_role2pc_user2; +CREATE USER grant_role2pc_user3; +CREATE USER grant_role2pc_user4; +CREATE USER grant_role2pc_user5; +CREATE USER grant_role2pc_user6; +CREATE USER grant_role2pc_user7; +\c grant_role2pc_db +--test with empty superuser +SET citus.superuser TO ''; +grant grant_role2pc_user1 to grant_role2pc_user2; +ERROR: No superuser role is given for Citus main database connection +HINT: Set citus.superuser to a superuser role name +SET citus.superuser TO 'postgres'; +grant grant_role2pc_user1 to grant_role2pc_user2 with admin option granted by CURRENT_USER; +\c regression +select result FROM run_command_on_all_nodes( + $$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM 
( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text = 'grant_role2pc_user2' + order by member::regrole::text, roleid::regrole::text + ) t + $$ +); + result +--------------------------------------------------------------------- + [{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true}] + [{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true}] + [{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true}] +(3 rows) + +\c grant_role2pc_db +--test grant under transactional context with multiple operations +BEGIN; +grant grant_role2pc_user1,grant_role2pc_user2 to grant_role2pc_user3 WITH ADMIN OPTION; +grant grant_role2pc_user1 to grant_role2pc_user4 granted by grant_role2pc_user3 ; +COMMIT; +BEGIN; +grant grant_role2pc_user1 to grant_role2pc_user5 WITH ADMIN OPTION granted by grant_role2pc_user3; +grant grant_role2pc_user1 to grant_role2pc_user6; +ROLLBACK; +BEGIN; +grant grant_role2pc_user1 to grant_role2pc_user7; +SELECT 1/0; +ERROR: division by zero +commit; +\c regression +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user3','grant_role2pc_user4','grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7') + order by member::regrole::text, roleid::regrole::text +) t +$$); + result +--------------------------------------------------------------------- + [{"member":"grant_role2pc_user3","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false}] + [{"member":"grant_role2pc_user3","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false}] + [{"member":"grant_role2pc_user3","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false}] +(3 rows) + +\c grant_role2pc_db +grant grant_role2pc_user1,grant_role2pc_user2 to grant_role2pc_user5,grant_role2pc_user6,grant_role2pc_user7 granted by grant_role2pc_user3; +\c regression +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7') + order by member::regrole::text, roleid::regrole::text +) t +$$); + result +--------------------------------------------------------------------- + 
[{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] + [{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] + [{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user1","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] +(3 rows) + +\c grant_role2pc_db +revoke admin option for grant_role2pc_user1 from grant_role2pc_user5 granted by grant_role2pc_user3; +--test revoke under transactional context with multiple operations +BEGIN; +revoke grant_role2pc_user1 from grant_role2pc_user5 granted by grant_role2pc_user3 ; +revoke grant_role2pc_user1 from grant_role2pc_user4 granted by grant_role2pc_user3; +COMMIT; +\c grant_role2pc_db - - :worker_1_port +BEGIN; +revoke grant_role2pc_user1 from grant_role2pc_user6,grant_role2pc_user7 granted by grant_role2pc_user3; +revoke grant_role2pc_user1 from grant_role2pc_user3 cascade; +COMMIT; +\c regression +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user2','grant_role2pc_user3','grant_role2pc_user4','grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7') + order by member::regrole::text, roleid::regrole::text +) t +$$); + result +--------------------------------------------------------------------- + 
[{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] + [{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] + [{"member":"grant_role2pc_user2","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user3","role":"grant_role2pc_user2","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user7","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] +(3 rows) + +\c grant_role2pc_db - - :worker_1_port +BEGIN; +grant grant_role2pc_user1 to grant_role2pc_user5 WITH ADMIN OPTION; +grant grant_role2pc_user1 to grant_role2pc_user6; +COMMIT; +\c regression - - :master_port +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user5','grant_role2pc_user6') + order by member::regrole::text, roleid::regrole::text +) t +$$); + result +--------------------------------------------------------------------- + [{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] + [{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] + 
[{"member":"grant_role2pc_user5","role":"grant_role2pc_user1","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user5","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user1","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user6","role":"grant_role2pc_user2","grantor":"grant_role2pc_user3","admin_option":false}] +(3 rows) + +revoke grant_role2pc_user1 from grant_role2pc_user5,grant_role2pc_user6; +--clean resources +DROP SCHEMA grant_role2pc; +set citus.enable_create_database_propagation to on; +DROP DATABASE grant_role2pc_db; +drop user grant_role2pc_user2,grant_role2pc_user3,grant_role2pc_user4,grant_role2pc_user5,grant_role2pc_user6,grant_role2pc_user7; +drop user grant_role2pc_user1; +reset citus.enable_create_database_propagation; diff --git a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out index e37724e4b8b..1aa7cbcc1e1 100644 --- a/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out +++ b/src/test/regress/expected/isolation_replicate_reference_tables_to_coordinator.out @@ -138,7 +138,7 @@ step s2-view-worker: ('%pg_prepared_xacts%'), ('%COMMIT%'), ('%dump_local_%'), - ('%citus_internal_local_blocked_processes%'), + ('%citus_internal.local_blocked_processes%'), ('%add_node%'), ('%csa_from_one_node%'), ('%pg_locks%')) diff --git a/src/test/regress/expected/metadata_sync_from_non_maindb.out b/src/test/regress/expected/metadata_sync_from_non_maindb.out new file mode 100644 index 00000000000..91ca1c82deb --- /dev/null +++ b/src/test/regress/expected/metadata_sync_from_non_maindb.out @@ -0,0 +1,171 @@ +CREATE SCHEMA metadata_sync_2pc_schema; +SET search_path TO metadata_sync_2pc_schema; +set citus.enable_create_database_propagation to on; +CREATE DATABASE metadata_sync_2pc_db; +revoke connect,temp,temporary on database metadata_sync_2pc_db from public; +\c metadata_sync_2pc_db +SHOW citus.main_db; + citus.main_db +--------------------------------------------------------------------- + regression +(1 row) + +CREATE USER "grant_role2pc'_user1"; +CREATE USER "grant_role2pc'_user2"; +CREATE USER "grant_role2pc'_user3"; +CREATE USER grant_role2pc_user4; +CREATE USER grant_role2pc_user5; +\c regression +select 1 from citus_remove_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +\c metadata_sync_2pc_db +grant "grant_role2pc'_user1","grant_role2pc'_user2" to "grant_role2pc'_user3" WITH ADMIN OPTION; +-- This section was originally testing a scenario where a user with the 'admin option' grants the same role to another user, also with the 'admin option'. +-- However, we encountered inconsistent errors because the 'admin option' grant is executed after the grant below. +-- Once we establish the correct order of granting, we will reintroduce the 'granted by' clause. +-- For now, we are commenting out the grant below that includes 'granted by', and instead, we are adding a grant without the 'granted by' clause. 
+-- grant "grant_role2pc'_user1","grant_role2pc'_user2" to grant_role2pc_user4,grant_role2pc_user5 granted by "grant_role2pc'_user3"; +grant "grant_role2pc'_user1","grant_role2pc'_user2" to grant_role2pc_user4,grant_role2pc_user5; +--test for grant on database +\c metadata_sync_2pc_db - - :master_port +grant create on database metadata_sync_2pc_db to "grant_role2pc'_user1"; +grant connect on database metadata_sync_2pc_db to "grant_role2pc'_user2"; +grant ALL on database metadata_sync_2pc_db to "grant_role2pc'_user3"; +\c regression +select check_database_privileges('grant_role2pc''_user1','metadata_sync_2pc_db',ARRAY['CREATE']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) +(2 rows) + +select check_database_privileges('grant_role2pc''_user2','metadata_sync_2pc_db',ARRAY['CONNECT']); + check_database_privileges +--------------------------------------------------------------------- + (CONNECT,t) + (CONNECT,t) +(2 rows) + +select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db',ARRAY['CREATE','CONNECT','TEMP','TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) +(8 rows) + +-- test for security label on role +\c metadata_sync_2pc_db - - :master_port +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE grant_role2pc_user4 IS 'citus_unclassified'; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "grant_role2pc'_user1" IS 'citus_classified'; +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('grant_role2pc_user4') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(2 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels($$"grant_role2pc''_user1"$$) ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(2 rows) + +set citus.enable_create_database_propagation to on; +select 1 from citus_add_node('localhost', :worker_2_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('"grant_role2pc''_user2"','"grant_role2pc''_user3"','grant_role2pc_user4','grant_role2pc_user5') + order by member::regrole::text +) t +$$); + result +--------------------------------------------------------------------- + [{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":true},{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":false}] + [{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":true},{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":false}] + [{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":true},{"member":"\"grant_role2pc'_user3\"","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":true},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user4","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user1\"","grantor":"postgres","admin_option":false},{"member":"grant_role2pc_user5","role":"\"grant_role2pc'_user2\"","grantor":"postgres","admin_option":false}] +(3 rows) + +select check_database_privileges('grant_role2pc''_user1','metadata_sync_2pc_db',ARRAY['CREATE']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) +(3 rows) + +select check_database_privileges('grant_role2pc''_user2','metadata_sync_2pc_db',ARRAY['CONNECT']); + check_database_privileges +--------------------------------------------------------------------- + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) +(3 rows) + +select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db',ARRAY['CREATE','CONNECT','TEMP','TEMPORARY']); + check_database_privileges +--------------------------------------------------------------------- + (CREATE,t) + (CREATE,t) + (CREATE,t) + (CONNECT,t) + (CONNECT,t) + (CONNECT,t) + (TEMP,t) + (TEMP,t) + (TEMP,t) + (TEMPORARY,t) + (TEMPORARY,t) + (TEMPORARY,t) +(12 rows) + +SELECT node_type, result FROM 
get_citus_tests_label_provider_labels('grant_role2pc_user4') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels($$"grant_role2pc''_user1"$$) ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +\c metadata_sync_2pc_db +revoke "grant_role2pc'_user1","grant_role2pc'_user2" from grant_role2pc_user4,grant_role2pc_user5 ; +revoke admin option for "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3"; +revoke "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3"; +revoke ALL on database metadata_sync_2pc_db from "grant_role2pc'_user3"; +revoke CONNECT on database metadata_sync_2pc_db from "grant_role2pc'_user2"; +revoke CREATE on database metadata_sync_2pc_db from "grant_role2pc'_user1"; +\c regression +drop user "grant_role2pc'_user1","grant_role2pc'_user2","grant_role2pc'_user3",grant_role2pc_user4,grant_role2pc_user5; +set citus.enable_create_database_propagation to on; +drop database metadata_sync_2pc_db; +drop schema metadata_sync_2pc_schema; +reset citus.enable_create_database_propagation; +reset search_path; diff --git a/src/test/regress/expected/metadata_sync_helpers.out b/src/test/regress/expected/metadata_sync_helpers.out index 13dd70939e1..9db68eaf569 100644 --- a/src/test/regress/expected/metadata_sync_helpers.out +++ b/src/test/regress/expected/metadata_sync_helpers.out @@ -14,7 +14,7 @@ CREATE TABLE test(col_1 int); -- not in a distributed transaction SELECT citus_internal.add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); ERROR: This is an internal Citus function can only be used in a distributed transaction -SELECT citus_internal_update_relation_colocation ('test'::regclass, 1); +SELECT citus_internal.update_relation_colocation ('test'::regclass, 1); ERROR: This is an internal Citus function can only be used in a distributed transaction -- in a distributed transaction, but the application name is not Citus BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -73,7 +73,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation ('test'::regclass, 10); + SELECT citus_internal.update_relation_colocation ('test'::regclass, 10); ERROR: must be owner of table test ROLLBACK; -- finally, a user can only add its own tables to the metadata @@ -349,7 +349,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420007, 10000, 11111); + SELECT citus_internal.update_placement_metadata(1420007, 10000, 11111); ERROR: could not find valid entry for shard xxxxx 
ROLLBACK; -- non-existing users should fail to pass the checks @@ -525,8 +525,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation ('test_2'::regclass, 1231231232); - citus_internal_update_relation_colocation + SELECT citus_internal.update_relation_colocation ('test_2'::regclass, 1231231232); + update_relation_colocation --------------------------------------------------------------------- (1 row) @@ -852,7 +852,7 @@ BEGIN; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ERROR: cannot colocate tables test_2 and test_3 ROLLBACK; -- now, add few more shards for test_3 to make it colocated with test_2 @@ -1112,8 +1112,8 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; (1 row) SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); - citus_internal_update_relation_colocation + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); + update_relation_colocation --------------------------------------------------------------------- (1 row) @@ -1130,7 +1130,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420000, get_node_id(), get_node_id()+1000); + SELECT citus_internal.update_placement_metadata(1420000, get_node_id(), get_node_id()+1000); ERROR: Node with group id 1014 for shard placement xxxxx does not exist COMMIT; -- fails because the source node doesn't contain the shard @@ -1143,7 +1143,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420000, get_node_id()+10000, get_node_id()); + SELECT citus_internal.update_placement_metadata(1420000, get_node_id()+10000, get_node_id()); ERROR: Active placement for shard xxxxx is not found on group:14 COMMIT; -- fails because shard does not exist @@ -1156,7 +1156,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(0, get_node_id(), get_node_id()+1); + SELECT citus_internal.update_placement_metadata(0, get_node_id(), get_node_id()+1); ERROR: Shard id does not exists: 0 COMMIT; -- fails because none-existing shard @@ -1169,7 +1169,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(213123123123, get_node_id(), get_node_id()+1); + SELECT citus_internal.update_placement_metadata(213123123123, get_node_id(), get_node_id()+1); ERROR: Shard id does not exists: 213123123123 COMMIT; -- fails because we do not own the shard @@ -1182,7 +1182,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420007, get_node_id(), get_node_id()+1); + SELECT citus_internal.update_placement_metadata(1420007, get_node_id(), get_node_id()+1); ERROR: must be owner of table super_user_table COMMIT; -- the user only allowed to delete 
their own shards @@ -1274,7 +1274,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; -- so that making two tables colocated fails UPDATE pg_dist_partition SET repmodel = 't' WHERE logicalrelid = 'test_2'::regclass; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ERROR: cannot colocate tables test_2 and test_3 ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -1298,7 +1298,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; UPDATE pg_dist_partition SET partkey = '{VAR :varno 1 :varattno 1 :vartype 20 :vartypmod -1 :varcollid 0 :varlevelsup 1 :varnoold 1 :varoattno 1 :location -1}' WHERE logicalrelid = 'test_2'::regclass; \endif - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ERROR: cannot colocate tables test_2 and test_3 ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -1313,7 +1313,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; -- so that making two tables colocated fails UPDATE pg_dist_partition SET partmethod = '' WHERE logicalrelid = 'test_2'::regclass; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ERROR: The relation "test_2" does not have a valid entry in pg_dist_partition. ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -1328,7 +1328,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; -- so that making two tables colocated fails UPDATE pg_dist_partition SET partmethod = 'a' WHERE logicalrelid = 'test_2'::regclass; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ERROR: Updating colocation ids are only allowed for hash and single shard distributed tables: a ROLLBACK; -- colocated hash distributed table should have the same dist key columns diff --git a/src/test/regress/expected/multi_extension.out b/src/test/regress/expected/multi_extension.out index dcd3253679c..0aecd652fbc 100644 --- a/src/test/regress/expected/multi_extension.out +++ b/src/test/regress/expected/multi_extension.out @@ -1438,9 +1438,19 @@ SELECT * FROM multi_extension.print_extension_changes(); | function citus_internal.delete_shard_metadata(bigint) void | function citus_internal.delete_tenant_schema(oid) void | function citus_internal.execute_command_on_remote_nodes_as_user(text,text) void + | function citus_internal.global_blocked_processes() SETOF record + | function citus_internal.is_replication_origin_tracking_active() boolean + | function citus_internal.local_blocked_processes() SETOF record + | function citus_internal.mark_node_not_synced(integer,integer) void | function citus_internal.mark_object_distributed(oid,text,oid,text) void | function citus_internal.start_management_transaction(xid8) void -(18 rows) + | function citus_internal.start_replication_origin_tracking() void + | function citus_internal.stop_replication_origin_tracking() void + | function citus_internal.unregister_tenant_schema_globally(oid,text) void + | function citus_internal.update_none_dist_table_metadata(oid,"char",bigint,boolean) void + | function citus_internal.update_placement_metadata(bigint,integer,integer) void + | function citus_internal.update_relation_colocation(oid,integer) void +(28 rows) DROP TABLE multi_extension.prev_objects, 
multi_extension.extension_diff; -- show running version diff --git a/src/test/regress/expected/multi_test_helpers.out b/src/test/regress/expected/multi_test_helpers.out index 8b9a833a3d6..04fe5033dad 100644 --- a/src/test/regress/expected/multi_test_helpers.out +++ b/src/test/regress/expected/multi_test_helpers.out @@ -645,3 +645,16 @@ BEGIN END LOOP; END; $func$ LANGUAGE plpgsql;; +CREATE OR REPLACE FUNCTION check_database_privileges(role_name text, db_name text, permissions text[]) +RETURNS TABLE(permission text, result text) +AS $func$ +DECLARE + permission text; +BEGIN + FOREACH permission IN ARRAY permissions + LOOP + RETURN QUERY EXECUTE format($inner$SELECT %s, result FROM run_command_on_all_nodes($$select has_database_privilege(%s,%s,%s); $$)$inner$, + quote_literal(permission), quote_literal(role_name), quote_literal(db_name), quote_literal(permission)); + END LOOP; +END; +$func$ LANGUAGE plpgsql; diff --git a/src/test/regress/expected/other_databases.out b/src/test/regress/expected/other_databases.out index a15c4bb50d8..c67746055a5 100644 --- a/src/test/regress/expected/other_databases.out +++ b/src/test/regress/expected/other_databases.out @@ -98,11 +98,11 @@ REVOKE ALL ON SCHEMA citus_internal FROM nonsuperuser; DROP USER other_db_user9, nonsuperuser; -- test from a worker \c - - - :worker_1_port -CREATE DATABASE other_db2; +CREATE DATABASE worker_other_db; NOTICE: Citus partially supports CREATE DATABASE for distributed databases DETAIL: Citus does not propagate CREATE DATABASE command to other nodes HINT: You can manually create a database and its extensions on other nodes. -\c other_db2 +\c worker_other_db CREATE USER worker_user1; BEGIN; CREATE USER worker_user2; @@ -129,8 +129,211 @@ SELECT usename FROM pg_user WHERE usename LIKE 'worker\_user%' ORDER BY 1; -- some user creation commands will fail but let's make sure we try to drop them just in case DROP USER IF EXISTS worker_user1, worker_user2, worker_user3; NOTICE: role "worker_user3" does not exist, skipping -\c - - - :worker_1_port -DROP DATABASE other_db2; +-- test creating and dropping a database from a Citus non-main database +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +\c other_db1 +CREATE DATABASE other_db3; +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db3') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": {"datacl": null, "datname": "other_db3", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "other_db3", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", 
"daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "other_db3", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +\c other_db1 +DROP DATABASE other_db3; +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db3') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +\c worker_other_db - - :worker_1_port +CREATE DATABASE other_db4; +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db4') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"database_properties": {"datacl": null, "datname": "other_db4", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (local) | {"database_properties": {"datacl": null, "datname": "other_db4", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "other_db4", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": true, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +\c worker_other_db +DROP DATABASE other_db4; +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db4') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (local) | {"database_properties": 
null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +DROP DATABASE worker_other_db; +CREATE DATABASE other_db5; +-- disable create database propagation for the next test +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +\c other_db5 - - :worker_2_port +-- locally create a database +CREATE DATABASE local_db; +\c regression - - - +-- re-enable create database propagation +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +\c other_db5 - - :master_port +-- Test a scenario where create database fails because the database +-- already exists on another node and we don't crash etc. 
+CREATE DATABASE local_db; +ERROR: database "local_db" already exists +CONTEXT: while executing command on localhost:xxxxx +while executing command on localhost:xxxxx +\c regression - - - +SELECT * FROM public.check_database_on_all_nodes('local_db') ORDER BY node_type, result; + node_type | result +--------------------------------------------------------------------- + coordinator (local) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": null, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} + worker node (remote) | {"database_properties": {"datacl": null, "datname": "local_db", "datctype": "C", "encoding": "UTF8", "datcollate": "C", "tablespace": "pg_default", "daticurules": null, "datallowconn": true, "datconnlimit": -1, "daticulocale": null, "datistemplate": false, "database_owner": "postgres", "datcollversion": null, "datlocprovider": "c"}, "pg_dist_object_record_for_db_exists": false, "stale_pg_dist_object_record_for_a_db_exists": false} +(3 rows) + +\c - - - :worker_2_port +-- locally drop the database for cleanup purposes +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + +DROP DATABASE local_db; +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + \c - - - :master_port +DROP DATABASE other_db5; +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$); + result +--------------------------------------------------------------------- + ALTER SYSTEM + ALTER SYSTEM + ALTER SYSTEM +(3 rows) + +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); + result +--------------------------------------------------------------------- + t + t + t +(3 rows) + +SELECT pg_sleep(0.1); + pg_sleep +--------------------------------------------------------------------- + +(1 row) + DROP SCHEMA other_databases; DROP DATABASE other_db1; diff --git a/src/test/regress/expected/schema_based_sharding.out b/src/test/regress/expected/schema_based_sharding.out index 5204d60d50c..711c3914137 100644 --- a/src/test/regress/expected/schema_based_sharding.out +++ b/src/test/regress/expected/schema_based_sharding.out @@ -19,13 +19,13 @@ SELECT citus_internal.add_tenant_schema(1, NULL); ERROR: colocation_id cannot be NULL SELECT citus_internal.delete_tenant_schema(NULL); ERROR: schema_id cannot be NULL -SELECT citus_internal_unregister_tenant_schema_globally(1, NULL); +SELECT 
citus_internal.unregister_tenant_schema_globally(1, NULL); ERROR: schema_name cannot be NULL -SELECT citus_internal_unregister_tenant_schema_globally(NULL, 'text'); +SELECT citus_internal.unregister_tenant_schema_globally(NULL, 'text'); ERROR: schema_id cannot be NULL --- Verify that citus_internal_unregister_tenant_schema_globally can only +-- Verify that citus_internal.unregister_tenant_schema_globally can only -- be called on schemas that are dropped already. -SELECT citus_internal_unregister_tenant_schema_globally('regular_schema'::regnamespace, 'regular_schema'); +SELECT citus_internal.unregister_tenant_schema_globally('regular_schema'::regnamespace, 'regular_schema'); ERROR: schema is expected to be already dropped because this function is only expected to be called from Citus drop hook SELECT 1 FROM citus_remove_node('localhost', :worker_2_port); ?column? @@ -1511,10 +1511,10 @@ SELECT pg_reload_conf(); t (1 row) --- Verify that citus_internal_unregister_tenant_schema_globally is a no-op +-- Verify that citus_internal.unregister_tenant_schema_globally is a no-op -- on workers. -SELECT citus_internal_unregister_tenant_schema_globally('tenant_3'::regnamespace, 'tenant_3'); - citus_internal_unregister_tenant_schema_globally +SELECT citus_internal.unregister_tenant_schema_globally('tenant_3'::regnamespace, 'tenant_3'); + unregister_tenant_schema_globally --------------------------------------------------------------------- (1 row) diff --git a/src/test/regress/expected/seclabel.out b/src/test/regress/expected/seclabel.out index f826de44b66..ae658973408 100644 --- a/src/test/regress/expected/seclabel.out +++ b/src/test/regress/expected/seclabel.out @@ -115,16 +115,13 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx SECURITY LABEL ON ROLE user1 IS 'citus_unclassified'; NOTICE: issuing SECURITY LABEL ON ROLE user1 IS 'citus_unclassified' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx -SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified'; -NOTICE: issuing SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified' +SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_classified'; +NOTICE: issuing SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_classified' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx \c - - - :worker_1_port --- command not allowed from worker node -SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus ''!unclassified'; -ERROR: operation is not allowed on this node -HINT: Connect to the coordinator and run it again. 
-\c - - - :master_port -RESET citus.log_remote_commands; +SET citus.log_remote_commands TO on; +SET citus.grep_remote_commands = '%SECURITY LABEL%'; +-- command from the worker node should be propagated to the coordinator SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; node_type | result --------------------------------------------------------------------- @@ -132,6 +129,33 @@ SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORD worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} (2 rows) +SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified'; +NOTICE: issuing SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified' +DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(2 rows) + +RESET citus.log_remote_commands; +SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified'; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(2 rows) + +\c - - - :master_port +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(2 rows) + SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; node_type | result --------------------------------------------------------------------- @@ -143,7 +167,7 @@ SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') SET citus.log_remote_commands TO on; SET citus.grep_remote_commands = '%SECURITY LABEL%'; SELECT 1 FROM citus_add_node('localhost', :worker_2_port); -NOTICE: issuing SELECT worker_create_or_alter_role('user1', 'CREATE ROLE user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''', 'ALTER ROLE user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''');SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_unclassified' +NOTICE: issuing SELECT worker_create_or_alter_role('user1', 'CREATE ROLE user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''', 'ALTER ROLE user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID 
UNTIL ''infinity''');SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx NOTICE: issuing SELECT worker_create_or_alter_role('user 2', 'CREATE ROLE "user 2" NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''', 'ALTER ROLE "user 2" NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS CONNECTION LIMIT -1 PASSWORD NULL VALID UNTIL ''infinity''');SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified' DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx @@ -155,9 +179,9 @@ DETAIL: on server postgres@localhost:xxxxx connectionId: xxxxxxx SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; node_type | result --------------------------------------------------------------------- - coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} - worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} - worker_2 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + coordinator | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} (3 rows) SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; @@ -168,6 +192,35 @@ SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') worker_2 | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} (3 rows) +-- disable the GUC and check that the command is not propagated +SET citus.enable_alter_role_propagation TO off; +SECURITY LABEL ON ROLE user1 IS 'citus_unclassified'; +NOTICE: not propagating SECURITY LABEL commands to other nodes +HINT: Connect to other nodes directly to manually assign necessary labels. +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +\c - - - :worker_2_port +SET citus.log_remote_commands TO on; +SET citus.grep_remote_commands = '%SECURITY LABEL%'; +SET citus.enable_alter_role_propagation TO off; +SECURITY LABEL ON ROLE user1 IS 'citus ''!unclassified'; +NOTICE: not propagating SECURITY LABEL commands to other nodes +HINT: Connect to other nodes directly to manually assign necessary labels. 
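+-- with propagation disabled, the label above was applied only on this node
+-- (worker_2); the coordinator and worker_1 are expected to keep their
+-- previous labels, as the check below shows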
+SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus '!unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +RESET citus.enable_alter_role_propagation; -- cleanup RESET citus.log_remote_commands; DROP ROLE user1, "user 2"; diff --git a/src/test/regress/expected/seclabel_non_maindb.out b/src/test/regress/expected/seclabel_non_maindb.out new file mode 100644 index 00000000000..48c89fb3119 --- /dev/null +++ b/src/test/regress/expected/seclabel_non_maindb.out @@ -0,0 +1,111 @@ +-- SECLABEL +-- +-- Test suite for running SECURITY LABEL ON ROLE statements from non-main databases +SET citus.enable_create_database_propagation to ON; +CREATE DATABASE database1; +CREATE DATABASE database2; +\c - - - :worker_1_port +SET citus.enable_create_database_propagation to ON; +CREATE DATABASE database_w1; +\c - - - :master_port +CREATE ROLE user1; +\c database1 +SHOW citus.main_db; + citus.main_db +--------------------------------------------------------------------- + regression +(1 row) + +SHOW citus.superuser; + citus.superuser +--------------------------------------------------------------------- + postgres +(1 row) + +CREATE ROLE "user 2"; +-- Set a SECURITY LABEL on a role from a non-main database +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified'; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_unclassified'; +-- Check the result +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_classified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +\c database1 +-- Set a SECURITY LABEL on a database; it should not be propagated +SECURITY LABEL FOR "citus '!tests_label_provider" ON DATABASE database1 IS 'citus_classified'; +-- Set a SECURITY LABEL on a table; it should not be propagated +CREATE TABLE a (i int); +SECURITY LABEL ON TABLE a IS 'citus_classified'; +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('database1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_classified", "objtype": "database", "provider": "citus '!tests_label_provider"} + worker_1 | +
worker_2 | +(3 rows) + +-- Check that only the SECURITY LABEL for ROLES is propagated to the non-main databases on other nodes +\c database_w1 - - :worker_1_port +SELECT provider, objtype, label, objname FROM pg_seclabels ORDER BY objname; + provider | objtype | label | objname +--------------------------------------------------------------------- + citus '!tests_label_provider | role | citus_unclassified | "user 2" + citus '!tests_label_provider | role | citus_classified | user1 +(2 rows) + +-- Check the result after a transaction +BEGIN; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_unclassified'; +SECURITY LABEL FOR "citus '!tests_label_provider" ON DATABASE database_w1 IS 'citus_classified'; +COMMIT; +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('database_w1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | + worker_1 | {"label": "citus_classified", "objtype": "database", "provider": "citus '!tests_label_provider"} + worker_2 | +(3 rows) + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +BEGIN; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_classified'; +ROLLBACK; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; + node_type | result +--------------------------------------------------------------------- + coordinator | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_1 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} + worker_2 | {"label": "citus_unclassified", "objtype": "role", "provider": "citus '!tests_label_provider"} +(3 rows) + +-- clean up +SET citus.enable_create_database_propagation to ON; +DROP DATABASE database1; +DROP DATABASE database2; +DROP DATABASE database_w1; +DROP ROLE user1; +DROP ROLE "user 2"; +RESET citus.enable_create_database_propagation; diff --git a/src/test/regress/expected/shard_rebalancer.out b/src/test/regress/expected/shard_rebalancer.out index a7cd6b38cb9..988fa68be88 100644 --- a/src/test/regress/expected/shard_rebalancer.out +++ b/src/test/regress/expected/shard_rebalancer.out @@ -2395,6 +2395,74 @@ SELECT count(*) FROM pg_dist_partition; 0 (1 row) +-- verify a system with a new node won't copy distributed table shards without reference tables +SELECT 1 from master_remove_node('localhost', :worker_2_port); + ?column? 
+--------------------------------------------------------------------- + 1 +(1 row) + +SELECT public.wait_until_metadata_sync(30000); + wait_until_metadata_sync +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE r1 (a int PRIMARY KEY, b int); +SELECT create_reference_table('r1'); + create_reference_table +--------------------------------------------------------------------- + +(1 row) + +CREATE TABLE d1 (a int PRIMARY KEY, b int); +SELECT create_distributed_table('d1', 'a'); + create_distributed_table +--------------------------------------------------------------------- + +(1 row) + +ALTER SEQUENCE pg_dist_groupid_seq RESTART WITH 15; +SELECT 1 from master_add_node('localhost', :worker_2_port); + ?column? +--------------------------------------------------------------------- + 1 +(1 row) + +-- count the number of placements for the reference table to verify it is not available on +-- all nodes +SELECT count(*) +FROM pg_dist_shard +JOIN pg_dist_shard_placement USING (shardid) +WHERE logicalrelid = 'r1'::regclass; + count +--------------------------------------------------------------------- + 1 +(1 row) + +-- #7426 We can't move shards to the fresh node before we copy reference tables there. +-- rebalance_table_shards() will do the copy, but the low-level +-- citus_move_shard_placement() should raise an error +SELECT citus_move_shard_placement(pg_dist_shard.shardid, nodename, nodeport, 'localhost', :worker_2_port) + FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) + WHERE logicalrelid = 'd1'::regclass AND nodename = 'localhost' AND nodeport = :worker_1_port LIMIT 1; +ERROR: there are missing reference tables on some nodes +SELECT replicate_reference_tables(); + replicate_reference_tables +--------------------------------------------------------------------- + +(1 row) + +-- After replication, the move should succeed. 
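+-- (replicate_reference_tables() above copied r1 to the new node, so the
+-- missing-reference-tables check no longer blocks the move)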
+SELECT citus_move_shard_placement(pg_dist_shard.shardid, nodename, nodeport, 'localhost', :worker_2_port) + FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) + WHERE logicalrelid = 'd1'::regclass AND nodename = 'localhost' AND nodeport = :worker_1_port LIMIT 1; + citus_move_shard_placement +--------------------------------------------------------------------- + +(1 row) + +DROP TABLE d1, r1; -- verify a system having only reference tables will copy the reference tables when -- executing the rebalancer SELECT 1 from master_remove_node('localhost', :worker_2_port); diff --git a/src/test/regress/expected/upgrade_list_citus_objects.out b/src/test/regress/expected/upgrade_list_citus_objects.out index a4f948ee630..4f17695befc 100644 --- a/src/test/regress/expected/upgrade_list_citus_objects.out +++ b/src/test/regress/expected/upgrade_list_citus_objects.out @@ -73,6 +73,10 @@ ORDER BY 1; function citus_internal.delete_tenant_schema(oid) function citus_internal.execute_command_on_remote_nodes_as_user(text,text) function citus_internal.find_groupid_for_node(text,integer) + function citus_internal.global_blocked_processes() + function citus_internal.is_replication_origin_tracking_active() + function citus_internal.local_blocked_processes() + function citus_internal.mark_node_not_synced(integer,integer) function citus_internal.mark_object_distributed(oid,text,oid,text) function citus_internal.pg_dist_node_trigger_func() function citus_internal.pg_dist_rebalance_strategy_trigger_func() @@ -81,6 +85,12 @@ ORDER BY 1; function citus_internal.replace_isolation_tester_func() function citus_internal.restore_isolation_tester_func() function citus_internal.start_management_transaction(xid8) + function citus_internal.start_replication_origin_tracking() + function citus_internal.stop_replication_origin_tracking() + function citus_internal.unregister_tenant_schema_globally(oid,text) + function citus_internal.update_none_dist_table_metadata(oid,"char",bigint,boolean) + function citus_internal.update_placement_metadata(bigint,integer,integer) + function citus_internal.update_relation_colocation(oid,integer) function citus_internal_add_colocation_metadata(integer,integer,integer,regtype,oid) function citus_internal_add_object_metadata(text,text[],text[],integer,integer,boolean) function citus_internal_add_partition_metadata(regclass,"char",text,integer,"char") @@ -361,5 +371,5 @@ ORDER BY 1; view citus_stat_tenants_local view pg_dist_shard_placement view time_partitions -(351 rows) +(361 rows) diff --git a/src/test/regress/multi_1_schedule b/src/test/regress/multi_1_schedule index 5d3cebc1deb..fa9093617ee 100644 --- a/src/test/regress/multi_1_schedule +++ b/src/test/regress/multi_1_schedule @@ -41,7 +41,7 @@ test: create_drop_database_propagation_pg16 test: grant_on_parameter_propagation test: comment_on_database test: comment_on_role - +test: metadata_sync_from_non_maindb # don't parallelize single_shard_table_udfs to make sure colocation ids are sequential test: single_shard_table_udfs test: schema_based_sharding @@ -59,7 +59,7 @@ test: multi_metadata_attributes test: multi_read_from_secondaries -test: grant_on_database_propagation +test: grant_on_database_propagation grant_on_database_propagation_from_non_maindb test: alter_database_propagation test: citus_shards diff --git a/src/test/regress/multi_schedule b/src/test/regress/multi_schedule index f599363a988..85de7b8b829 100644 --- a/src/test/regress/multi_schedule +++ b/src/test/regress/multi_schedule @@ -108,7 +108,7 @@ test: 
object_propagation_debug test: undistribute_table test: run_command_on_all_nodes test: background_task_queue_monitor -test: other_databases +test: other_databases grant_role_from_non_maindb seclabel_non_maindb test: citus_internal_access # Causal clock test diff --git a/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec b/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec index fce3794274a..9683935bed0 100644 --- a/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec +++ b/src/test/regress/spec/isolation_replicate_reference_tables_to_coordinator.spec @@ -90,7 +90,7 @@ step "s2-view-worker" ('%pg_prepared_xacts%'), ('%COMMIT%'), ('%dump_local_%'), - ('%citus_internal_local_blocked_processes%'), + ('%citus_internal.local_blocked_processes%'), ('%add_node%'), ('%csa_from_one_node%'), ('%pg_locks%')) diff --git a/src/test/regress/sql/create_drop_database_propagation_pg15.sql b/src/test/regress/sql/create_drop_database_propagation_pg15.sql index 40d1b9e0987..4e006c54fa8 100644 --- a/src/test/regress/sql/create_drop_database_propagation_pg15.sql +++ b/src/test/regress/sql/create_drop_database_propagation_pg15.sql @@ -60,6 +60,14 @@ CREATE DATABASE test_locale_provider SELECT * FROM public.check_database_on_all_nodes('test_locale_provider') ORDER BY node_type; +\c test_locale_provider - - :worker_2_port + +set citus.enable_create_database_propagation to on; +create database unsupported_option_from_non_main_db with oid = 12345; + +\c regression - - :master_port + +set citus.enable_create_database_propagation to on; drop database test_locale_provider; \c - - - :master_port diff --git a/src/test/regress/sql/create_ref_dist_from_citus_local.sql b/src/test/regress/sql/create_ref_dist_from_citus_local.sql index 2b78ab29ec2..e9610d65fbe 100644 --- a/src/test/regress/sql/create_ref_dist_from_citus_local.sql +++ b/src/test/regress/sql/create_ref_dist_from_citus_local.sql @@ -219,7 +219,7 @@ ROLLBACK; -- Test the UDFs that we use to convert Citus local tables to single-shard tables and -- reference tables. 
-SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', 1, true); +SELECT citus_internal.update_none_dist_table_metadata(1, 't', 1, true); SELECT citus_internal.delete_placement_metadata(1); CREATE ROLE test_user_create_ref_dist WITH LOGIN; @@ -234,10 +234,10 @@ SET citus.next_placement_id TO 8510000; SET citus.shard_replication_factor TO 1; SET search_path TO create_ref_dist_from_citus_local; -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(null, 't', 1, true); -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, null, 1, true); -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', null, true); -SELECT pg_catalog.citus_internal_update_none_dist_table_metadata(1, 't', 1, null); +SELECT citus_internal.update_none_dist_table_metadata(null, 't', 1, true); +SELECT citus_internal.update_none_dist_table_metadata(1, null, 1, true); +SELECT citus_internal.update_none_dist_table_metadata(1, 't', null, true); +SELECT citus_internal.update_none_dist_table_metadata(1, 't', 1, null); SELECT citus_internal.delete_placement_metadata(null); @@ -245,7 +245,7 @@ CREATE TABLE udf_test (col_1 int); SELECT citus_add_local_table_to_metadata('udf_test'); BEGIN; - SELECT pg_catalog.citus_internal_update_none_dist_table_metadata('create_ref_dist_from_citus_local.udf_test'::regclass, 'k', 99999, true); + SELECT citus_internal.update_none_dist_table_metadata('create_ref_dist_from_citus_local.udf_test'::regclass, 'k', 99999, true); SELECT COUNT(*)=1 FROM pg_dist_partition WHERE logicalrelid = 'create_ref_dist_from_citus_local.udf_test'::regclass AND repmodel = 'k' AND colocationid = 99999 AND autoconverted = true; diff --git a/src/test/regress/sql/create_role_propagation.sql b/src/test/regress/sql/create_role_propagation.sql index fa32cf2d25b..bd2951b175c 100644 --- a/src/test/regress/sql/create_role_propagation.sql +++ b/src/test/regress/sql/create_role_propagation.sql @@ -75,6 +75,8 @@ SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::t \c - - - :master_port +create role test_admin_role; + -- test grants with distributed and non-distributed roles SELECT master_remove_node('localhost', :worker_2_port); @@ -84,6 +86,8 @@ CREATE ROLE dist_role_2; CREATE ROLE dist_role_3; CREATE ROLE dist_role_4; + + SET citus.enable_create_role_propagation TO OFF; CREATE ROLE non_dist_role_1 SUPERUSER; @@ -93,28 +97,71 @@ CREATE ROLE non_dist_role_4; SET citus.enable_create_role_propagation TO ON; + +grant dist_role_3,dist_role_1 to test_admin_role with admin option; + SET ROLE dist_role_1; GRANT non_dist_role_1 TO non_dist_role_2; SET citus.enable_create_role_propagation TO OFF; +grant dist_role_1 to non_dist_role_1 with admin option; SET ROLE non_dist_role_1; -GRANT dist_role_1 TO dist_role_2; +GRANT dist_role_1 TO dist_role_2 granted by non_dist_role_1; RESET ROLE; SET citus.enable_create_role_propagation TO ON; -GRANT dist_role_3 TO non_dist_role_3; + +GRANT dist_role_3 TO non_dist_role_3 granted by test_admin_role; GRANT non_dist_role_4 TO dist_role_4; +GRANT dist_role_3 TO dist_role_4 granted by test_admin_role; + SELECT 1 FROM master_add_node('localhost', :worker_2_port); -SELECT roleid::regrole::text AS role, member::regrole::text, (grantor::regrole::text IN ('postgres', 'non_dist_role_1', 'dist_role_1')) AS grantor, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT json_agg(q.* ORDER BY member) FROM ( + SELECT 
member::regrole::text, roleid::regrole::text AS role, grantor::regrole::text, admin_option + FROM pg_auth_members WHERE roleid::regrole::text = 'dist_role_3' + ) q; + $$ +); + +REVOKE dist_role_3 from dist_role_4 granted by test_admin_role cascade; + +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT json_agg(q.* ORDER BY member) FROM ( + SELECT member::regrole::text, roleid::regrole::text AS role, grantor::regrole::text, admin_option + FROM pg_auth_members WHERE roleid::regrole::text = 'dist_role_3' + order by member::regrole::text + ) q; + $$ +); + +SELECT roleid::regrole::text AS role, member::regrole::text, (grantor::regrole::text IN ('postgres', 'non_dist_role_1', 'dist_role_1','test_admin_role')) AS grantor, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; SELECT objid::regrole FROM pg_catalog.pg_dist_object WHERE classid='pg_authid'::regclass::oid AND objid::regrole::text LIKE '%dist\_%' ORDER BY 1; +REVOKE dist_role_3 from non_dist_role_3 granted by test_admin_role cascade; + +SELECT result FROM run_command_on_all_nodes( + $$ + SELECT json_agg(q.* ORDER BY member) FROM ( + SELECT member::regrole::text, roleid::regrole::text AS role, grantor::regrole::text, admin_option + FROM pg_auth_members WHERE roleid::regrole::text = 'dist_role_3' + order by member::regrole::text + ) q; + $$ +); + +revoke dist_role_3,dist_role_1 from test_admin_role cascade; +drop role test_admin_role; + \c - - - :worker_1_port SELECT roleid::regrole::text AS role, member::regrole::text, grantor::regrole::text, admin_option FROM pg_auth_members WHERE roleid::regrole::text LIKE '%dist\_%' ORDER BY 1, 2; SELECT rolname FROM pg_authid WHERE rolname LIKE '%dist\_%' ORDER BY 1; diff --git a/src/test/regress/sql/grant_on_database_propagation_from_non_maindb.sql b/src/test/regress/sql/grant_on_database_propagation_from_non_maindb.sql new file mode 100644 index 00000000000..f83472b3655 --- /dev/null +++ b/src/test/regress/sql/grant_on_database_propagation_from_non_maindb.sql @@ -0,0 +1,246 @@ +-- Public role has connect,temp,temporary privileges on database +-- To test these scenarios, we need to revoke these privileges from public role +-- since public role privileges are inherited by new roles/users +set citus.enable_create_database_propagation to on; +create database test_2pc_db; +show citus.main_db; +revoke connect,temp,temporary on database test_2pc_db from public; + +CREATE SCHEMA grant_on_database_propagation_non_maindb; +SET search_path TO grant_on_database_propagation_non_maindb; + +-- test grant/revoke CREATE privilege propagation on database +create user "myuser'_test"; + +\c test_2pc_db - - :master_port +grant create on database test_2pc_db to "myuser'_test"; + +\c regression - - :master_port; +select check_database_privileges('myuser''_test','test_2pc_db',ARRAY['CREATE']); + +\c test_2pc_db - - :master_port +revoke create on database test_2pc_db from "myuser'_test"; + +\c regression - - :master_port; +select check_database_privileges('myuser''_test','test_2pc_db',ARRAY['CREATE']); + +drop user "myuser'_test"; +----------------------------------------------------------------------- + +-- test grant/revoke CONNECT privilege propagation on database +\c regression - - :master_port +create user myuser2; + +\c test_2pc_db - - :master_port +grant CONNECT on database test_2pc_db to myuser2; + +\c regression - - :master_port; +select check_database_privileges('myuser2','test_2pc_db',ARRAY['CONNECT']); + +\c test_2pc_db - - :master_port +revoke connect on database 
+ +\c regression - - :master_port +select check_database_privileges('myuser2','test_2pc_db',ARRAY['CONNECT']); + +drop user myuser2; + +----------------------------------------------------------------------- + +-- test grant/revoke TEMP privilege propagation on database +\c regression - - :master_port +create user myuser3; + +-- test grant/revoke temp on database +\c test_2pc_db - - :master_port +grant TEMP on database test_2pc_db to myuser3; + +\c regression - - :master_port; +select check_database_privileges('myuser3','test_2pc_db',ARRAY['TEMP']); + + +\c test_2pc_db - - :worker_1_port +revoke TEMP on database test_2pc_db from myuser3; + +\c regression - - :master_port; +select check_database_privileges('myuser3','test_2pc_db',ARRAY['TEMP']); + +drop user myuser3; + +----------------------------------------------------------------------- + +\c regression - - :master_port +-- test temporary privilege on database +create user myuser4; + +-- test grant/revoke temporary on database +\c test_2pc_db - - :worker_1_port +grant TEMPORARY on database test_2pc_db to myuser4; + +\c regression - - :master_port +select check_database_privileges('myuser4','test_2pc_db',ARRAY['TEMPORARY']); + +\c test_2pc_db - - :master_port +revoke TEMPORARY on database test_2pc_db from myuser4; + +\c regression - - :master_port; +select check_database_privileges('myuser4','test_2pc_db',ARRAY['TEMPORARY']); + +drop user myuser4; +----------------------------------------------------------------------- + +-- test ALL privileges with the ALL keyword on database +create user myuser5; + +grant ALL on database test_2pc_db to myuser5; + +\c regression - - :master_port +select check_database_privileges('myuser5','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + + +\c test_2pc_db - - :master_port +revoke ALL on database test_2pc_db from myuser5; + +\c regression - - :master_port +select check_database_privileges('myuser5','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + +drop user myuser5; +----------------------------------------------------------------------- + +-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database +create user myuser6; + +\c test_2pc_db - - :master_port +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser6; + +\c regression - - :master_port +select check_database_privileges('myuser6','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + +\c test_2pc_db - - :master_port +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser6; + +\c regression - - :master_port +select check_database_privileges('myuser6','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + + +drop user myuser6; +----------------------------------------------------------------------- + +-- test CREATE,CONNECT,TEMP,TEMPORARY privileges one by one on database with grant option +create user myuser7; +create user myuser_1; + +\c test_2pc_db - - :master_port +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser7; + +set role myuser7; +-- since myuser7 does not have the grant option, this grant should fail +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser_1; + +\c regression - - :master_port +select check_database_privileges('myuser_1','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + +\c test_2pc_db - - :master_port + +RESET ROLE; + +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser7 with grant option; +set role myuser7; + +-- now that myuser7 has the grant option, this grant should succeed
+grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db to myuser_1 granted by myuser7; + +\c regression - - :master_port +select check_database_privileges('myuser_1','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + +\c test_2pc_db - - :master_port + +RESET ROLE; + +-- the revoke below should fail and throw an error since myuser_1 still has the dependent privileges +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 restrict; +-- the revoke below should fail and throw an error since myuser_1 still has the dependent privileges +revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 restrict; + +-- the revoke below should succeed without error since myuser_1's privileges are revoked with cascade +revoke grant option for CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7 cascade; + +-- check whether myuser7 still has the privileges after the grant option revoke + +\c regression - - :master_port +select check_database_privileges('myuser7','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + + +\c test_2pc_db - - :master_port + +reset role; + +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser7; +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db from myuser_1; + +\c regression - - :master_port +drop user myuser_1; +drop user myuser7; + +----------------------------------------------------------------------- + +-- test CREATE,CONNECT,TEMP,TEMPORARY privileges on multiple databases +-- and multiple users +\c regression - - :master_port +create user myuser8; +create user myuser_2; + +set citus.enable_create_database_propagation to on; +create database test_db; + +revoke connect,temp,temporary on database test_db from public; + +\c test_2pc_db - - :master_port +grant CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db to myuser8,myuser_2; + +\c regression - - :master_port +select check_database_privileges('myuser8','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); +select check_database_privileges('myuser8','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); +select check_database_privileges('myuser_2','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); +select check_database_privileges('myuser_2','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + + +\c test_2pc_db - - :master_port + +RESET ROLE; +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser8; + +-- the revoke below should succeed and not throw any error +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser_2; + +-- the revoke below should succeed and not throw any error +revoke CREATE,CONNECT,TEMP,TEMPORARY on database test_2pc_db,test_db from myuser8 cascade; + +\c regression - - :master_port +select check_database_privileges('myuser8','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); +select check_database_privileges('myuser8','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); +select check_database_privileges('myuser_2','test_2pc_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); +select check_database_privileges('myuser_2','test_db',ARRAY['CREATE', 'CONNECT', 'TEMP', 'TEMPORARY']); + + +\c test_2pc_db - - :master_port + +reset role; + +\c regression - - :master_port +drop user myuser_2; +drop user myuser8; + +set citus.enable_create_database_propagation to on; +drop database test_db; +
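+-- (Aside, a minimal hand-rolled equivalent of the check_database_privileges()
+-- helper used above, which fans a has_database_privilege() probe out to every
+-- node via run_command_on_all_nodes(); shown here for the current user, which
+-- is assumed to be the test superuser.)
+select result from run_command_on_all_nodes($$select has_database_privilege(current_user, 'test_2pc_db', 'CONNECT')$$);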
+--------------------------------------------------------------------------- +-- roll back the public role's database privileges to their original state +grant connect,temp,temporary on database test_2pc_db to public; +drop database test_2pc_db; +set citus.enable_create_database_propagation to off; +DROP SCHEMA grant_on_database_propagation_non_maindb CASCADE; + +reset citus.enable_create_database_propagation; +reset search_path; +--------------------------------------------------------------------------- diff --git a/src/test/regress/sql/grant_role_from_non_maindb.sql b/src/test/regress/sql/grant_role_from_non_maindb.sql new file mode 100644 index 00000000000..b74b5092d20 --- /dev/null +++ b/src/test/regress/sql/grant_role_from_non_maindb.sql @@ -0,0 +1,147 @@ +CREATE SCHEMA grant_role2pc; +SET search_path TO grant_role2pc; +set citus.enable_create_database_propagation to on; + +CREATE DATABASE grant_role2pc_db; + +\c grant_role2pc_db +SHOW citus.main_db; + +SET citus.superuser TO 'postgres'; +CREATE USER grant_role2pc_user1; +CREATE USER grant_role2pc_user2; +CREATE USER grant_role2pc_user3; +CREATE USER grant_role2pc_user4; +CREATE USER grant_role2pc_user5; +CREATE USER grant_role2pc_user6; +CREATE USER grant_role2pc_user7; + +\c grant_role2pc_db + +-- test with an empty citus.superuser +SET citus.superuser TO ''; +grant grant_role2pc_user1 to grant_role2pc_user2; + +SET citus.superuser TO 'postgres'; +grant grant_role2pc_user1 to grant_role2pc_user2 with admin option granted by CURRENT_USER; + +\c regression + +select result FROM run_command_on_all_nodes( + $$ + SELECT array_to_json(array_agg(row_to_json(t))) + FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text = 'grant_role2pc_user2' + order by member::regrole::text, roleid::regrole::text + ) t + $$ +); + +\c grant_role2pc_db +-- test grant in a transactional context with multiple operations +BEGIN; +grant grant_role2pc_user1,grant_role2pc_user2 to grant_role2pc_user3 WITH ADMIN OPTION; +grant grant_role2pc_user1 to grant_role2pc_user4 granted by grant_role2pc_user3; +COMMIT; + +BEGIN; +grant grant_role2pc_user1 to grant_role2pc_user5 WITH ADMIN OPTION granted by grant_role2pc_user3; +grant grant_role2pc_user1 to grant_role2pc_user6; +ROLLBACK; + + + +BEGIN; +grant grant_role2pc_user1 to grant_role2pc_user7; +-- the division by zero below aborts the transaction, so the grant above must be rolled back on all nodes +SELECT 1/0; +commit; + + +\c regression + +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user3','grant_role2pc_user4','grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7') + order by member::regrole::text, roleid::regrole::text +) t +$$); + + +\c grant_role2pc_db + +grant grant_role2pc_user1,grant_role2pc_user2 to grant_role2pc_user5,grant_role2pc_user6,grant_role2pc_user7 granted by grant_role2pc_user3; + +\c regression + +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7') + order by member::regrole::text, roleid::regrole::text +) t +$$); + +\c grant_role2pc_db +revoke admin option for grant_role2pc_user1 from grant_role2pc_user5 granted by grant_role2pc_user3;
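+-- (Note: 'revoke admin option for' only strips the admin option; the role
+-- membership itself stays in place until the plain revokes below.)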
+ +-- test revoke in a transactional context with multiple operations +BEGIN; +revoke grant_role2pc_user1 from grant_role2pc_user5 granted by grant_role2pc_user3; +revoke grant_role2pc_user1 from grant_role2pc_user4 granted by grant_role2pc_user3; +COMMIT; +\c grant_role2pc_db - - :worker_1_port +BEGIN; +revoke grant_role2pc_user1 from grant_role2pc_user6,grant_role2pc_user7 granted by grant_role2pc_user3; +revoke grant_role2pc_user1 from grant_role2pc_user3 cascade; +COMMIT; + +\c regression + +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user2','grant_role2pc_user3','grant_role2pc_user4','grant_role2pc_user5','grant_role2pc_user6','grant_role2pc_user7') + order by member::regrole::text, roleid::regrole::text +) t +$$); + +\c grant_role2pc_db - - :worker_1_port +BEGIN; +grant grant_role2pc_user1 to grant_role2pc_user5 WITH ADMIN OPTION; +grant grant_role2pc_user1 to grant_role2pc_user6; +COMMIT; + +\c regression - - :master_port + +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('grant_role2pc_user5','grant_role2pc_user6') + order by member::regrole::text, roleid::regrole::text +) t +$$); + +revoke grant_role2pc_user1 from grant_role2pc_user5,grant_role2pc_user6; + +-- clean up resources +DROP SCHEMA grant_role2pc; +set citus.enable_create_database_propagation to on; +DROP DATABASE grant_role2pc_db; +drop user grant_role2pc_user2,grant_role2pc_user3,grant_role2pc_user4,grant_role2pc_user5,grant_role2pc_user6,grant_role2pc_user7; +drop user grant_role2pc_user1; +reset citus.enable_create_database_propagation; diff --git a/src/test/regress/sql/metadata_sync_from_non_maindb.sql b/src/test/regress/sql/metadata_sync_from_non_maindb.sql new file mode 100644 index 00000000000..93445be273b --- /dev/null +++ b/src/test/regress/sql/metadata_sync_from_non_maindb.sql @@ -0,0 +1,88 @@ +CREATE SCHEMA metadata_sync_2pc_schema; +SET search_path TO metadata_sync_2pc_schema; +set citus.enable_create_database_propagation to on; +CREATE DATABASE metadata_sync_2pc_db; + +revoke connect,temp,temporary on database metadata_sync_2pc_db from public; + +\c metadata_sync_2pc_db +SHOW citus.main_db; + +CREATE USER "grant_role2pc'_user1"; +CREATE USER "grant_role2pc'_user2"; +CREATE USER "grant_role2pc'_user3"; +CREATE USER grant_role2pc_user4; +CREATE USER grant_role2pc_user5; + +\c regression +select 1 from citus_remove_node('localhost', :worker_2_port); + +\c metadata_sync_2pc_db +grant "grant_role2pc'_user1","grant_role2pc'_user2" to "grant_role2pc'_user3" WITH ADMIN OPTION; +-- This section was originally testing a scenario where a user with the 'admin option' grants the same role to another user, also with the 'admin option'. +-- However, we encountered inconsistent errors because the 'admin option' grant is executed after the grant below. +-- Once we establish the correct order of granting, we will reintroduce the 'granted by' clause. +-- For now, we are commenting out the grant below that includes 'granted by', and instead, we are adding a grant without the 'granted by' clause.
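+-- (Note: 'granted by' requires the named grantor to already hold these roles
+-- WITH ADMIN OPTION on every node, which is why the ordering matters here.)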
+-- grant "grant_role2pc'_user1","grant_role2pc'_user2" to grant_role2pc_user4,grant_role2pc_user5 granted by "grant_role2pc'_user3"; +grant "grant_role2pc'_user1","grant_role2pc'_user2" to grant_role2pc_user4,grant_role2pc_user5; + +--test for grant on database +\c metadata_sync_2pc_db - - :master_port +grant create on database metadata_sync_2pc_db to "grant_role2pc'_user1"; +grant connect on database metadata_sync_2pc_db to "grant_role2pc'_user2"; +grant ALL on database metadata_sync_2pc_db to "grant_role2pc'_user3"; + +\c regression +select check_database_privileges('grant_role2pc''_user1','metadata_sync_2pc_db',ARRAY['CREATE']); +select check_database_privileges('grant_role2pc''_user2','metadata_sync_2pc_db',ARRAY['CONNECT']); +select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db',ARRAY['CREATE','CONNECT','TEMP','TEMPORARY']); + +-- test for security label on role +\c metadata_sync_2pc_db - - :master_port +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE grant_role2pc_user4 IS 'citus_unclassified'; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "grant_role2pc'_user1" IS 'citus_classified'; + +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('grant_role2pc_user4') ORDER BY node_type; +SELECT node_type, result FROM get_citus_tests_label_provider_labels($$"grant_role2pc''_user1"$$) ORDER BY node_type; + +set citus.enable_create_database_propagation to on; +select 1 from citus_add_node('localhost', :worker_2_port); + +select result FROM run_command_on_all_nodes($$ +SELECT array_to_json(array_agg(row_to_json(t))) +FROM ( + SELECT member::regrole, roleid::regrole as role, grantor::regrole, admin_option + FROM pg_auth_members + WHERE member::regrole::text in + ('"grant_role2pc''_user2"','"grant_role2pc''_user3"','grant_role2pc_user4','grant_role2pc_user5') + order by member::regrole::text +) t +$$); + +select check_database_privileges('grant_role2pc''_user1','metadata_sync_2pc_db',ARRAY['CREATE']); +select check_database_privileges('grant_role2pc''_user2','metadata_sync_2pc_db',ARRAY['CONNECT']); +select check_database_privileges('grant_role2pc''_user3','metadata_sync_2pc_db',ARRAY['CREATE','CONNECT','TEMP','TEMPORARY']); + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('grant_role2pc_user4') ORDER BY node_type; +SELECT node_type, result FROM get_citus_tests_label_provider_labels($$"grant_role2pc''_user1"$$) ORDER BY node_type; + +\c metadata_sync_2pc_db +revoke "grant_role2pc'_user1","grant_role2pc'_user2" from grant_role2pc_user4,grant_role2pc_user5 ; + +revoke admin option for "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3"; + +revoke "grant_role2pc'_user1","grant_role2pc'_user2" from "grant_role2pc'_user3"; +revoke ALL on database metadata_sync_2pc_db from "grant_role2pc'_user3"; +revoke CONNECT on database metadata_sync_2pc_db from "grant_role2pc'_user2"; +revoke CREATE on database metadata_sync_2pc_db from "grant_role2pc'_user1"; + +\c regression + +drop user "grant_role2pc'_user1","grant_role2pc'_user2","grant_role2pc'_user3",grant_role2pc_user4,grant_role2pc_user5; +set citus.enable_create_database_propagation to on; +drop database metadata_sync_2pc_db; +drop schema metadata_sync_2pc_schema; + +reset citus.enable_create_database_propagation; +reset search_path; diff --git a/src/test/regress/sql/metadata_sync_helpers.sql b/src/test/regress/sql/metadata_sync_helpers.sql index c669e906976..dae331d258b 100644 --- a/src/test/regress/sql/metadata_sync_helpers.sql 
+++ b/src/test/regress/sql/metadata_sync_helpers.sql @@ -16,7 +16,7 @@ CREATE TABLE test(col_1 int); -- not in a distributed transaction SELECT citus_internal.add_partition_metadata ('test'::regclass, 'h', 'col_1', 0, 's'); -SELECT citus_internal_update_relation_colocation ('test'::regclass, 1); +SELECT citus_internal.update_relation_colocation ('test'::regclass, 1); -- in a distributed transaction, but the application name is not Citus BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -54,7 +54,7 @@ ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation ('test'::regclass, 10); + SELECT citus_internal.update_relation_colocation ('test'::regclass, 10); ROLLBACK; -- finally, a user can only add its own tables to the metadata @@ -202,7 +202,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420007, 10000, 11111); + SELECT citus_internal.update_placement_metadata(1420007, 10000, 11111); ROLLBACK; -- non-existing users should fail to pass the checks @@ -302,7 +302,7 @@ COMMIT; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation ('test_2'::regclass, 1231231232); + SELECT citus_internal.update_relation_colocation ('test_2'::regclass, 1231231232); ROLLBACK; -- invalid shard ids are not allowed @@ -525,7 +525,7 @@ COMMIT; BEGIN; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ROLLBACK; -- now, add few more shards for test_3 to make it colocated with test_2 @@ -693,7 +693,7 @@ COMMIT; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ROLLBACK; -- try to update placements @@ -703,7 +703,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420000, get_node_id(), get_node_id()+1000); + SELECT citus_internal.update_placement_metadata(1420000, get_node_id(), get_node_id()+1000); COMMIT; -- fails because the source node doesn't contain the shard @@ -711,7 +711,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420000, get_node_id()+10000, get_node_id()); + SELECT citus_internal.update_placement_metadata(1420000, get_node_id()+10000, get_node_id()); COMMIT; -- 
fails because shard does not exist @@ -719,7 +719,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(0, get_node_id(), get_node_id()+1); + SELECT citus_internal.update_placement_metadata(0, get_node_id(), get_node_id()+1); COMMIT; -- fails because none-existing shard @@ -727,7 +727,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(213123123123, get_node_id(), get_node_id()+1); + SELECT citus_internal.update_placement_metadata(213123123123, get_node_id(), get_node_id()+1); COMMIT; -- fails because we do not own the shard @@ -735,7 +735,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; SELECT assign_distributed_transaction_id(0, 8, '2021-07-09 15:41:55.542377+02'); SET application_name to 'citus_internal gpid=10000000001'; \set VERBOSITY terse - SELECT citus_internal_update_placement_metadata(1420007, get_node_id(), get_node_id()+1); + SELECT citus_internal.update_placement_metadata(1420007, get_node_id(), get_node_id()+1); COMMIT; -- the user only allowed to delete their own shards @@ -788,7 +788,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; -- so that making two tables colocated fails UPDATE pg_dist_partition SET repmodel = 't' WHERE logicalrelid = 'test_2'::regclass; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ROLLBACK; @@ -810,7 +810,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; WHERE logicalrelid = 'test_2'::regclass; \endif - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -820,7 +820,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; -- so that making two tables colocated fails UPDATE pg_dist_partition SET partmethod = '' WHERE logicalrelid = 'test_2'::regclass; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ROLLBACK; BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; @@ -830,7 +830,7 @@ BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED; -- so that making two tables colocated fails UPDATE pg_dist_partition SET partmethod = 'a' WHERE logicalrelid = 'test_2'::regclass; - SELECT citus_internal_update_relation_colocation('test_2'::regclass, 251); + SELECT citus_internal.update_relation_colocation('test_2'::regclass, 251); ROLLBACK; -- colocated hash distributed table should have the same dist key columns diff --git a/src/test/regress/sql/multi_test_helpers.sql b/src/test/regress/sql/multi_test_helpers.sql index ec156e51859..ac724433973 100644 --- a/src/test/regress/sql/multi_test_helpers.sql +++ b/src/test/regress/sql/multi_test_helpers.sql @@ -653,8 +653,6 @@ BEGIN END; $func$ LANGUAGE plpgsql; - - CREATE OR REPLACE FUNCTION check_parameter_privileges(users text[], parameters text[], permissions text[]) RETURNS TABLE ( res text, usr text, param text, perms text) AS $func$ DECLARE @@ -673,4 +671,18 @@ BEGIN END LOOP; END LOOP; END; -$func$ LANGUAGE plpgsql;; 
+$func$ LANGUAGE plpgsql; + +CREATE OR REPLACE FUNCTION check_database_privileges(role_name text, db_name text, permissions text[]) +RETURNS TABLE(permission text, result text) +AS $func$ +DECLARE + permission text; +BEGIN + FOREACH permission IN ARRAY permissions + LOOP + RETURN QUERY EXECUTE format($inner$SELECT %s, result FROM run_command_on_all_nodes($$select has_database_privilege(%s,%s,%s); $$)$inner$, + quote_literal(permission), quote_literal(role_name), quote_literal(db_name), quote_literal(permission)); + END LOOP; +END; +$func$ LANGUAGE plpgsql; diff --git a/src/test/regress/sql/other_databases.sql b/src/test/regress/sql/other_databases.sql index 8cd54f35432..aa936e50753 100644 --- a/src/test/regress/sql/other_databases.sql +++ b/src/test/regress/sql/other_databases.sql @@ -75,9 +75,9 @@ DROP USER other_db_user9, nonsuperuser; -- test from a worker \c - - - :worker_1_port -CREATE DATABASE other_db2; +CREATE DATABASE worker_other_db; -\c other_db2 +\c worker_other_db CREATE USER worker_user1; @@ -98,9 +98,85 @@ SELECT usename FROM pg_user WHERE usename LIKE 'worker\_user%' ORDER BY 1; -- some user creation commands will fail but let's make sure we try to drop them just in case DROP USER IF EXISTS worker_user1, worker_user2, worker_user3; -\c - - - :worker_1_port -DROP DATABASE other_db2; +-- test creating and dropping a database from a Citus non-main database +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$); +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); +SELECT pg_sleep(0.1); +\c other_db1 +CREATE DATABASE other_db3; + +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db3') ORDER BY node_type; + +\c other_db1 +DROP DATABASE other_db3; + +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db3') ORDER BY node_type; + +\c worker_other_db - - :worker_1_port +CREATE DATABASE other_db4; + +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db4') ORDER BY node_type; + +\c worker_other_db +DROP DATABASE other_db4; + +\c regression +SELECT * FROM public.check_database_on_all_nodes('other_db4') ORDER BY node_type; + +DROP DATABASE worker_other_db; + +CREATE DATABASE other_db5; + +-- disable create database propagation for the next test +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$); +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); +SELECT pg_sleep(0.1); + +\c other_db5 - - :worker_2_port + +-- locally create a database +CREATE DATABASE local_db; + +\c regression - - - + +-- re-enable create database propagation +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$); +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); +SELECT pg_sleep(0.1); + +\c other_db5 - - :master_port + +-- Test a scenario where create database fails because the database +-- already exists on another node and we don't crash etc. 
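+-- (local_db was created only on worker_2 above while propagation was disabled,
+-- so the propagated CREATE DATABASE below is expected to error out on that
+-- node rather than crash.)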
+CREATE DATABASE local_db; + +\c regression - - - + +SELECT * FROM public.check_database_on_all_nodes('local_db') ORDER BY node_type, result; + +\c - - - :worker_2_port + +-- locally drop the database for cleanup purposes +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$); +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); +SELECT pg_sleep(0.1); + +DROP DATABASE local_db; + +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO true$$); +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); +SELECT pg_sleep(0.1); + \c - - - :master_port +DROP DATABASE other_db5; + +SELECT result FROM run_command_on_all_nodes($$ALTER SYSTEM SET citus.enable_create_database_propagation TO false$$); +SELECT result FROM run_command_on_all_nodes($$SELECT pg_reload_conf()$$); +SELECT pg_sleep(0.1); + DROP SCHEMA other_databases; DROP DATABASE other_db1; diff --git a/src/test/regress/sql/schema_based_sharding.sql b/src/test/regress/sql/schema_based_sharding.sql index af5c201f4b3..f0b2276df00 100644 --- a/src/test/regress/sql/schema_based_sharding.sql +++ b/src/test/regress/sql/schema_based_sharding.sql @@ -15,12 +15,12 @@ SET client_min_messages TO NOTICE; SELECT citus_internal.add_tenant_schema(NULL, 1); SELECT citus_internal.add_tenant_schema(1, NULL); SELECT citus_internal.delete_tenant_schema(NULL); -SELECT citus_internal_unregister_tenant_schema_globally(1, NULL); -SELECT citus_internal_unregister_tenant_schema_globally(NULL, 'text'); +SELECT citus_internal.unregister_tenant_schema_globally(1, NULL); +SELECT citus_internal.unregister_tenant_schema_globally(NULL, 'text'); --- Verify that citus_internal_unregister_tenant_schema_globally can only +-- Verify that citus_internal.unregister_tenant_schema_globally can only -- be called on schemas that are dropped already. -SELECT citus_internal_unregister_tenant_schema_globally('regular_schema'::regnamespace, 'regular_schema'); +SELECT citus_internal.unregister_tenant_schema_globally('regular_schema'::regnamespace, 'regular_schema'); SELECT 1 FROM citus_remove_node('localhost', :worker_2_port); @@ -1022,9 +1022,9 @@ SELECT pg_reload_conf(); ALTER SYSTEM SET citus.enable_schema_based_sharding TO ON; SELECT pg_reload_conf(); --- Verify that citus_internal_unregister_tenant_schema_globally is a no-op +-- Verify that citus_internal.unregister_tenant_schema_globally is a no-op -- on workers. 
-SELECT citus_internal_unregister_tenant_schema_globally('tenant_3'::regnamespace, 'tenant_3'); +SELECT citus_internal.unregister_tenant_schema_globally('tenant_3'::regnamespace, 'tenant_3'); \c - - - :master_port diff --git a/src/test/regress/sql/seclabel.sql b/src/test/regress/sql/seclabel.sql index e523fc1dacd..d39e0118392 100644 --- a/src/test/regress/sql/seclabel.sql +++ b/src/test/regress/sql/seclabel.sql @@ -62,14 +62,20 @@ SET citus.grep_remote_commands = '%SECURITY LABEL%'; SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified'; SECURITY LABEL ON ROLE user1 IS NULL; SECURITY LABEL ON ROLE user1 IS 'citus_unclassified'; -SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified'; +SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_classified'; \c - - - :worker_1_port --- command not allowed from worker node -SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus ''!unclassified'; +SET citus.log_remote_commands TO on; +SET citus.grep_remote_commands = '%SECURITY LABEL%'; +-- command from the worker node should be propagated to the coordinator +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; +SECURITY LABEL for "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified'; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; -\c - - - :master_port RESET citus.log_remote_commands; +SECURITY LABEL for "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus ''!unclassified'; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; +\c - - - :master_port SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; @@ -82,6 +88,19 @@ SELECT 1 FROM citus_add_node('localhost', :worker_2_port); SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; +-- disable the GUC and check that the command is not propagated +SET citus.enable_alter_role_propagation TO off; +SECURITY LABEL ON ROLE user1 IS 'citus_unclassified'; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + +\c - - - :worker_2_port +SET citus.log_remote_commands TO on; +SET citus.grep_remote_commands = '%SECURITY LABEL%'; +SET citus.enable_alter_role_propagation TO off; +SECURITY LABEL ON ROLE user1 IS 'citus ''!unclassified'; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; +RESET citus.enable_alter_role_propagation; + -- cleanup RESET citus.log_remote_commands; DROP ROLE user1, "user 2"; diff --git a/src/test/regress/sql/seclabel_non_maindb.sql b/src/test/regress/sql/seclabel_non_maindb.sql new file mode 100644 index 00000000000..1833d419363 --- /dev/null +++ b/src/test/regress/sql/seclabel_non_maindb.sql @@ -0,0 +1,71 @@ +-- SECLABEL +-- +-- Test suite for running SECURITY LABEL ON ROLE statements from non-main databases + +SET citus.enable_create_database_propagation to ON; + +CREATE DATABASE database1; +CREATE DATABASE database2; + +\c - - - :worker_1_port +SET citus.enable_create_database_propagation to ON; +CREATE DATABASE database_w1; + + +\c - - - :master_port +CREATE ROLE user1; +\c database1 +SHOW 
citus.main_db; +SHOW citus.superuser; + +CREATE ROLE "user 2"; + +-- Set a SECURITY LABEL on a role from a non-main database +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_classified'; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_unclassified'; + +-- Check the result +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; + +\c database1 +-- Set a SECURITY LABEL on a database; it should not be propagated +SECURITY LABEL FOR "citus '!tests_label_provider" ON DATABASE database1 IS 'citus_classified'; + +-- Set a SECURITY LABEL on a table; it should not be propagated +CREATE TABLE a (i int); +SECURITY LABEL ON TABLE a IS 'citus_classified'; + +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('database1') ORDER BY node_type; + +-- Check that only the SECURITY LABEL for ROLES is propagated to the non-main databases on other nodes +\c database_w1 - - :worker_1_port +SELECT provider, objtype, label, objname FROM pg_seclabels ORDER BY objname; + + +-- Check the result after a transaction +BEGIN; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE user1 IS 'citus_unclassified'; +SECURITY LABEL FOR "citus '!tests_label_provider" ON DATABASE database_w1 IS 'citus_classified'; +COMMIT; + +\c regression +SELECT node_type, result FROM get_citus_tests_label_provider_labels('database_w1') ORDER BY node_type; +SELECT node_type, result FROM get_citus_tests_label_provider_labels('user1') ORDER BY node_type; + +BEGIN; +SECURITY LABEL FOR "citus '!tests_label_provider" ON ROLE "user 2" IS 'citus_classified'; +ROLLBACK; + +SELECT node_type, result FROM get_citus_tests_label_provider_labels('"user 2"') ORDER BY node_type; + +-- clean up +SET citus.enable_create_database_propagation to ON; +DROP DATABASE database1; +DROP DATABASE database2; +DROP DATABASE database_w1; +DROP ROLE user1; +DROP ROLE "user 2"; +RESET citus.enable_create_database_propagation; diff --git a/src/test/regress/sql/shard_rebalancer.sql b/src/test/regress/sql/shard_rebalancer.sql index 5d8e89b3631..9037f8f7504 100644 --- a/src/test/regress/sql/shard_rebalancer.sql +++ b/src/test/regress/sql/shard_rebalancer.sql @@ -1340,6 +1340,43 @@ DROP TABLE t1, r1, r2; -- test suites should clean up their distributed tables. SELECT count(*) FROM pg_dist_partition; +-- verify that a system with a new node won't copy distributed table shards without reference tables + +SELECT 1 from master_remove_node('localhost', :worker_2_port); +SELECT public.wait_until_metadata_sync(30000); + +CREATE TABLE r1 (a int PRIMARY KEY, b int); +SELECT create_reference_table('r1'); + +CREATE TABLE d1 (a int PRIMARY KEY, b int); +SELECT create_distributed_table('d1', 'a'); + +ALTER SEQUENCE pg_dist_groupid_seq RESTART WITH 15; +SELECT 1 from master_add_node('localhost', :worker_2_port); + +-- count the number of placements for the reference table to verify it is not available on +-- all nodes +SELECT count(*) +FROM pg_dist_shard +JOIN pg_dist_shard_placement USING (shardid) +WHERE logicalrelid = 'r1'::regclass; + +-- #7426 We can't move shards to the fresh node before we copy reference tables there. 
+-- rebalance_table_shards() will do the copy, but the low-level +-- citus_move_shard_placement() should raise an error +SELECT citus_move_shard_placement(pg_dist_shard.shardid, nodename, nodeport, 'localhost', :worker_2_port) + FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) + WHERE logicalrelid = 'd1'::regclass AND nodename = 'localhost' AND nodeport = :worker_1_port LIMIT 1; + +SELECT replicate_reference_tables(); + +-- After replication, the move should succeed. +SELECT citus_move_shard_placement(pg_dist_shard.shardid, nodename, nodeport, 'localhost', :worker_2_port) + FROM pg_dist_shard JOIN pg_dist_shard_placement USING (shardid) + WHERE logicalrelid = 'd1'::regclass AND nodename = 'localhost' AND nodeport = :worker_1_port LIMIT 1; + +DROP TABLE d1, r1; + -- verify a system having only reference tables will copy the reference tables when -- executing the rebalancer