diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..d17b72e --- /dev/null +++ b/.gitattributes @@ -0,0 +1,9 @@ +* whitespace=space-before-tab,trailing-space +*.[ch] whitespace=space-before-tab,trailing-space,indent-with-non-tab,tabwidth=4 + +# Avoid confusing ASCII underlines with leftover merge conflict markers +README conflict-marker-size=32 +README.* conflict-marker-size=32 + +# Test output files that contain extra whitespace +*.out -whitespace diff --git a/.gitignore b/.gitignore index 6f21dc5..65a153a 100644 --- a/.gitignore +++ b/.gitignore @@ -20,8 +20,12 @@ # Executables *.exe -*.out *.app *.i*86 *.x86_64 *.hex + +# pgregress results directory +results/ + +.idea/ diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..53af7f8 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,6 @@ +[submodule "mongo-c-driver"] + path = mongo-c-driver + url = ../../mongodb/mongo-c-driver.git +[submodule "json-c"] + path = json-c + url = https://github.com/json-c/json-c.git diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index df9d264..64aba67 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,18 +1,17 @@ Contributing to `mongo_fdw` =========================== -Following these guidelines helps to facilitate relevant discussion in pull -requests and issues so the developers managing and developing this open source -project can address patches and bugs as efficiently as possible. +Following these guidelines helps to facilitate relevant discussion in +pull requests and issues so the developers managing and developing this +open source project can address patches and bugs as efficiently as +possible. Using Issues ------------ -`mongo_fdw`'s maintainers prefer that bug reports, feature requests, and pull -requests are submitted as [GitHub Issues][1]. If you think you require personal -assistance, please **do not** open an issue: email `engage` `@` `citusdata.com` -instead. +`mongo_fdw`'s maintainers prefer that bug reports, feature requests, and +pull requests are submitted as [GitHub Issues][1]. Bug Reports @@ -21,20 +20,22 @@ Bug Reports Before opening a bug report: 1. Search for a duplicate issue using GitHub's issue search - 2. Check whethe the bug remains in the lasest `master` or `develop` commit - 3. Create a reduced test case: remove code and data not relevant to the bug + 2. Check whether the bug remains in the latest `master` or `develop` + commit + 3. Create a reduced test case: remove code and data not relevant to + the bug -A contributor should be able to begin work on your bug without asking too many -followup questions. If you include the following information, your bug will be -serviced more quickly: +A contributor should be able to begin work on your bug without asking +too many followup questions. If you include the following information, +your bug will be serviced more quickly: * Short, descriptive title * Your OS * Versions of dependencies * Any custom modifications -Once the background information is out of the way, you are free to present the -bug itself. You should explain: +Once the background information is out of the way, you are free to +present the bug itself. You should explain: * Steps you took to exercise the bug * The expected outcome @@ -44,42 +45,44 @@ bug itself. You should explain: Feature Requests ---------------- -We are open to adding features but ultimately control the scope and aims of the -project. 
If a proposed feature is likely to incur high testing, maintenance, or -performance costs it is also unlikely to be accepted. If a _strong_ case exists -for a given feature, we may be persuaded on merit. Be specific. +We are open to adding features but ultimately control the scope and aims +of the project. If a proposed feature is likely to incur high testing, +maintenance, or performance costs it is also unlikely to be accepted. +If a _strong_ case exists for a given feature, we may be persuaded on +merit. Be specific. Pull Requests ------------- -Well-constructed pull requests are very welcome. By _well-constructed_, we mean -they do not introduce unrelated changes or break backwards compatibility. Just -fork this repo and open a request against `develop`. +Well-constructed pull requests are very welcome. By _well-constructed_, +we mean they do not introduce unrelated changes or break backwards +compatibility. Just fork this repo and open a request against `develop`. -Some examples of things likely to increase the likelihood a pull request is -rejected: +Some examples of things likely to increase the likelihood a pull request +is rejected: * Large structural changes, including: - * Refactoring for its own sake + * Re-factoring for its own sake * Adding languages to the project - * Unnecesary whitespace changes + * Unnecessary whitespace changes * Deviation from obvious conventions * Introduction of incompatible intellectual property -Please do not change version numbers in your pull request: they will be updated -by the project owners prior to the next release. +Please do not change version numbers in your pull request: they will be +updated by the project owners prior to the next release. License ------- -By submitting a patch, you agree to allow the project owners to license your -work under the terms of the [`LICENSE`][2]. Additionally, you grant the project -owners a license under copyright covering your contribution to the extent -permitted by law. Finally, you confirm that you own said copyright, have the -legal authority to grant said license, and in doing so are not violating any -grant of rights you have made to third parties, including your employer. +By submitting a patch, you agree to allow the project owners to license +your work under the terms of the [`LICENSE`][2]. Additionally, you grant +the project owners a license under copyright covering your contribution +to the extent permitted by law. Finally, you confirm that you own said +copyright, have the legal authority to grant said license, and in doing +so are not violating any grant of rights you have made to third parties, +including your employer. -[1]: https://github.com/citusdata/mongo_fdw/issues +[1]: https://github.com/EnterpriseDB/mongo_fdw/issues [2]: LICENSE diff --git a/INSTALL.md b/INSTALL.md new file mode 100644 index 0000000..ffa0a34 --- /dev/null +++ b/INSTALL.md @@ -0,0 +1,155 @@ +Notes about installation Mongo Foreign Data Wrapper +=================================================== + +To compile the [MongoDB][1] foreign data wrapper for [PostgreSQL](https://www.postgresql.org/), `mongo-c` and `json-c` +libraries are needed. To build and install `mongo-c` and `json-c` libraries, there +are two ways. You can either use script `autogen.sh` or you can manually +perform all required steps listed. + +### Notes about new MongoDB C Driver support +The current implementation is based on the driver version 1.17.3 of MongoDB. 
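+A minimal way to confirm which `mongo-c` driver a build would pick up, assuming
+the driver is already installed and visible on `PKG_CONFIG_PATH`, is to ask
+`pkg-config` directly (with driver 1.17.3, the command below should report that
+version):
+
+```sh
+# Print the libmongoc version that pkg-config resolves; 1.17.3 is expected here.
+pkg-config --modversion libmongoc-1.0
+```
+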
+ +## Installation using script +Number of manual steps needs to be performed to compile and install required +mongo-c and json-c libraries. If you want to avoid the manual steps, there is a +shell script available which will download and install the appropriate drivers +and libraries for you. + +Here is how it works: + +To install mongo-c and json-c libraries at custom locations, you need to +export environment variables `MONGOC_INSTALL_DIR` and `JSONC_INSTALL_DIR` +respectively. If these variables are not set then these libraries will be +installed in the default location. Please note that you need to have the +required permissions on the directory where you want to install the libraries. + + * autogen.sh + +The script autogen.sh will do all the necessary steps to build with mongo-c +driver accordingly. + +## Steps for manual installation +### mongo-c +1. Download and extract source code of mongoc driver for version `1.17.3` + + ```sh + wget https://github.com/mongodb/mongo-c-driver/releases/download/1.17.3/mongo-c-driver-1.17.3.tar.gz + tar xzf mongo-c-driver-1.17.3.tar.gz + rm -rf mongo-c-driver + mv mongo-c-driver-1.17.3 mongo-c-driver + cd mongo-c-driver + ``` + +2. Configure mongoc driver + + ```sh + cmake -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF . + ``` + + To install at custom location: + + ```sh + cmake -DCMAKE_INSTALL_PREFIX=YOUR_INSTALLATION_DIRECTORY -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF . + ``` + +3. Compile and install + + ```sh + cmake --build . + cmake --build . --target install + ``` + +For more details on installation of mongo-c driver, you can refer [here][3]. + +### json-c + +1. Download and extract source code + + ```sh + wget https://github.com/json-c/json-c/archive/json-c-0.15-20200726.tar.gz + tar -xzf json-c-0.15-20200726.tar.gz + rm -rf json-c + mv json-c-json-c-0.15-20200726/ json-c + cd json-c + ``` + +2. Configure + + ```sh + cmake . + ``` + To install at custom location: + + ```sh + cmake -DCMAKE_INSTALL_PREFIX=YOUR_INSTALLATION_DIRECTORY . + ``` + +3. Compile and install + + ```sh + make + make install + ``` + +For more details on installation of json-c library, you can refer [here][4]. + +## Mongo_fdw configuration, compilation and installation +The `PKG_CONFIG_PATH` environment variable must be set to mongo-c-driver source +directory for successful compilation as shown below, + +```sh +export PKG_CONFIG_PATH=$YOUR_MONGO_FDW_SOURCE_DIR/mongo-c-driver/src/libmongoc/src:$YOUR_MONGO_FDW_SOURCE_DIR/mongo-c-driver/src/libbson/src +``` + +The `LD_LIBRARY_PATH` environment variable must include the path to the mongo-c +installation directory containing the libmongoc-1.0.so and libbson-1.0.so +files. For example, assuming the installation directory is /home/mongo-c and +the libraries were created under it in lib64 sub-directory, then we can define +the `LD_LIBRARY_PATH` as: + +```sh +export LD_LIBRARY_PATH=/home/mongo-c/lib64:$LD_LIBRARY_PATH +``` + +Note: This `LD_LIBRARY_PATH` environment variable setting must be in effect +when the `pg_ctl` utility is executed to start or restart PostgreSQL or +EDB Postgres Advanced Server. + + +1. To build on POSIX-compliant systems you need to ensure the + `pg_config` executable is in your path when you run `make`. This + executable is typically in your PostgreSQL installation's `bin` + directory. For example: + + ```sh + export PATH=/usr/local/pgsql/bin/:$PATH + ``` + +2. Compile the code using make. + + ```sh + make USE_PGXS=1 + ``` + +3. Finally install the foreign data wrapper. 
+ + ```sh + make USE_PGXS=1 install + ``` + +4. Running regression test. + + ```sh + make USE_PGXS=1 installcheck + ``` + However, make sure to set the `MONGO_HOST`, `MONGO_PORT`, `MONGO_USER_NAME`, + and `MONGO_PWD` environment variables correctly. The default settings can be + found in the `mongodb_init.sh` script. + + +If you run into any issues, please [let us know][2]. + +[1]: http://www.mongodb.com +[2]: https://github.com/enterprisedb/mongo_fdw/issues/new +[3]: http://mongoc.org/libmongoc/1.17.3/installing.html#configuring-the-build +[4]: https://github.com/json-c/json-c/tree/json-c-0.15-20200726#build-instructions-- diff --git a/Makefile b/Makefile index b4543d8..a232f26 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,7 @@ # mongo_fdw/Makefile # -# Copyright (c) 2012-2014 Citus Data, Inc. +# Portions Copyright (c) 2004-2024, EnterpriseDB Corporation. +# Portions Copyright © 2012–2014 Citus Data, Inc. # MODULE_big = mongo_fdw @@ -10,21 +11,25 @@ MODULE_big = mongo_fdw # on another platform, change env_posix.os in MONGO_OBJS with the appropriate # environment object file. # +LIBJSON = json-c +LIBJSON_OBJS = $(LIBJSON)/json_util.o $(LIBJSON)/json_object.o $(LIBJSON)/json_tokener.o \ + $(LIBJSON)/json_object_iterator.o $(LIBJSON)/printbuf.o $(LIBJSON)/linkhash.o \ + $(LIBJSON)/arraylist.o $(LIBJSON)/random_seed.o $(LIBJSON)/debug.o $(LIBJSON)/strerror_override.o -MONGO_DRIVER = mongo-c-driver-v0.6 -MONGO_PATH = $(MONGO_DRIVER)/src -MONGO_OBJS = $(MONGO_PATH)/bson.os $(MONGO_PATH)/encoding.os $(MONGO_PATH)/md5.os \ - $(MONGO_PATH)/mongo.os $(MONGO_PATH)/numbers.os $(MONGO_PATH)/env_posix.os +MONGO_INCLUDE = $(shell pkg-config --cflags libmongoc-1.0) +PG_CPPFLAGS = --std=c99 $(MONGO_INCLUDE) -I$(LIBJSON) +SHLIB_LINK = $(shell pkg-config --libs libmongoc-1.0) + +OBJS = connection.o option.o mongo_wrapper.o mongo_fdw.o mongo_query.o deparse.o $(LIBJSON_OBJS) -PG_CPPFLAGS = --std=c99 -I$(MONGO_PATH) -OBJS = mongo_fdw.o mongo_query.o $(MONGO_OBJS) EXTENSION = mongo_fdw -DATA = mongo_fdw--1.0.sql +DATA = mongo_fdw--1.0.sql mongo_fdw--1.1.sql mongo_fdw--1.0--1.1.sql -$(MONGO_DRIVER)/%.os: - $(MAKE) -C $(MONGO_DRIVER) $*.os +REGRESS = server_options connection_validation dml select pushdown join_pushdown aggregate_pushdown limit_offset_pushdown +REGRESS_OPTS = --load-extension=$(EXTENSION) +ifdef USE_PGXS # # Users need to specify their Postgres installation path through pg_config. For # example: /usr/local/pgsql/bin/pg_config or /usr/lib/postgresql/9.1/bin/pg_config @@ -33,3 +38,18 @@ $(MONGO_DRIVER)/%.os: PG_CONFIG = pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) include $(PGXS) + +ifndef MAJORVERSION + MAJORVERSION := $(basename $(VERSION)) +endif + +ifeq (,$(findstring $(MAJORVERSION), 12 13 14 15 16 17)) + $(error PostgreSQL 12, 13, 14, 15, 16, or 17 is required to compile this extension) +endif + +else +subdir = contrib/mongo_fdw +top_builddir = ../.. +include $(top_builddir)/src/Makefile.global +include $(top_srcdir)/contrib/contrib-global.mk +endif diff --git a/README.md b/README.md index 4b024d5..2f048cf 100644 --- a/README.md +++ b/README.md @@ -1,134 +1,491 @@ -MongoDB FDW for PostgreSQL -========================== +MongoDB Foreign Data Wrapper for PostgreSQL +============================================ This PostgreSQL extension implements a Foreign Data Wrapper (FDW) for -[MongoDB][1]. For an example demonstrating this wrapper's use, see [our blog -post][2]. Please also note that this version of `mongo_fdw` only works with -PostgreSQL 9.2 or 9.3. - +[MongoDB][1]. 
+ +Please note that this version of mongo_fdw works with PostgreSQL and EDB +Postgres Advanced Server 12, 13, 14, 15, 16 and 17. + +Contents +-------- + +1. [Features](#features) +2. [Supported platforms](#supported-platforms) +3. [Installation](#installation) +4. [Usage](#usage) +5. [Functions](#functions) +6. [Character set handling](#character-set-handling) +7. [Examples](#examples) +8. [Limitations](#limitations) +9. [Contributing](#contributing) +10. [Support](#support) +11. [Useful links](#useful-links) +12. [License](#license) + + +Features +-------- + +The following enhancements are added to the latest version of `mongo_fdw`: + +#### Write-able FDW +The previous version was only read-only, the latest version provides the +write capability. The user can now issue an insert, update, and delete +statements for the foreign tables using the `mongo_fdw`. + +#### Connection Pooling +The latest version comes with a connection pooler that utilizes the +same MongoDB database connection for all the queries in the same session. +The previous version would open a new [MongoDB][1] connection for every +query. This is a performance enhancement. + +#### JOIN push-down +`mongo_fdw` now also supports join push-down. The joins between two +foreign tables from the same remote MongoDB server are pushed to a remote +server, instead of fetching all the rows for both the tables and +performing a join locally, thereby may enhance the performance. Currently, +joins involving only relational and arithmetic operators in join-clauses +are pushed down to avoid any potential join failure. Also, only the +INNER and LEFT/RIGHT OUTER joins are supported, and not the FULL OUTER, +SEMI, and ANTI join. Moreover, only joins between two tables are pushed +down and not when either inner or outer relation is the join itself. + +#### AGGREGATE push-down +`mongo_fdw` now also supports aggregate push-down. Push aggregates to the +remote MongoDB server instead of fetching all of the rows and aggregating +them locally. This gives a very good performance boost for the cases +where aggregates can be pushed down. The push-down is currently limited +to aggregate functions min, max, sum, avg, and count, to avoid pushing +down the functions that are not present on the MongoDB server. The +aggregate filters, orders, variadic and distinct are not pushed down. + +#### ORDER BY push-down +`mongo_fdw` now also supports order by push-down. If possible, push order +by clause to the remote server so that we get the ordered result set from +the foreign server itself. It might help us to have an efficient merge +join. NULLs behavior is opposite on the MongoDB server. Thus to get an +equivalent result, we can only push-down ORDER BY with either +ASC NULLS FIRST or DESC NULLS LAST. Moreover, as MongoDB sorts only on +fields, only column names in ORDER BY expressions are pushed down. + +#### LIMIT OFFSET push-down +`mongo_fdw` now also supports limit offset push-down. Wherever possible, +perform LIMIT and OFFSET operations on the remote server. This reduces +network traffic between local PostgreSQL and remote MongoDB servers. + +#### GUC variables: + + * `mongo_fdw.enable_join_pushdown`: If `true`, pushes the join between two + foreign tables from the same foreign server, instead of fetching all the + rows for both the tables and performing a join locally. Default is `true`. 
+ * `mongo_fdw.enable_aggregate_pushdown`: If `true`, pushes aggregate + operations to the foreign server, instead of fetching rows from the + foreign server and performing the operations locally. Default is `true`. + * `mongo_fdw.enable_order_by_pushdown`: If `true`, pushes the order by + operation to the foreign server, instead of fetching rows from the + foreign server and performing the sort locally. Default is `true`. + +Supported platforms +------------------- + +`mongo_fdw` was developed on Linux, and should run on any +reasonably POSIX-compliant system. Installation ------------ -The MongoDB FDW includes the official MongoDB C Driver version 0.6. When you -type `make`, the C driver's source code also gets automatically compiled and -linked. +About script or manual installation, `mongo-c` driver please read the following [instructions in INSTALL.md](INSTALL.md). -To build on POSIX-compliant systems (like Linux and OS X), you need to ensure -the `pg_config` executable is in your path when you run `make`. This executable -is typically in your PostgreSQL installation's `bin` directory. For example: +If you run into any issues, please [let us know][2]. -```sh -PATH=/usr/local/pgsql/bin/:$PATH make -sudo PATH=/usr/local/pgsql/bin/:$PATH make install -``` +Usage +----- -Note that we have tested the `mongo_fdw` extension only on Fedora and Ubuntu -systems. If you run into issues on other systems, please [let us know][3]. +## CREATE SERVER options +`mongo_fdw` accepts the following options via the `CREATE SERVER` command: -Usage ------ +- **address** as *string*, optional, default `127.0.0.1` + + Address or hostname of the MongoDB server. + +- **port** as *integer*, optional, default `27017`. + + Port number of the MongoDB server. + +- **use_remote_estimate** as *boolean*, optional, default `false` + + Controls whether `mongo_fdw` uses exact rows from + remote collection to obtain cost estimates. + +- **authentication_database** as *string*, optional + + Database against which user will be + authenticated against. Only valid with password based authentication. + +- **replica_set** as *string*, optional + + Replica set the server is member of. If set, + driver will auto-connect to correct primary in the replica set when + writing. + +- **read_preference** as *string*, optional, default `primary` + + `primary`, `secondary`, `primaryPreferred`, + `secondaryPreferred`, or `nearest`. + +- **ssl** as *boolean*, optional, default `false` + + Enable ssl. See http://mongoc.org/libmongoc/current/mongoc_ssl_opt_t.html to + understand the options. + +- **pem_file** as *string*, optional + + The .pem file that contains both the TLS/SSL certificate and + key. + +- **pem_pwd** as *string*, optional + + The password to decrypt the certificate key file(i.e. pem_file) + +- **ca_file** as *string*, optional + + The .pem file that contains the root certificate chain from the + Certificate Authority. + +- **ca_dir** as *string*, optional + + The absolute path to the `ca_file`. + +- **crl_file** as *string*, optional + + The .pem file that contains the Certificate Revocation List. + +- **weak_cert_validation** as *boolean*, optional, default `false` + + Enable the validation checks for TLS/SSL certificates and allows the use of invalid + certificates to connect if set to `true`. 
+ +- **enable_join_pushdown** as *boolean*, optional, default `true` + + If `true`, pushes the join between two foreign + tables from the same foreign server, instead of fetching all the rows + for both the tables and performing a join locally. This option can also + be set for an individual table, and if any of the tables involved in the + join has set it to false then the join will not be pushed down. The + table-level value of the option takes precedence over the server-level + option value. + +- **enable_aggregate_pushdown** as *boolean*, optional, default `true` + + If `true`, push aggregates to the remote + MongoDB server instead of fetching all of the rows and aggregating them + locally. This option can also be set for an individual table. The + table-level value of the option takes precedence over the server-level + option value. + +- **enable_order_by_pushdown** as *boolean*, optional, default `true` + + If `true`, pushes the ORDER BY clause to the foreign server instead of + performing a sort locally. This option can also be set for an individual + table, and if any of the tables involved in the query has set it to + false then the ORDER BY will not be pushed down. The table-level value + of the option takes precedence over the server-level option value. + +## CREATE USER MAPPING options + +`mongo_fdw` accepts the following options via the `CREATE USER MAPPING` +command: + +- **username** as *string*, optional + + Username to use when connecting to MongoDB. + +- **password** as *string*, optional + + Password to authenticate to the MongoDB server. + +## CREATE FOREIGN TABLE options + +`mongo_fdw` accepts the following table-level options via the +`CREATE FOREIGN TABLE` command: + +- **database** as *string*, optional, default `test` + + Name of the MongoDB database to query. + +- **collection** as *string*, optional, default name of foreign table + + Name of the MongoDB collection to query. + +- **enable_join_pushdown** as *boolean*, optional, default `true` + + Similar to the server-level option, but can be + configured at table level as well. + +- **enable_aggregate_pushdown** as *boolean*, optional, default `true` + + Similar to the server-level option, but can be configured at table level as well. + +- **enable_order_by_pushdown** as *boolean*, optional, default `true` + + Similar to the server-level option, but can be configured at table level as well. + +No column-level options are available. + +## IMPORT FOREIGN SCHEMA options + +`mongo_fdw` don't supports [IMPORT FOREIGN SCHEMA](https://www.postgresql.org/docs/current/sql-importforeignschema.html) +because MongoDB is schemaless. + +## TRUNCATE support + +`mongo_fdw` don't implements the foreign data wrapper `TRUNCATE` API, available +from PostgreSQL 14, because MongoDB is schemaless. + +Functions +--------- + +As well as the standard `mongo_fdw_handler()` and `mongo_fdw_validator()` +functions, `mongo_fdw` provides the following user-callable utility functions: + +- **mongo_fdw_version()** + + Returns the version number as an integer. -The following parameters can be set on a MongoDB foreign server object: +Character set handling +---------------------- - * `address`: the address or hostname of the MongoDB server. - Defaults to `127.0.0.1` - * `port`: the port number of the MongoDB server. Defaults to `27017` +`BSON` in MongoDB can only be encoded in `UTF-8`. Also `UTF-8` is recommended and +de-facto most popular PostgreSQL server encoding. 
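+As a small sanity check, both of the points above can be verified from `psql`
+once the extension is installed; the integer returned by `mongo_fdw_version()`
+depends on the installed release, so treat any particular value as illustrative:
+
+```sql
+-- Version of the installed mongo_fdw extension, returned as an integer.
+SELECT mongo_fdw_version();
+
+-- BSON is always UTF-8, so a UTF8 server encoding avoids lossy conversions.
+SHOW server_encoding;
+```
+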
-The following parameters can be set on a MongoDB foreign table object: +Examples +-------- - * `database`: the name of the MongoDB database to query. Defaults to `test` - * `collection`: the name of the MongoDB collection to query. Defaults to - the foreign table name used in the relevant `CREATE` command +As an example, the following commands demonstrate loading the +`mongo_fdw` wrapper, creating a server, and then creating a foreign +table associated with a MongoDB collection. The commands also show +specifying option values in the `OPTIONS` clause. If an option value +isn't provided, the wrapper uses the default value mentioned above. -As an example, the following commands demonstrate loading the `mongo_fdw` -wrapper, creating a server, and then creating a foreign table associated with -a MongoDB collection. The commands also show specifying option values in the -`OPTIONS` clause. If an option value isn't provided, the wrapper uses the -default value mentioned above. +`mongo_fdw` can collect data distribution statistics will incorporate +them when estimating costs for the query execution plan. To see selected +execution plans for a query, just run `EXPLAIN`. -`mongo_fdw` can collect data distribution statistics will incorporate them when -estimating costs for the query execution plan. To see selected execution plans -for a query, just run `EXPLAIN`. +### Install the extension: -We also currently use the internal PostgreSQL `NAME` type to represent the BSON -object identifier type (the `_id` field). +Once for a database you need, as PostgreSQL superuser. ```sql --- load extension first time after install CREATE EXTENSION mongo_fdw; +``` --- create server object -CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw -OPTIONS (address '127.0.0.1', port '27017'); +### Create a foreign server with appropriate configuration: --- create foreign table -CREATE FOREIGN TABLE customer_reviews -( - _id NAME, - customer_id TEXT, - review_date TIMESTAMP, - review_rating INTEGER, - product_id CHAR(10), - product_title TEXT, - product_group TEXT, - product_category TEXT, - similar_product_ids CHAR(10)[] -) -SERVER mongo_server -OPTIONS (database 'test', collection 'customer_reviews'); +Once for a foreign data source you need, as PostgreSQL superuser. + +```sql +CREATE SERVER "MongoDB server" FOREIGN DATA WRAPPER mongo_fdw OPTIONS ( + address '127.0.0.1', + port '27017' +); +``` + +### Grant usage on foreign server to normal user in PostgreSQL: + +Once for a normal user (non-superuser) in PostgreSQL, as PostgreSQL superuser. It is a good idea to use a superuser only where really necessary, so let's allow a normal user to use the foreign server (this is not required for the example to work, but it's security recommendation). + +```sql +GRANT USAGE ON FOREIGN SERVER "MongoDB server" TO pguser; +``` +Where `pguser` is a sample user for works with foreign server (and foreign tables). + +### User mapping + +Create an appropriate user mapping: +```sql +CREATE USER MAPPING FOR pguser SERVER "MongoDB server" OPTIONS ( + username 'mongo_user', + password 'mongo_pass' +); +``` +Where `pguser` is a sample user for works with foreign server (and foreign tables). + +### Create foreign table +All `CREATE FOREIGN TABLE` SQL commands can be executed as a normal PostgreSQL user if there were correct `GRANT USAGE ON FOREIGN SERVER`. No need of PostgreSQL supersuer for security reasons but also works with PostgreSQL supersuer. 
--- collect data distribution statistics -ANALYZE customer_reviews; +Create a foreign table referencing the MongoDB collection: + +```sql +-- Note: first column of the table must be "_id" of type "name". +CREATE FOREIGN TABLE warehouse ( + _id name, + warehouse_id int, + warehouse_name text, + warehouse_created timestamptz +) SERVER "MongoDB server" OPTIONS ( + database 'db', + collection 'warehouse' +); ``` +### Typical examples with [MongoDB][1]'s equivalent statements. + +#### `SELECT` +```sql +SELECT * FROM warehouse WHERE warehouse_id = 1; +``` +``` + _id | warehouse_id | warehouse_name | warehouse_created +--------------------------+--------------+----------------+--------------------------- + 53720b1904864dc1f5a571a0 | 1 | UPS | 2014-12-12 12:42:10+05:30 +(1 row) +``` +``` +db.warehouse.find +( + { + "warehouse_id" : 1 + } +).pretty() +{ + "_id" : ObjectId("53720b1904864dc1f5a571a0"), + "warehouse_id" : 1, + "warehouse_name" : "UPS", + "warehouse_created" : ISODate("2014-12-12T07:12:10Z") +} +``` +#### `INSERT` +```sql +INSERT INTO warehouse VALUES (0, 2, 'Laptop', '2015-11-11T08:13:10Z'); +-- Note: The given value for "_id" column will be ignored and allows MongoDB to +-- insert the unique value for the "_id" column. +``` +``` +db.warehouse.insert +( + { + "warehouse_id" : NumberInt(2), + "warehouse_name" : "Laptop", + "warehouse_created" : ISODate("2015-11-11T08:13:10Z") + } +) +``` +#### `DELETE` +```sql +DELETE FROM warehouse WHERE warehouse_id = 2; +``` +``` +db.warehouse.remove +( + { + "warehouse_id" : 2 + } +) +``` +#### `UPDATE` +```sql +UPDATE warehouse SET warehouse_name = 'UPS_NEW' WHERE warehouse_id = 1; +``` +``` +db.warehouse.update +( + { + "warehouse_id" : 1 + }, + { + "warehouse_id" : 1, + "warehouse_name" : "UPS_NEW", + "warehouse_created" : ISODate("2014-12-12T07:12:10Z") + } +) +``` +#### `EXPLAIN`, `ANALYZE` +```sql +EXPLAIN SELECT * FROM warehouse WHERE warehouse_id = 1; +``` +``` + QUERY PLAN +----------------------------------------------------------------- + Foreign Scan on warehouse (cost=0.00..0.00 rows=1000 width=84) + Filter: (warehouse_id = 1) + Foreign Namespace: db.warehouse +(3 rows) +``` +``` +ANALYZE warehouse; +``` Limitations ----------- - * If the BSON document key contains uppercase letters or occurs within a - nested document, `mongo_fdw` requires the corresponding column names to be - declared in double quotes. For example, a nested field such as `"review": { - "Votes": 19 }` should be declared as `"review.Votes" INTEGER` in the `CREATE - TABLE` statement. - - * Note that PostgreSQL limits column names to 63 characters by default. If - you need column names that are longer, you can increase the `NAMEDATALEN` - constant in `src/include/pg_config_manual.h`, compile, and reinstall. + - If the BSON document key contains uppercase letters or occurs within + a nested document, ``mongo_fdw`` requires the corresponding column names + to be declared in double quotes. + - Note that PostgreSQL limits column names to 63 characters by + default. If you need column names that are longer, you can increase the + `NAMEDATALEN` constant in `src/include/pg_config_manual.h`, compile, + and re-install. Contributing ------------ -Have a fix for a bug or an idea for a great new feature? Great! Check out the -contribution guidelines [here][4]. For all other types of questions or comments -about the wrapper please contact us at `engage` `@` `citusdata.com`. - +Have a fix for a bug or an idea for a great new feature? Great! 
Check +out the contribution guidelines [here][3]. Support ------- +This project will be modified to maintain compatibility with new +PostgreSQL and EDB Postgres Advanced Server releases. + +If you need commercial support, please contact the EnterpriseDB sales +team, or check whether your existing PostgreSQL support provider can +also support `mongo_fdw`. + + +Useful links +------------ -This project will be modified to maintain compatibility with new PostgreSQL -releases. The project owners set aside a day every month to look over open -issues and support emails, but are not engaged in active feature development. -Reported bugs will be addressed by apparent severity. +### Documentation + + - For details, please refer to [mongo_fdw documentation][5]. + +### Source code + +Reference FDW realization, `postgres_fdw` + - https://git.postgresql.org/gitweb/?p=postgresql.git;a=tree;f=contrib/postgres_fdw;hb=HEAD + +### General FDW Documentation + + - https://www.postgresql.org/docs/current/ddl-foreign-data.html + - https://www.postgresql.org/docs/current/sql-createforeigndatawrapper.html + - https://www.postgresql.org/docs/current/sql-createforeigntable.html + - https://www.postgresql.org/docs/current/sql-importforeignschema.html + - https://www.postgresql.org/docs/current/fdwhandler.html + - https://www.postgresql.org/docs/current/postgres-fdw.html + +### Other FDWs + + - https://wiki.postgresql.org/wiki/Fdw + - https://pgxn.org/tag/fdw/ License ------- +Portions Copyright (c) 2004-2024, EnterpriseDB Corporation. +Portions Copyright © 2012–2014 Citus Data, Inc. -Copyright © 2012–2014 Citus Data, Inc. - -This program is free software: you can redistribute it and/or modify it under -the terms of the GNU Lesser General Public License as published by the Free -Software Foundation, either version 3 of the License, or (at your option) any -later version. +This program is free software: you can redistribute it and/or modify it +under the terms of the GNU Lesser General Public License as published by +the Free Software Foundation, either version 3 of the License, or (at +your option) any later version. -See the [`LICENSE`][5] file for full details. +See the [`LICENSE`][4] file for full details. [1]: http://www.mongodb.com -[2]: http://www.citusdata.com/blog/51-run-sql-on-mongodb -[3]: https://github.com/citusdata/mongo_fdw/issues/new -[4]: CONTRIBUTING.md -[5]: LICENSE +[2]: https://github.com/enterprisedb/mongo_fdw/issues/new +[3]: CONTRIBUTING.md +[4]: LICENSE +[5]: https://www.enterprisedb.com/docs/mongo_data_adapter/latest/ diff --git a/autogen.sh b/autogen.sh new file mode 100755 index 0000000..459ae8c --- /dev/null +++ b/autogen.sh @@ -0,0 +1,95 @@ +#! /bin/bash + +#------------------------------------------------------------------------- +# +# autogen.sh +# Foreign-data wrapper for remote MongoDB servers +# +# Portions Copyright (c) 2012-2014, PostgreSQL Global Development Group +# Portions Copyright (c) 2004-2024, EnterpriseDB Corporation. +# +# IDENTIFICATION +# autogen.sh +# +#------------------------------------------------------------------------- + + +MONGOC_VERSION=1.17.3 +JSONC_VERSION=0.15-20200726 +MONGOC_INSTALL="${MONGOC_INSTALL_DIR}" +JSONC_INSTALL="${JSONC_INSTALL_DIR}" + +# Don't allow input to the script +if [ "$#" -ne 0 ]; then + echo "Usage: autogen.sh" + exit +fi + +CMAKE_COMMAND='cmake3' +if ! 
[ -x "$(command -v cmake3)" ]; then + CMAKE_COMMAND='cmake' +fi + +### +# Pull the latest version of Mongo C Driver's master branch +# +function checkout_mongo_driver +{ + rm -rf mongo-c-driver && + wget https://github.com/mongodb/mongo-c-driver/releases/download/$MONGOC_VERSION/mongo-c-driver-$MONGOC_VERSION.tar.gz && + tar -zxf mongo-c-driver-$MONGOC_VERSION.tar.gz && + mv mongo-c-driver-$MONGOC_VERSION mongo-c-driver && + rm -rf mongo-c-driver-$MONGOC_VERSION.tar.gz +} + +## +# Pull the json-c library +# +function checkout_json_lib +{ + echo $PWD && + rm -rf json-c && + wget https://github.com/json-c/json-c/archive/json-c-$JSONC_VERSION.tar.gz && + tar -zxf json-c-$JSONC_VERSION.tar.gz && + mv json-c-json-c-$JSONC_VERSION json-c && + rm -rf json-c-$JSONC_VERSION.tar.gz && + echo $PWD +} + + +## +# Compile and install json-c library +# +function install_json_lib +{ + cd json-c && + $CMAKE_COMMAND -DCMAKE_INSTALL_PREFIX=$JSONC_INSTALL $JSONC_CFLAGS . && + make install && + cd .. +} + +### +# Configure and install the Mongo C Driver and libbson +# +function install_mongoc_driver +{ + cd mongo-c-driver && + $CMAKE_COMMAND -DCMAKE_INSTALL_PREFIX=$MONGOC_INSTALL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_SSL=AUTO . && + make install && + cd .. +} + +checkout_mongo_driver && +checkout_json_lib && +install_mongoc_driver && +install_json_lib && +export PKG_CONFIG_PATH=mongo-c-driver/src/libmongoc/src:mongo-c-driver/src/libbson/src + +ret=$? +if [ "$ret" -ne 0 ]; then + echo "Failed" + exit $ret +else + echo "Done" + exit 0 +fi diff --git a/connection.c b/connection.c new file mode 100644 index 0000000..b35bd35 --- /dev/null +++ b/connection.c @@ -0,0 +1,229 @@ +/*------------------------------------------------------------------------- + * + * connection.c + * Connection management functions for mongo_fdw + * + * Portions Copyright (c) 2012-2014, PostgreSQL Global Development Group + * Portions Copyright (c) 2004-2024, EnterpriseDB Corporation. + * Portions Copyright (c) 2012–2014 Citus Data, Inc. + * + * IDENTIFICATION + * connection.c + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "access/xact.h" +#if PG_VERSION_NUM >= 130000 +#include "common/hashfn.h" +#endif +#include "mongo_wrapper.h" +#include "utils/inval.h" +#include "utils/syscache.h" + +/* Length of host */ +#define HOST_LEN 256 + +/* + * Connection cache hash table entry + * + * The lookup key in this hash table is the foreign server OID plus the user + * mapping OID. (We use just one connection per user per foreign server, + * so that we can ensure all scans use the same snapshot during a query.) + */ +typedef struct ConnCacheKey +{ + Oid serverid; /* OID of foreign server */ + Oid userid; /* OID of local user whose mapping we use */ +} ConnCacheKey; + +typedef struct ConnCacheEntry +{ + ConnCacheKey key; /* hash key (must be first) */ + MONGO_CONN *conn; /* connection to foreign server, or NULL */ + bool invalidated; /* true if reconnect is pending */ + uint32 server_hashvalue; /* hash value of foreign server OID */ + uint32 mapping_hashvalue; /* hash value of user mapping OID */ +} ConnCacheEntry; + +/* + * Connection cache (initialized on first use) + */ +static HTAB *ConnectionHash = NULL; + +static void mongo_inval_callback(Datum arg, int cacheid, uint32 hashvalue); + +/* + * mongo_get_connection + * Get a mongo connection which can be used to execute queries on the + * remote Mongo server with the user's authorization. 
A new connection is + * established if we don't already have a suitable one. + */ +MONGO_CONN * +mongo_get_connection(ForeignServer *server, UserMapping *user, + MongoFdwOptions *opt) +{ + bool found; + ConnCacheEntry *entry; + ConnCacheKey key; + + /* First time through, initialize connection cache hashtable */ + if (ConnectionHash == NULL) + { + HASHCTL ctl; + + MemSet(&ctl, 0, sizeof(ctl)); + ctl.keysize = sizeof(ConnCacheKey); + ctl.entrysize = sizeof(ConnCacheEntry); + ctl.hash = tag_hash; + /* Allocate ConnectionHash in the cache context */ + ctl.hcxt = CacheMemoryContext; + ConnectionHash = hash_create("mongo_fdw connections", 8, + &ctl, + HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); + + /* + * Register some callback functions that manage connection cleanup. + * This should be done just once in each backend. + */ + CacheRegisterSyscacheCallback(FOREIGNSERVEROID, + mongo_inval_callback, (Datum) 0); + CacheRegisterSyscacheCallback(USERMAPPINGOID, + mongo_inval_callback, (Datum) 0); + } + + /* Create hash key for the entry. Assume no pad bytes in key struct */ + key.serverid = server->serverid; + key.userid = user->userid; + + /* + * Find or create cached entry for requested connection. + */ + entry = hash_search(ConnectionHash, &key, HASH_ENTER, &found); + if (!found) + { + /* Initialize new hashtable entry (key is already filled in) */ + entry->conn = NULL; + } + + /* If an existing entry has invalid connection then release it */ + if (entry->conn != NULL && entry->invalidated) + { + elog(DEBUG3, "disconnecting mongo_fdw connection %p for option changes to take effect", + entry->conn); + mongoDisconnect(entry->conn); + entry->conn = NULL; + } + + if (entry->conn == NULL) + { + entry->conn = mongoConnect(opt); + elog(DEBUG3, "new mongo_fdw connection %p for server \"%s:%d\"", + entry->conn, opt->svr_address, opt->svr_port); + + /* + * Once the connection is established, then set the connection + * invalidation flag to false, also set the server and user mapping + * hash values. + */ + entry->invalidated = false; + entry->server_hashvalue = + GetSysCacheHashValue1(FOREIGNSERVEROID, + ObjectIdGetDatum(server->serverid)); + entry->mapping_hashvalue = + GetSysCacheHashValue1(USERMAPPINGOID, + ObjectIdGetDatum(user->umid)); + } + + /* Check if the existing or new connection is reachable/active or not? */ + if (entry->conn != NULL) + { + bson_error_t error; + bool retval; + bson_t *command; + + /* Ping the database using "ping" command */ + command = BCON_NEW("ping", BCON_INT32(1)); + retval = mongoc_client_command_simple(entry->conn, opt->svr_database, + command, NULL, NULL, &error); + if (!retval) + ereport(ERROR, + (errmsg("could not connect to server %s", + server->servername), + errhint("Mongo error: \"%s\"", error.message))); + } + return entry->conn; +} + +/* + * mongo_cleanup_connection + * Delete all the cache entries on backend exits. + */ +void +mongo_cleanup_connection() +{ + HASH_SEQ_STATUS scan; + ConnCacheEntry *entry; + + if (ConnectionHash == NULL) + return; + + hash_seq_init(&scan, ConnectionHash); + while ((entry = (ConnCacheEntry *) hash_seq_search(&scan))) + { + if (entry->conn == NULL) + continue; + + elog(DEBUG3, "disconnecting mongo_fdw connection %p", entry->conn); + mongoDisconnect(entry->conn); + entry->conn = NULL; + } +} + +/* + * mongo_release_connection + * Release connection created by calling mongo_get_connection. 
+ */ +void +mongo_release_connection(MONGO_CONN *conn) +{ + /* + * We don't close the connection individually here, will do all connection + * cleanup on the backend exit. + */ +} + +/* + * mongo_inval_callback + * Connection invalidation callback function for mongo. + * + * After a change to a pg_foreign_server or pg_user_mapping catalog entry, + * mark connections depending on that entry as needing to be remade. This + * implementation is similar as pgfdw_inval_callback. + */ +static void +mongo_inval_callback(Datum arg, int cacheid, uint32 hashvalue) +{ + HASH_SEQ_STATUS scan; + ConnCacheEntry *entry; + + Assert(cacheid == FOREIGNSERVEROID || cacheid == USERMAPPINGOID); + + /* ConnectionHash must exist already, if we're registered */ + hash_seq_init(&scan, ConnectionHash); + while ((entry = (ConnCacheEntry *) hash_seq_search(&scan))) + { + /* Ignore invalid entries */ + if (entry->conn == NULL) + continue; + + /* hashvalue == 0 means a cache reset, must clear all state */ + if (hashvalue == 0 || + (cacheid == FOREIGNSERVEROID && + entry->server_hashvalue == hashvalue) || + (cacheid == USERMAPPINGOID && + entry->mapping_hashvalue == hashvalue)) + entry->invalidated = true; + } +} diff --git a/data/mongo_fixture.json b/data/mongo_fixture.json new file mode 100644 index 0000000..1804624 --- /dev/null +++ b/data/mongo_fixture.json @@ -0,0 +1,32 @@ +[{ + "_id": {"$oid": "5381ccf9d6d81c8e8bf0434f"}, + "name": "Ukraine", + "population": 45590000, + "capital": "Kyiv", + "hdi": 0.74, + "lastElections": {"type": "presidential", "date": {"$date": 1400976000000}}, + "mainExports": ["Semi-finished products of iron or non-alloy steel", + "Flat-rolled products of iron or non-alloy steel", + "Sunflower-seed, safflower or cotton-seed oil"] +}, { + "_id": {"$oid": "5381ccf9d6d81c8e8bf04350"}, + "name": "Poland", + "population": 38540000, + "capital": "Warsaw", + "hdi": 0.821, + "lastElections": {"type": "presidential", "date": {"$date": 1400976000000}}, + "lastElections": {"type": "parliamentary", "date": {"$date": 1318118400000}}, + "mainExports": ["Parts and accessories of the motor vehicles of headings 87.01 to 87.0", + "Motor cars and other motor vehicles principally designed for the transport", + "Reception apparatus for television"] +}, { + "_id": {"$oid": "5381ccf9d6d81c8e8bf04351"}, + "name": "Moldova", + "population": 3560000, + "capital": "Chișinău", + "hdi": 0.66, + "lastElections": {"type": "parliamentary", "date": {"$date": 1290902400000}}, + "mainExports": ["Wine of fresh grapes, including fortified wines", + "Insulated (including enameled or anodized) wire, cable", + "Sunflower seeds, whether or not broken"] +}] diff --git a/data/mongo_test_data.js b/data/mongo_test_data.js new file mode 100644 index 0000000..a09926b --- /dev/null +++ b/data/mongo_test_data.js @@ -0,0 +1,114 @@ +// Cleanup of databases/collections created during regression run +// As 'test' is a default database, any foreign table created when +// database is not mentioned then corresponding collection gets +// created in test database. So dropping as part of cleanup. 
+use test +db.mongo_test3.drop(); +use mongo_fdw_regress1 +db.mongo_test1.drop(); +use mongo_fdw_regress2 +db.dropDatabase(); +use mongo_fdw_regress +db.test_tbl1.drop(); +db.test_tbl2.drop(); +db.test_tbl3.drop(); +db.test_tbl4.drop(); +db.test_tbl5.drop(); +db.test_tbl7.drop(); +db.test_tbl8.drop(); +db.test1.drop(); +db.test2.drop(); +db.test3.drop(); +db.test4.drop(); +db.mongo_test.drop(); +db.test5.drop(); +// Below queries will create and insert values in collections +db.mongo_test.insert({a : NumberInt(0), b : "mongo_test collection"}); +db.test_tbl2.insertMany([ + {c1 : NumberInt(10), c2 : "DEVELOPMENT", c3 :"PUNE" }, + {c1: NumberInt(20), c2 : "ADMINISTRATION", c3 :"BANGLORE" }, + {c1: NumberInt(30), c2 : "SALES", c3 :"MUMBAI" }, + {c1: NumberInt(40), c2 : "HR", c3 :"NAGPUR" } +]); +db.test_tbl1.insertMany([ + {c1: NumberInt(100), c2 : "EMP1", c3 :"ADMIN", c4 :NumberInt(1300) ,c5 :ISODate("1980-12-17"), c6 :800.300, c7 :NumberInt(0), c8 :NumberInt(20) }, + {c1: NumberInt(200), c2 : "EMP2", c3 :"SALESMAN", c4 :NumberInt(600) ,c5 :ISODate("1981-02-20"), c6 :1600, c7 :NumberInt(300), c8 :NumberInt(30) }, + {c1: NumberInt(300), c2 : "EMP3", c3 :"SALESMAN", c4 :NumberInt(600) ,c5 :ISODate("1981-02-22"), c6 :1250, c7 :NumberInt(500), c8 :NumberInt(30) }, + {c1: NumberInt(400), c2 : "EMP4", c3 :"MANAGER", c4 :NumberInt(900) ,c5 :ISODate("1981-04-02"), c6 :2975, c7 :NumberInt(0), c8 :NumberInt(20) }, + {c1: NumberInt(500), c2 : "EMP5", c3 :"SALESMAN", c4 :NumberInt(600) ,c5 :ISODate("1981-09-28"), c6 :1250.23, c7 :NumberInt(1400), c8 :NumberInt(30) }, + {c1: NumberInt(600), c2 : "EMP6", c3 :"MANAGER", c4 :NumberInt(900) ,c5 :ISODate("1981-05-01"), c6 :2850, c7 :NumberInt(0), c8 :NumberInt(30) }, + {c1: NumberInt(700), c2 : "EMP7", c3 :"MANAGER", c4 :NumberInt(900) ,c5 :ISODate("1981-06-09"), c6 :2450.34, c7 :NumberInt(0), c8 :NumberInt(10) }, + {c1: NumberInt(800), c2 : "EMP8", c3 :"FINANCE", c4 :NumberInt(400) ,c5 :ISODate("1987-04-19"), c6 :3000, c7 :NumberInt(0), c8 :NumberInt(20) }, + {c1: NumberInt(900), c2 : "EMP9", c3 :"HEAD", c4 :null ,c5 :ISODate("1981-11-17"), c6 :5000, c7 :NumberInt(0), c8 :NumberInt(10) }, + {c1: NumberInt(1000), c2 : "EMP10", c3 :"SALESMAN", c4 :NumberInt(600) ,c5 :ISODate("1980-09-08"), c6 :1500, c7 :NumberInt(0), c8 :NumberInt(30) }, + {c1: NumberInt(1100), c2 : "EMP11", c3 :"ADMIN", c4 :NumberInt(800) ,c5 :ISODate("1987-05-23"), c6 :1100, c7 :NumberInt(0), c8 :NumberInt(20) }, + {c1: NumberInt(1200), c2 : "EMP12", c3 :"ADMIN", c4 :NumberInt(600) ,c5 :ISODate("1981-12-03"), c6 :950.00, c7 :NumberInt(0), c8 :NumberInt(30) }, + {c1: NumberInt(1300), c2 : "EMP13", c3 :"FINANCE", c4 :NumberInt(400) ,c5 :ISODate("1981-12-03"), c6 :3000, c7 :NumberInt(0), c8 :NumberInt(20) }, + {c1: NumberInt(1400), c2 : "EMP14", c3 :"ADMIN", c4 :NumberInt(700) ,c5 :ISODate("1982-01-23"), c6 :1300, c7 :NumberInt(0), c8 :NumberInt(10) }, +]); +db.test_tbl3.insertMany([ + {name: "dvd", marks: [23, 24], pass: false}, + {name: "vdd", marks: [29, 31], pass: true} +]); + +db.test1.insertMany([ + {c1: NumberInt(1), c2: NumberInt(1), c3: "A"}, + {c1: NumberInt(2), c2: NumberInt(2), c3: "B"}, + {c1: NumberInt(3), c2: NumberInt(3), c3: "C"}, + {c1: NumberInt(4), c2: NumberInt(4), c3: "D"}, +]); + +db.test2.insertMany([ + {c1: NumberInt(5), c2: NumberInt(5), c3: "E"}, + {c1: NumberInt(6), c2: NumberInt(6), c3: "F"}, + {c1: NumberInt(7), c2: NumberInt(7), c3: "G"}, + {c1: NumberInt(8), c2: NumberInt(8), c3: "H"}, +]); + +db.test3.insertMany([ + {c1: NumberInt(1), c2: NumberInt(1), c3: "A"}, 
+ {c1: NumberInt(2), c2: NumberInt(2), c3: "B"}, + {c1: NumberInt(3), c2: NumberInt(3), c3: "C"}, + {c1: NumberInt(4), c2: NumberInt(4), c3: "D"}, +]); + +db.test4.insertMany([ + {c1: NumberInt(5), c2: NumberInt(5), c3: "E"}, + {c1: NumberInt(6), c2: NumberInt(6), c3: "F"}, + {c1: NumberInt(7), c2: NumberInt(7), c3: "G"}, + {c1: NumberInt(8), c2: NumberInt(8), c3: "H"}, +]); + +db.test5.insertMany([ + {c1: 12.345678}, + {c1: -1.23} +]); +db.test_tbl4.insertMany([ + {a: NumberInt(25)}, + {a: NumberLong(9999999999)}, + {a: 25}, + {a: 25.09}, + {a: false} +]); +db.test_tbl5.insertMany([ + {a: NumberInt(25)}, + {a: 25}, + {a: 25.09}, + {a: true} +]); +db.test_tbl7.insertMany([ + {_id: null, a: NumberInt(10), b: "ROW1"}, + {a: NumberInt(20), b: "ROW2"} +]); +db.test_tbl8.insertMany([ + {_id: NumberInt(1), a: NumberInt(2), b: "ROW1"}, + {a: NumberInt(3), b: "ROW2"}, +]); +db.mongo_test_large.drop(); +db.mongo_test_large.insertMany([ + {_id: NumberInt(0), a01 : NumberInt(1), a02 : NumberInt(2), a03 : NumberInt(3), a04 : NumberInt(4), a05 : NumberInt(5), a06 : NumberInt(6), a07 : NumberInt(7), a08 : NumberInt(8), a09 : NumberInt(9), a10 : NumberInt(10), a11 : NumberInt(11), a12 : NumberInt(12), a13 : NumberInt(13), a14 : NumberInt(14), a15 : NumberInt(15), a16 : NumberInt(16), a17 : NumberInt(17), a18 : NumberInt(18), a19 : NumberInt(19), a20 : NumberInt(20), a21 : NumberInt(21), a22 : NumberInt(22), a23 : NumberInt(23), a24 : NumberInt(24), a25 : NumberInt(25), a26 : NumberInt(26), a27 : NumberInt(27), a28 : NumberInt(28), a29 : NumberInt(29), a30 : NumberInt(30), a31 : NumberInt(31), a32 : NumberInt(32), a33 : NumberInt(33), a34 : NumberInt(134), a35 : NumberInt(35)}, + {_id: NumberInt(1), a01 : NumberInt(1), a02 : NumberInt(2), a03 : NumberInt(3), a04 : NumberInt(4), a05 : NumberInt(5), a06 : NumberInt(6), a07 : NumberInt(7), a08 : NumberInt(8), a09 : NumberInt(9), a10 : NumberInt(10), a11 : NumberInt(11), a12 : NumberInt(12), a13 : NumberInt(13), a14 : NumberInt(14), a15 : NumberInt(15), a16 : NumberInt(16), a17 : NumberInt(17), a18 : NumberInt(18), a19 : NumberInt(19), a20 : NumberInt(20), a21 : NumberInt(21), a22 : NumberInt(22), a23 : NumberInt(23), a24 : NumberInt(24), a25 : NumberInt(25), a26 : NumberInt(26), a27 : NumberInt(27), a28 : NumberInt(28), a29 : NumberInt(29), a30 : NumberInt(30), a31 : NumberInt(31), a32 : NumberInt(2), a33 : NumberInt(3), a34 : NumberInt(4), a35 : NumberInt(5)}, + {_id: NumberInt(2), a01 : NumberInt(1), a02 : NumberInt(2), a03 : NumberInt(3), a04 : NumberInt(4), a05 : NumberInt(5), a06 : NumberInt(6), a07 : NumberInt(7), a08 : NumberInt(8), a09 : NumberInt(9), a10 : NumberInt(10), a11 : NumberInt(11), a12 : NumberInt(12), a13 : NumberInt(13), a14 : NumberInt(14), a15 : NumberInt(15), a16 : NumberInt(16), a17 : NumberInt(17), a18 : NumberInt(18), a19 : NumberInt(19), a20 : NumberInt(20), a21 : NumberInt(21), a22 : NumberInt(22), a23 : NumberInt(23), a24 : NumberInt(24), a25 : NumberInt(25), a26 : NumberInt(26), a27 : NumberInt(27), a28 : NumberInt(28), a29 : NumberInt(29), a30 : NumberInt(30), a31 : NumberInt(31), a32 : NumberInt(132), a33 : NumberInt(133), a34 : NumberInt(134), a35 : NumberInt(135)}, + {_id: NumberInt(3), a01 : NumberInt(1), a02 : NumberInt(2), a03 : NumberInt(3), a04 : NumberInt(4), a05 : NumberInt(5), a06 : NumberInt(6), a07 : NumberInt(7), a08 : NumberInt(8), a09 : NumberInt(9), a10 : NumberInt(10), a11 : NumberInt(11), a12 : NumberInt(12), a13 : NumberInt(13), a14 : NumberInt(14), a15 : NumberInt(15), a16 : NumberInt(16), a17 : 
NumberInt(17), a18 : NumberInt(18), a19 : NumberInt(19), a20 : NumberInt(20), a21 : NumberInt(21), a22 : NumberInt(22), a23 : NumberInt(23), a24 : NumberInt(24), a25 : NumberInt(25), a26 : NumberInt(26), a27 : NumberInt(27), a28 : NumberInt(28), a29 : NumberInt(29), a30 : NumberInt(30), a31 : NumberInt(31), a32 : NumberInt(32), a33 : NumberInt(3), a34 : NumberInt(34), a35 : NumberInt(35)}, + {_id: NumberInt(4), a01 : NumberInt(1), a02 : NumberInt(2), a03 : NumberInt(3), a04 : NumberInt(4), a05 : NumberInt(5), a06 : NumberInt(6), a07 : NumberInt(7), a08 : NumberInt(8), a09 : NumberInt(9), a10 : NumberInt(10), a11 : NumberInt(11), a12 : NumberInt(12), a13 : NumberInt(13), a14 : NumberInt(14), a15 : NumberInt(15), a16 : NumberInt(16), a17 : NumberInt(17), a18 : NumberInt(18), a19 : NumberInt(19), a20 : NumberInt(20), a21 : NumberInt(21), a22 : NumberInt(22), a23 : NumberInt(23), a24 : NumberInt(24), a25 : NumberInt(25), a26 : NumberInt(26), a27 : NumberInt(27), a28 : NumberInt(28), a29 : NumberInt(29), a30 : NumberInt(30), a31 : NumberInt(31), a32 : NumberInt(32), a33 : NumberInt(33), a34 : NumberInt(34), a35 : NumberInt(35)} +]); diff --git a/data/mongo_testdevice.json b/data/mongo_testdevice.json new file mode 100644 index 0000000..6c2a598 --- /dev/null +++ b/data/mongo_testdevice.json @@ -0,0 +1,10 @@ +[ +{ + "_id": { + "$oid": "6580400c4898199d6e0173cd" + }, + "mac": "001122334455", + "name": "test device", + "level": 3 +} +] diff --git a/data/mongo_testlog.json b/data/mongo_testlog.json new file mode 100644 index 0000000..65760c1 --- /dev/null +++ b/data/mongo_testlog.json @@ -0,0 +1,14 @@ +[ +{ + "_id": { + "$oid": "658040214898199d6e0173d0" + }, + "log": "hello log", + "logMeta": { + "logMac": "001122334455", + "nestMore": { + "level": 3 + } + } +} +] diff --git a/data/mongo_warehouse.json b/data/mongo_warehouse.json new file mode 100644 index 0000000..e8e4a38 --- /dev/null +++ b/data/mongo_warehouse.json @@ -0,0 +1,14 @@ +[ + { + "_id" : {"$oid": "58a1ebbaf543ec0b90545859"}, + "warehouse_id" : 1, + "warehouse_name" : "UPS", + "warehouse_created" : {"$date": 1418368330000} + }, + { + "_id" : {"$oid": "58a1ebbaf543ec0b9054585a"}, + "warehouse_id" : 2, + "warehouse_name" : "Laptop", + "warehouse_created" : {"$date": 1447229590000} + } +] diff --git a/deparse.c b/deparse.c new file mode 100644 index 0000000..1c1d307 --- /dev/null +++ b/deparse.c @@ -0,0 +1,677 @@ +/*------------------------------------------------------------------------- + * + * deparse.c + * Query deparser for mongo_fdw + * + * Portions Copyright (c) 2012-2014, PostgreSQL Global Development Group + * Portions Copyright (c) 2004-2024, EnterpriseDB Corporation. + * Portions Copyright (c) 2012–2014 Citus Data, Inc. + * + * IDENTIFICATION + * deparse.c + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" +#include "mongo_wrapper.h" + +#include +#include + +#include "access/htup_details.h" +#include "catalog/pg_operator.h" +#if PG_VERSION_NUM >= 130000 +#include "common/hashfn.h" +#endif +#include "mongoc.h" +#include "mongo_query.h" +#include "optimizer/optimizer.h" +#include "parser/parsetree.h" +#include "utils/rel.h" +#include "utils/syscache.h" + +/* + * Functions to gather information related to columns involved in the given + * query, which is useful at the time of execution to prepare MongoDB query. 
+ */ +static void mongo_check_op_expr(OpExpr *node, MongoRelQualInfo *qual_info); +static void mongo_check_var(Var *column, MongoRelQualInfo *qual_info); + +/* Helper functions to form MongoDB query document. */ +static void mongo_append_bool_expr(BoolExpr *node, BSON *queryDoc, + pipeline_cxt *context); +static void mongo_append_op_expr(OpExpr *node, BSON *child, + pipeline_cxt *context); +static void mongo_append_column_name(Var *column, BSON *queryDoc, + pipeline_cxt *context); +static void mongo_add_null_check(Var *column, BSON *expr, + pipeline_cxt *context); + +/* + * mongo_check_qual + * Check the given qual expression and find the columns used in it. We + * recursively traverse until we get a Var node and then retrieve the + * required information from it. + */ +void +mongo_check_qual(Expr *node, MongoRelQualInfo *qual_info) +{ + if (node == NULL) + return; + + switch (nodeTag(node)) + { + case T_Var: + mongo_check_var((Var *) node, qual_info); + break; + case T_OpExpr: + mongo_check_op_expr((OpExpr *) node, qual_info); + break; + case T_List: + { + ListCell *lc; + + foreach(lc, (List *) node) + mongo_check_qual((Expr *) lfirst(lc), qual_info); + } + break; + case T_RelabelType: + mongo_check_qual(((RelabelType *) node)->arg, qual_info); + break; + case T_BoolExpr: + mongo_check_qual((Expr *) ((BoolExpr *) node)->args, qual_info); + break; + case T_Aggref: + { + ListCell *lc; + char *func_name = get_func_name(((Aggref *) node)->aggfnoid); + + /* Save aggregation operation name */ + qual_info->aggTypeList = lappend(qual_info->aggTypeList, + makeString(func_name)); + + qual_info->is_agg_column = true; + + /* Save information whether this is a HAVING clause or not */ + if (qual_info->is_having) + qual_info->isHavingList = lappend_int(qual_info->isHavingList, + true); + else + qual_info->isHavingList = lappend_int(qual_info->isHavingList, + false); + + /* + * The aggregation over '*' doesn't need column information. + * Hence, only to maintain the length of column information + * lists add dummy members into it. + * + * For aggregation over the column, add required information + * into the column information lists. + */ + if (((Aggref *) node)->aggstar) + { + qual_info->colNameList = lappend(qual_info->colNameList, + makeString("*")); + qual_info->colNumList = lappend_int(qual_info->colNumList, + 0); + qual_info->rtiList = lappend_int(qual_info->rtiList, 0); + qual_info->isOuterList = lappend_int(qual_info->isOuterList, + 0); + /* Append dummy var */ + qual_info->aggColList = lappend(qual_info->aggColList, + makeVar(0, 0, 0, 0, 0, 0)); + qual_info->is_agg_column = false; + } + else + { + foreach(lc, ((Aggref *) node)->args) + { + Node *n = (Node *) lfirst(lc); + + /* If TargetEntry, extract the expression from it */ + if (IsA(n, TargetEntry)) + { + TargetEntry *tle = (TargetEntry *) n; + + n = (Node *) tle->expr; + } + + mongo_check_qual((Expr *) n, qual_info); + } + } + } + break; + case T_Const: + case T_Param: + /* Nothing to do here because we are looking only for Var's */ + break; + default: + elog(ERROR, "unsupported expression type to check: %d", + (int) nodeTag(node)); + break; + } +} + +/* + * mongo_check_op_expr + * Check given operator expression. + */ +static void +mongo_check_op_expr(OpExpr *node, MongoRelQualInfo *qual_info) +{ + HeapTuple tuple; + Form_pg_operator form; + char oprkind; + ListCell *arg; + + /* Retrieve information about the operator from the system catalog. 
*/ + tuple = SearchSysCache1(OPEROID, ObjectIdGetDatum(node->opno)); + if (!HeapTupleIsValid(tuple)) + elog(ERROR, "cache lookup failed for operator %u", node->opno); + + form = (Form_pg_operator) GETSTRUCT(tuple); + oprkind = form->oprkind; + + /* Sanity check. */ + Assert((oprkind == 'r' && list_length(node->args) == 1) || + (oprkind == 'l' && list_length(node->args) == 1) || + (oprkind == 'b' && list_length(node->args) == 2)); + + /* Deparse left operand. */ + if (oprkind == 'r' || oprkind == 'b') + { + arg = list_head(node->args); + mongo_check_qual(lfirst(arg), qual_info); + } + + /* Deparse right operand. */ + if (oprkind == 'l' || oprkind == 'b') + { + arg = list_tail(node->args); + mongo_check_qual(lfirst(arg), qual_info); + } + + ReleaseSysCache(tuple); +} + +/* + * mongo_check_var + * Check the given Var and append required information related to columns + * involved in qual clauses to separate lists in context. Prepare separate + * list for aggregated columns directly (not related information). + * + * Save required information in the form of a list in MongoRelQualInfo + * structure. Prepare a hash table to avoid duplication of entry if one column + * is involved in the multiple qual expressions. + */ +static void +mongo_check_var(Var *column, MongoRelQualInfo *qual_info) +{ + RangeTblEntry *rte; + char *colname; + ColumnHashKey key; + bool found; + bool is_outerrel = false; + + if (!(bms_is_member(column->varno, qual_info->foreignRel->relids) && + column->varlevelsup == 0)) + return; /* Var does not belong to foreign table */ + + Assert(!IS_SPECIAL_VARNO(column->varno)); + + if (!qual_info->exprColHash) + { + HASHCTL hashInfo; + + memset(&hashInfo, 0, sizeof(hashInfo)); + hashInfo.keysize = sizeof(ColumnHashKey); + hashInfo.entrysize = sizeof(ColumnHashKey); + hashInfo.hcxt = CurrentMemoryContext; + + qual_info->exprColHash = hash_create("Join Expression Column Hash", + MaxHashTableSize, + &hashInfo, + (HASH_ELEM | HASH_BLOBS | HASH_CONTEXT)); + } + + key.varno = column->varno; + key.varattno = column->varattno; + + hash_search(qual_info->exprColHash, (void *) &key, HASH_ENTER, &found); + + /* + * Add aggregated column in the aggColList even if it's already available + * in the hash table. This is because multiple aggregation operations can + * be done on the same column. So, to maintain the same length of + * aggregation functions and their columns, add each aggregation column. + */ + if (qual_info->is_agg_column) + { + qual_info->aggColList = lappend(qual_info->aggColList, column); + qual_info->is_agg_column = false; + if (found) + return; + } + + /* + * Don't add the duplicate column. The Aggregated column is already taken + * care of. + */ + if (found) + return; + + /* Get RangeTblEntry from array in PlannerInfo. */ + rte = planner_rt_fetch(column->varno, qual_info->root); + + colname = get_attname(rte->relid, column->varattno, false); + + /* Is relation inner or outer? 
*/ + if (bms_is_member(column->varno, qual_info->outerRelids)) + is_outerrel = true; + + /* Fill the lists with elements */ + qual_info->colNameList = lappend(qual_info->colNameList, makeString(colname)); + qual_info->colNumList = lappend_int(qual_info->colNumList, column->varattno); + qual_info->rtiList = lappend_int(qual_info->rtiList, column->varno); + qual_info->isOuterList = lappend_int(qual_info->isOuterList, is_outerrel); +} + +/* + * mongo_get_jointype_name + * Output join name for given join type + */ +const char * +mongo_get_jointype_name(JoinType jointype) +{ + switch (jointype) + { + case JOIN_INNER: + return "INNER"; + + case JOIN_LEFT: + return "LEFT"; + + case JOIN_RIGHT: + return "RIGHT"; + + default: + /* Shouldn't come here, but protect from buggy code. */ + elog(ERROR, "unsupported join type %d", jointype); + } + + /* Keep compiler happy */ + return NULL; +} + +/* + * mongo_append_expr + * Append given expression node. + */ +void +mongo_append_expr(Expr *node, BSON *child_doc, pipeline_cxt *context) +{ + if (node == NULL) + return; + + switch (nodeTag(node)) + { + case T_Var: + mongo_append_column_name((Var *) node, child_doc, context); + break; + case T_Const: + append_constant_value(child_doc, + psprintf("%d", context->arrayIndex), + (Const *) node); + break; + case T_OpExpr: + mongo_append_op_expr((OpExpr *) node, child_doc, context); + break; + case T_RelabelType: + mongo_append_expr(((RelabelType *) node)->arg, child_doc, context); + break; + case T_BoolExpr: + mongo_append_bool_expr((BoolExpr *) node, child_doc, context); + break; + case T_Param: + append_param_value(child_doc, psprintf("%d", context->arrayIndex), + (Param *) node, context->scanStateNode); + break; + case T_Aggref: + bsonAppendUTF8(child_doc, "0", "$v_having"); + break; + default: + elog(ERROR, "unsupported expression type to append: %d", + (int) nodeTag(node)); + break; + } +} + +/* + * mongo_append_bool_expr + * Recurse through a BoolExpr node to form MongoDB query pipeline. + */ +static void +mongo_append_bool_expr(BoolExpr *node, BSON *child_doc, pipeline_cxt *context) +{ + BSON child; + BSON expr; + const char *op = NULL; + ListCell *lc; + int saved_array_index; + int reset_index = 0; + + switch (node->boolop) + { + case AND_EXPR: + op = "$and"; + break; + case OR_EXPR: + op = "$or"; + break; + case NOT_EXPR: + op = "$not"; + break; + } + + bsonAppendStartObject(child_doc, psprintf("%d", context->arrayIndex), &expr); + bsonAppendStartArray(&expr, op, &child); + + /* Save array index */ + saved_array_index = context->arrayIndex; + + /* Reset to zero to be used for nested arrays */ + context->arrayIndex = reset_index; + + /* Save join expression type boolean "TRUE" */ + context->isBoolExpr = true; + + foreach(lc, node->args) + { + mongo_append_expr((Expr *) lfirst(lc), &child, context); + context->arrayIndex++; + } + + bsonAppendFinishArray(&expr, &child); + bsonAppendFinishObject(child_doc, &expr); + + /* Retain array index */ + context->arrayIndex = saved_array_index; +} + +/* + * mongo_append_op_expr + * Deparse given operator expression. + * + * Build and append following syntax into $and array: + * + * {"$eq": [ "$$v_age", "$old" ] } + * + * Each element of operator (e.g. "$eq") array is appended by function called + * mongo_append_column_name. + * + * In MongoDB, (null = null), (null < 1) is TRUE but that is FALSE in Postgres. + * To eliminate null value rows, add equality check for null values for columns + * involved in JOIN and WHERE clauses. E.g. 
add the following syntax: + * + * {"$ne": [ "$$v_age", null ]}, + * {"$ne": [ "$old", null ]} + */ +static void +mongo_append_op_expr(OpExpr *node, BSON *child_doc, pipeline_cxt *context) +{ + HeapTuple tuple; + Form_pg_operator form; + char oprkind; + ListCell *arg; + BSON expr; + BSON child1; + char *mongo_operator; + int saved_array_index; + int reset_index = 0; + int and_index = 0; + BSON and_op; + BSON and_obj; + + /* Increment operator expression count */ + context->opExprCount++; + + /* Retrieve information about the operator from the system catalog. */ + tuple = SearchSysCache1(OPEROID, ObjectIdGetDatum(node->opno)); + if (!HeapTupleIsValid(tuple)) + elog(ERROR, "cache lookup failed for operator %u", node->opno); + + form = (Form_pg_operator) GETSTRUCT(tuple); + oprkind = form->oprkind; + + /* Sanity check. */ + Assert((oprkind == 'r' && list_length(node->args) == 1) || + (oprkind == 'l' && list_length(node->args) == 1) || + (oprkind == 'b' && list_length(node->args) == 2)); + + if (context->isBoolExpr == true) + { + bsonAppendStartObject(child_doc, psprintf("%d", and_index++), + &and_obj); + bsonAppendStartArray(&and_obj, "$and", &and_op); + bsonAppendStartObject(&and_op, psprintf("%d", context->arrayIndex), + &expr); + } + else + bsonAppendStartObject(child_doc, psprintf("%d", context->arrayIndex), + &expr); + + /* Deparse operator name. */ + mongo_operator = mongo_operator_name(get_opname(node->opno)); + + bsonAppendStartArray(&expr, mongo_operator, &child1); + + /* Save array index */ + saved_array_index = context->arrayIndex; + + /* Reset to zero to be used for nested arrays */ + context->arrayIndex = reset_index; + + /* Deparse left operand. */ + if (oprkind == 'r' || oprkind == 'b') + { + arg = list_head(node->args); + mongo_append_expr(lfirst(arg), &child1, context); + } + + /* Deparse right operand. */ + if (oprkind == 'l' || oprkind == 'b') + { + if (oprkind == 'l') + context->arrayIndex = reset_index; + else + context->arrayIndex++; + arg = list_tail(node->args); + mongo_append_expr(lfirst(arg), &child1, context); + } + + /* Decrement operator expression count */ + context->opExprCount--; + + bsonAppendFinishArray(&expr, &child1); + if (context->isBoolExpr) + bsonAppendFinishObject(&and_op, &expr); + else + bsonAppendFinishObject(child_doc, &expr); + + /* + * Add equality check for null values for columns involved in JOIN and + * WHERE clauses. + */ + if (context->opExprCount == 0) + { + List *var_list; + ListCell *lc; + + var_list = pull_var_clause((Node *) node, PVC_RECURSE_PLACEHOLDERS | + PVC_RECURSE_AGGREGATES); + + foreach(lc, var_list) + { + Var *var = (Var *) lfirst(lc); + + if (context->isBoolExpr) + bsonAppendStartObject(&and_op, psprintf("%d", and_index++), + &expr); + else + bsonAppendStartObject(child_doc, + psprintf("%d", context->arrayIndex++), + &expr); + mongo_add_null_check(var, &expr, context); + + if (context->isBoolExpr) + bsonAppendFinishObject(&and_op, &expr); + else + bsonAppendFinishObject(child_doc, &expr); + } + } + + if (context->isBoolExpr == true) + { + bsonAppendFinishArray(&and_obj, &and_op); + bsonAppendFinishObject(child_doc, &and_obj); + } + + /* Retain array index */ + context->arrayIndex = saved_array_index; + + ReleaseSysCache(tuple); +} + +/* + * mongo_append_column_name + * Deparse Var and append corresponding column name to operator array. + * + * The elements of the operator array are appended by this function.
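+ * + * For illustration (column names are only examples): given a qual column + * "age" coming from the outer side of a join, this appends the string + * "$$v_age" at the current array index, yielding a fragment such as + * {"$eq": ["$$v_age", "$old"]}; an inner-side or base-relation column is + * appended as a plain "$old"-style field reference instead.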
+ */ +static void +mongo_append_column_name(Var *column, BSON *child_doc, pipeline_cxt *context) +{ + bool found = false; + ColInfoHashKey key; + ColInfoHashEntry *columnInfo; + char *field; + + key.varNo = column->varno; + key.varAttno = column->varattno; + + columnInfo = (ColInfoHashEntry *) hash_search(context->colInfoHash, + (void *) &key, + HASH_FIND, + &found); + if (!found) + return; + + if (columnInfo->isOuter && context->isJoinClause) + field = psprintf("$$%s", + get_varname_for_outer_col(columnInfo->colName)); + else + field = psprintf("$%s", columnInfo->colName); + + bsonAppendUTF8(child_doc, psprintf("%d", context->arrayIndex), field); +} + +/* + * mongo_add_null_check + * Eliminate null value rows of columns involved in the join and WHERE + * clauses. + */ +static void +mongo_add_null_check(Var *column, BSON *expr, pipeline_cxt *context) +{ + BSON ne_expr; + bool found = false; + ColInfoHashKey key; + ColInfoHashEntry *columnInfo; + char *field; + + key.varNo = column->varno; + key.varAttno = column->varattno; + + columnInfo = (ColInfoHashEntry *) hash_search(context->colInfoHash, + (void *) &key, + HASH_FIND, + &found); + if (!found) + return; + + if (columnInfo->isOuter && context->isJoinClause) + field = psprintf("$$%s", + get_varname_for_outer_col(columnInfo->colName)); + else + field = psprintf("$%s", columnInfo->colName); + + bsonAppendStartArray(expr, "$ne", &ne_expr); + bsonAppendUTF8(&ne_expr, "0", field); + bsonAppendNull(&ne_expr, "1"); + bsonAppendFinishArray(expr, &ne_expr); +} + +/* + * mongo_is_foreign_pathkey + * Returns true if it's safe to push down the sort expression described by + * 'pathkey' to the foreign server. + */ +bool +mongo_is_foreign_pathkey(PlannerInfo *root, RelOptInfo *baserel, + PathKey *pathkey) +{ + EquivalenceMember *em; + EquivalenceClass *pathkey_ec = pathkey->pk_eclass; + Expr *em_expr; + + /* + * mongo_is_foreign_expr would detect volatile expressions as well, but + * checking ec_has_volatile here saves some cycles. + */ + if (pathkey_ec->ec_has_volatile) + return false; + + /* can push if a suitable EC member exists */ + if (!(em = mongo_find_em_for_rel(root, pathkey_ec, baserel))) + return false; + + /* Ignore binary-compatible relabeling */ + em_expr = em->em_expr; + while (em_expr && IsA(em_expr, RelabelType)) + em_expr = ((RelabelType *) em_expr)->arg; + + /* Only Vars are allowed per MongoDB. */ + if (!IsA(em_expr, Var)) + return false; + + /* Check for sort operator pushability. */ + if (!mongo_is_default_sort_operator(em, pathkey)) + return false; + + return true; +} + +/* + * mongo_is_builtin + * Return true if given object is one of PostgreSQL's built-in objects. + * + * We use FirstBootstrapObjectId as the cutoff, so that we only consider + * objects with hand-assigned OIDs to be "built in", not for instance any + * function or type defined in the information_schema. + * + * Our constraints for dealing with types are tighter than they are for + * functions or operators: we want to accept only types that are in pg_catalog, + * else format_type might incorrectly fail to schema-qualify their names. + * (This could be fixed with some changes to format_type, but for now there's + * no need.) Thus we must exclude information_schema types. + * + * XXX there is a problem with this, which is that the set of built-in + * objects expands over time. Something that is built-in to us might not + * be known to the remote server, if it's of an older version. But keeping + * track of that would be a huge exercise. 
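+ * + * For example, a core type such as int4 has a hand-assigned OID far below + * the cutoff and is treated as built-in, whereas any type created later with + * CREATE TYPE receives an OID above the cutoff and is not.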
+ */ +bool +mongo_is_builtin(Oid oid) +{ + return (oid < FirstGenbkiObjectId); +} diff --git a/expected/aggregate_pushdown.out b/expected/aggregate_pushdown.out new file mode 100644 index 0000000..3a752ad --- /dev/null +++ b/expected/aggregate_pushdown.out @@ -0,0 +1,1988 @@ +\set MONGO_HOST `echo \'"$MONGO_HOST"\'` +\set MONGO_PORT `echo \'"$MONGO_PORT"\'` +\set MONGO_USER_NAME `echo \'"$MONGO_USER_NAME"\'` +\set MONGO_PASS `echo \'"$MONGO_PWD"\'` +-- Before running this file user must create database mongo_fdw_regress on +-- MongoDB with all permission for MONGO_USER_NAME user with MONGO_PASS +-- password and ran mongodb_init.sh file to load collections. +\c contrib_regression +CREATE EXTENSION IF NOT EXISTS mongo_fdw; +CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw + OPTIONS (address :MONGO_HOST, port :MONGO_PORT); +CREATE USER MAPPING FOR public SERVER mongo_server; +-- Create foreign tables. +CREATE FOREIGN TABLE fdw137_t1 (_id NAME, c1 INTEGER, c2 TEXT, c3 CHAR(9), c4 INTEGER, c5 pg_catalog.Date, c6 DECIMAL, c7 INTEGER, c8 INTEGER) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl1'); +CREATE FOREIGN TABLE fdw137_t2 (_id NAME, c1 INTEGER, c2 TEXT, c3 TEXT) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl2'); +INSERT INTO fdw137_t1 VALUES (0, 1500, 'EMP15', 'FINANCE', 1300, '2000-12-25', 950.0, 400, 60); +INSERT INTO fdw137_t1 VALUES (0, 1600, 'EMP16', 'ADMIN', 600); +INSERT INTO fdw137_t2 VALUES (0, 50, 'TESTING', 'NASHIK'); +INSERT INTO fdw137_t2 VALUES (0); +-- Create local table. +CREATE TABLE fdw137_local AS + SELECT c1, c2, c3, c4, c5, c6, c7, c8 FROM fdw137_t1; +-- Simple aggregates. ORDER BY push-down not possible because only column names allowed. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*), sum(c1), avg(c1), min(c4), max(c1), sum(c1) * (random() <= 1)::int AS sum2 FROM fdw137_t1 WHERE c4 > 600 GROUP BY c4 ORDER BY 1 ASC NULLS FIRST, 2 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------ + Result + Output: (count(*)), (sum(c1)), (avg(c1)), (min(c4)), (max(c1)), ((sum(c1)) * ((random() <= '1'::double precision))::integer), c4 + -> Sort + Output: (count(*)), (sum(c1)), (avg(c1)), (min(c4)), (max(c1)), c4 + Sort Key: (count(*)) NULLS FIRST, (sum(fdw137_t1.c1)) NULLS FIRST + -> Foreign Scan + Output: (count(*)), (sum(c1)), (avg(c1)), (min(c4)), (max(c1)), c4 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(8 rows) + +SELECT count(*), sum(c1), avg(c1), min(c4), max(c1), sum(c1) * (random() <= 1)::int AS sum2 FROM fdw137_t1 WHERE c4 > 600 GROUP BY c4 ORDER BY 1 ASC NULLS FIRST, 2 ASC NULLS FIRST; + count | sum | avg | min | max | sum2 +-------+------+------------------+------+------+------ + 1 | 1100 | 1100 | 800 | 1100 | 1100 + 1 | 1400 | 1400 | 700 | 1400 | 1400 + 2 | 1600 | 800 | 1300 | 1500 | 1600 + 3 | 1700 | 566.666666666667 | 900 | 700 | 1700 +(4 rows) + +-- GROUP BY clause HAVING expressions +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, sum(c1), count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY c1 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: c1, (sum(c1)), (count(*)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(3 rows) + +SELECT c1, sum(c1), count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY c1 ASC NULLS FIRST; + c1 | 
sum | count +------+------+------- + 600 | 600 | 1 + 700 | 700 | 1 + 800 | 800 | 1 + 900 | 900 | 1 + 1000 | 1000 | 1 + 1100 | 1100 | 1 + 1200 | 1200 | 1 + 1300 | 1300 | 1 + 1400 | 1400 | 1 + 1500 | 1500 | 1 + 1600 | 1600 | 1 +(11 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, min(c2) FROM fdw137_t1 WHERE c3 = 'ADMIN' GROUP BY c8 HAVING min(c8) = 20 ORDER BY c8 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: c8, (min(c2)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(3 rows) + +SELECT c8, min(c2) FROM fdw137_t1 WHERE c3 = 'ADMIN' GROUP BY c8 HAVING min(c8) = 20 ORDER BY c8 ASC NULLS FIRST; + c8 | min +----+------ + 20 | EMP1 +(1 row) + +-- Multi-column GROUP BY clause. Push-down. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: c2, (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(3 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +-- With ORDER BY pushdown disabled. +SET mongo_fdw.enable_order_by_pushdown TO OFF; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: c2, (sum(c1)), c1 + Sort Key: fdw137_t1.c2 NULLS FIRST + -> Foreign Scan + Output: c2, (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +SET mongo_fdw.enable_order_by_pushdown TO ON; +-- Aggregation on expression. Don't push-down. 
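+-- (sum(c1+2) aggregates an expression rather than a bare column, so the +-- GroupAggregate and the HAVING filter below are evaluated locally on rows +-- returned by the Foreign Scan.)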
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, sum(c1+2) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY c1 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------- + GroupAggregate + Output: c1, sum((c1 + 2)) + Group Key: fdw137_t1.c1 + Filter: (min(fdw137_t1.c1) > 500) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(7 rows) + +SELECT c1, sum(c1+2) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY c1 ASC NULLS FIRST; + c1 | sum +------+------ + 600 | 602 + 700 | 702 + 800 | 802 + 900 | 902 + 1000 | 1002 + 1100 | 1102 + 1200 | 1202 + 1300 | 1302 + 1400 | 1402 + 1500 | 1502 + 1600 | 1602 +(11 rows) + +-- Aggregate with unshippable GROUP BY clause are not pushed +EXPLAIN (VERBOSE, COSTS OFF) +SELECT avg(c4) FROM fdw137_t1 GROUP BY c4 * (random() <= 1)::int ORDER BY 1; + QUERY PLAN +------------------------------------------------------------------------------------ + Sort + Output: (avg(c4)), ((c4 * ((random() <= '1'::double precision))::integer)) + Sort Key: (avg(fdw137_t1.c4)) + -> HashAggregate + Output: avg(c4), ((c4 * ((random() <= '1'::double precision))::integer)) + Group Key: (fdw137_t1.c4 * ((random() <= '1'::double precision))::integer) + -> Foreign Scan on public.fdw137_t1 + Output: (c4 * ((random() <= '1'::double precision))::integer), c4 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(9 rows) + +SELECT avg(c4) FROM fdw137_t1 GROUP BY c4 * (random() <= 1)::int ORDER BY 1; + avg +----------------------- + 400.0000000000000000 + 600.0000000000000000 + 700.0000000000000000 + 800.0000000000000000 + 900.0000000000000000 + 1300.0000000000000000 + +(7 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, sum(c1) FROM fdw137_t1 GROUP BY c1 HAVING min(c1 * 3) > 500 ORDER BY c1; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c1, (sum(c1)) + Sort Key: fdw137_t1.c1 + -> HashAggregate + Output: c1, sum(c1) + Group Key: fdw137_t1.c1 + Filter: (min((fdw137_t1.c1 * 3)) > 500) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT c1, sum(c1) FROM fdw137_t1 GROUP BY c1 HAVING min(c1 * 3) > 500 ORDER BY c1; + c1 | sum +------+------ + 200 | 200 + 300 | 300 + 400 | 400 + 500 | 500 + 600 | 600 + 700 | 700 + 800 | 800 + 900 | 900 + 1000 | 1000 + 1100 | 1100 + 1200 | 1200 + 1300 | 1300 + 1400 | 1400 + 1500 | 1500 + 1600 | 1600 +(15 rows) + +-- FDW-134: Test ORDER BY with COLLATE. Shouldn't push-down +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY c2 COLLATE "en_US" ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: c2, (sum(c1)), ((c2)::text), c1 + Sort Key: fdw137_t1.c2 COLLATE "en_US" NULLS FIRST + -> Foreign Scan + Output: c2, (sum(c1)), c2, c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY c2 COLLATE "en_US" ASC NULLS FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +-- Using expressions in HAVING clause. Pushed down. 
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT c3, count(*) FROM fdw137_t1 GROUP BY c3 HAVING abs(max(c8)) = abs(10) ORDER BY 1, 2; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: c3, (count(*)) + Sort Key: fdw137_t1.c3, (count(*)) + -> Foreign Scan + Output: c3, (count(*)) + Filter: (abs((max(fdw137_t1.c8))) = 10) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(7 rows) + +SELECT c3, count(*) FROM fdw137_t1 GROUP BY c3 HAVING abs(max(c8)) = abs(10) ORDER BY 1, 2; + c3 | count +-----------+------- + HEAD | 1 +(1 row) + +-- Unshippable HAVING clause will be evaluated locally, and other qual in HAVING clause is pushed down +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*) FROM (SELECT c3, count(c1) FROM fdw137_t1 GROUP BY c3 HAVING (avg(c1) / avg(c1)) * random() <= 1 and min(c1) > 100) x; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------- + Aggregate + Output: count(*) + -> Foreign Scan + Output: fdw137_t1.c3, NULL::bigint + Filter: (((((avg(fdw137_t1.c1)) / (avg(fdw137_t1.c1))))::double precision * random()) <= '1'::double precision) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT count(*) FROM (SELECT c3, count(c1) FROM fdw137_t1 GROUP BY c3 HAVING (avg(c1) / avg(c1)) * random() <= 1 and min(c1) > 100) x; + count +------- + 0 +(1 row) + +-- Aggregate over join query +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t1.c8), avg(t2.c1) FROM fdw137_t1 t1 INNER JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8%2 = 0 ORDER BY 1 DESC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------ + Sort + Output: (sum(t1.c8)), (avg(t2.c1)) + Sort Key: (sum(t1.c8)) DESC NULLS LAST + -> Foreign Scan + Output: (sum(t1.c8)), (avg(t2.c1)) + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2)) +(6 rows) + +SELECT sum(t1.c8), avg(t2.c1) FROM fdw137_t1 t1 INNER JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8%2 = 0 ORDER BY 1 DESC NULLS LAST; + sum | avg +-----+------------------ + 310 | 22.1428571428571 +(1 row) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.c1, count(*), t2.c4 FROM fdw137_t2 t1 INNER JOIN fdw137_t1 t2 ON (t1.c1 = t2.c8) GROUP BY t1.c1, t2.c4 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Foreign Scan + Output: t1.c1, (count(*)), t2.c4 + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl2 t1) INNER JOIN (mongo_fdw_regress.test_tbl1 t2)) +(3 rows) + +SELECT t1.c1, count(*), t2.c4 FROM fdw137_t2 t1 INNER JOIN fdw137_t1 t2 ON (t1.c1 = t2.c8) GROUP BY t1.c1, t2.c4 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | count | c4 +----+-------+------ + 10 | 1 | + 10 | 1 | 700 + 10 | 1 | 900 + 20 | 2 | 400 + 20 | 1 | 800 + 20 | 1 | 900 + 20 | 1 | 1300 + 30 | 5 | 600 + 30 | 1 | 900 +(9 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Foreign Scan + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Foreign Namespace: Aggregate on 
((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(3 rows) + +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 ORDER BY 2 ASC NULLS FIRST; + sum | c8 | avg +-----+----+----- + 100 | 20 | 20 + 180 | 30 | 30 + 0 | 60 | 60 +(3 rows) + +-- With ORDER BY pushdown disabled. +SET mongo_fdw.enable_order_by_pushdown TO OFF; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Sort + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Sort Key: t1.c8 NULLS FIRST + -> Foreign Scan + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(6 rows) + +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 ORDER BY 2 ASC NULLS FIRST; + sum | c8 | avg +-----+----+----- + 100 | 20 | 20 + 180 | 30 | 30 + 0 | 60 | 60 +(3 rows) + +SET mongo_fdw.enable_order_by_pushdown TO ON; +-- Aggregate is not pushed down as aggregation contains random() +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1 * (random() <= 1)::int) AS sum, avg(c1) FROM fdw137_t1 ORDER BY 1; + QUERY PLAN +------------------------------------------------------------------------------------- + Sort + Output: (sum((c1 * ((random() <= '1'::double precision))::integer))), (avg(c1)) + Sort Key: (sum((fdw137_t1.c1 * ((random() <= '1'::double precision))::integer))) + -> Aggregate + Output: sum((c1 * ((random() <= '1'::double precision))::integer)), avg(c1) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(8 rows) + +SELECT sum(c1 * (random() <= 1)::int) AS sum, avg(c1) FROM fdw137_t1 ORDER BY 1; + sum | avg +-------+---------------------- + 13600 | 850.0000000000000000 +(1 row) + +-- Not pushed down due to local conditions present in underneath input rel +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t1.c8) FROM fdw137_t1 t1 INNER JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE ((t1.c8 * t2.c1)/(t1.c8 * t2.c1)) * random() <= 1 ORDER BY 1; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Sort + Output: (sum(t1.c8)) + Sort Key: (sum(t1.c8)) + -> Aggregate + Output: sum(t1.c8) + -> Foreign Scan + Output: t1.c8 + Filter: (((((t1.c8 * t2.c1) / (t1.c8 * t2.c1)))::double precision * random()) <= '1'::double precision) + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(9 rows) + +SELECT sum(t1.c8) FROM fdw137_t1 t1 INNER JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE ((t1.c8 * t2.c1)/(t1.c8 * t2.c1)) * random() <= 1 ORDER BY 1; + sum +----- + 310 +(1 row) + +-- Aggregates in subquery are pushed down. 
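+-- (The inner GROUP BY c8 aggregate is computed on the MongoDB side; the +-- outer count()/sum() over the subquery result still runs locally, as the +-- plan below shows.)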
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(x.a), sum(x.a) FROM (SELECT c8 a, sum(c1) b FROM fdw137_t1 GROUP BY c8 ORDER BY 1, 2) x; + QUERY PLAN +--------------------------------------------------------------------------------------- + Aggregate + Output: count(fdw137_t1.c8), sum(fdw137_t1.c8) + -> Sort + Output: fdw137_t1.c8, (sum(fdw137_t1.c1)) + Sort Key: fdw137_t1.c8, (sum(fdw137_t1.c1)) + -> Foreign Scan + Output: fdw137_t1.c8, (sum(fdw137_t1.c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(8 rows) + +SELECT count(x.a), sum(x.a) FROM (SELECT c8 a, sum(c1) b FROM fdw137_t1 GROUP BY c8 ORDER BY 1, 2) x; + count | sum +-------+----- + 4 | 120 +(1 row) + +-- Aggregate is still pushed down by taking unshippable expression out +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c4 * (random() <= 1)::int AS sum1, sum(c1) AS sum2 FROM fdw137_t1 GROUP BY c4 ORDER BY 1, 2; + QUERY PLAN +---------------------------------------------------------------------------------------------------- + Sort + Output: ((c4 * ((random() <= '1'::double precision))::integer)), (sum(c1)), c4 + Sort Key: ((fdw137_t1.c4 * ((random() <= '1'::double precision))::integer)), (sum(fdw137_t1.c1)) + -> Foreign Scan + Output: (c4 * ((random() <= '1'::double precision))::integer), (sum(c1)), c4 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT c4 * (random() <= 1)::int AS sum1, sum(c1) AS sum2 FROM fdw137_t1 GROUP BY c4 ORDER BY 1, 2; + sum1 | sum2 +------+------ + 400 | 2100 + 600 | 4800 + 700 | 1400 + 800 | 1100 + 900 | 1700 + 1300 | 1600 + | 900 +(7 rows) + +-- Testing ORDER BY, DISTINCT, FILTER and Ordered-sets within aggregates +-- ORDER BY within aggregates (same column used to order) are not pushed +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1 ORDER BY c1) FROM fdw137_t1 WHERE c1 < 500 GROUP BY c2 ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Output: (sum(c1 ORDER BY c1)), c2 + Sort Key: (sum(fdw137_t1.c1 ORDER BY fdw137_t1.c1)) + -> GroupAggregate + Output: sum(c1 ORDER BY c1), c2 + Group Key: fdw137_t1.c2 + -> Sort + Output: c2, c1 + Sort Key: fdw137_t1.c2 + -> Foreign Scan on public.fdw137_t1 + Output: c2, c1 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(12 rows) + +SELECT sum(c1 ORDER BY c1) FROM fdw137_t1 WHERE c1 < 500 GROUP BY c2 ORDER BY 1; + sum +----- + 100 + 200 + 300 + 400 +(4 rows) + +-- ORDER BY within aggregate (different column used to order also using DESC) +-- are not pushed. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c8 ORDER BY c1 desc) FROM fdw137_t1 WHERE c1 > 1000 and c8 > 20; + QUERY PLAN +-------------------------------------------------------- + Aggregate + Output: sum(c8 ORDER BY c1 DESC) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(5 rows) + +SELECT sum(c8 ORDER BY c1 desc) FROM fdw137_t1 WHERE c1 > 1000 and c8 > 20; + sum +----- + 90 +(1 row) + +-- DISTINCT within aggregate. Don't push down. 
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(DISTINCT (c1)) FROM fdw137_t1 WHERE c4 = 600 and c1 < 500; + QUERY PLAN +-------------------------------------------------------- + Aggregate + Output: sum(DISTINCT c1) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(5 rows) + +SELECT sum(DISTINCT (c1)) FROM fdw137_t1 WHERE c4 = 600 and c1 < 500; + sum +----- + 500 +(1 row) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(DISTINCT (t1.c1)) FROM fdw137_t1 t1 join fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 < 30 GROUP BY (t2.c1) ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Sort + Output: (sum(DISTINCT t1.c1)), t2.c1 + Sort Key: (sum(DISTINCT t1.c1)) + -> GroupAggregate + Output: sum(DISTINCT t1.c1), t2.c1 + Group Key: t2.c1 + -> Sort + Output: t2.c1, t1.c1 + Sort Key: t2.c1 + -> Foreign Scan + Output: t2.c1, t1.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(12 rows) + +SELECT sum(DISTINCT (t1.c1)) FROM fdw137_t1 t1 join fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 < 30 GROUP BY (t2.c1) ORDER BY 1; + sum +------ + 3000 + 3700 +(2 rows) + +-- DISTINCT, ORDER BY and FILTER within aggregate, not pushed down. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1), sum(DISTINCT c1 ORDER BY c1) filter (WHERE c1%3 < 2), c4 FROM fdw137_t1 WHERE c4 = 600 GROUP BY c4; + QUERY PLAN +----------------------------------------------------------------------------------- + GroupAggregate + Output: sum(c1), sum(DISTINCT c1 ORDER BY c1) FILTER (WHERE ((c1 % 3) < 2)), c4 + Group Key: fdw137_t1.c4 + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(6 rows) + +SELECT sum(c1), sum(DISTINCT c1 ORDER BY c1) filter (WHERE c1%3 < 2), c4 FROM fdw137_t1 WHERE c4 = 600 GROUP BY c4; + sum | sum | c4 +------+------+----- + 4800 | 4100 | 600 +(1 row) + +-- FILTER within aggregate, not pushed +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1) filter (WHERE c1 < 1000 and c4 > 500) FROM fdw137_t1 GROUP BY c4 ORDER BY 1 nulls last; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Sort + Output: (sum(c1) FILTER (WHERE ((c1 < 1000) AND (c4 > 500)))), c4 + Sort Key: (sum(fdw137_t1.c1) FILTER (WHERE ((fdw137_t1.c1 < 1000) AND (fdw137_t1.c4 > 500)))) + -> HashAggregate + Output: sum(c1) FILTER (WHERE ((c1 < 1000) AND (c4 > 500))), c4 + Group Key: fdw137_t1.c4 + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(9 rows) + +SELECT sum(c1) filter (WHERE c1 < 1000 and c4 > 500) FROM fdw137_t1 GROUP BY c4 ORDER BY 1 nulls last; + sum +------ + 100 + 1000 + 1700 + + + + +(7 rows) + +-- Outer query is aggregation query +EXPLAIN (VERBOSE, COSTS OFF) +SELECT DISTINCT (SELECT count(*) filter (WHERE t2.c1 = 20 and t2.c1 < 30) FROM fdw137_t1 t1 WHERE t1.c1 = 500) FROM fdw137_t2 t2 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------------- + Unique + Output: ((SubPlan 1)) + -> Sort + Output: ((SubPlan 1)) + Sort Key: ((SubPlan 1)) + -> Aggregate + Output: (SubPlan 1) + -> Foreign Scan on public.fdw137_t2 t2 + Output: t2._id, t2.c1, t2.c2, t2.c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + SubPlan 1 + -> Foreign Scan on public.fdw137_t1 t1 + Output: 
count(*) FILTER (WHERE ((t2.c1 = 20) AND (t2.c1 < 30))) + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(14 rows) + +SELECT DISTINCT (SELECT count(*) filter (WHERE t2.c1 = 20 and t2.c1 < 30) FROM fdw137_t1 t1 WHERE t1.c1 = 500) FROM fdw137_t2 t2 ORDER BY 1; + count +------- + 1 +(1 row) + +-- Inner query is aggregation query +EXPLAIN (VERBOSE, COSTS OFF) +SELECT DISTINCT (SELECT count(t1.c1) filter (WHERE t2.c1 = 20 and t2.c1 < 30) FROM fdw137_t1 t1 WHERE t1.c1 > 600) FROM fdw137_t2 t2 ORDER BY 1; + QUERY PLAN +---------------------------------------------------------------------------------------------------- + Unique + Output: ((SubPlan 1)) + -> Sort + Output: ((SubPlan 1)) + Sort Key: ((SubPlan 1)) + -> Foreign Scan on public.fdw137_t2 t2 + Output: (SubPlan 1) + Foreign Namespace: mongo_fdw_regress.test_tbl2 + SubPlan 1 + -> Aggregate + Output: count(t1.c1) FILTER (WHERE ((t2.c1 = 20) AND (t2.c1 < 30))) + -> Foreign Scan on public.fdw137_t1 t1 + Output: t1._id, t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(14 rows) + +SELECT DISTINCT (SELECT count(t1.c1) filter (WHERE t2.c1 = 20 and t2.c1 < 30) FROM fdw137_t1 t1 WHERE t1.c1 > 600) FROM fdw137_t2 t2 ORDER BY 1; + count +------- + 0 + 10 +(2 rows) + +-- Ordered-sets within aggregate, not pushed down. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, rank('10'::varchar) within group (ORDER BY c3), percentile_cont(c8/200::numeric) within group (ORDER BY c1) FROM fdw137_t1 GROUP BY c8 HAVING percentile_cont(c8/200::numeric) within group (ORDER BY c1) < 500 ORDER BY c8; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: c8, rank('10'::bpchar) WITHIN GROUP (ORDER BY c3), percentile_cont((((c8)::numeric / '200'::numeric))::double precision) WITHIN GROUP (ORDER BY ((c1)::double precision)) + Group Key: fdw137_t1.c8 + Filter: (percentile_cont((((fdw137_t1.c8)::numeric / '200'::numeric))::double precision) WITHIN GROUP (ORDER BY ((fdw137_t1.c1)::double precision)) < '500'::double precision) + -> Sort + Output: c8, c3, c1 + Sort Key: fdw137_t1.c8 + -> Foreign Scan on public.fdw137_t1 + Output: c8, c3, c1 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT c8, rank('10'::varchar) within group (ORDER BY c3), percentile_cont(c8/200::numeric) within group (ORDER BY c1) FROM fdw137_t1 GROUP BY c8 HAVING percentile_cont(c8/200::numeric) within group (ORDER BY c1) < 500 ORDER BY c8; + c8 | rank | percentile_cont +----+------+----------------- + 20 | 1 | 220 + 30 | 1 | 275 +(2 rows) + +-- Subquery in FROM clause HAVING aggregate +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*), x.b FROM fdw137_t1, (SELECT c1 a, sum(c1) b FROM fdw137_t2 GROUP BY c1) x WHERE fdw137_t1.c8 = x.a GROUP BY x.b ORDER BY 1, 2; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: (count(*)), x.b + Sort Key: (count(*)), x.b + -> HashAggregate + Output: count(*), x.b + Group Key: x.b + -> Hash Join + Output: x.b + Inner Unique: true + Hash Cond: (fdw137_t1.c8 = x.a) + -> Foreign Scan on public.fdw137_t1 + Output: fdw137_t1._id, fdw137_t1.c1, fdw137_t1.c2, fdw137_t1.c3, fdw137_t1.c4, fdw137_t1.c5, fdw137_t1.c6, fdw137_t1.c7, fdw137_t1.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + Output: x.b, x.a + 
-> Subquery Scan on x + Output: x.b, x.a + -> Foreign Scan + Output: fdw137_t2.c1, (sum(fdw137_t2.c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(20 rows) + +SELECT count(*), x.b FROM fdw137_t1, (SELECT c1 a, sum(c1) b FROM fdw137_t2 GROUP BY c1) x WHERE fdw137_t1.c8 = x.a GROUP BY x.b ORDER BY 1, 2; + count | b +-------+---- + 3 | 10 + 5 | 20 + 6 | 30 +(3 rows) + +-- Join with IS NULL check in HAVING +EXPLAIN (VERBOSE, COSTS OFF) +SELECT avg(t1.c1), sum(t2.c1) FROM fdw137_t1 t1 join fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t2.c1 HAVING avg(t1.c1) is null ORDER BY 1 nulls last, 2; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------ + Sort + Output: (avg(t1.c1)), (sum(t2.c1)), t2.c1 + Sort Key: (avg(t1.c1)), (sum(t2.c1)) + -> Foreign Scan + Output: (avg(t1.c1)), (sum(t2.c1)), t2.c1 + Filter: ((avg(t1.c1)) IS NULL) + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2)) +(7 rows) + +SELECT avg(t1.c1), sum(t2.c1) FROM fdw137_t1 t1 join fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t2.c1 HAVING avg(t1.c1) is null ORDER BY 1 nulls last, 2; + avg | sum +-----+----- +(0 rows) + +-- ORDER BY expression is part of the target list but not pushed down to +-- foreign server. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1) * (random() <= 1)::int AS sum FROM fdw137_t1 ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------------------------------- + Sort + Output: (((sum(c1)) * ((random() <= '1'::double precision))::integer)) + Sort Key: (((sum(fdw137_t1.c1)) * ((random() <= '1'::double precision))::integer)) + -> Foreign Scan + Output: ((sum(c1)) * ((random() <= '1'::double precision))::integer) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT sum(c1) * (random() <= 1)::int AS sum FROM fdw137_t1 ORDER BY 1; + sum +------- + 13600 +(1 row) + +-- LATERAL join, with parameterization +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, sum FROM fdw137_t1 t1, lateral (SELECT sum(t2.c1) sum FROM fdw137_t2 t2 GROUP BY t2.c1) qry WHERE t1.c8 * 2 = qry.sum ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Sort + Output: t1.c8, qry.sum + Sort Key: t1.c8 + -> Hash Join + Output: t1.c8, qry.sum + Hash Cond: ((t1.c8 * 2) = qry.sum) + -> Foreign Scan on public.fdw137_t1 t1 + Output: t1._id, t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + Output: qry.sum + -> Subquery Scan on qry + Output: qry.sum + -> Foreign Scan + Output: (sum(t2.c1)), t2.c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 t2) +(16 rows) + +-- Check with placeHolderVars +EXPLAIN (VERBOSE, COSTS OFF) +SELECT q.b, count(fdw137_t1.c1), sum(q.a) FROM fdw137_t1 left join (SELECT min(13), avg(fdw137_t1.c1), sum(fdw137_t2.c1) FROM fdw137_t1 right join fdw137_t2 ON (fdw137_t1.c8 = fdw137_t2.c1) WHERE fdw137_t1.c8 = 20) q(a, b, c) ON (fdw137_t1.c8 = q.b) WHERE fdw137_t1.c1 between 100 and 500 GROUP BY q.b ORDER BY 1 nulls last, 2; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: q.b, (count(fdw137_t1.c1)), (sum(q.a)) + Sort Key: q.b, (count(fdw137_t1.c1)) + -> GroupAggregate + Output: q.b, count(fdw137_t1.c1), sum(q.a) + Group Key: q.b + 
-> Sort + Output: q.b, fdw137_t1.c1, q.a + Sort Key: q.b + -> Hash Left Join + Output: q.b, fdw137_t1.c1, q.a + Inner Unique: true + Hash Cond: ((fdw137_t1.c8)::numeric = q.b) + -> Foreign Scan on public.fdw137_t1 + Output: fdw137_t1._id, fdw137_t1.c1, fdw137_t1.c2, fdw137_t1.c3, fdw137_t1.c4, fdw137_t1.c5, fdw137_t1.c6, fdw137_t1.c7, fdw137_t1.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + Output: q.b, q.a + -> Subquery Scan on q + Output: q.b, q.a + -> Aggregate + Output: min(13), avg(fdw137_t1_1.c1), NULL::bigint + -> Foreign Scan + Output: fdw137_t1_1.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 fdw137_t1) INNER JOIN (mongo_fdw_regress.test_tbl2 fdw137_t2) +(25 rows) + +SELECT q.b, count(fdw137_t1.c1), sum(q.a) FROM fdw137_t1 left join (SELECT min(13), avg(fdw137_t1.c1), sum(fdw137_t2.c1) FROM fdw137_t1 right join fdw137_t2 ON (fdw137_t1.c8 = fdw137_t2.c1) WHERE fdw137_t1.c8 = 20) q(a, b, c) ON (fdw137_t1.c8 = q.b) WHERE fdw137_t1.c1 between 100 and 500 GROUP BY q.b ORDER BY 1 nulls last, 2; + b | count | sum +---+-------+----- + | 5 | +(1 row) + +-- Not supported cases +-- The COUNT of column +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(c8) FROM fdw137_t1 ; + QUERY PLAN +-------------------------------------------------------- + Aggregate + Output: count(c8) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(5 rows) + +SELECT count(c8) FROM fdw137_t1 ; + count +------- + 15 +(1 row) + +-- Grouping sets +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, sum(c1) FROM fdw137_t1 WHERE c8 > 10 GROUP BY rollup(c8) ORDER BY 1 nulls last; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c8, (sum(c1)) + Sort Key: fdw137_t1.c8 + -> MixedAggregate + Output: c8, sum(c1) + Hash Key: fdw137_t1.c8 + Group Key: () + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT c8, sum(c1) FROM fdw137_t1 WHERE c8 > 10 GROUP BY rollup(c8) ORDER BY 1 nulls last; + c8 | sum +----+------ + 20 | 3700 + 30 | 3800 + 60 | 1500 + | 9000 +(4 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, sum(c1) FROM fdw137_t1 WHERE c8 > 3 GROUP BY cube(c8) ORDER BY 1 nulls last; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c8, (sum(c1)) + Sort Key: fdw137_t1.c8 + -> MixedAggregate + Output: c8, sum(c1) + Hash Key: fdw137_t1.c8 + Group Key: () + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT c8, sum(c1) FROM fdw137_t1 WHERE c8 > 3 GROUP BY cube(c8) ORDER BY 1 nulls last; + c8 | sum +----+------- + 10 | 3000 + 20 | 3700 + 30 | 3800 + 60 | 1500 + | 12000 +(5 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, c4, sum(c1) FROM fdw137_t1 WHERE c8 > 20 GROUP BY grouping sets(c8, c4) ORDER BY 1 nulls last, 2 nulls last; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c8, c4, (sum(c1)) + Sort Key: fdw137_t1.c8, fdw137_t1.c4 + -> HashAggregate + Output: c8, c4, sum(c1) + Hash Key: fdw137_t1.c8 + Hash Key: fdw137_t1.c4 + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT c8, c4, sum(c1) FROM fdw137_t1 WHERE c8 > 20 GROUP BY grouping sets(c8, c4) ORDER BY 1 nulls last, 2 nulls last; + c8 | c4 | sum 
+----+------+------ + 30 | | 3800 + 60 | | 1500 + | 600 | 3200 + | 900 | 600 + | 1300 | 1500 +(5 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, sum(c1), grouping(c8) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1 nulls last; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c8, (sum(c1)), (GROUPING(c8)) + Sort Key: fdw137_t1.c8 + -> HashAggregate + Output: c8, sum(c1), GROUPING(c8) + Group Key: fdw137_t1.c8 + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(9 rows) + +SELECT c8, sum(c1), grouping(c8) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1 nulls last; + c8 | sum | grouping +----+------+---------- + 20 | 3700 | 0 + 30 | 3800 | 0 + 60 | 1500 | 0 +(3 rows) + +-- DISTINCT itself is not pushed down, whereas underneath aggregate is pushed +EXPLAIN (VERBOSE, COSTS OFF) +SELECT DISTINCT sum(c1) s FROM fdw137_t1 WHERE c1 > 1000 GROUP BY c1 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------------- + Unique + Output: (sum(c1)), c1 + -> Sort + Output: (sum(c1)), c1 + Sort Key: (sum(fdw137_t1.c1)) + -> Foreign Scan + Output: (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(8 rows) + +SELECT DISTINCT sum(c1) s FROM fdw137_t1 WHERE c1 > 1000 GROUP BY c1 ORDER BY 1; + s +------ + 1100 + 1200 + 1300 + 1400 + 1500 + 1600 +(6 rows) + +-- WindowAgg +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, sum(c8), count(c8) over (partition by c8%2) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------------------- + Sort + Output: c8, (sum(c8)), (count(c8) OVER (?)), ((c8 % 2)) + Sort Key: fdw137_t1.c8 + -> WindowAgg + Output: c8, (sum(c8)), count(c8) OVER (?), ((c8 % 2)) + -> Sort + Output: c8, ((c8 % 2)), (sum(c8)) + Sort Key: ((fdw137_t1.c8 % 2)) + -> Foreign Scan + Output: c8, (c8 % 2), (sum(c8)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(11 rows) + +SELECT c8, sum(c8), count(c8) over (partition by c8%2) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; + c8 | sum | count +----+-----+------- + 20 | 100 | 3 + 30 | 180 | 3 + 60 | 60 | 3 +(3 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, array_agg(c8) over (partition by c8%2 ORDER BY c8 desc) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------------------- + Sort + Output: c8, (array_agg(c8) OVER (?)), ((c8 % 2)) + Sort Key: fdw137_t1.c8 + -> WindowAgg + Output: c8, array_agg(c8) OVER (?), ((c8 % 2)) + -> Sort + Output: c8, ((c8 % 2)) + Sort Key: ((fdw137_t1.c8 % 2)), fdw137_t1.c8 DESC + -> Foreign Scan + Output: c8, (c8 % 2) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(11 rows) + +SELECT c8, array_agg(c8) over (partition by c8%2 ORDER BY c8 desc) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; + c8 | array_agg +----+------------ + 20 | {60,30,20} + 30 | {60,30} + 60 | {60} +(3 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, array_agg(c8) over (partition by c8%2 ORDER BY c8 range between current row and unbounded following) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------------------- + Sort + Output: c8, (array_agg(c8) OVER (?)), ((c8 % 2)) + Sort Key: fdw137_t1.c8 + -> 
WindowAgg + Output: c8, array_agg(c8) OVER (?), ((c8 % 2)) + -> Sort + Output: c8, ((c8 % 2)) + Sort Key: ((fdw137_t1.c8 % 2)), fdw137_t1.c8 + -> Foreign Scan + Output: c8, (c8 % 2) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(11 rows) + +SELECT c8, array_agg(c8) over (partition by c8%2 ORDER BY c8 range between current row and unbounded following) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; + c8 | array_agg +----+------------ + 20 | {20,30,60} + 30 | {30,60} + 60 | {60} +(3 rows) + +-- User defined function for user defined aggregate, VARIADIC +CREATE FUNCTION least_accum(anyelement, variadic anyarray) +returns anyelement language sql AS + 'SELECT least($1, min($2[i])) FROM generate_subscripts($2,2) g(i)'; +CREATE aggregate least_agg(variadic items anyarray) ( + stype = anyelement, sfunc = least_accum +); +-- Not pushed down due to user defined aggregate +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, least_agg(c1) FROM fdw137_t1 GROUP BY c2 ORDER BY c2; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c2, (least_agg(VARIADIC ARRAY[c1])) + Sort Key: fdw137_t1.c2 + -> HashAggregate + Output: c2, least_agg(VARIADIC ARRAY[c1]) + Group Key: fdw137_t1.c2 + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(9 rows) + +SELECT c2, least_agg(c1) FROM fdw137_t1 GROUP BY c2 ORDER BY c2; + c2 | least_agg +-------+----------- + EMP1 | + EMP10 | + EMP11 | + EMP12 | + EMP13 | + EMP14 | + EMP15 | + EMP16 | + EMP2 | + EMP3 | + EMP4 | + EMP5 | + EMP6 | + EMP7 | + EMP8 | + EMP9 | +(16 rows) + +-- Test partition-wise aggregate +SET enable_partitionwise_aggregate TO ON; +-- Create the partition tables +CREATE TABLE fprt1 (_id NAME, c1 INTEGER, c2 INTEGER, c3 TEXT) PARTITION BY RANGE(c1); +CREATE FOREIGN TABLE ftprt1_p1 PARTITION OF fprt1 FOR VALUES FROM (1) TO (4) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test1'); +CREATE FOREIGN TABLE ftprt1_p2 PARTITION OF fprt1 FOR VALUES FROM (5) TO (8) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test2'); +-- Plan with partitionwise aggregates is enabled +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, sum(c1) FROM fprt1 GROUP BY c1 ORDER BY 2; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Output: fprt1.c1, (sum(fprt1.c1)) + Sort Key: (sum(fprt1.c1)) + -> Append + -> Foreign Scan + Output: fprt1.c1, (sum(fprt1.c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test1 fprt1) + -> Foreign Scan + Output: fprt1_1.c1, (sum(fprt1_1.c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test2 fprt1) +(10 rows) + +SELECT c1, sum(c1) FROM fprt1 GROUP BY c1 ORDER BY 2; + c1 | sum +----+----- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + 8 | 8 +(8 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, sum(c2), min(c2), count(*) FROM fprt1 GROUP BY c1 HAVING avg(c2) < 22 ORDER BY 2; + QUERY PLAN +------------------------------------------------------------------------------------ + Sort + Output: fprt1.c1, (sum(fprt1.c2)), (min(fprt1.c2)), (count(*)) + Sort Key: (sum(fprt1.c2)) + -> Append + -> Foreign Scan + Output: fprt1.c1, (sum(fprt1.c2)), (min(fprt1.c2)), (count(*)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test1 fprt1) + -> Foreign Scan + Output: fprt1_1.c1, (sum(fprt1_1.c2)), (min(fprt1_1.c2)), (count(*)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test2 fprt1) +(10 rows) + 
+SELECT c1, sum(c2), min(c2), count(*) FROM fprt1 GROUP BY c1 HAVING avg(c2) < 22 ORDER BY 2; + c1 | sum | min | count +----+-----+-----+------- + 1 | 1 | 1 | 1 + 2 | 2 | 2 | 1 + 3 | 3 | 3 | 1 + 4 | 4 | 4 | 1 + 5 | 5 | 5 | 1 + 6 | 6 | 6 | 1 + 7 | 7 | 7 | 1 + 8 | 8 | 8 | 1 +(8 rows) + +-- Check with whole-row reference +-- Should have all the columns in the target list for the given relation +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, count(t1) FROM fprt1 t1 GROUP BY c1 HAVING avg(c2) < 22 ORDER BY 1; + QUERY PLAN +---------------------------------------------------------------- + Sort + Output: t1.c1, (count(((t1.*)::fprt1))) + Sort Key: t1.c1 + -> Append + -> HashAggregate + Output: t1.c1, count(((t1.*)::fprt1)) + Group Key: t1.c1 + Filter: (avg(t1.c2) < '22'::numeric) + -> Foreign Scan on public.ftprt1_p1 t1 + Output: t1.c1, t1.*, t1.c2 + Foreign Namespace: mongo_fdw_regress.test1 + -> HashAggregate + Output: t1_1.c1, count(((t1_1.*)::fprt1)) + Group Key: t1_1.c1 + Filter: (avg(t1_1.c2) < '22'::numeric) + -> Foreign Scan on public.ftprt1_p2 t1_1 + Output: t1_1.c1, t1_1.*, t1_1.c2 + Foreign Namespace: mongo_fdw_regress.test2 +(18 rows) + +SELECT c1, count(t1) FROM fprt1 t1 GROUP BY c1 HAVING avg(c2) < 22 ORDER BY 1; + c1 | count +----+------- + 1 | 1 + 2 | 1 + 3 | 1 + 4 | 1 + 5 | 1 + 6 | 1 + 7 | 1 + 8 | 1 +(8 rows) + +SET enable_partitionwise_aggregate TO OFF; +-- Support enable_aggregate_pushdown option at server level and table level. +-- Check only boolean values are accepted. +ALTER SERVER mongo_server OPTIONS (ADD enable_aggregate_pushdown 'non-bolean'); +ERROR: enable_aggregate_pushdown requires a Boolean value +-- Test the option at server level. +ALTER SERVER mongo_server OPTIONS (ADD enable_aggregate_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: (count(*)), c1 + Sort Key: (count(*)) + -> HashAggregate + Output: count(*), c1 + Group Key: fdw137_t1.c1 + Filter: (min(fdw137_t1.c1) > 500) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: (count(*)), c1 + Sort Key: (count(*)) + -> Foreign Scan + Output: (count(*)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +-- Test the option at table level. Setting option at table level does not +-- affect the setting at server level. 
+ALTER FOREIGN TABLE fdw137_t1 OPTIONS (ADD enable_aggregate_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: (count(*)), c1 + Sort Key: (count(*)) + -> HashAggregate + Output: count(*), c1 + Group Key: fdw137_t1.c1 + Filter: (min(fdw137_t1.c1) > 500) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'false'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: (count(*)), c1 + Sort Key: (count(*)) + -> Foreign Scan + Output: (count(*)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +-- Test option for aggregation over join. Allow aggregation only if enabled for +-- both the relations involved in the join. +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'false'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (ADD enable_aggregate_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------- + Sort + Output: (sum(t2.c1)), t1.c8 + Sort Key: t1.c8 + -> HashAggregate + Output: sum(t2.c1), t1.c8 + Group Key: t1.c8 + -> Foreign Scan + Output: t1.c8, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2) +(9 rows) + +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (SET enable_aggregate_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------- + Sort + Output: (sum(t2.c1)), t1.c8 + Sort Key: t1.c8 + -> HashAggregate + Output: sum(t2.c1), t1.c8 + Group Key: t1.c8 + -> Foreign Scan + Output: t1.c8, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2) +(9 rows) + +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'false'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (SET enable_aggregate_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Foreign Scan + Output: (sum(t2.c1)), t1.c8 + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(3 rows) + +-- FDW-560: Aggregation over nested join. 
As nested join push down is not +-- supported, aggregation shouldn't get pushdown. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) INNER JOIN fdw137_t1 t3 ON (t3.c1 = t1.c1) GROUP BY t1.c8 ORDER BY 2; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: sum(t2.c1), t1.c8 + Group Key: t1.c8 + -> Merge Left Join + Output: t1.c8, t2.c1 + Merge Cond: (t1.c8 = t2.c1) + -> Sort + Output: t1.c8 + Sort Key: t1.c8 + -> Foreign Scan + Output: t1.c8 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl1 t3) + -> Sort + Output: t2.c1 + Sort Key: t2.c1 + -> Foreign Scan on public.fdw137_t2 t2 + Output: t2.c1 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(18 rows) + +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) INNER JOIN fdw137_t1 t3 ON (t3.c1 = t1.c1) GROUP BY t1.c8 ORDER BY 2; + sum | c8 +-----+---- + 30 | 10 + 100 | 20 + 180 | 30 + | 60 + | +(5 rows) + +-- Check when enable_join_pushdown is OFF and enable_aggregate_pushdown is ON. +-- Shouldn't push down join as well as aggregation. +ALTER SERVER mongo_server OPTIONS (ADD enable_join_pushdown 'false'); +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2; + QUERY PLAN +-------------------------------------------------------------------- + GroupAggregate + Output: sum(t2.c1), t1.c8 + Group Key: t1.c8 + -> Merge Left Join + Output: t1.c8, t2.c1 + Merge Cond: (t1.c8 = t2.c1) + -> Sort + Output: t1.c8 + Sort Key: t1.c8 + -> Foreign Scan on public.fdw137_t1 t1 + Output: t1.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Sort + Output: t2.c1 + Sort Key: t2.c1 + -> Foreign Scan on public.fdw137_t2 t2 + Output: t2.c1 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(18 rows) + +-- FDW-134: Test with number of columns more than 32 +CREATE FOREIGN TABLE f_test_large (_id int, + a01 int, a02 int, a03 int, a04 int, a05 int, a06 int, a07 int, a08 int, a09 int, a10 int, + a11 int, a12 int, a13 int, a14 int, a15 int, a16 int, a17 int, a18 int, a19 int, a20 int, + a21 int, a22 int, a23 int, a24 int, a25 int, a26 int, a27 int, a28 int, a29 int, a30 int, + a31 int, a32 int, a33 int, a34 int, a35 int) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'mongo_test_large'); +-- Shouldn't pushdown ORDERBY clause due to exceeded number of path keys limit. 
+EXPLAIN (VERBOSE, COSTS FALSE) +SELECT a32, sum(a32) FROM f_test_large GROUP BY + a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, + a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, + a31, a32, a33, a34, a35 ORDER BY + a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST, + a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST, + a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST, + a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST, + a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST, + a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 ASC NULLS FIRST, + a31 ASC NULLS FIRST, a32 ASC NULLS FIRST, a33 ASC NULLS FIRST, a34 DESC NULLS LAST, a35 ASC NULLS FIRST; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: a32, (sum(a32)), a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, a33, a34, a35 + Sort Key: f_test_large.a01 NULLS FIRST, f_test_large.a02 NULLS FIRST, f_test_large.a03 NULLS FIRST, f_test_large.a04 NULLS FIRST, f_test_large.a05 NULLS FIRST, f_test_large.a06 NULLS FIRST, f_test_large.a07 NULLS FIRST, f_test_large.a08 NULLS FIRST, f_test_large.a09 NULLS FIRST, f_test_large.a10 NULLS FIRST, f_test_large.a11 NULLS FIRST, f_test_large.a12 NULLS FIRST, f_test_large.a13 NULLS FIRST, f_test_large.a14 NULLS FIRST, f_test_large.a15 NULLS FIRST, f_test_large.a16 NULLS FIRST, f_test_large.a17 NULLS FIRST, f_test_large.a18 NULLS FIRST, f_test_large.a19 NULLS FIRST, f_test_large.a20 NULLS FIRST, f_test_large.a21 NULLS FIRST, f_test_large.a22 NULLS FIRST, f_test_large.a23 NULLS FIRST, f_test_large.a24 NULLS FIRST, f_test_large.a25 NULLS FIRST, f_test_large.a26 NULLS FIRST, f_test_large.a27 NULLS FIRST, f_test_large.a28 NULLS FIRST, f_test_large.a29 NULLS FIRST, f_test_large.a30 NULLS FIRST, f_test_large.a31 NULLS FIRST, f_test_large.a32 NULLS FIRST, f_test_large.a33 NULLS FIRST, f_test_large.a34 DESC NULLS LAST, f_test_large.a35 NULLS FIRST + -> Foreign Scan + Output: a32, (sum(a32)), a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, 
a29, a30, a31, a33, a34, a35 + Foreign Namespace: Aggregate on (mongo_fdw_regress.mongo_test_large f_test_large) +(6 rows) + +SELECT a32, sum(a32) FROM f_test_large GROUP BY + a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, + a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, + a31, a32, a33, a34, a35 ORDER BY + a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST, + a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST, + a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST, + a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST, + a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST, + a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 ASC NULLS FIRST, + a31 ASC NULLS FIRST, a32 ASC NULLS FIRST, a33 ASC NULLS FIRST, a34 DESC NULLS LAST, a35 ASC NULLS FIRST; + a32 | sum +-----+----- + 2 | 2 + 32 | 32 + 32 | 32 + 32 | 32 + 132 | 132 +(5 rows) + +-- Should pushdown ORDERBY clause because number of path keys are in limit. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT a32, sum(a32) FROM f_test_large GROUP BY + a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, + a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, + a31, a32 ORDER BY + a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST, + a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST, + a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST, + a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST, + a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST, + a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 ASC NULLS FIRST, + a31 ASC NULLS FIRST, a32 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Foreign Scan + Output: a32, (sum(a32)), a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31 + Foreign Namespace: Aggregate on (mongo_fdw_regress.mongo_test_large f_test_large) +(3 rows) + +SELECT a32, sum(a32) FROM f_test_large GROUP BY + a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, + a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, + a31, a32 ORDER BY + a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST, + a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST, + a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST, + a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST, + a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST, + a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 
ASC NULLS FIRST, + a31 ASC NULLS FIRST, a32 ASC NULLS FIRST; + a32 | sum +-----+----- + 2 | 2 + 32 | 96 + 132 | 132 +(3 rows) + +-- FDW-131: Limit and offset pushdown with Aggregate pushdown. +SELECT avg(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1; + avg | c1 +-----+---- + 10 | 10 + 20 | 20 + 30 | 30 + 40 | 40 + 50 | 50 + | +(6 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT 1 OFFSET 1; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(3 rows) + +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT 1 OFFSET 1; + sum | c1 +-----+---- + 10 | 10 +(1 row) + +-- Limit 0, Offset 0 with aggregates. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT 0 OFFSET 0; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(3 rows) + +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT 0 OFFSET 0; + sum | c1 +-----+---- +(0 rows) + +-- Limit NULL +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT NULL OFFSET 2; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(3 rows) + +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT NULL OFFSET 2; + sum | c1 +-----+---- + 20 | 20 + 30 | 30 + 40 | 40 + 50 | 50 +(4 rows) + +-- Limit ALL +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT ALL OFFSET 2; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(3 rows) + +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT ALL OFFSET 2; + sum | c1 +-----+---- + 20 | 20 + 30 | 30 + 40 | 40 + 50 | 50 +(4 rows) + +-- Limit with -ve value. Shouldn't pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, sum(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY 1 ASC NULLS FIRST LIMIT -1; + QUERY PLAN +--------------------------------------------------------------------------------- + Limit + Output: c1, (sum(c1)) + -> Foreign Scan + Output: c1, (sum(c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(5 rows) + +-- Should throw an error. +SELECT c1, sum(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY 1 ASC NULLS FIRST LIMIT -1; +ERROR: LIMIT must not be negative +-- Offset with -ve value. Shouldn't pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, sum(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY 1 ASC NULLS FIRST OFFSET -2; + QUERY PLAN +--------------------------------------------------------------------------------- + Limit + Output: c1, (sum(c1)) + -> Foreign Scan + Output: c1, (sum(c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(5 rows) + +-- Should throw an error. +SELECT c1, sum(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY 1 ASC NULLS FIRST OFFSET -2; +ERROR: OFFSET must not be negative +-- Limit/Offset with -ve value. 
Shouldn't pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, avg(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT -1 OFFSET -2; + QUERY PLAN +--------------------------------------------------------------------------------- + Limit + Output: c1, (avg(c1)) + -> Foreign Scan + Output: c1, (avg(c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(5 rows) + +-- Should throw an error. +SELECT c1, avg(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT -1 OFFSET -2; +ERROR: OFFSET must not be negative +-- Limit with expression evaluating to -ve value. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, avg(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT (1 - (SELECT COUNT(*) FROM fdw137_t2)); + QUERY PLAN +----------------------------------------------------------------------------------- + Limit + Output: fdw137_t2.c1, (avg(fdw137_t2.c1)) + InitPlan 1 (returns $0) + -> Foreign Scan + Output: (count(*)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) + -> Foreign Scan + Output: fdw137_t2.c1, (avg(fdw137_t2.c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(9 rows) + +SELECT c1, avg(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT (1 - (SELECT COUNT(*) FROM fdw137_t2)); +ERROR: LIMIT must not be negative +-- FDW-559: Test mongo_fdw.enable_aggregate_pushdown GUC. +-- Check default value. Should be ON. +SHOW mongo_fdw.enable_aggregate_pushdown; + mongo_fdw.enable_aggregate_pushdown +------------------------------------- + on +(1 row) + +-- Negative testing for GUC value. +SET mongo_fdw.enable_aggregate_pushdown to 'abc'; +ERROR: parameter "mongo_fdw.enable_aggregate_pushdown" requires a Boolean value +--Disable the GUC enable_aggregate_pushdown. +SET mongo_fdw.enable_aggregate_pushdown to false; +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +-- Shouldn't pushdown aggregate because GUC is OFF. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: (count(*)), c1 + Sort Key: (count(*)) + -> HashAggregate + Output: count(*), c1 + Group Key: fdw137_t1.c1 + Filter: (min(fdw137_t1.c1) > 500) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + count +------- + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +(11 rows) + +--Enable the GUC enable_aggregate_pushdown. +SET mongo_fdw.enable_aggregate_pushdown to on; +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +-- Should pushdown aggregate because GUC is ON. 
+EXPLAIN (VERBOSE, COSTS FALSE) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: (count(*)), c1 + Sort Key: (count(*)) + -> Foreign Scan + Output: (count(*)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + count +------- + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +(11 rows) + +-- Test for aggregation over join when server and table options for both the +-- tables is true and guc is enabled. Should pushdown. +SET mongo_fdw.enable_aggregate_pushdown to on; +SET mongo_fdw.enable_join_pushdown to on; +ALTER SERVER mongo_server OPTIONS (SET enable_join_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (SET enable_aggregate_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Foreign Scan + Output: (count(*)), t1.c8 + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(3 rows) + +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + count | c8 +-------+---- + 1 | + 3 | 10 + 5 | 20 + 6 | 30 + 1 | 60 +(5 rows) + +--Disable the GUC enable_join_pushdown. Shouldn't pushdown aggregate. +SET mongo_fdw.enable_join_pushdown to off; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------------------------------- + GroupAggregate + Output: count(*), t1.c8 + Group Key: t1.c8 + -> Merge Left Join + Output: t1.c8 + Merge Cond: (t1.c8 = t2.c1) + -> Foreign Scan on public.fdw137_t1 t1 + Output: t1._id, t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Sort + Output: t2.c1 + Sort Key: t2.c1 NULLS FIRST + -> Foreign Scan on public.fdw137_t2 t2 + Output: t2.c1 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(15 rows) + +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + count | c8 +-------+---- + 1 | + 3 | 10 + 5 | 20 + 6 | 30 + 1 | 60 +(5 rows) + +SET mongo_fdw.enable_join_pushdown to on; +--Disable the GUC enable_aggregate_pushdown. Shouldn't pushdown. 
+SET mongo_fdw.enable_aggregate_pushdown to false; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: count(*), t1.c8 + Group Key: t1.c8 + -> Foreign Scan + Output: t1.c8 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2) +(6 rows) + +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + count | c8 +-------+---- + 1 | + 3 | 10 + 5 | 20 + 6 | 30 + 1 | 60 +(5 rows) + +-- FDW-589: Test enable_order_by_pushdown option at server and table level. +SET mongo_fdw.enable_join_pushdown to true; +SET mongo_fdw.enable_aggregate_pushdown to true; +SET mongo_fdw.enable_order_by_pushdown to true; +ALTER SERVER mongo_server OPTIONS (ADD enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (ADD enable_join_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (ADD enable_order_by_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: c2, (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(3 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (ADD enable_join_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (ADD enable_order_by_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Foreign Scan + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(3 rows) + +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; + sum | c8 | avg +-----+----+----- + 100 | 20 | 20 + 180 | 30 | 30 + 0 | 60 | 60 +(3 rows) + +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_order_by_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: c2, (sum(c1)), c1 + Sort Key: fdw137_t1.c2 NULLS FIRST + -> Foreign Scan + Output: c2, (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS 
FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Sort + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Sort Key: t1.c8 NULLS FIRST + -> Foreign Scan + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(6 rows) + +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; + sum | c8 | avg +-----+----+----- + 100 | 20 | 20 + 180 | 30 | 30 + 0 | 60 | 60 +(3 rows) + +-- Test that setting option at table level does not affect the setting at +-- server level. +ALTER SERVER mongo_server OPTIONS (SET enable_order_by_pushdown 'false'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_order_by_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: c2, (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(3 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (SET enable_order_by_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Foreign Scan + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(3 rows) + +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; + sum | c8 | avg +-----+----+----- + 100 | 20 | 20 + 180 | 30 | 30 + 0 | 60 | 60 +(3 rows) + +-- When option enable_aggregate_pushdown is disabled. Shouldn't pushdown +-- aggregate as well as ORDER BY too. 
+ALTER SERVER mongo_server OPTIONS (SET enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c2, (sum(c1)), c1 + Sort Key: fdw137_t1.c2 NULLS FIRST + -> HashAggregate + Output: c2, sum(c1), c1 + Group Key: fdw137_t1.c2, fdw137_t1.c1 + Filter: (min(fdw137_t1.c1) > 500) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +-- Cleanup +DELETE FROM fdw137_t1 WHERE c8 IS NULL; +DELETE FROM fdw137_t1 WHERE c8 = 60; +DELETE FROM fdw137_t2 WHERE c1 IS NULL; +DELETE FROM fdw137_t2 WHERE c1 = 50; +DROP FOREIGN TABLE fdw137_t1; +DROP FOREIGN TABLE fdw137_t2; +DROP FOREIGN TABLE ftprt1_p1; +DROP FOREIGN TABLE ftprt1_p2; +DROP FOREIGN TABLE f_test_large; +DROP TABLE fprt1; +DROP USER MAPPING FOR public SERVER mongo_server; +DROP SERVER mongo_server; +DROP EXTENSION mongo_fdw; diff --git a/expected/aggregate_pushdown_1.out b/expected/aggregate_pushdown_1.out new file mode 100644 index 0000000..a96eab8 --- /dev/null +++ b/expected/aggregate_pushdown_1.out @@ -0,0 +1,1988 @@ +\set MONGO_HOST `echo \'"$MONGO_HOST"\'` +\set MONGO_PORT `echo \'"$MONGO_PORT"\'` +\set MONGO_USER_NAME `echo \'"$MONGO_USER_NAME"\'` +\set MONGO_PASS `echo \'"$MONGO_PWD"\'` +-- Before running this file user must create database mongo_fdw_regress on +-- MongoDB with all permission for MONGO_USER_NAME user with MONGO_PASS +-- password and ran mongodb_init.sh file to load collections. +\c contrib_regression +CREATE EXTENSION IF NOT EXISTS mongo_fdw; +CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw + OPTIONS (address :MONGO_HOST, port :MONGO_PORT); +CREATE USER MAPPING FOR public SERVER mongo_server; +-- Create foreign tables. +CREATE FOREIGN TABLE fdw137_t1 (_id NAME, c1 INTEGER, c2 TEXT, c3 CHAR(9), c4 INTEGER, c5 pg_catalog.Date, c6 DECIMAL, c7 INTEGER, c8 INTEGER) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl1'); +CREATE FOREIGN TABLE fdw137_t2 (_id NAME, c1 INTEGER, c2 TEXT, c3 TEXT) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl2'); +INSERT INTO fdw137_t1 VALUES (0, 1500, 'EMP15', 'FINANCE', 1300, '2000-12-25', 950.0, 400, 60); +INSERT INTO fdw137_t1 VALUES (0, 1600, 'EMP16', 'ADMIN', 600); +INSERT INTO fdw137_t2 VALUES (0, 50, 'TESTING', 'NASHIK'); +INSERT INTO fdw137_t2 VALUES (0); +-- Create local table. +CREATE TABLE fdw137_local AS + SELECT c1, c2, c3, c4, c5, c6, c7, c8 FROM fdw137_t1; +-- Simple aggregates. ORDER BY push-down not possible because only column names allowed. 
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*), sum(c1), avg(c1), min(c4), max(c1), sum(c1) * (random() <= 1)::int AS sum2 FROM fdw137_t1 WHERE c4 > 600 GROUP BY c4 ORDER BY 1 ASC NULLS FIRST, 2 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------ + Result + Output: (count(*)), (sum(c1)), (avg(c1)), (min(c4)), (max(c1)), ((sum(c1)) * ((random() <= '1'::double precision))::integer), c4 + -> Sort + Output: (count(*)), (sum(c1)), (avg(c1)), (min(c4)), (max(c1)), c4 + Sort Key: (count(*)) NULLS FIRST, (sum(fdw137_t1.c1)) NULLS FIRST + -> Foreign Scan + Output: (count(*)), (sum(c1)), (avg(c1)), (min(c4)), (max(c1)), c4 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(8 rows) + +SELECT count(*), sum(c1), avg(c1), min(c4), max(c1), sum(c1) * (random() <= 1)::int AS sum2 FROM fdw137_t1 WHERE c4 > 600 GROUP BY c4 ORDER BY 1 ASC NULLS FIRST, 2 ASC NULLS FIRST; + count | sum | avg | min | max | sum2 +-------+------+------------------+------+------+------ + 1 | 1100 | 1100 | 800 | 1100 | 1100 + 1 | 1400 | 1400 | 700 | 1400 | 1400 + 2 | 1600 | 800 | 1300 | 1500 | 1600 + 3 | 1700 | 566.666666666667 | 900 | 700 | 1700 +(4 rows) + +-- GROUP BY clause HAVING expressions +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, sum(c1), count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY c1 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: c1, (sum(c1)), (count(*)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(3 rows) + +SELECT c1, sum(c1), count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY c1 ASC NULLS FIRST; + c1 | sum | count +------+------+------- + 600 | 600 | 1 + 700 | 700 | 1 + 800 | 800 | 1 + 900 | 900 | 1 + 1000 | 1000 | 1 + 1100 | 1100 | 1 + 1200 | 1200 | 1 + 1300 | 1300 | 1 + 1400 | 1400 | 1 + 1500 | 1500 | 1 + 1600 | 1600 | 1 +(11 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, min(c2) FROM fdw137_t1 WHERE c3 = 'ADMIN' GROUP BY c8 HAVING min(c8) = 20 ORDER BY c8 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: c8, (min(c2)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(3 rows) + +SELECT c8, min(c2) FROM fdw137_t1 WHERE c3 = 'ADMIN' GROUP BY c8 HAVING min(c8) = 20 ORDER BY c8 ASC NULLS FIRST; + c8 | min +----+------ + 20 | EMP1 +(1 row) + +-- Multi-column GROUP BY clause. Push-down. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: c2, (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(3 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +-- With ORDER BY pushdown disabled. 
+SET mongo_fdw.enable_order_by_pushdown TO OFF; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: c2, (sum(c1)), c1 + Sort Key: fdw137_t1.c2 NULLS FIRST + -> Foreign Scan + Output: c2, (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +SET mongo_fdw.enable_order_by_pushdown TO ON; +-- Aggregation on expression. Don't push-down. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, sum(c1+2) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY c1 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------- + GroupAggregate + Output: c1, sum((c1 + 2)) + Group Key: fdw137_t1.c1 + Filter: (min(fdw137_t1.c1) > 500) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(7 rows) + +SELECT c1, sum(c1+2) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY c1 ASC NULLS FIRST; + c1 | sum +------+------ + 600 | 602 + 700 | 702 + 800 | 802 + 900 | 902 + 1000 | 1002 + 1100 | 1102 + 1200 | 1202 + 1300 | 1302 + 1400 | 1402 + 1500 | 1502 + 1600 | 1602 +(11 rows) + +-- Aggregate with unshippable GROUP BY clause are not pushed +EXPLAIN (VERBOSE, COSTS OFF) +SELECT avg(c4) FROM fdw137_t1 GROUP BY c4 * (random() <= 1)::int ORDER BY 1; + QUERY PLAN +------------------------------------------------------------------------------------ + Sort + Output: (avg(c4)), ((c4 * ((random() <= '1'::double precision))::integer)) + Sort Key: (avg(fdw137_t1.c4)) + -> HashAggregate + Output: avg(c4), ((c4 * ((random() <= '1'::double precision))::integer)) + Group Key: (fdw137_t1.c4 * ((random() <= '1'::double precision))::integer) + -> Foreign Scan on public.fdw137_t1 + Output: (c4 * ((random() <= '1'::double precision))::integer), c4 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(9 rows) + +SELECT avg(c4) FROM fdw137_t1 GROUP BY c4 * (random() <= 1)::int ORDER BY 1; + avg +----------------------- + 400.0000000000000000 + 600.0000000000000000 + 700.0000000000000000 + 800.0000000000000000 + 900.0000000000000000 + 1300.0000000000000000 + +(7 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, sum(c1) FROM fdw137_t1 GROUP BY c1 HAVING min(c1 * 3) > 500 ORDER BY c1; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c1, (sum(c1)) + Sort Key: fdw137_t1.c1 + -> HashAggregate + Output: c1, sum(c1) + Group Key: fdw137_t1.c1 + Filter: (min((fdw137_t1.c1 * 3)) > 500) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT c1, sum(c1) FROM fdw137_t1 GROUP BY c1 HAVING min(c1 * 3) > 500 ORDER BY c1; + c1 | sum +------+------ + 200 | 200 + 300 | 300 + 400 | 400 + 500 | 500 + 600 | 600 + 700 | 700 + 800 | 800 + 900 | 900 + 1000 | 1000 + 1100 | 1100 + 1200 | 1200 + 1300 | 1300 + 1400 | 1400 + 1500 | 1500 + 1600 | 1600 +(15 rows) + +-- FDW-134: Test ORDER BY with COLLATE. 
Shouldn't push-down +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY c2 COLLATE "en_US" ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: c2, (sum(c1)), ((c2)::text), c1 + Sort Key: fdw137_t1.c2 COLLATE "en_US" NULLS FIRST + -> Foreign Scan + Output: c2, (sum(c1)), c2, c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY c2 COLLATE "en_US" ASC NULLS FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +-- Using expressions in HAVING clause. Pushed down. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c3, count(*) FROM fdw137_t1 GROUP BY c3 HAVING abs(max(c8)) = abs(10) ORDER BY 1, 2; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: c3, (count(*)) + Sort Key: fdw137_t1.c3, (count(*)) + -> Foreign Scan + Output: c3, (count(*)) + Filter: (abs((max(fdw137_t1.c8))) = 10) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(7 rows) + +SELECT c3, count(*) FROM fdw137_t1 GROUP BY c3 HAVING abs(max(c8)) = abs(10) ORDER BY 1, 2; + c3 | count +-----------+------- + HEAD | 1 +(1 row) + +-- Unshippable HAVING clause will be evaluated locally, and other qual in HAVING clause is pushed down +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*) FROM (SELECT c3, count(c1) FROM fdw137_t1 GROUP BY c3 HAVING (avg(c1) / avg(c1)) * random() <= 1 and min(c1) > 100) x; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------- + Aggregate + Output: count(*) + -> Foreign Scan + Output: fdw137_t1.c3, NULL::bigint + Filter: (((((avg(fdw137_t1.c1)) / (avg(fdw137_t1.c1))))::double precision * random()) <= '1'::double precision) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT count(*) FROM (SELECT c3, count(c1) FROM fdw137_t1 GROUP BY c3 HAVING (avg(c1) / avg(c1)) * random() <= 1 and min(c1) > 100) x; + count +------- + 0 +(1 row) + +-- Aggregate over join query +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t1.c8), avg(t2.c1) FROM fdw137_t1 t1 INNER JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8%2 = 0 ORDER BY 1 DESC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------ + Sort + Output: (sum(t1.c8)), (avg(t2.c1)) + Sort Key: (sum(t1.c8)) DESC NULLS LAST + -> Foreign Scan + Output: (sum(t1.c8)), (avg(t2.c1)) + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2)) +(6 rows) + +SELECT sum(t1.c8), avg(t2.c1) FROM fdw137_t1 t1 INNER JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8%2 = 0 ORDER BY 1 DESC NULLS LAST; + sum | avg +-----+------------------ + 310 | 22.1428571428571 +(1 row) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.c1, count(*), t2.c4 FROM fdw137_t2 t1 INNER JOIN fdw137_t1 t2 ON (t1.c1 = t2.c8) GROUP BY t1.c1, t2.c4 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Foreign Scan + Output: t1.c1, (count(*)), t2.c4 + Foreign Namespace: Aggregate on 
((mongo_fdw_regress.test_tbl2 t1) INNER JOIN (mongo_fdw_regress.test_tbl1 t2)) +(3 rows) + +SELECT t1.c1, count(*), t2.c4 FROM fdw137_t2 t1 INNER JOIN fdw137_t1 t2 ON (t1.c1 = t2.c8) GROUP BY t1.c1, t2.c4 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | count | c4 +----+-------+------ + 10 | 1 | + 10 | 1 | 700 + 10 | 1 | 900 + 20 | 2 | 400 + 20 | 1 | 800 + 20 | 1 | 900 + 20 | 1 | 1300 + 30 | 5 | 600 + 30 | 1 | 900 +(9 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Foreign Scan + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(3 rows) + +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 ORDER BY 2 ASC NULLS FIRST; + sum | c8 | avg +-----+----+----- + 100 | 20 | 20 + 180 | 30 | 30 + 0 | 60 | 60 +(3 rows) + +-- With ORDER BY pushdown disabled. +SET mongo_fdw.enable_order_by_pushdown TO OFF; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Sort + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Sort Key: t1.c8 NULLS FIRST + -> Foreign Scan + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(6 rows) + +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 ORDER BY 2 ASC NULLS FIRST; + sum | c8 | avg +-----+----+----- + 100 | 20 | 20 + 180 | 30 | 30 + 0 | 60 | 60 +(3 rows) + +SET mongo_fdw.enable_order_by_pushdown TO ON; +-- Aggregate is not pushed down as aggregation contains random() +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1 * (random() <= 1)::int) AS sum, avg(c1) FROM fdw137_t1 ORDER BY 1; + QUERY PLAN +------------------------------------------------------------------------------------- + Sort + Output: (sum((c1 * ((random() <= '1'::double precision))::integer))), (avg(c1)) + Sort Key: (sum((fdw137_t1.c1 * ((random() <= '1'::double precision))::integer))) + -> Aggregate + Output: sum((c1 * ((random() <= '1'::double precision))::integer)), avg(c1) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(8 rows) + +SELECT sum(c1 * (random() <= 1)::int) AS sum, avg(c1) FROM fdw137_t1 ORDER BY 1; + sum | avg +-------+---------------------- + 13600 | 850.0000000000000000 +(1 row) + +-- Not pushed down due to local conditions present in underneath input rel +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t1.c8) FROM fdw137_t1 t1 INNER JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE ((t1.c8 * t2.c1)/(t1.c8 * t2.c1)) * random() <= 1 ORDER BY 1; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Sort + Output: (sum(t1.c8)) + Sort Key: 
(sum(t1.c8)) + -> Aggregate + Output: sum(t1.c8) + -> Foreign Scan + Output: t1.c8 + Filter: (((((t1.c8 * t2.c1) / (t1.c8 * t2.c1)))::double precision * random()) <= '1'::double precision) + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(9 rows) + +SELECT sum(t1.c8) FROM fdw137_t1 t1 INNER JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE ((t1.c8 * t2.c1)/(t1.c8 * t2.c1)) * random() <= 1 ORDER BY 1; + sum +----- + 310 +(1 row) + +-- Aggregates in subquery are pushed down. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(x.a), sum(x.a) FROM (SELECT c8 a, sum(c1) b FROM fdw137_t1 GROUP BY c8 ORDER BY 1, 2) x; + QUERY PLAN +--------------------------------------------------------------------------------------- + Aggregate + Output: count(fdw137_t1.c8), sum(fdw137_t1.c8) + -> Sort + Output: fdw137_t1.c8, (sum(fdw137_t1.c1)) + Sort Key: fdw137_t1.c8, (sum(fdw137_t1.c1)) + -> Foreign Scan + Output: fdw137_t1.c8, (sum(fdw137_t1.c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(8 rows) + +SELECT count(x.a), sum(x.a) FROM (SELECT c8 a, sum(c1) b FROM fdw137_t1 GROUP BY c8 ORDER BY 1, 2) x; + count | sum +-------+----- + 4 | 120 +(1 row) + +-- Aggregate is still pushed down by taking unshippable expression out +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c4 * (random() <= 1)::int AS sum1, sum(c1) AS sum2 FROM fdw137_t1 GROUP BY c4 ORDER BY 1, 2; + QUERY PLAN +---------------------------------------------------------------------------------------------------- + Sort + Output: ((c4 * ((random() <= '1'::double precision))::integer)), (sum(c1)), c4 + Sort Key: ((fdw137_t1.c4 * ((random() <= '1'::double precision))::integer)), (sum(fdw137_t1.c1)) + -> Foreign Scan + Output: (c4 * ((random() <= '1'::double precision))::integer), (sum(c1)), c4 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT c4 * (random() <= 1)::int AS sum1, sum(c1) AS sum2 FROM fdw137_t1 GROUP BY c4 ORDER BY 1, 2; + sum1 | sum2 +------+------ + 400 | 2100 + 600 | 4800 + 700 | 1400 + 800 | 1100 + 900 | 1700 + 1300 | 1600 + | 900 +(7 rows) + +-- Testing ORDER BY, DISTINCT, FILTER and Ordered-sets within aggregates +-- ORDER BY within aggregates (same column used to order) are not pushed +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1 ORDER BY c1) FROM fdw137_t1 WHERE c1 < 500 GROUP BY c2 ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Output: (sum(c1 ORDER BY c1)), c2 + Sort Key: (sum(fdw137_t1.c1 ORDER BY fdw137_t1.c1)) + -> GroupAggregate + Output: sum(c1 ORDER BY c1), c2 + Group Key: fdw137_t1.c2 + -> Sort + Output: c2, c1 + Sort Key: fdw137_t1.c2 + -> Foreign Scan on public.fdw137_t1 + Output: c2, c1 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(12 rows) + +SELECT sum(c1 ORDER BY c1) FROM fdw137_t1 WHERE c1 < 500 GROUP BY c2 ORDER BY 1; + sum +----- + 100 + 200 + 300 + 400 +(4 rows) + +-- ORDER BY within aggregate (different column used to order also using DESC) +-- are not pushed. 
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c8 ORDER BY c1 desc) FROM fdw137_t1 WHERE c1 > 1000 and c8 > 20; + QUERY PLAN +-------------------------------------------------------- + Aggregate + Output: sum(c8 ORDER BY c1 DESC) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(5 rows) + +SELECT sum(c8 ORDER BY c1 desc) FROM fdw137_t1 WHERE c1 > 1000 and c8 > 20; + sum +----- + 90 +(1 row) + +-- DISTINCT within aggregate. Don't push down. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(DISTINCT (c1)) FROM fdw137_t1 WHERE c4 = 600 and c1 < 500; + QUERY PLAN +-------------------------------------------------------- + Aggregate + Output: sum(DISTINCT c1) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(5 rows) + +SELECT sum(DISTINCT (c1)) FROM fdw137_t1 WHERE c4 = 600 and c1 < 500; + sum +----- + 500 +(1 row) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(DISTINCT (t1.c1)) FROM fdw137_t1 t1 join fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 < 30 GROUP BY (t2.c1) ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Sort + Output: (sum(DISTINCT t1.c1)), t2.c1 + Sort Key: (sum(DISTINCT t1.c1)) + -> GroupAggregate + Output: sum(DISTINCT t1.c1), t2.c1 + Group Key: t2.c1 + -> Sort + Output: t2.c1, t1.c1 + Sort Key: t2.c1 + -> Foreign Scan + Output: t2.c1, t1.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(12 rows) + +SELECT sum(DISTINCT (t1.c1)) FROM fdw137_t1 t1 join fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 < 30 GROUP BY (t2.c1) ORDER BY 1; + sum +------ + 3000 + 3700 +(2 rows) + +-- DISTINCT, ORDER BY and FILTER within aggregate, not pushed down. 
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1), sum(DISTINCT c1 ORDER BY c1) filter (WHERE c1%3 < 2), c4 FROM fdw137_t1 WHERE c4 = 600 GROUP BY c4; + QUERY PLAN +----------------------------------------------------------------------------------- + GroupAggregate + Output: sum(c1), sum(DISTINCT c1 ORDER BY c1) FILTER (WHERE ((c1 % 3) < 2)), c4 + Group Key: fdw137_t1.c4 + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(6 rows) + +SELECT sum(c1), sum(DISTINCT c1 ORDER BY c1) filter (WHERE c1%3 < 2), c4 FROM fdw137_t1 WHERE c4 = 600 GROUP BY c4; + sum | sum | c4 +------+------+----- + 4800 | 4100 | 600 +(1 row) + +-- FILTER within aggregate, not pushed +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1) filter (WHERE c1 < 1000 and c4 > 500) FROM fdw137_t1 GROUP BY c4 ORDER BY 1 nulls last; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Sort + Output: (sum(c1) FILTER (WHERE ((c1 < 1000) AND (c4 > 500)))), c4 + Sort Key: (sum(fdw137_t1.c1) FILTER (WHERE ((fdw137_t1.c1 < 1000) AND (fdw137_t1.c4 > 500)))) + -> HashAggregate + Output: sum(c1) FILTER (WHERE ((c1 < 1000) AND (c4 > 500))), c4 + Group Key: fdw137_t1.c4 + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(9 rows) + +SELECT sum(c1) filter (WHERE c1 < 1000 and c4 > 500) FROM fdw137_t1 GROUP BY c4 ORDER BY 1 nulls last; + sum +------ + 100 + 1000 + 1700 + + + + +(7 rows) + +-- Outer query is aggregation query +EXPLAIN (VERBOSE, COSTS OFF) +SELECT DISTINCT (SELECT count(*) filter (WHERE t2.c1 = 20 and t2.c1 < 30) FROM fdw137_t1 t1 WHERE t1.c1 = 500) FROM fdw137_t2 t2 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------------- + Unique + Output: ((SubPlan 1)) + -> Sort + Output: ((SubPlan 1)) + Sort Key: ((SubPlan 1)) + -> Aggregate + Output: (SubPlan 1) + -> Foreign Scan on public.fdw137_t2 t2 + Output: t2._id, t2.c1, t2.c2, t2.c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + SubPlan 1 + -> Foreign Scan on public.fdw137_t1 t1 + Output: count(*) FILTER (WHERE ((t2.c1 = 20) AND (t2.c1 < 30))) + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(14 rows) + +SELECT DISTINCT (SELECT count(*) filter (WHERE t2.c1 = 20 and t2.c1 < 30) FROM fdw137_t1 t1 WHERE t1.c1 = 500) FROM fdw137_t2 t2 ORDER BY 1; + count +------- + 1 +(1 row) + +-- Inner query is aggregation query +EXPLAIN (VERBOSE, COSTS OFF) +SELECT DISTINCT (SELECT count(t1.c1) filter (WHERE t2.c1 = 20 and t2.c1 < 30) FROM fdw137_t1 t1 WHERE t1.c1 > 600) FROM fdw137_t2 t2 ORDER BY 1; + QUERY PLAN +---------------------------------------------------------------------------------------------------- + Unique + Output: ((SubPlan 1)) + -> Sort + Output: ((SubPlan 1)) + Sort Key: ((SubPlan 1)) + -> Foreign Scan on public.fdw137_t2 t2 + Output: (SubPlan 1) + Foreign Namespace: mongo_fdw_regress.test_tbl2 + SubPlan 1 + -> Aggregate + Output: count(t1.c1) FILTER (WHERE ((t2.c1 = 20) AND (t2.c1 < 30))) + -> Foreign Scan on public.fdw137_t1 t1 + Output: t1._id, t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(14 rows) + +SELECT DISTINCT (SELECT count(t1.c1) filter (WHERE t2.c1 = 20 and t2.c1 < 30) FROM fdw137_t1 t1 WHERE t1.c1 > 600) FROM fdw137_t2 t2 ORDER BY 1; + count +------- + 0 + 10 +(2 rows) + +-- Ordered-sets within aggregate, not pushed down. 
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, rank('10'::varchar) within group (ORDER BY c3), percentile_cont(c8/200::numeric) within group (ORDER BY c1) FROM fdw137_t1 GROUP BY c8 HAVING percentile_cont(c8/200::numeric) within group (ORDER BY c1) < 500 ORDER BY c8; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: c8, rank('10'::bpchar) WITHIN GROUP (ORDER BY c3), percentile_cont((((c8)::numeric / '200'::numeric))::double precision) WITHIN GROUP (ORDER BY ((c1)::double precision)) + Group Key: fdw137_t1.c8 + Filter: (percentile_cont((((fdw137_t1.c8)::numeric / '200'::numeric))::double precision) WITHIN GROUP (ORDER BY ((fdw137_t1.c1)::double precision)) < '500'::double precision) + -> Sort + Output: c8, c3, c1 + Sort Key: fdw137_t1.c8 + -> Foreign Scan on public.fdw137_t1 + Output: c8, c3, c1 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT c8, rank('10'::varchar) within group (ORDER BY c3), percentile_cont(c8/200::numeric) within group (ORDER BY c1) FROM fdw137_t1 GROUP BY c8 HAVING percentile_cont(c8/200::numeric) within group (ORDER BY c1) < 500 ORDER BY c8; + c8 | rank | percentile_cont +----+------+----------------- + 20 | 1 | 220 + 30 | 1 | 275 +(2 rows) + +-- Subquery in FROM clause HAVING aggregate +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*), x.b FROM fdw137_t1, (SELECT c1 a, sum(c1) b FROM fdw137_t2 GROUP BY c1) x WHERE fdw137_t1.c8 = x.a GROUP BY x.b ORDER BY 1, 2; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: (count(*)), x.b + Sort Key: (count(*)), x.b + -> HashAggregate + Output: count(*), x.b + Group Key: x.b + -> Hash Join + Output: x.b + Inner Unique: true + Hash Cond: (fdw137_t1.c8 = x.a) + -> Foreign Scan on public.fdw137_t1 + Output: fdw137_t1._id, fdw137_t1.c1, fdw137_t1.c2, fdw137_t1.c3, fdw137_t1.c4, fdw137_t1.c5, fdw137_t1.c6, fdw137_t1.c7, fdw137_t1.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + Output: x.b, x.a + -> Subquery Scan on x + Output: x.b, x.a + -> Foreign Scan + Output: fdw137_t2.c1, (sum(fdw137_t2.c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(20 rows) + +SELECT count(*), x.b FROM fdw137_t1, (SELECT c1 a, sum(c1) b FROM fdw137_t2 GROUP BY c1) x WHERE fdw137_t1.c8 = x.a GROUP BY x.b ORDER BY 1, 2; + count | b +-------+---- + 3 | 10 + 5 | 20 + 6 | 30 +(3 rows) + +-- Join with IS NULL check in HAVING +EXPLAIN (VERBOSE, COSTS OFF) +SELECT avg(t1.c1), sum(t2.c1) FROM fdw137_t1 t1 join fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t2.c1 HAVING avg(t1.c1) is null ORDER BY 1 nulls last, 2; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------ + Sort + Output: (avg(t1.c1)), (sum(t2.c1)), t2.c1 + Sort Key: (avg(t1.c1)), (sum(t2.c1)) + -> Foreign Scan + Output: (avg(t1.c1)), (sum(t2.c1)), t2.c1 + Filter: ((avg(t1.c1)) IS NULL) + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2)) +(7 rows) + +SELECT avg(t1.c1), sum(t2.c1) FROM fdw137_t1 t1 join fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t2.c1 HAVING avg(t1.c1) is null ORDER BY 1 nulls last, 2; + avg | sum +-----+----- +(0 rows) + +-- ORDER BY expression is part of the target list but not pushed down to +-- 
foreign server. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1) * (random() <= 1)::int AS sum FROM fdw137_t1 ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------------------------------- + Sort + Output: (((sum(c1)) * ((random() <= '1'::double precision))::integer)) + Sort Key: (((sum(fdw137_t1.c1)) * ((random() <= '1'::double precision))::integer)) + -> Foreign Scan + Output: ((sum(c1)) * ((random() <= '1'::double precision))::integer) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT sum(c1) * (random() <= 1)::int AS sum FROM fdw137_t1 ORDER BY 1; + sum +------- + 13600 +(1 row) + +-- LATERAL join, with parameterization +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, sum FROM fdw137_t1 t1, lateral (SELECT sum(t2.c1) sum FROM fdw137_t2 t2 GROUP BY t2.c1) qry WHERE t1.c8 * 2 = qry.sum ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Sort + Output: t1.c8, qry.sum + Sort Key: t1.c8 + -> Hash Join + Output: t1.c8, qry.sum + Hash Cond: ((t1.c8 * 2) = qry.sum) + -> Foreign Scan on public.fdw137_t1 t1 + Output: t1._id, t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + Output: qry.sum + -> Subquery Scan on qry + Output: qry.sum + -> Foreign Scan + Output: (sum(t2.c1)), t2.c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 t2) +(16 rows) + +-- Check with placeHolderVars +EXPLAIN (VERBOSE, COSTS OFF) +SELECT q.b, count(fdw137_t1.c1), sum(q.a) FROM fdw137_t1 left join (SELECT min(13), avg(fdw137_t1.c1), sum(fdw137_t2.c1) FROM fdw137_t1 right join fdw137_t2 ON (fdw137_t1.c8 = fdw137_t2.c1) WHERE fdw137_t1.c8 = 20) q(a, b, c) ON (fdw137_t1.c8 = q.b) WHERE fdw137_t1.c1 between 100 and 500 GROUP BY q.b ORDER BY 1 nulls last, 2; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: q.b, (count(fdw137_t1.c1)), (sum(q.a)) + Sort Key: q.b, (count(fdw137_t1.c1)) + -> GroupAggregate + Output: q.b, count(fdw137_t1.c1), sum(q.a) + Group Key: q.b + -> Sort + Output: q.b, fdw137_t1.c1, q.a + Sort Key: q.b + -> Hash Left Join + Output: q.b, fdw137_t1.c1, q.a + Inner Unique: true + Hash Cond: ((fdw137_t1.c8)::numeric = q.b) + -> Foreign Scan on public.fdw137_t1 + Output: fdw137_t1._id, fdw137_t1.c1, fdw137_t1.c2, fdw137_t1.c3, fdw137_t1.c4, fdw137_t1.c5, fdw137_t1.c6, fdw137_t1.c7, fdw137_t1.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + Output: q.b, q.a + -> Subquery Scan on q + Output: q.b, q.a + -> Aggregate + Output: min(13), avg(fdw137_t1_1.c1), NULL::bigint + -> Foreign Scan + Output: fdw137_t1_1.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 fdw137_t1) INNER JOIN (mongo_fdw_regress.test_tbl2 fdw137_t2) +(25 rows) + +SELECT q.b, count(fdw137_t1.c1), sum(q.a) FROM fdw137_t1 left join (SELECT min(13), avg(fdw137_t1.c1), sum(fdw137_t2.c1) FROM fdw137_t1 right join fdw137_t2 ON (fdw137_t1.c8 = fdw137_t2.c1) WHERE fdw137_t1.c8 = 20) q(a, b, c) ON (fdw137_t1.c8 = q.b) WHERE fdw137_t1.c1 between 100 and 500 GROUP BY q.b ORDER BY 1 nulls last, 2; + b | count | sum +---+-------+----- + | 5 | +(1 row) + +-- Not supported cases +-- The COUNT of column +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(c8) FROM fdw137_t1 ; + QUERY PLAN +-------------------------------------------------------- + Aggregate + Output: count(c8) + -> Foreign Scan on 
public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(5 rows) + +SELECT count(c8) FROM fdw137_t1 ; + count +------- + 15 +(1 row) + +-- Grouping sets +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, sum(c1) FROM fdw137_t1 WHERE c8 > 10 GROUP BY rollup(c8) ORDER BY 1 nulls last; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c8, (sum(c1)) + Sort Key: fdw137_t1.c8 + -> MixedAggregate + Output: c8, sum(c1) + Hash Key: fdw137_t1.c8 + Group Key: () + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT c8, sum(c1) FROM fdw137_t1 WHERE c8 > 10 GROUP BY rollup(c8) ORDER BY 1 nulls last; + c8 | sum +----+------ + 20 | 3700 + 30 | 3800 + 60 | 1500 + | 9000 +(4 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, sum(c1) FROM fdw137_t1 WHERE c8 > 3 GROUP BY cube(c8) ORDER BY 1 nulls last; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c8, (sum(c1)) + Sort Key: fdw137_t1.c8 + -> MixedAggregate + Output: c8, sum(c1) + Hash Key: fdw137_t1.c8 + Group Key: () + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT c8, sum(c1) FROM fdw137_t1 WHERE c8 > 3 GROUP BY cube(c8) ORDER BY 1 nulls last; + c8 | sum +----+------- + 10 | 3000 + 20 | 3700 + 30 | 3800 + 60 | 1500 + | 12000 +(5 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, c4, sum(c1) FROM fdw137_t1 WHERE c8 > 20 GROUP BY grouping sets(c8, c4) ORDER BY 1 nulls last, 2 nulls last; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c8, c4, (sum(c1)) + Sort Key: fdw137_t1.c8, fdw137_t1.c4 + -> HashAggregate + Output: c8, c4, sum(c1) + Hash Key: fdw137_t1.c8 + Hash Key: fdw137_t1.c4 + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT c8, c4, sum(c1) FROM fdw137_t1 WHERE c8 > 20 GROUP BY grouping sets(c8, c4) ORDER BY 1 nulls last, 2 nulls last; + c8 | c4 | sum +----+------+------ + 30 | | 3800 + 60 | | 1500 + | 600 | 3200 + | 900 | 600 + | 1300 | 1500 +(5 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, sum(c1), grouping(c8) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1 nulls last; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c8, (sum(c1)), (GROUPING(c8)) + Sort Key: fdw137_t1.c8 + -> HashAggregate + Output: c8, sum(c1), GROUPING(c8) + Group Key: fdw137_t1.c8 + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(9 rows) + +SELECT c8, sum(c1), grouping(c8) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1 nulls last; + c8 | sum | grouping +----+------+---------- + 20 | 3700 | 0 + 30 | 3800 | 0 + 60 | 1500 | 0 +(3 rows) + +-- DISTINCT itself is not pushed down, whereas underneath aggregate is pushed +EXPLAIN (VERBOSE, COSTS OFF) +SELECT DISTINCT sum(c1) s FROM fdw137_t1 WHERE c1 > 1000 GROUP BY c1 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------------- + Unique + Output: (sum(c1)), c1 + -> Sort + Output: (sum(c1)), c1 + Sort Key: (sum(fdw137_t1.c1)) + -> Foreign Scan + Output: (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(8 rows) + +SELECT 
DISTINCT sum(c1) s FROM fdw137_t1 WHERE c1 > 1000 GROUP BY c1 ORDER BY 1; + s +------ + 1100 + 1200 + 1300 + 1400 + 1500 + 1600 +(6 rows) + +-- WindowAgg +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, sum(c8), count(c8) over (partition by c8%2) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------------------- + Sort + Output: c8, (sum(c8)), (count(c8) OVER (?)), ((c8 % 2)) + Sort Key: fdw137_t1.c8 + -> WindowAgg + Output: c8, (sum(c8)), count(c8) OVER (?), ((c8 % 2)) + -> Sort + Output: c8, ((c8 % 2)), (sum(c8)) + Sort Key: ((fdw137_t1.c8 % 2)) + -> Foreign Scan + Output: c8, (c8 % 2), (sum(c8)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(11 rows) + +SELECT c8, sum(c8), count(c8) over (partition by c8%2) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; + c8 | sum | count +----+-----+------- + 20 | 100 | 3 + 30 | 180 | 3 + 60 | 60 | 3 +(3 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, array_agg(c8) over (partition by c8%2 ORDER BY c8 desc) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------------------- + Sort + Output: c8, (array_agg(c8) OVER (?)), ((c8 % 2)) + Sort Key: fdw137_t1.c8 + -> WindowAgg + Output: c8, array_agg(c8) OVER (?), ((c8 % 2)) + -> Sort + Output: c8, ((c8 % 2)) + Sort Key: ((fdw137_t1.c8 % 2)), fdw137_t1.c8 DESC + -> Foreign Scan + Output: c8, (c8 % 2) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(11 rows) + +SELECT c8, array_agg(c8) over (partition by c8%2 ORDER BY c8 desc) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; + c8 | array_agg +----+------------ + 20 | {60,30,20} + 30 | {60,30} + 60 | {60} +(3 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, array_agg(c8) over (partition by c8%2 ORDER BY c8 range between current row and unbounded following) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------------------- + Sort + Output: c8, (array_agg(c8) OVER (?)), ((c8 % 2)) + Sort Key: fdw137_t1.c8 + -> WindowAgg + Output: c8, array_agg(c8) OVER (?), ((c8 % 2)) + -> Sort + Output: c8, ((c8 % 2)) + Sort Key: ((fdw137_t1.c8 % 2)), fdw137_t1.c8 + -> Foreign Scan + Output: c8, (c8 % 2) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(11 rows) + +SELECT c8, array_agg(c8) over (partition by c8%2 ORDER BY c8 range between current row and unbounded following) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; + c8 | array_agg +----+------------ + 20 | {20,30,60} + 30 | {30,60} + 60 | {60} +(3 rows) + +-- User defined function for user defined aggregate, VARIADIC +CREATE FUNCTION least_accum(anyelement, variadic anyarray) +returns anyelement language sql AS + 'SELECT least($1, min($2[i])) FROM generate_subscripts($2,2) g(i)'; +CREATE aggregate least_agg(variadic items anyarray) ( + stype = anyelement, sfunc = least_accum +); +-- Not pushed down due to user defined aggregate +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, least_agg(c1) FROM fdw137_t1 GROUP BY c2 ORDER BY c2; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c2, (least_agg(VARIADIC ARRAY[c1])) + Sort Key: fdw137_t1.c2 + -> HashAggregate + Output: c2, least_agg(VARIADIC ARRAY[c1]) + Group Key: fdw137_t1.c2 + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign 
Namespace: mongo_fdw_regress.test_tbl1 +(9 rows) + +SELECT c2, least_agg(c1) FROM fdw137_t1 GROUP BY c2 ORDER BY c2; + c2 | least_agg +-------+----------- + EMP1 | + EMP10 | + EMP11 | + EMP12 | + EMP13 | + EMP14 | + EMP15 | + EMP16 | + EMP2 | + EMP3 | + EMP4 | + EMP5 | + EMP6 | + EMP7 | + EMP8 | + EMP9 | +(16 rows) + +-- Test partition-wise aggregate +SET enable_partitionwise_aggregate TO ON; +-- Create the partition tables +CREATE TABLE fprt1 (_id NAME, c1 INTEGER, c2 INTEGER, c3 TEXT) PARTITION BY RANGE(c1); +CREATE FOREIGN TABLE ftprt1_p1 PARTITION OF fprt1 FOR VALUES FROM (1) TO (4) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test1'); +CREATE FOREIGN TABLE ftprt1_p2 PARTITION OF fprt1 FOR VALUES FROM (5) TO (8) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test2'); +-- Plan with partitionwise aggregates is enabled +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, sum(c1) FROM fprt1 GROUP BY c1 ORDER BY 2; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Output: ftprt1_p1.c1, (sum(ftprt1_p1.c1)) + Sort Key: (sum(ftprt1_p1.c1)) + -> Append + -> Foreign Scan + Output: ftprt1_p1.c1, (sum(ftprt1_p1.c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test1 fprt1) + -> Foreign Scan + Output: ftprt1_p2.c1, (sum(ftprt1_p2.c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test2 fprt1) +(10 rows) + +SELECT c1, sum(c1) FROM fprt1 GROUP BY c1 ORDER BY 2; + c1 | sum +----+----- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + 8 | 8 +(8 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, sum(c2), min(c2), count(*) FROM fprt1 GROUP BY c1 HAVING avg(c2) < 22 ORDER BY 2; + QUERY PLAN +------------------------------------------------------------------------------------------ + Sort + Output: ftprt1_p1.c1, (sum(ftprt1_p1.c2)), (min(ftprt1_p1.c2)), (count(*)) + Sort Key: (sum(ftprt1_p1.c2)) + -> Append + -> Foreign Scan + Output: ftprt1_p1.c1, (sum(ftprt1_p1.c2)), (min(ftprt1_p1.c2)), (count(*)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test1 fprt1) + -> Foreign Scan + Output: ftprt1_p2.c1, (sum(ftprt1_p2.c2)), (min(ftprt1_p2.c2)), (count(*)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test2 fprt1) +(10 rows) + +SELECT c1, sum(c2), min(c2), count(*) FROM fprt1 GROUP BY c1 HAVING avg(c2) < 22 ORDER BY 2; + c1 | sum | min | count +----+-----+-----+------- + 1 | 1 | 1 | 1 + 2 | 2 | 2 | 1 + 3 | 3 | 3 | 1 + 4 | 4 | 4 | 1 + 5 | 5 | 5 | 1 + 6 | 6 | 6 | 1 + 7 | 7 | 7 | 1 + 8 | 8 | 8 | 1 +(8 rows) + +-- Check with whole-row reference +-- Should have all the columns in the target list for the given relation +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, count(t1) FROM fprt1 t1 GROUP BY c1 HAVING avg(c2) < 22 ORDER BY 1; + QUERY PLAN +---------------------------------------------------------------- + Sort + Output: t1.c1, (count(((t1.*)::fprt1))) + Sort Key: t1.c1 + -> Append + -> HashAggregate + Output: t1.c1, count(((t1.*)::fprt1)) + Group Key: t1.c1 + Filter: (avg(t1.c2) < '22'::numeric) + -> Foreign Scan on public.ftprt1_p1 t1 + Output: t1.c1, t1.*, t1.c2 + Foreign Namespace: mongo_fdw_regress.test1 + -> HashAggregate + Output: t1_1.c1, count(((t1_1.*)::fprt1)) + Group Key: t1_1.c1 + Filter: (avg(t1_1.c2) < '22'::numeric) + -> Foreign Scan on public.ftprt1_p2 t1_1 + Output: t1_1.c1, t1_1.*, t1_1.c2 + Foreign Namespace: mongo_fdw_regress.test2 +(18 rows) + +SELECT c1, count(t1) FROM fprt1 t1 GROUP BY c1 HAVING avg(c2) < 22 ORDER BY 1; + c1 | count +----+------- + 1 | 1 + 2 | 1 + 
3 | 1 + 4 | 1 + 5 | 1 + 6 | 1 + 7 | 1 + 8 | 1 +(8 rows) + +SET enable_partitionwise_aggregate TO OFF; +-- Support enable_aggregate_pushdown option at server level and table level. +-- Check only boolean values are accepted. +ALTER SERVER mongo_server OPTIONS (ADD enable_aggregate_pushdown 'non-bolean'); +ERROR: enable_aggregate_pushdown requires a Boolean value +-- Test the option at server level. +ALTER SERVER mongo_server OPTIONS (ADD enable_aggregate_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: (count(*)), c1 + Sort Key: (count(*)) + -> HashAggregate + Output: count(*), c1 + Group Key: fdw137_t1.c1 + Filter: (min(fdw137_t1.c1) > 500) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: (count(*)), c1 + Sort Key: (count(*)) + -> Foreign Scan + Output: (count(*)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +-- Test the option at table level. Setting option at table level does not +-- affect the setting at server level. +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (ADD enable_aggregate_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: (count(*)), c1 + Sort Key: (count(*)) + -> HashAggregate + Output: count(*), c1 + Group Key: fdw137_t1.c1 + Filter: (min(fdw137_t1.c1) > 500) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'false'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: (count(*)), c1 + Sort Key: (count(*)) + -> Foreign Scan + Output: (count(*)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +-- Test option for aggregation over join. Allow aggregation only if enabled for +-- both the relations involved in the join. 
+ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'false'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (ADD enable_aggregate_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------- + Sort + Output: (sum(t2.c1)), t1.c8 + Sort Key: t1.c8 + -> HashAggregate + Output: sum(t2.c1), t1.c8 + Group Key: t1.c8 + -> Foreign Scan + Output: t1.c8, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2) +(9 rows) + +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (SET enable_aggregate_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------- + Sort + Output: (sum(t2.c1)), t1.c8 + Sort Key: t1.c8 + -> HashAggregate + Output: sum(t2.c1), t1.c8 + Group Key: t1.c8 + -> Foreign Scan + Output: t1.c8, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2) +(9 rows) + +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'false'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (SET enable_aggregate_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Foreign Scan + Output: (sum(t2.c1)), t1.c8 + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(3 rows) + +-- FDW-560: Aggregation over nested join. As nested join push down is not +-- supported, aggregation shouldn't get pushdown. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) INNER JOIN fdw137_t1 t3 ON (t3.c1 = t1.c1) GROUP BY t1.c8 ORDER BY 2; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: sum(t2.c1), t1.c8 + Group Key: t1.c8 + -> Merge Left Join + Output: t1.c8, t2.c1 + Merge Cond: (t1.c8 = t2.c1) + -> Sort + Output: t1.c8 + Sort Key: t1.c8 + -> Foreign Scan + Output: t1.c8 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl1 t3) + -> Sort + Output: t2.c1 + Sort Key: t2.c1 + -> Foreign Scan on public.fdw137_t2 t2 + Output: t2.c1 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(18 rows) + +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) INNER JOIN fdw137_t1 t3 ON (t3.c1 = t1.c1) GROUP BY t1.c8 ORDER BY 2; + sum | c8 +-----+---- + 30 | 10 + 100 | 20 + 180 | 30 + | 60 + | +(5 rows) + +-- Check when enable_join_pushdown is OFF and enable_aggregate_pushdown is ON. +-- Shouldn't push down join as well as aggregation. 
+ALTER SERVER mongo_server OPTIONS (ADD enable_join_pushdown 'false'); +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2; + QUERY PLAN +-------------------------------------------------------------------- + GroupAggregate + Output: sum(t2.c1), t1.c8 + Group Key: t1.c8 + -> Merge Left Join + Output: t1.c8, t2.c1 + Merge Cond: (t1.c8 = t2.c1) + -> Sort + Output: t1.c8 + Sort Key: t1.c8 + -> Foreign Scan on public.fdw137_t1 t1 + Output: t1.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Sort + Output: t2.c1 + Sort Key: t2.c1 + -> Foreign Scan on public.fdw137_t2 t2 + Output: t2.c1 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(18 rows) + +-- FDW-134: Test with number of columns more than 32 +CREATE FOREIGN TABLE f_test_large (_id int, + a01 int, a02 int, a03 int, a04 int, a05 int, a06 int, a07 int, a08 int, a09 int, a10 int, + a11 int, a12 int, a13 int, a14 int, a15 int, a16 int, a17 int, a18 int, a19 int, a20 int, + a21 int, a22 int, a23 int, a24 int, a25 int, a26 int, a27 int, a28 int, a29 int, a30 int, + a31 int, a32 int, a33 int, a34 int, a35 int) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'mongo_test_large'); +-- Shouldn't pushdown ORDERBY clause due to exceeded number of path keys limit. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT a32, sum(a32) FROM f_test_large GROUP BY + a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, + a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, + a31, a32, a33, a34, a35 ORDER BY + a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST, + a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST, + a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST, + a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST, + a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST, + a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 ASC NULLS FIRST, + a31 ASC NULLS FIRST, a32 ASC NULLS FIRST, a33 ASC NULLS FIRST, a34 DESC NULLS LAST, a35 ASC NULLS FIRST; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: a32, (sum(a32)), 
a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, a33, a34, a35 + Sort Key: f_test_large.a01 NULLS FIRST, f_test_large.a02 NULLS FIRST, f_test_large.a03 NULLS FIRST, f_test_large.a04 NULLS FIRST, f_test_large.a05 NULLS FIRST, f_test_large.a06 NULLS FIRST, f_test_large.a07 NULLS FIRST, f_test_large.a08 NULLS FIRST, f_test_large.a09 NULLS FIRST, f_test_large.a10 NULLS FIRST, f_test_large.a11 NULLS FIRST, f_test_large.a12 NULLS FIRST, f_test_large.a13 NULLS FIRST, f_test_large.a14 NULLS FIRST, f_test_large.a15 NULLS FIRST, f_test_large.a16 NULLS FIRST, f_test_large.a17 NULLS FIRST, f_test_large.a18 NULLS FIRST, f_test_large.a19 NULLS FIRST, f_test_large.a20 NULLS FIRST, f_test_large.a21 NULLS FIRST, f_test_large.a22 NULLS FIRST, f_test_large.a23 NULLS FIRST, f_test_large.a24 NULLS FIRST, f_test_large.a25 NULLS FIRST, f_test_large.a26 NULLS FIRST, f_test_large.a27 NULLS FIRST, f_test_large.a28 NULLS FIRST, f_test_large.a29 NULLS FIRST, f_test_large.a30 NULLS FIRST, f_test_large.a31 NULLS FIRST, f_test_large.a32 NULLS FIRST, f_test_large.a33 NULLS FIRST, f_test_large.a34 DESC NULLS LAST, f_test_large.a35 NULLS FIRST + -> Foreign Scan + Output: a32, (sum(a32)), a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, a33, a34, a35 + Foreign Namespace: Aggregate on (mongo_fdw_regress.mongo_test_large f_test_large) +(6 rows) + +SELECT a32, sum(a32) FROM f_test_large GROUP BY + a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, + a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, + a31, a32, a33, a34, a35 ORDER BY + a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST, + a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST, + a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST, + a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST, + a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST, + a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 ASC NULLS FIRST, + a31 ASC NULLS FIRST, a32 ASC NULLS FIRST, a33 ASC NULLS FIRST, a34 DESC NULLS LAST, a35 ASC NULLS FIRST; + a32 | sum +-----+----- + 2 | 2 + 32 | 32 + 32 | 32 + 32 | 32 + 132 | 132 +(5 rows) + +-- Should pushdown ORDERBY clause because number of path keys are in limit. 
+EXPLAIN (VERBOSE, COSTS FALSE) +SELECT a32, sum(a32) FROM f_test_large GROUP BY + a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, + a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, + a31, a32 ORDER BY + a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST, + a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST, + a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST, + a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST, + a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST, + a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 ASC NULLS FIRST, + a31 ASC NULLS FIRST, a32 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Foreign Scan + Output: a32, (sum(a32)), a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31 + Foreign Namespace: Aggregate on (mongo_fdw_regress.mongo_test_large f_test_large) +(3 rows) + +SELECT a32, sum(a32) FROM f_test_large GROUP BY + a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, + a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, + a31, a32 ORDER BY + a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST, + a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST, + a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST, + a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST, + a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST, + a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 ASC NULLS FIRST, + a31 ASC NULLS FIRST, a32 ASC NULLS FIRST; + a32 | sum +-----+----- + 2 | 2 + 32 | 96 + 132 | 132 +(3 rows) + +-- FDW-131: Limit and offset pushdown with Aggregate pushdown. +SELECT avg(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1; + avg | c1 +-----+---- + 10 | 10 + 20 | 20 + 30 | 30 + 40 | 40 + 50 | 50 + | +(6 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT 1 OFFSET 1; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(3 rows) + +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT 1 OFFSET 1; + sum | c1 +-----+---- + 10 | 10 +(1 row) + +-- Limit 0, Offset 0 with aggregates. 
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT 0 OFFSET 0; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(3 rows) + +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT 0 OFFSET 0; + sum | c1 +-----+---- +(0 rows) + +-- Limit NULL +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT NULL OFFSET 2; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(3 rows) + +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT NULL OFFSET 2; + sum | c1 +-----+---- + 20 | 20 + 30 | 30 + 40 | 40 + 50 | 50 +(4 rows) + +-- Limit ALL +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT ALL OFFSET 2; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(3 rows) + +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT ALL OFFSET 2; + sum | c1 +-----+---- + 20 | 20 + 30 | 30 + 40 | 40 + 50 | 50 +(4 rows) + +-- Limit with -ve value. Shouldn't pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, sum(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY 1 ASC NULLS FIRST LIMIT -1; + QUERY PLAN +--------------------------------------------------------------------------------- + Limit + Output: c1, (sum(c1)) + -> Foreign Scan + Output: c1, (sum(c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(5 rows) + +-- Should throw an error. +SELECT c1, sum(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY 1 ASC NULLS FIRST LIMIT -1; +ERROR: LIMIT must not be negative +-- Offset with -ve value. Shouldn't pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, sum(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY 1 ASC NULLS FIRST OFFSET -2; + QUERY PLAN +--------------------------------------------------------------------------------- + Limit + Output: c1, (sum(c1)) + -> Foreign Scan + Output: c1, (sum(c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(5 rows) + +-- Should throw an error. +SELECT c1, sum(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY 1 ASC NULLS FIRST OFFSET -2; +ERROR: OFFSET must not be negative +-- Limit/Offset with -ve value. Shouldn't pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, avg(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT -1 OFFSET -2; + QUERY PLAN +--------------------------------------------------------------------------------- + Limit + Output: c1, (avg(c1)) + -> Foreign Scan + Output: c1, (avg(c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(5 rows) + +-- Should throw an error. +SELECT c1, avg(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT -1 OFFSET -2; +ERROR: OFFSET must not be negative +-- Limit with expression evaluating to -ve value. 
+EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, avg(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT (1 - (SELECT COUNT(*) FROM fdw137_t2)); + QUERY PLAN +----------------------------------------------------------------------------------- + Limit + Output: fdw137_t2.c1, (avg(fdw137_t2.c1)) + InitPlan 1 (returns $0) + -> Foreign Scan + Output: (count(*)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) + -> Foreign Scan + Output: fdw137_t2.c1, (avg(fdw137_t2.c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(9 rows) + +SELECT c1, avg(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT (1 - (SELECT COUNT(*) FROM fdw137_t2)); +ERROR: LIMIT must not be negative +-- FDW-559: Test mongo_fdw.enable_aggregate_pushdown GUC. +-- Check default value. Should be ON. +SHOW mongo_fdw.enable_aggregate_pushdown; + mongo_fdw.enable_aggregate_pushdown +------------------------------------- + on +(1 row) + +-- Negative testing for GUC value. +SET mongo_fdw.enable_aggregate_pushdown to 'abc'; +ERROR: parameter "mongo_fdw.enable_aggregate_pushdown" requires a Boolean value +--Disable the GUC enable_aggregate_pushdown. +SET mongo_fdw.enable_aggregate_pushdown to false; +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +-- Shouldn't pushdown aggregate because GUC is OFF. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: (count(*)), c1 + Sort Key: (count(*)) + -> HashAggregate + Output: count(*), c1 + Group Key: fdw137_t1.c1 + Filter: (min(fdw137_t1.c1) > 500) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + count +------- + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +(11 rows) + +--Enable the GUC enable_aggregate_pushdown. +SET mongo_fdw.enable_aggregate_pushdown to on; +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +-- Should pushdown aggregate because GUC is ON. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: (count(*)), c1 + Sort Key: (count(*)) + -> Foreign Scan + Output: (count(*)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + count +------- + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +(11 rows) + +-- Test for aggregation over join when server and table options for both the +-- tables is true and guc is enabled. Should pushdown. 
+SET mongo_fdw.enable_aggregate_pushdown to on; +SET mongo_fdw.enable_join_pushdown to on; +ALTER SERVER mongo_server OPTIONS (SET enable_join_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (SET enable_aggregate_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Foreign Scan + Output: (count(*)), t1.c8 + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(3 rows) + +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + count | c8 +-------+---- + 1 | + 3 | 10 + 5 | 20 + 6 | 30 + 1 | 60 +(5 rows) + +--Disable the GUC enable_join_pushdown. Shouldn't pushdown aggregate. +SET mongo_fdw.enable_join_pushdown to off; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------------------------------- + GroupAggregate + Output: count(*), t1.c8 + Group Key: t1.c8 + -> Merge Left Join + Output: t1.c8 + Merge Cond: (t1.c8 = t2.c1) + -> Foreign Scan on public.fdw137_t1 t1 + Output: t1._id, t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Sort + Output: t2.c1 + Sort Key: t2.c1 NULLS FIRST + -> Foreign Scan on public.fdw137_t2 t2 + Output: t2.c1 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(15 rows) + +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + count | c8 +-------+---- + 1 | + 3 | 10 + 5 | 20 + 6 | 30 + 1 | 60 +(5 rows) + +SET mongo_fdw.enable_join_pushdown to on; +--Disable the GUC enable_aggregate_pushdown. Shouldn't pushdown. +SET mongo_fdw.enable_aggregate_pushdown to false; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: count(*), t1.c8 + Group Key: t1.c8 + -> Foreign Scan + Output: t1.c8 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2) +(6 rows) + +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + count | c8 +-------+---- + 1 | + 3 | 10 + 5 | 20 + 6 | 30 + 1 | 60 +(5 rows) + +-- FDW-589: Test enable_order_by_pushdown option at server and table level. 
+SET mongo_fdw.enable_join_pushdown to true; +SET mongo_fdw.enable_aggregate_pushdown to true; +SET mongo_fdw.enable_order_by_pushdown to true; +ALTER SERVER mongo_server OPTIONS (ADD enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (ADD enable_join_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (ADD enable_order_by_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: c2, (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(3 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (ADD enable_join_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (ADD enable_order_by_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Foreign Scan + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(3 rows) + +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; + sum | c8 | avg +-----+----+----- + 100 | 20 | 20 + 180 | 30 | 30 + 0 | 60 | 60 +(3 rows) + +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_order_by_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: c2, (sum(c1)), c1 + Sort Key: fdw137_t1.c2 NULLS FIRST + -> Foreign Scan + Output: c2, (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Sort + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Sort Key: t1.c8 NULLS FIRST + -> Foreign Scan + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(6 rows) + 
+SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; + sum | c8 | avg +-----+----+----- + 100 | 20 | 20 + 180 | 30 | 30 + 0 | 60 | 60 +(3 rows) + +-- Test that setting option at table level does not affect the setting at +-- server level. +ALTER SERVER mongo_server OPTIONS (SET enable_order_by_pushdown 'false'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_order_by_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: c2, (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(3 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (SET enable_order_by_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Foreign Scan + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(3 rows) + +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; + sum | c8 | avg +-----+----+----- + 100 | 20 | 20 + 180 | 30 | 30 + 0 | 60 | 60 +(3 rows) + +-- When option enable_aggregate_pushdown is disabled. Shouldn't pushdown +-- aggregate as well as ORDER BY too. 
+ALTER SERVER mongo_server OPTIONS (SET enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c2, (sum(c1)), c1 + Sort Key: fdw137_t1.c2 NULLS FIRST + -> HashAggregate + Output: c2, sum(c1), c1 + Group Key: fdw137_t1.c2, fdw137_t1.c1 + Filter: (min(fdw137_t1.c1) > 500) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +-- Cleanup +DELETE FROM fdw137_t1 WHERE c8 IS NULL; +DELETE FROM fdw137_t1 WHERE c8 = 60; +DELETE FROM fdw137_t2 WHERE c1 IS NULL; +DELETE FROM fdw137_t2 WHERE c1 = 50; +DROP FOREIGN TABLE fdw137_t1; +DROP FOREIGN TABLE fdw137_t2; +DROP FOREIGN TABLE ftprt1_p1; +DROP FOREIGN TABLE ftprt1_p2; +DROP FOREIGN TABLE f_test_large; +DROP TABLE fprt1; +DROP USER MAPPING FOR public SERVER mongo_server; +DROP SERVER mongo_server; +DROP EXTENSION mongo_fdw; diff --git a/expected/aggregate_pushdown_2.out b/expected/aggregate_pushdown_2.out new file mode 100644 index 0000000..e968598 --- /dev/null +++ b/expected/aggregate_pushdown_2.out @@ -0,0 +1,1997 @@ +\set MONGO_HOST `echo \'"$MONGO_HOST"\'` +\set MONGO_PORT `echo \'"$MONGO_PORT"\'` +\set MONGO_USER_NAME `echo \'"$MONGO_USER_NAME"\'` +\set MONGO_PASS `echo \'"$MONGO_PWD"\'` +-- Before running this file user must create database mongo_fdw_regress on +-- MongoDB with all permission for MONGO_USER_NAME user with MONGO_PASS +-- password and ran mongodb_init.sh file to load collections. +\c contrib_regression +CREATE EXTENSION IF NOT EXISTS mongo_fdw; +CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw + OPTIONS (address :MONGO_HOST, port :MONGO_PORT); +CREATE USER MAPPING FOR public SERVER mongo_server; +-- Create foreign tables. +CREATE FOREIGN TABLE fdw137_t1 (_id NAME, c1 INTEGER, c2 TEXT, c3 CHAR(9), c4 INTEGER, c5 pg_catalog.Date, c6 DECIMAL, c7 INTEGER, c8 INTEGER) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl1'); +CREATE FOREIGN TABLE fdw137_t2 (_id NAME, c1 INTEGER, c2 TEXT, c3 TEXT) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl2'); +INSERT INTO fdw137_t1 VALUES (0, 1500, 'EMP15', 'FINANCE', 1300, '2000-12-25', 950.0, 400, 60); +INSERT INTO fdw137_t1 VALUES (0, 1600, 'EMP16', 'ADMIN', 600); +INSERT INTO fdw137_t2 VALUES (0, 50, 'TESTING', 'NASHIK'); +INSERT INTO fdw137_t2 VALUES (0); +-- Create local table. +CREATE TABLE fdw137_local AS + SELECT c1, c2, c3, c4, c5, c6, c7, c8 FROM fdw137_t1; +-- Simple aggregates. ORDER BY push-down not possible because only column names allowed. 
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*), sum(c1), avg(c1), min(c4), max(c1), sum(c1) * (random() <= 1)::int AS sum2 FROM fdw137_t1 WHERE c4 > 600 GROUP BY c4 ORDER BY 1 ASC NULLS FIRST, 2 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------ + Result + Output: (count(*)), (sum(c1)), (avg(c1)), (min(c4)), (max(c1)), ((sum(c1)) * ((random() <= '1'::double precision))::integer), c4 + -> Sort + Output: (count(*)), (sum(c1)), (avg(c1)), (min(c4)), (max(c1)), c4 + Sort Key: (count(*)) NULLS FIRST, (sum(fdw137_t1.c1)) NULLS FIRST + -> Foreign Scan + Output: (count(*)), (sum(c1)), (avg(c1)), (min(c4)), (max(c1)), c4 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(8 rows) + +SELECT count(*), sum(c1), avg(c1), min(c4), max(c1), sum(c1) * (random() <= 1)::int AS sum2 FROM fdw137_t1 WHERE c4 > 600 GROUP BY c4 ORDER BY 1 ASC NULLS FIRST, 2 ASC NULLS FIRST; + count | sum | avg | min | max | sum2 +-------+------+------------------+------+------+------ + 1 | 1100 | 1100 | 800 | 1100 | 1100 + 1 | 1400 | 1400 | 700 | 1400 | 1400 + 2 | 1600 | 800 | 1300 | 1500 | 1600 + 3 | 1700 | 566.666666666667 | 900 | 700 | 1700 +(4 rows) + +-- GROUP BY clause HAVING expressions +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, sum(c1), count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY c1 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: c1, (sum(c1)), (count(*)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(3 rows) + +SELECT c1, sum(c1), count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY c1 ASC NULLS FIRST; + c1 | sum | count +------+------+------- + 600 | 600 | 1 + 700 | 700 | 1 + 800 | 800 | 1 + 900 | 900 | 1 + 1000 | 1000 | 1 + 1100 | 1100 | 1 + 1200 | 1200 | 1 + 1300 | 1300 | 1 + 1400 | 1400 | 1 + 1500 | 1500 | 1 + 1600 | 1600 | 1 +(11 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, min(c2) FROM fdw137_t1 WHERE c3 = 'ADMIN' GROUP BY c8 HAVING min(c8) = 20 ORDER BY c8 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: c8, (min(c2)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(3 rows) + +SELECT c8, min(c2) FROM fdw137_t1 WHERE c3 = 'ADMIN' GROUP BY c8 HAVING min(c8) = 20 ORDER BY c8 ASC NULLS FIRST; + c8 | min +----+------ + 20 | EMP1 +(1 row) + +-- Multi-column GROUP BY clause. Push-down. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: c2, (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(3 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +-- With ORDER BY pushdown disabled. 
+SET mongo_fdw.enable_order_by_pushdown TO OFF; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: c2, (sum(c1)), c1 + Sort Key: fdw137_t1.c2 NULLS FIRST + -> Foreign Scan + Output: c2, (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +SET mongo_fdw.enable_order_by_pushdown TO ON; +-- Aggregation on expression. Don't push-down. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, sum(c1+2) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY c1 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------- + GroupAggregate + Output: c1, sum((c1 + 2)) + Group Key: fdw137_t1.c1 + Filter: (min(fdw137_t1.c1) > 500) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(7 rows) + +SELECT c1, sum(c1+2) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY c1 ASC NULLS FIRST; + c1 | sum +------+------ + 600 | 602 + 700 | 702 + 800 | 802 + 900 | 902 + 1000 | 1002 + 1100 | 1102 + 1200 | 1202 + 1300 | 1302 + 1400 | 1402 + 1500 | 1502 + 1600 | 1602 +(11 rows) + +-- Aggregate with unshippable GROUP BY clause are not pushed +EXPLAIN (VERBOSE, COSTS OFF) +SELECT avg(c4) FROM fdw137_t1 GROUP BY c4 * (random() <= 1)::int ORDER BY 1; + QUERY PLAN +------------------------------------------------------------------------------------ + Sort + Output: (avg(c4)), ((c4 * ((random() <= '1'::double precision))::integer)) + Sort Key: (avg(fdw137_t1.c4)) + -> HashAggregate + Output: avg(c4), ((c4 * ((random() <= '1'::double precision))::integer)) + Group Key: (fdw137_t1.c4 * ((random() <= '1'::double precision))::integer) + -> Foreign Scan on public.fdw137_t1 + Output: (c4 * ((random() <= '1'::double precision))::integer), c4 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(9 rows) + +SELECT avg(c4) FROM fdw137_t1 GROUP BY c4 * (random() <= 1)::int ORDER BY 1; + avg +----------------------- + 400.0000000000000000 + 600.0000000000000000 + 700.0000000000000000 + 800.0000000000000000 + 900.0000000000000000 + 1300.0000000000000000 + +(7 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, sum(c1) FROM fdw137_t1 GROUP BY c1 HAVING min(c1 * 3) > 500 ORDER BY c1; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c1, (sum(c1)) + Sort Key: fdw137_t1.c1 + -> HashAggregate + Output: c1, sum(c1) + Group Key: fdw137_t1.c1 + Filter: (min((fdw137_t1.c1 * 3)) > 500) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT c1, sum(c1) FROM fdw137_t1 GROUP BY c1 HAVING min(c1 * 3) > 500 ORDER BY c1; + c1 | sum +------+------ + 200 | 200 + 300 | 300 + 400 | 400 + 500 | 500 + 600 | 600 + 700 | 700 + 800 | 800 + 900 | 900 + 1000 | 1000 + 1100 | 1100 + 1200 | 1200 + 1300 | 1300 + 1400 | 1400 + 1500 | 1500 + 1600 | 1600 +(15 rows) + +-- FDW-134: Test ORDER BY with COLLATE. 
Shouldn't push-down +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY c2 COLLATE "en_US" ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: c2, (sum(c1)), ((c2)::text), c1 + Sort Key: fdw137_t1.c2 COLLATE "en_US" NULLS FIRST + -> Foreign Scan + Output: c2, (sum(c1)), c2, c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY c2 COLLATE "en_US" ASC NULLS FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +-- Using expressions in HAVING clause. Pushed down. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c3, count(*) FROM fdw137_t1 GROUP BY c3 HAVING abs(max(c8)) = abs(10) ORDER BY 1, 2; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: c3, (count(*)) + Sort Key: fdw137_t1.c3, (count(*)) + -> Foreign Scan + Output: c3, (count(*)) + Filter: (abs((max(fdw137_t1.c8))) = 10) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(7 rows) + +SELECT c3, count(*) FROM fdw137_t1 GROUP BY c3 HAVING abs(max(c8)) = abs(10) ORDER BY 1, 2; + c3 | count +-----------+------- + HEAD | 1 +(1 row) + +-- Unshippable HAVING clause will be evaluated locally, and other qual in HAVING clause is pushed down +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*) FROM (SELECT c3, count(c1) FROM fdw137_t1 GROUP BY c3 HAVING (avg(c1) / avg(c1)) * random() <= 1 and min(c1) > 100) x; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------- + Aggregate + Output: count(*) + -> Foreign Scan + Output: fdw137_t1.c3, NULL::bigint + Filter: (((((avg(fdw137_t1.c1)) / (avg(fdw137_t1.c1))))::double precision * random()) <= '1'::double precision) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT count(*) FROM (SELECT c3, count(c1) FROM fdw137_t1 GROUP BY c3 HAVING (avg(c1) / avg(c1)) * random() <= 1 and min(c1) > 100) x; + count +------- + 0 +(1 row) + +-- Aggregate over join query +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t1.c8), avg(t2.c1) FROM fdw137_t1 t1 INNER JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8%2 = 0 ORDER BY 1 DESC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------ + Sort + Output: (sum(t1.c8)), (avg(t2.c1)) + Sort Key: (sum(t1.c8)) DESC NULLS LAST + -> Foreign Scan + Output: (sum(t1.c8)), (avg(t2.c1)) + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2)) +(6 rows) + +SELECT sum(t1.c8), avg(t2.c1) FROM fdw137_t1 t1 INNER JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8%2 = 0 ORDER BY 1 DESC NULLS LAST; + sum | avg +-----+------------------ + 310 | 22.1428571428571 +(1 row) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.c1, count(*), t2.c4 FROM fdw137_t2 t1 INNER JOIN fdw137_t1 t2 ON (t1.c1 = t2.c8) GROUP BY t1.c1, t2.c4 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Foreign Scan + Output: t1.c1, (count(*)), t2.c4 + Foreign Namespace: Aggregate on 
((mongo_fdw_regress.test_tbl2 t1) INNER JOIN (mongo_fdw_regress.test_tbl1 t2)) +(3 rows) + +SELECT t1.c1, count(*), t2.c4 FROM fdw137_t2 t1 INNER JOIN fdw137_t1 t2 ON (t1.c1 = t2.c8) GROUP BY t1.c1, t2.c4 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | count | c4 +----+-------+------ + 10 | 1 | + 10 | 1 | 700 + 10 | 1 | 900 + 20 | 2 | 400 + 20 | 1 | 800 + 20 | 1 | 900 + 20 | 1 | 1300 + 30 | 5 | 600 + 30 | 1 | 900 +(9 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Foreign Scan + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(3 rows) + +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 ORDER BY 2 ASC NULLS FIRST; + sum | c8 | avg +-----+----+----- + 100 | 20 | 20 + 180 | 30 | 30 + 0 | 60 | 60 +(3 rows) + +-- With ORDER BY pushdown disabled. +SET mongo_fdw.enable_order_by_pushdown TO OFF; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Sort + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Sort Key: t1.c8 NULLS FIRST + -> Foreign Scan + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(6 rows) + +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 ORDER BY 2 ASC NULLS FIRST; + sum | c8 | avg +-----+----+----- + 100 | 20 | 20 + 180 | 30 | 30 + 0 | 60 | 60 +(3 rows) + +SET mongo_fdw.enable_order_by_pushdown TO ON; +-- Aggregate is not pushed down as aggregation contains random() +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1 * (random() <= 1)::int) AS sum, avg(c1) FROM fdw137_t1 ORDER BY 1; + QUERY PLAN +------------------------------------------------------------------------------------- + Sort + Output: (sum((c1 * ((random() <= '1'::double precision))::integer))), (avg(c1)) + Sort Key: (sum((fdw137_t1.c1 * ((random() <= '1'::double precision))::integer))) + -> Aggregate + Output: sum((c1 * ((random() <= '1'::double precision))::integer)), avg(c1) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(8 rows) + +SELECT sum(c1 * (random() <= 1)::int) AS sum, avg(c1) FROM fdw137_t1 ORDER BY 1; + sum | avg +-------+---------------------- + 13600 | 850.0000000000000000 +(1 row) + +-- Not pushed down due to local conditions present in underneath input rel +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t1.c8) FROM fdw137_t1 t1 INNER JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE ((t1.c8 * t2.c1)/(t1.c8 * t2.c1)) * random() <= 1 ORDER BY 1; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Sort + Output: (sum(t1.c8)) + Sort Key: 
(sum(t1.c8)) + -> Aggregate + Output: sum(t1.c8) + -> Foreign Scan + Output: t1.c8 + Filter: (((((t1.c8 * t2.c1) / (t1.c8 * t2.c1)))::double precision * random()) <= '1'::double precision) + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(9 rows) + +SELECT sum(t1.c8) FROM fdw137_t1 t1 INNER JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE ((t1.c8 * t2.c1)/(t1.c8 * t2.c1)) * random() <= 1 ORDER BY 1; + sum +----- + 310 +(1 row) + +-- Aggregates in subquery are pushed down. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(x.a), sum(x.a) FROM (SELECT c8 a, sum(c1) b FROM fdw137_t1 GROUP BY c8 ORDER BY 1, 2) x; + QUERY PLAN +--------------------------------------------------------------------------------------- + Aggregate + Output: count(fdw137_t1.c8), sum(fdw137_t1.c8) + -> Sort + Output: fdw137_t1.c8, (sum(fdw137_t1.c1)) + Sort Key: fdw137_t1.c8, (sum(fdw137_t1.c1)) + -> Foreign Scan + Output: fdw137_t1.c8, (sum(fdw137_t1.c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(8 rows) + +SELECT count(x.a), sum(x.a) FROM (SELECT c8 a, sum(c1) b FROM fdw137_t1 GROUP BY c8 ORDER BY 1, 2) x; + count | sum +-------+----- + 4 | 120 +(1 row) + +-- Aggregate is still pushed down by taking unshippable expression out +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c4 * (random() <= 1)::int AS sum1, sum(c1) AS sum2 FROM fdw137_t1 GROUP BY c4 ORDER BY 1, 2; + QUERY PLAN +---------------------------------------------------------------------------------------------------- + Sort + Output: ((c4 * ((random() <= '1'::double precision))::integer)), (sum(c1)), c4 + Sort Key: ((fdw137_t1.c4 * ((random() <= '1'::double precision))::integer)), (sum(fdw137_t1.c1)) + -> Foreign Scan + Output: (c4 * ((random() <= '1'::double precision))::integer), (sum(c1)), c4 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT c4 * (random() <= 1)::int AS sum1, sum(c1) AS sum2 FROM fdw137_t1 GROUP BY c4 ORDER BY 1, 2; + sum1 | sum2 +------+------ + 400 | 2100 + 600 | 4800 + 700 | 1400 + 800 | 1100 + 900 | 1700 + 1300 | 1600 + | 900 +(7 rows) + +-- Testing ORDER BY, DISTINCT, FILTER and Ordered-sets within aggregates +-- ORDER BY within aggregates (same column used to order) are not pushed +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1 ORDER BY c1) FROM fdw137_t1 WHERE c1 < 500 GROUP BY c2 ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Output: (sum(c1 ORDER BY c1)), c2 + Sort Key: (sum(fdw137_t1.c1 ORDER BY fdw137_t1.c1)) + -> GroupAggregate + Output: sum(c1 ORDER BY c1), c2 + Group Key: fdw137_t1.c2 + -> Sort + Output: c2, c1 + Sort Key: fdw137_t1.c2, fdw137_t1.c1 + -> Foreign Scan on public.fdw137_t1 + Output: c2, c1 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(12 rows) + +SELECT sum(c1 ORDER BY c1) FROM fdw137_t1 WHERE c1 < 500 GROUP BY c2 ORDER BY 1; + sum +----- + 100 + 200 + 300 + 400 +(4 rows) + +-- ORDER BY within aggregate (different column used to order also using DESC) +-- are not pushed. 
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c8 ORDER BY c1 desc) FROM fdw137_t1 WHERE c1 > 1000 and c8 > 20; + QUERY PLAN +-------------------------------------------------------------- + Aggregate + Output: sum(c8 ORDER BY c1 DESC) + -> Sort + Output: c8, c1 + Sort Key: fdw137_t1.c1 DESC + -> Foreign Scan on public.fdw137_t1 + Output: c8, c1 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(8 rows) + +SELECT sum(c8 ORDER BY c1 desc) FROM fdw137_t1 WHERE c1 > 1000 and c8 > 20; + sum +----- + 90 +(1 row) + +-- DISTINCT within aggregate. Don't push down. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(DISTINCT (c1)) FROM fdw137_t1 WHERE c4 = 600 and c1 < 500; + QUERY PLAN +-------------------------------------------------------------- + Aggregate + Output: sum(DISTINCT c1) + -> Sort + Output: c1 + Sort Key: fdw137_t1.c1 + -> Foreign Scan on public.fdw137_t1 + Output: c1 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(8 rows) + +SELECT sum(DISTINCT (c1)) FROM fdw137_t1 WHERE c4 = 600 and c1 < 500; + sum +----- + 500 +(1 row) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(DISTINCT (t1.c1)) FROM fdw137_t1 t1 join fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 < 30 GROUP BY (t2.c1) ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Sort + Output: (sum(DISTINCT t1.c1)), t2.c1 + Sort Key: (sum(DISTINCT t1.c1)) + -> GroupAggregate + Output: sum(DISTINCT t1.c1), t2.c1 + Group Key: t2.c1 + -> Sort + Output: t2.c1, t1.c1 + Sort Key: t2.c1, t1.c1 + -> Foreign Scan + Output: t2.c1, t1.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(12 rows) + +SELECT sum(DISTINCT (t1.c1)) FROM fdw137_t1 t1 join fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 < 30 GROUP BY (t2.c1) ORDER BY 1; + sum +------ + 3000 + 3700 +(2 rows) + +-- DISTINCT, ORDER BY and FILTER within aggregate, not pushed down. 
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1), sum(DISTINCT c1 ORDER BY c1) filter (WHERE c1%3 < 2), c4 FROM fdw137_t1 WHERE c4 = 600 GROUP BY c4; + QUERY PLAN +----------------------------------------------------------------------------------- + GroupAggregate + Output: sum(c1), sum(DISTINCT c1 ORDER BY c1) FILTER (WHERE ((c1 % 3) < 2)), c4 + -> Sort + Output: c1, c4 + Sort Key: fdw137_t1.c1 + -> Foreign Scan on public.fdw137_t1 + Output: c1, c4 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(8 rows) + +SELECT sum(c1), sum(DISTINCT c1 ORDER BY c1) filter (WHERE c1%3 < 2), c4 FROM fdw137_t1 WHERE c4 = 600 GROUP BY c4; + sum | sum | c4 +------+------+----- + 4800 | 4100 | 600 +(1 row) + +-- FILTER within aggregate, not pushed +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1) filter (WHERE c1 < 1000 and c4 > 500) FROM fdw137_t1 GROUP BY c4 ORDER BY 1 nulls last; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Sort + Output: (sum(c1) FILTER (WHERE ((c1 < 1000) AND (c4 > 500)))), c4 + Sort Key: (sum(fdw137_t1.c1) FILTER (WHERE ((fdw137_t1.c1 < 1000) AND (fdw137_t1.c4 > 500)))) + -> HashAggregate + Output: sum(c1) FILTER (WHERE ((c1 < 1000) AND (c4 > 500))), c4 + Group Key: fdw137_t1.c4 + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(9 rows) + +SELECT sum(c1) filter (WHERE c1 < 1000 and c4 > 500) FROM fdw137_t1 GROUP BY c4 ORDER BY 1 nulls last; + sum +------ + 100 + 1000 + 1700 + + + + +(7 rows) + +-- Outer query is aggregation query +EXPLAIN (VERBOSE, COSTS OFF) +SELECT DISTINCT (SELECT count(*) filter (WHERE t2.c1 = 20 and t2.c1 < 30) FROM fdw137_t1 t1 WHERE t1.c1 = 500) FROM fdw137_t2 t2 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------------- + Unique + Output: ((SubPlan 1)) + -> Sort + Output: ((SubPlan 1)) + Sort Key: ((SubPlan 1)) + -> Aggregate + Output: (SubPlan 1) + -> Foreign Scan on public.fdw137_t2 t2 + Output: t2._id, t2.c1, t2.c2, t2.c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + SubPlan 1 + -> Foreign Scan on public.fdw137_t1 t1 + Output: count(*) FILTER (WHERE ((t2.c1 = 20) AND (t2.c1 < 30))) + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(14 rows) + +SELECT DISTINCT (SELECT count(*) filter (WHERE t2.c1 = 20 and t2.c1 < 30) FROM fdw137_t1 t1 WHERE t1.c1 = 500) FROM fdw137_t2 t2 ORDER BY 1; + count +------- + 1 +(1 row) + +-- Inner query is aggregation query +EXPLAIN (VERBOSE, COSTS OFF) +SELECT DISTINCT (SELECT count(t1.c1) filter (WHERE t2.c1 = 20 and t2.c1 < 30) FROM fdw137_t1 t1 WHERE t1.c1 > 600) FROM fdw137_t2 t2 ORDER BY 1; + QUERY PLAN +---------------------------------------------------------------------------------------------------- + Unique + Output: ((SubPlan 1)) + -> Sort + Output: ((SubPlan 1)) + Sort Key: ((SubPlan 1)) + -> Foreign Scan on public.fdw137_t2 t2 + Output: (SubPlan 1) + Foreign Namespace: mongo_fdw_regress.test_tbl2 + SubPlan 1 + -> Aggregate + Output: count(t1.c1) FILTER (WHERE ((t2.c1 = 20) AND (t2.c1 < 30))) + -> Foreign Scan on public.fdw137_t1 t1 + Output: t1._id, t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(14 rows) + +SELECT DISTINCT (SELECT count(t1.c1) filter (WHERE t2.c1 = 20 and t2.c1 < 30) FROM fdw137_t1 t1 WHERE t1.c1 > 600) FROM fdw137_t2 t2 ORDER BY 1; + count +------- + 0 + 10 +(2 rows) + +-- Ordered-sets within aggregate, not pushed down. 
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, rank('10'::varchar) within group (ORDER BY c3), percentile_cont(c8/200::numeric) within group (ORDER BY c1) FROM fdw137_t1 GROUP BY c8 HAVING percentile_cont(c8/200::numeric) within group (ORDER BY c1) < 500 ORDER BY c8; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: c8, rank('10'::bpchar) WITHIN GROUP (ORDER BY c3), percentile_cont((((c8)::numeric / '200'::numeric))::double precision) WITHIN GROUP (ORDER BY ((c1)::double precision)) + Group Key: fdw137_t1.c8 + Filter: (percentile_cont((((fdw137_t1.c8)::numeric / '200'::numeric))::double precision) WITHIN GROUP (ORDER BY ((fdw137_t1.c1)::double precision)) < '500'::double precision) + -> Sort + Output: c8, c3, c1 + Sort Key: fdw137_t1.c8 + -> Foreign Scan on public.fdw137_t1 + Output: c8, c3, c1 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT c8, rank('10'::varchar) within group (ORDER BY c3), percentile_cont(c8/200::numeric) within group (ORDER BY c1) FROM fdw137_t1 GROUP BY c8 HAVING percentile_cont(c8/200::numeric) within group (ORDER BY c1) < 500 ORDER BY c8; + c8 | rank | percentile_cont +----+------+----------------- + 20 | 1 | 220 + 30 | 1 | 275 +(2 rows) + +-- Subquery in FROM clause HAVING aggregate +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*), x.b FROM fdw137_t1, (SELECT c1 a, sum(c1) b FROM fdw137_t2 GROUP BY c1) x WHERE fdw137_t1.c8 = x.a GROUP BY x.b ORDER BY 1, 2; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: (count(*)), x.b + Sort Key: (count(*)), x.b + -> HashAggregate + Output: count(*), x.b + Group Key: x.b + -> Hash Join + Output: x.b + Inner Unique: true + Hash Cond: (fdw137_t1.c8 = x.a) + -> Foreign Scan on public.fdw137_t1 + Output: fdw137_t1._id, fdw137_t1.c1, fdw137_t1.c2, fdw137_t1.c3, fdw137_t1.c4, fdw137_t1.c5, fdw137_t1.c6, fdw137_t1.c7, fdw137_t1.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + Output: x.b, x.a + -> Subquery Scan on x + Output: x.b, x.a + -> Foreign Scan + Output: fdw137_t2.c1, (sum(fdw137_t2.c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(20 rows) + +SELECT count(*), x.b FROM fdw137_t1, (SELECT c1 a, sum(c1) b FROM fdw137_t2 GROUP BY c1) x WHERE fdw137_t1.c8 = x.a GROUP BY x.b ORDER BY 1, 2; + count | b +-------+---- + 3 | 10 + 5 | 20 + 6 | 30 +(3 rows) + +-- Join with IS NULL check in HAVING +EXPLAIN (VERBOSE, COSTS OFF) +SELECT avg(t1.c1), sum(t2.c1) FROM fdw137_t1 t1 join fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t2.c1 HAVING avg(t1.c1) is null ORDER BY 1 nulls last, 2; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------ + Sort + Output: (avg(t1.c1)), (sum(t2.c1)), t2.c1 + Sort Key: (avg(t1.c1)), (sum(t2.c1)) + -> Foreign Scan + Output: (avg(t1.c1)), (sum(t2.c1)), t2.c1 + Filter: ((avg(t1.c1)) IS NULL) + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2)) +(7 rows) + +SELECT avg(t1.c1), sum(t2.c1) FROM fdw137_t1 t1 join fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t2.c1 HAVING avg(t1.c1) is null ORDER BY 1 nulls last, 2; + avg | sum +-----+----- +(0 rows) + +-- ORDER BY expression is part of the target list but not pushed down to +-- 
foreign server. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1) * (random() <= 1)::int AS sum FROM fdw137_t1 ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------------------------------- + Sort + Output: (((sum(c1)) * ((random() <= '1'::double precision))::integer)) + Sort Key: (((sum(fdw137_t1.c1)) * ((random() <= '1'::double precision))::integer)) + -> Foreign Scan + Output: ((sum(c1)) * ((random() <= '1'::double precision))::integer) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT sum(c1) * (random() <= 1)::int AS sum FROM fdw137_t1 ORDER BY 1; + sum +------- + 13600 +(1 row) + +-- LATERAL join, with parameterization +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, sum FROM fdw137_t1 t1, lateral (SELECT sum(t2.c1) sum FROM fdw137_t2 t2 GROUP BY t2.c1) qry WHERE t1.c8 * 2 = qry.sum ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Sort + Output: t1.c8, qry.sum + Sort Key: t1.c8 + -> Hash Join + Output: t1.c8, qry.sum + Hash Cond: ((t1.c8 * 2) = qry.sum) + -> Foreign Scan on public.fdw137_t1 t1 + Output: t1._id, t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + Output: qry.sum + -> Subquery Scan on qry + Output: qry.sum + -> Foreign Scan + Output: (sum(t2.c1)), t2.c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 t2) +(16 rows) + +-- Check with placeHolderVars +EXPLAIN (VERBOSE, COSTS OFF) +SELECT q.b, count(fdw137_t1.c1), sum(q.a) FROM fdw137_t1 left join (SELECT min(13), avg(fdw137_t1.c1), sum(fdw137_t2.c1) FROM fdw137_t1 right join fdw137_t2 ON (fdw137_t1.c8 = fdw137_t2.c1) WHERE fdw137_t1.c8 = 20) q(a, b, c) ON (fdw137_t1.c8 = q.b) WHERE fdw137_t1.c1 between 100 and 500 GROUP BY q.b ORDER BY 1 nulls last, 2; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------- + Incremental Sort + Output: q.b, (count(fdw137_t1.c1)), (sum(q.a)) + Sort Key: q.b, (count(fdw137_t1.c1)) + Presorted Key: q.b + -> GroupAggregate + Output: q.b, count(fdw137_t1.c1), sum(q.a) + Group Key: q.b + -> Sort + Output: q.b, fdw137_t1.c1, q.a + Sort Key: q.b + -> Hash Left Join + Output: q.b, fdw137_t1.c1, q.a + Inner Unique: true + Hash Cond: ((fdw137_t1.c8)::numeric = q.b) + -> Foreign Scan on public.fdw137_t1 + Output: fdw137_t1._id, fdw137_t1.c1, fdw137_t1.c2, fdw137_t1.c3, fdw137_t1.c4, fdw137_t1.c5, fdw137_t1.c6, fdw137_t1.c7, fdw137_t1.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + Output: q.b, q.a + -> Subquery Scan on q + Output: q.b, q.a + -> Aggregate + Output: min(13), avg(fdw137_t1_1.c1), NULL::bigint + -> Foreign Scan + Output: fdw137_t1_1.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 fdw137_t1) INNER JOIN (mongo_fdw_regress.test_tbl2 fdw137_t2) +(26 rows) + +SELECT q.b, count(fdw137_t1.c1), sum(q.a) FROM fdw137_t1 left join (SELECT min(13), avg(fdw137_t1.c1), sum(fdw137_t2.c1) FROM fdw137_t1 right join fdw137_t2 ON (fdw137_t1.c8 = fdw137_t2.c1) WHERE fdw137_t1.c8 = 20) q(a, b, c) ON (fdw137_t1.c8 = q.b) WHERE fdw137_t1.c1 between 100 and 500 GROUP BY q.b ORDER BY 1 nulls last, 2; + b | count | sum +---+-------+----- + | 5 | +(1 row) + +-- Not supported cases +-- The COUNT of column +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(c8) FROM fdw137_t1 ; + QUERY PLAN +-------------------------------------------------------- + Aggregate + Output: 
count(c8) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(5 rows) + +SELECT count(c8) FROM fdw137_t1 ; + count +------- + 15 +(1 row) + +-- Grouping sets +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, sum(c1) FROM fdw137_t1 WHERE c8 > 10 GROUP BY rollup(c8) ORDER BY 1 nulls last; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c8, (sum(c1)) + Sort Key: fdw137_t1.c8 + -> MixedAggregate + Output: c8, sum(c1) + Hash Key: fdw137_t1.c8 + Group Key: () + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT c8, sum(c1) FROM fdw137_t1 WHERE c8 > 10 GROUP BY rollup(c8) ORDER BY 1 nulls last; + c8 | sum +----+------ + 20 | 3700 + 30 | 3800 + 60 | 1500 + | 9000 +(4 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, sum(c1) FROM fdw137_t1 WHERE c8 > 3 GROUP BY cube(c8) ORDER BY 1 nulls last; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c8, (sum(c1)) + Sort Key: fdw137_t1.c8 + -> MixedAggregate + Output: c8, sum(c1) + Hash Key: fdw137_t1.c8 + Group Key: () + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT c8, sum(c1) FROM fdw137_t1 WHERE c8 > 3 GROUP BY cube(c8) ORDER BY 1 nulls last; + c8 | sum +----+------- + 10 | 3000 + 20 | 3700 + 30 | 3800 + 60 | 1500 + | 12000 +(5 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, c4, sum(c1) FROM fdw137_t1 WHERE c8 > 20 GROUP BY grouping sets(c8, c4) ORDER BY 1 nulls last, 2 nulls last; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c8, c4, (sum(c1)) + Sort Key: fdw137_t1.c8, fdw137_t1.c4 + -> HashAggregate + Output: c8, c4, sum(c1) + Hash Key: fdw137_t1.c8 + Hash Key: fdw137_t1.c4 + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT c8, c4, sum(c1) FROM fdw137_t1 WHERE c8 > 20 GROUP BY grouping sets(c8, c4) ORDER BY 1 nulls last, 2 nulls last; + c8 | c4 | sum +----+------+------ + 30 | | 3800 + 60 | | 1500 + | 600 | 3200 + | 900 | 600 + | 1300 | 1500 +(5 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, sum(c1), grouping(c8) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1 nulls last; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c8, (sum(c1)), (GROUPING(c8)) + Sort Key: fdw137_t1.c8 + -> HashAggregate + Output: c8, sum(c1), GROUPING(c8) + Group Key: fdw137_t1.c8 + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(9 rows) + +SELECT c8, sum(c1), grouping(c8) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1 nulls last; + c8 | sum | grouping +----+------+---------- + 20 | 3700 | 0 + 30 | 3800 | 0 + 60 | 1500 | 0 +(3 rows) + +-- DISTINCT itself is not pushed down, whereas underneath aggregate is pushed +EXPLAIN (VERBOSE, COSTS OFF) +SELECT DISTINCT sum(c1) s FROM fdw137_t1 WHERE c1 > 1000 GROUP BY c1 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------------- + Unique + Output: (sum(c1)), c1 + -> Sort + Output: (sum(c1)), c1 + Sort Key: (sum(fdw137_t1.c1)) + -> Foreign Scan + Output: (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 
fdw137_t1) +(8 rows) + +SELECT DISTINCT sum(c1) s FROM fdw137_t1 WHERE c1 > 1000 GROUP BY c1 ORDER BY 1; + s +------ + 1100 + 1200 + 1300 + 1400 + 1500 + 1600 +(6 rows) + +-- WindowAgg +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, sum(c8), count(c8) over (partition by c8%2) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------------------- + Sort + Output: c8, (sum(c8)), (count(c8) OVER (?)), ((c8 % 2)) + Sort Key: fdw137_t1.c8 + -> WindowAgg + Output: c8, (sum(c8)), count(c8) OVER (?), ((c8 % 2)) + -> Sort + Output: c8, ((c8 % 2)), (sum(c8)) + Sort Key: ((fdw137_t1.c8 % 2)) + -> Foreign Scan + Output: c8, (c8 % 2), (sum(c8)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(11 rows) + +SELECT c8, sum(c8), count(c8) over (partition by c8%2) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; + c8 | sum | count +----+-----+------- + 20 | 100 | 3 + 30 | 180 | 3 + 60 | 60 | 3 +(3 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, array_agg(c8) over (partition by c8%2 ORDER BY c8 desc) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------------------- + Sort + Output: c8, (array_agg(c8) OVER (?)), ((c8 % 2)) + Sort Key: fdw137_t1.c8 + -> WindowAgg + Output: c8, array_agg(c8) OVER (?), ((c8 % 2)) + -> Sort + Output: c8, ((c8 % 2)) + Sort Key: ((fdw137_t1.c8 % 2)), fdw137_t1.c8 DESC + -> Foreign Scan + Output: c8, (c8 % 2) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(11 rows) + +SELECT c8, array_agg(c8) over (partition by c8%2 ORDER BY c8 desc) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; + c8 | array_agg +----+------------ + 20 | {60,30,20} + 30 | {60,30} + 60 | {60} +(3 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, array_agg(c8) over (partition by c8%2 ORDER BY c8 range between current row and unbounded following) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------------------- + Sort + Output: c8, (array_agg(c8) OVER (?)), ((c8 % 2)) + Sort Key: fdw137_t1.c8 + -> WindowAgg + Output: c8, array_agg(c8) OVER (?), ((c8 % 2)) + -> Sort + Output: c8, ((c8 % 2)) + Sort Key: ((fdw137_t1.c8 % 2)), fdw137_t1.c8 + -> Foreign Scan + Output: c8, (c8 % 2) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(11 rows) + +SELECT c8, array_agg(c8) over (partition by c8%2 ORDER BY c8 range between current row and unbounded following) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; + c8 | array_agg +----+------------ + 20 | {20,30,60} + 30 | {30,60} + 60 | {60} +(3 rows) + +-- User defined function for user defined aggregate, VARIADIC +CREATE FUNCTION least_accum(anyelement, variadic anyarray) +returns anyelement language sql AS + 'SELECT least($1, min($2[i])) FROM generate_subscripts($2,2) g(i)'; +CREATE aggregate least_agg(variadic items anyarray) ( + stype = anyelement, sfunc = least_accum +); +-- Not pushed down due to user defined aggregate +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, least_agg(c1) FROM fdw137_t1 GROUP BY c2 ORDER BY c2; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c2, (least_agg(VARIADIC ARRAY[c1])) + Sort Key: fdw137_t1.c2 + -> HashAggregate + Output: c2, least_agg(VARIADIC ARRAY[c1]) + Group Key: fdw137_t1.c2 + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, 
c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(9 rows) + +SELECT c2, least_agg(c1) FROM fdw137_t1 GROUP BY c2 ORDER BY c2; + c2 | least_agg +-------+----------- + EMP1 | + EMP10 | + EMP11 | + EMP12 | + EMP13 | + EMP14 | + EMP15 | + EMP16 | + EMP2 | + EMP3 | + EMP4 | + EMP5 | + EMP6 | + EMP7 | + EMP8 | + EMP9 | +(16 rows) + +-- Test partition-wise aggregate +SET enable_partitionwise_aggregate TO ON; +-- Create the partition tables +CREATE TABLE fprt1 (_id NAME, c1 INTEGER, c2 INTEGER, c3 TEXT) PARTITION BY RANGE(c1); +CREATE FOREIGN TABLE ftprt1_p1 PARTITION OF fprt1 FOR VALUES FROM (1) TO (4) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test1'); +CREATE FOREIGN TABLE ftprt1_p2 PARTITION OF fprt1 FOR VALUES FROM (5) TO (8) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test2'); +-- Plan with partitionwise aggregates is enabled +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, sum(c1) FROM fprt1 GROUP BY c1 ORDER BY 2; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Output: fprt1.c1, (sum(fprt1.c1)) + Sort Key: (sum(fprt1.c1)) + -> Append + -> Foreign Scan + Output: fprt1.c1, (sum(fprt1.c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test1 fprt1) + -> Foreign Scan + Output: fprt1_1.c1, (sum(fprt1_1.c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test2 fprt1) +(10 rows) + +SELECT c1, sum(c1) FROM fprt1 GROUP BY c1 ORDER BY 2; + c1 | sum +----+----- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + 8 | 8 +(8 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, sum(c2), min(c2), count(*) FROM fprt1 GROUP BY c1 HAVING avg(c2) < 22 ORDER BY 2; + QUERY PLAN +------------------------------------------------------------------------------------ + Sort + Output: fprt1.c1, (sum(fprt1.c2)), (min(fprt1.c2)), (count(*)) + Sort Key: (sum(fprt1.c2)) + -> Append + -> Foreign Scan + Output: fprt1.c1, (sum(fprt1.c2)), (min(fprt1.c2)), (count(*)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test1 fprt1) + -> Foreign Scan + Output: fprt1_1.c1, (sum(fprt1_1.c2)), (min(fprt1_1.c2)), (count(*)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test2 fprt1) +(10 rows) + +SELECT c1, sum(c2), min(c2), count(*) FROM fprt1 GROUP BY c1 HAVING avg(c2) < 22 ORDER BY 2; + c1 | sum | min | count +----+-----+-----+------- + 1 | 1 | 1 | 1 + 2 | 2 | 2 | 1 + 3 | 3 | 3 | 1 + 4 | 4 | 4 | 1 + 5 | 5 | 5 | 1 + 6 | 6 | 6 | 1 + 7 | 7 | 7 | 1 + 8 | 8 | 8 | 1 +(8 rows) + +-- Check with whole-row reference +-- Should have all the columns in the target list for the given relation +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, count(t1) FROM fprt1 t1 GROUP BY c1 HAVING avg(c2) < 22 ORDER BY 1; + QUERY PLAN +---------------------------------------------------------------- + Sort + Output: t1.c1, (count(((t1.*)::fprt1))) + Sort Key: t1.c1 + -> Append + -> HashAggregate + Output: t1.c1, count(((t1.*)::fprt1)) + Group Key: t1.c1 + Filter: (avg(t1.c2) < '22'::numeric) + -> Foreign Scan on public.ftprt1_p1 t1 + Output: t1.c1, t1.*, t1.c2 + Foreign Namespace: mongo_fdw_regress.test1 + -> HashAggregate + Output: t1_1.c1, count(((t1_1.*)::fprt1)) + Group Key: t1_1.c1 + Filter: (avg(t1_1.c2) < '22'::numeric) + -> Foreign Scan on public.ftprt1_p2 t1_1 + Output: t1_1.c1, t1_1.*, t1_1.c2 + Foreign Namespace: mongo_fdw_regress.test2 +(18 rows) + +SELECT c1, count(t1) FROM fprt1 t1 GROUP BY c1 HAVING avg(c2) < 22 ORDER BY 1; + c1 | count +----+------- + 1 | 1 + 2 | 1 + 3 | 1 + 4 | 1 + 5 | 1 + 6 | 1 + 7 
| 1 + 8 | 1 +(8 rows) + +SET enable_partitionwise_aggregate TO OFF; +-- Support enable_aggregate_pushdown option at server level and table level. +-- Check only boolean values are accepted. +ALTER SERVER mongo_server OPTIONS (ADD enable_aggregate_pushdown 'non-bolean'); +ERROR: enable_aggregate_pushdown requires a Boolean value +-- Test the option at server level. +ALTER SERVER mongo_server OPTIONS (ADD enable_aggregate_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: (count(*)), c1 + Sort Key: (count(*)) + -> HashAggregate + Output: count(*), c1 + Group Key: fdw137_t1.c1 + Filter: (min(fdw137_t1.c1) > 500) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: (count(*)), c1 + Sort Key: (count(*)) + -> Foreign Scan + Output: (count(*)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +-- Test the option at table level. Setting option at table level does not +-- affect the setting at server level. +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (ADD enable_aggregate_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: (count(*)), c1 + Sort Key: (count(*)) + -> HashAggregate + Output: count(*), c1 + Group Key: fdw137_t1.c1 + Filter: (min(fdw137_t1.c1) > 500) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'false'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: (count(*)), c1 + Sort Key: (count(*)) + -> Foreign Scan + Output: (count(*)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +-- Test option for aggregation over join. Allow aggregation only if enabled for +-- both the relations involved in the join. 
+ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'false'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (ADD enable_aggregate_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------- + Sort + Output: (sum(t2.c1)), t1.c8 + Sort Key: t1.c8 + -> HashAggregate + Output: sum(t2.c1), t1.c8 + Group Key: t1.c8 + -> Foreign Scan + Output: t1.c8, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2) +(9 rows) + +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (SET enable_aggregate_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------- + Sort + Output: (sum(t2.c1)), t1.c8 + Sort Key: t1.c8 + -> HashAggregate + Output: sum(t2.c1), t1.c8 + Group Key: t1.c8 + -> Foreign Scan + Output: t1.c8, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2) +(9 rows) + +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'false'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (SET enable_aggregate_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Foreign Scan + Output: (sum(t2.c1)), t1.c8 + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(3 rows) + +-- FDW-560: Aggregation over nested join. As nested join push down is not +-- supported, aggregation shouldn't get pushdown. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) INNER JOIN fdw137_t1 t3 ON (t3.c1 = t1.c1) GROUP BY t1.c8 ORDER BY 2; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: sum(t2.c1), t1.c8 + Group Key: t1.c8 + -> Merge Left Join + Output: t1.c8, t2.c1 + Merge Cond: (t1.c8 = t2.c1) + -> Sort + Output: t1.c8 + Sort Key: t1.c8 + -> Foreign Scan + Output: t1.c8 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl1 t3) + -> Sort + Output: t2.c1 + Sort Key: t2.c1 + -> Foreign Scan on public.fdw137_t2 t2 + Output: t2.c1 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(18 rows) + +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) INNER JOIN fdw137_t1 t3 ON (t3.c1 = t1.c1) GROUP BY t1.c8 ORDER BY 2; + sum | c8 +-----+---- + 30 | 10 + 100 | 20 + 180 | 30 + | 60 + | +(5 rows) + +-- Check when enable_join_pushdown is OFF and enable_aggregate_pushdown is ON. +-- Shouldn't push down join as well as aggregation. 
+ALTER SERVER mongo_server OPTIONS (ADD enable_join_pushdown 'false'); +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2; + QUERY PLAN +-------------------------------------------------------------------- + GroupAggregate + Output: sum(t2.c1), t1.c8 + Group Key: t1.c8 + -> Merge Left Join + Output: t1.c8, t2.c1 + Merge Cond: (t1.c8 = t2.c1) + -> Sort + Output: t1.c8 + Sort Key: t1.c8 + -> Foreign Scan on public.fdw137_t1 t1 + Output: t1.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Sort + Output: t2.c1 + Sort Key: t2.c1 + -> Foreign Scan on public.fdw137_t2 t2 + Output: t2.c1 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(18 rows) + +-- FDW-134: Test with number of columns more than 32 +CREATE FOREIGN TABLE f_test_large (_id int, + a01 int, a02 int, a03 int, a04 int, a05 int, a06 int, a07 int, a08 int, a09 int, a10 int, + a11 int, a12 int, a13 int, a14 int, a15 int, a16 int, a17 int, a18 int, a19 int, a20 int, + a21 int, a22 int, a23 int, a24 int, a25 int, a26 int, a27 int, a28 int, a29 int, a30 int, + a31 int, a32 int, a33 int, a34 int, a35 int) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'mongo_test_large'); +-- Shouldn't pushdown ORDERBY clause due to exceeded number of path keys limit. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT a32, sum(a32) FROM f_test_large GROUP BY + a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, + a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, + a31, a32, a33, a34, a35 ORDER BY + a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST, + a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST, + a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST, + a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST, + a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST, + a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 ASC NULLS FIRST, + a31 ASC NULLS FIRST, a32 ASC NULLS FIRST, a33 ASC NULLS FIRST, a34 DESC NULLS LAST, a35 ASC NULLS FIRST; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: a32, (sum(a32)), 
a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, a33, a34, a35 + Sort Key: f_test_large.a01 NULLS FIRST, f_test_large.a02 NULLS FIRST, f_test_large.a03 NULLS FIRST, f_test_large.a04 NULLS FIRST, f_test_large.a05 NULLS FIRST, f_test_large.a06 NULLS FIRST, f_test_large.a07 NULLS FIRST, f_test_large.a08 NULLS FIRST, f_test_large.a09 NULLS FIRST, f_test_large.a10 NULLS FIRST, f_test_large.a11 NULLS FIRST, f_test_large.a12 NULLS FIRST, f_test_large.a13 NULLS FIRST, f_test_large.a14 NULLS FIRST, f_test_large.a15 NULLS FIRST, f_test_large.a16 NULLS FIRST, f_test_large.a17 NULLS FIRST, f_test_large.a18 NULLS FIRST, f_test_large.a19 NULLS FIRST, f_test_large.a20 NULLS FIRST, f_test_large.a21 NULLS FIRST, f_test_large.a22 NULLS FIRST, f_test_large.a23 NULLS FIRST, f_test_large.a24 NULLS FIRST, f_test_large.a25 NULLS FIRST, f_test_large.a26 NULLS FIRST, f_test_large.a27 NULLS FIRST, f_test_large.a28 NULLS FIRST, f_test_large.a29 NULLS FIRST, f_test_large.a30 NULLS FIRST, f_test_large.a31 NULLS FIRST, f_test_large.a32 NULLS FIRST, f_test_large.a33 NULLS FIRST, f_test_large.a34 DESC NULLS LAST, f_test_large.a35 NULLS FIRST + -> Foreign Scan + Output: a32, (sum(a32)), a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, a33, a34, a35 + Foreign Namespace: Aggregate on (mongo_fdw_regress.mongo_test_large f_test_large) +(6 rows) + +SELECT a32, sum(a32) FROM f_test_large GROUP BY + a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, + a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, + a31, a32, a33, a34, a35 ORDER BY + a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST, + a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST, + a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST, + a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST, + a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST, + a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 ASC NULLS FIRST, + a31 ASC NULLS FIRST, a32 ASC NULLS FIRST, a33 ASC NULLS FIRST, a34 DESC NULLS LAST, a35 ASC NULLS FIRST; + a32 | sum +-----+----- + 2 | 2 + 32 | 32 + 32 | 32 + 32 | 32 + 132 | 132 +(5 rows) + +-- Should pushdown ORDERBY clause because number of path keys are in limit. 
+EXPLAIN (VERBOSE, COSTS FALSE) +SELECT a32, sum(a32) FROM f_test_large GROUP BY + a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, + a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, + a31, a32 ORDER BY + a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST, + a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST, + a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST, + a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST, + a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST, + a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 ASC NULLS FIRST, + a31 ASC NULLS FIRST, a32 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Foreign Scan + Output: a32, (sum(a32)), a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31 + Foreign Namespace: Aggregate on (mongo_fdw_regress.mongo_test_large f_test_large) +(3 rows) + +SELECT a32, sum(a32) FROM f_test_large GROUP BY + a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, + a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, + a31, a32 ORDER BY + a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST, + a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST, + a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST, + a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST, + a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST, + a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 ASC NULLS FIRST, + a31 ASC NULLS FIRST, a32 ASC NULLS FIRST; + a32 | sum +-----+----- + 2 | 2 + 32 | 96 + 132 | 132 +(3 rows) + +-- FDW-131: Limit and offset pushdown with Aggregate pushdown. +SELECT avg(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1; + avg | c1 +-----+---- + 10 | 10 + 20 | 20 + 30 | 30 + 40 | 40 + 50 | 50 + | +(6 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT 1 OFFSET 1; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(3 rows) + +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT 1 OFFSET 1; + sum | c1 +-----+---- + 10 | 10 +(1 row) + +-- Limit 0, Offset 0 with aggregates. 
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT 0 OFFSET 0; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(3 rows) + +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT 0 OFFSET 0; + sum | c1 +-----+---- +(0 rows) + +-- Limit NULL +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT NULL OFFSET 2; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(3 rows) + +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT NULL OFFSET 2; + sum | c1 +-----+---- + 20 | 20 + 30 | 30 + 40 | 40 + 50 | 50 +(4 rows) + +-- Limit ALL +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT ALL OFFSET 2; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(3 rows) + +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT ALL OFFSET 2; + sum | c1 +-----+---- + 20 | 20 + 30 | 30 + 40 | 40 + 50 | 50 +(4 rows) + +-- Limit with -ve value. Shouldn't pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, sum(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY 1 ASC NULLS FIRST LIMIT -1; + QUERY PLAN +--------------------------------------------------------------------------------- + Limit + Output: c1, (sum(c1)) + -> Foreign Scan + Output: c1, (sum(c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(5 rows) + +-- Should throw an error. +SELECT c1, sum(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY 1 ASC NULLS FIRST LIMIT -1; +ERROR: LIMIT must not be negative +-- Offset with -ve value. Shouldn't pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, sum(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY 1 ASC NULLS FIRST OFFSET -2; + QUERY PLAN +--------------------------------------------------------------------------------- + Limit + Output: c1, (sum(c1)) + -> Foreign Scan + Output: c1, (sum(c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(5 rows) + +-- Should throw an error. +SELECT c1, sum(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY 1 ASC NULLS FIRST OFFSET -2; +ERROR: OFFSET must not be negative +-- Limit/Offset with -ve value. Shouldn't pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, avg(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT -1 OFFSET -2; + QUERY PLAN +--------------------------------------------------------------------------------- + Limit + Output: c1, (avg(c1)) + -> Foreign Scan + Output: c1, (avg(c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(5 rows) + +-- Should throw an error. +SELECT c1, avg(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT -1 OFFSET -2; +ERROR: OFFSET must not be negative +-- Limit with expression evaluating to -ve value. 
+EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, avg(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT (1 - (SELECT COUNT(*) FROM fdw137_t2)); + QUERY PLAN +----------------------------------------------------------------------------------- + Limit + Output: fdw137_t2.c1, (avg(fdw137_t2.c1)) + InitPlan 1 + -> Foreign Scan + Output: (count(*)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) + -> Foreign Scan + Output: fdw137_t2.c1, (avg(fdw137_t2.c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(9 rows) + +SELECT c1, avg(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT (1 - (SELECT COUNT(*) FROM fdw137_t2)); +ERROR: LIMIT must not be negative +-- FDW-559: Test mongo_fdw.enable_aggregate_pushdown GUC. +-- Check default value. Should be ON. +SHOW mongo_fdw.enable_aggregate_pushdown; + mongo_fdw.enable_aggregate_pushdown +------------------------------------- + on +(1 row) + +-- Negative testing for GUC value. +SET mongo_fdw.enable_aggregate_pushdown to 'abc'; +ERROR: parameter "mongo_fdw.enable_aggregate_pushdown" requires a Boolean value +--Disable the GUC enable_aggregate_pushdown. +SET mongo_fdw.enable_aggregate_pushdown to false; +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +-- Shouldn't pushdown aggregate because GUC is OFF. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: (count(*)), c1 + Sort Key: (count(*)) + -> HashAggregate + Output: count(*), c1 + Group Key: fdw137_t1.c1 + Filter: (min(fdw137_t1.c1) > 500) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + count +------- + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +(11 rows) + +--Enable the GUC enable_aggregate_pushdown. +SET mongo_fdw.enable_aggregate_pushdown to on; +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +-- Should pushdown aggregate because GUC is ON. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: (count(*)), c1 + Sort Key: (count(*)) + -> Foreign Scan + Output: (count(*)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + count +------- + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +(11 rows) + +-- Test for aggregation over join when server and table options for both the +-- tables is true and guc is enabled. Should pushdown. 
+SET mongo_fdw.enable_aggregate_pushdown to on; +SET mongo_fdw.enable_join_pushdown to on; +ALTER SERVER mongo_server OPTIONS (SET enable_join_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (SET enable_aggregate_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Foreign Scan + Output: (count(*)), t1.c8 + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(3 rows) + +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + count | c8 +-------+---- + 1 | + 3 | 10 + 5 | 20 + 6 | 30 + 1 | 60 +(5 rows) + +--Disable the GUC enable_join_pushdown. Shouldn't pushdown aggregate. +SET mongo_fdw.enable_join_pushdown to off; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------------------------------- + GroupAggregate + Output: count(*), t1.c8 + Group Key: t1.c8 + -> Merge Left Join + Output: t1.c8 + Merge Cond: (t1.c8 = t2.c1) + -> Foreign Scan on public.fdw137_t1 t1 + Output: t1._id, t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Sort + Output: t2.c1 + Sort Key: t2.c1 NULLS FIRST + -> Foreign Scan on public.fdw137_t2 t2 + Output: t2.c1 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(15 rows) + +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + count | c8 +-------+---- + 1 | + 3 | 10 + 5 | 20 + 6 | 30 + 1 | 60 +(5 rows) + +SET mongo_fdw.enable_join_pushdown to on; +--Disable the GUC enable_aggregate_pushdown. Shouldn't pushdown. +SET mongo_fdw.enable_aggregate_pushdown to false; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: count(*), t1.c8 + Group Key: t1.c8 + -> Foreign Scan + Output: t1.c8 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2) +(6 rows) + +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + count | c8 +-------+---- + 1 | + 3 | 10 + 5 | 20 + 6 | 30 + 1 | 60 +(5 rows) + +-- FDW-589: Test enable_order_by_pushdown option at server and table level. 
+SET mongo_fdw.enable_join_pushdown to true; +SET mongo_fdw.enable_aggregate_pushdown to true; +SET mongo_fdw.enable_order_by_pushdown to true; +ALTER SERVER mongo_server OPTIONS (ADD enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (ADD enable_join_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (ADD enable_order_by_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: c2, (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(3 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (ADD enable_join_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (ADD enable_order_by_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Foreign Scan + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(3 rows) + +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; + sum | c8 | avg +-----+----+----- + 100 | 20 | 20 + 180 | 30 | 30 + 0 | 60 | 60 +(3 rows) + +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_order_by_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: c2, (sum(c1)), c1 + Sort Key: fdw137_t1.c2 NULLS FIRST + -> Foreign Scan + Output: c2, (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Sort + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Sort Key: t1.c8 NULLS FIRST + -> Foreign Scan + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(6 rows) + 
+SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; + sum | c8 | avg +-----+----+----- + 100 | 20 | 20 + 180 | 30 | 30 + 0 | 60 | 60 +(3 rows) + +-- Test that setting option at table level does not affect the setting at +-- server level. +ALTER SERVER mongo_server OPTIONS (SET enable_order_by_pushdown 'false'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_order_by_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: c2, (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(3 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (SET enable_order_by_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Foreign Scan + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(3 rows) + +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; + sum | c8 | avg +-----+----+----- + 100 | 20 | 20 + 180 | 30 | 30 + 0 | 60 | 60 +(3 rows) + +-- When option enable_aggregate_pushdown is disabled. Shouldn't pushdown +-- aggregate as well as ORDER BY too. 
+ALTER SERVER mongo_server OPTIONS (SET enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c2, (sum(c1)), c1 + Sort Key: fdw137_t1.c2 NULLS FIRST + -> HashAggregate + Output: c2, sum(c1), c1 + Group Key: fdw137_t1.c2, fdw137_t1.c1 + Filter: (min(fdw137_t1.c1) > 500) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +-- Cleanup +DELETE FROM fdw137_t1 WHERE c8 IS NULL; +DELETE FROM fdw137_t1 WHERE c8 = 60; +DELETE FROM fdw137_t2 WHERE c1 IS NULL; +DELETE FROM fdw137_t2 WHERE c1 = 50; +DROP FOREIGN TABLE fdw137_t1; +DROP FOREIGN TABLE fdw137_t2; +DROP FOREIGN TABLE ftprt1_p1; +DROP FOREIGN TABLE ftprt1_p2; +DROP FOREIGN TABLE f_test_large; +DROP TABLE fprt1; +DROP USER MAPPING FOR public SERVER mongo_server; +DROP SERVER mongo_server; +DROP EXTENSION mongo_fdw; diff --git a/expected/aggregate_pushdown_3.out b/expected/aggregate_pushdown_3.out new file mode 100644 index 0000000..5d989eb --- /dev/null +++ b/expected/aggregate_pushdown_3.out @@ -0,0 +1,1997 @@ +\set MONGO_HOST `echo \'"$MONGO_HOST"\'` +\set MONGO_PORT `echo \'"$MONGO_PORT"\'` +\set MONGO_USER_NAME `echo \'"$MONGO_USER_NAME"\'` +\set MONGO_PASS `echo \'"$MONGO_PWD"\'` +-- Before running this file user must create database mongo_fdw_regress on +-- MongoDB with all permission for MONGO_USER_NAME user with MONGO_PASS +-- password and ran mongodb_init.sh file to load collections. +\c contrib_regression +CREATE EXTENSION IF NOT EXISTS mongo_fdw; +CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw + OPTIONS (address :MONGO_HOST, port :MONGO_PORT); +CREATE USER MAPPING FOR public SERVER mongo_server; +-- Create foreign tables. +CREATE FOREIGN TABLE fdw137_t1 (_id NAME, c1 INTEGER, c2 TEXT, c3 CHAR(9), c4 INTEGER, c5 pg_catalog.Date, c6 DECIMAL, c7 INTEGER, c8 INTEGER) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl1'); +CREATE FOREIGN TABLE fdw137_t2 (_id NAME, c1 INTEGER, c2 TEXT, c3 TEXT) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl2'); +INSERT INTO fdw137_t1 VALUES (0, 1500, 'EMP15', 'FINANCE', 1300, '2000-12-25', 950.0, 400, 60); +INSERT INTO fdw137_t1 VALUES (0, 1600, 'EMP16', 'ADMIN', 600); +INSERT INTO fdw137_t2 VALUES (0, 50, 'TESTING', 'NASHIK'); +INSERT INTO fdw137_t2 VALUES (0); +-- Create local table. +CREATE TABLE fdw137_local AS + SELECT c1, c2, c3, c4, c5, c6, c7, c8 FROM fdw137_t1; +-- Simple aggregates. ORDER BY push-down not possible because only column names allowed. 
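+-- Note: in the query below the ORDER BY keys are aggregate expressions
+-- (count(*) and sum(c1)) rather than plain columns, so only the aggregation is
+-- pushed down and the ordering is handled by a local Sort node, whereas the
+-- GROUP BY c1 ... ORDER BY c1 tests further below are pushed down in full.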
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*), sum(c1), avg(c1), min(c4), max(c1), sum(c1) * (random() <= 1)::int AS sum2 FROM fdw137_t1 WHERE c4 > 600 GROUP BY c4 ORDER BY 1 ASC NULLS FIRST, 2 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------ + Result + Output: (count(*)), (sum(c1)), (avg(c1)), (min(c4)), (max(c1)), ((sum(c1)) * ((random() <= '1'::double precision))::integer), c4 + -> Sort + Output: (count(*)), (sum(c1)), (avg(c1)), (min(c4)), (max(c1)), c4 + Sort Key: (count(*)) NULLS FIRST, (sum(fdw137_t1.c1)) NULLS FIRST + -> Foreign Scan + Output: (count(*)), (sum(c1)), (avg(c1)), (min(c4)), (max(c1)), c4 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(8 rows) + +SELECT count(*), sum(c1), avg(c1), min(c4), max(c1), sum(c1) * (random() <= 1)::int AS sum2 FROM fdw137_t1 WHERE c4 > 600 GROUP BY c4 ORDER BY 1 ASC NULLS FIRST, 2 ASC NULLS FIRST; + count | sum | avg | min | max | sum2 +-------+------+------------------+------+------+------ + 1 | 1100 | 1100 | 800 | 1100 | 1100 + 1 | 1400 | 1400 | 700 | 1400 | 1400 + 2 | 1600 | 800 | 1300 | 1500 | 1600 + 3 | 1700 | 566.666666666667 | 900 | 700 | 1700 +(4 rows) + +-- GROUP BY clause HAVING expressions +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, sum(c1), count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY c1 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: c1, (sum(c1)), (count(*)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(3 rows) + +SELECT c1, sum(c1), count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY c1 ASC NULLS FIRST; + c1 | sum | count +------+------+------- + 600 | 600 | 1 + 700 | 700 | 1 + 800 | 800 | 1 + 900 | 900 | 1 + 1000 | 1000 | 1 + 1100 | 1100 | 1 + 1200 | 1200 | 1 + 1300 | 1300 | 1 + 1400 | 1400 | 1 + 1500 | 1500 | 1 + 1600 | 1600 | 1 +(11 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, min(c2) FROM fdw137_t1 WHERE c3 = 'ADMIN' GROUP BY c8 HAVING min(c8) = 20 ORDER BY c8 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: c8, (min(c2)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(3 rows) + +SELECT c8, min(c2) FROM fdw137_t1 WHERE c3 = 'ADMIN' GROUP BY c8 HAVING min(c8) = 20 ORDER BY c8 ASC NULLS FIRST; + c8 | min +----+------ + 20 | EMP1 +(1 row) + +-- Multi-column GROUP BY clause. Push-down. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: c2, (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(3 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +-- With ORDER BY pushdown disabled. 
+SET mongo_fdw.enable_order_by_pushdown TO OFF; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: c2, (sum(c1)), c1 + Sort Key: fdw137_t1.c2 NULLS FIRST + -> Foreign Scan + Output: c2, (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +SET mongo_fdw.enable_order_by_pushdown TO ON; +-- Aggregation on expression. Don't push-down. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, sum(c1+2) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY c1 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------- + GroupAggregate + Output: c1, sum((c1 + 2)) + Group Key: fdw137_t1.c1 + Filter: (min(fdw137_t1.c1) > 500) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(7 rows) + +SELECT c1, sum(c1+2) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY c1 ASC NULLS FIRST; + c1 | sum +------+------ + 600 | 602 + 700 | 702 + 800 | 802 + 900 | 902 + 1000 | 1002 + 1100 | 1102 + 1200 | 1202 + 1300 | 1302 + 1400 | 1402 + 1500 | 1502 + 1600 | 1602 +(11 rows) + +-- Aggregate with unshippable GROUP BY clause are not pushed +EXPLAIN (VERBOSE, COSTS OFF) +SELECT avg(c4) FROM fdw137_t1 GROUP BY c4 * (random() <= 1)::int ORDER BY 1; + QUERY PLAN +------------------------------------------------------------------------------------ + Sort + Output: (avg(c4)), ((c4 * ((random() <= '1'::double precision))::integer)) + Sort Key: (avg(fdw137_t1.c4)) + -> HashAggregate + Output: avg(c4), ((c4 * ((random() <= '1'::double precision))::integer)) + Group Key: (fdw137_t1.c4 * ((random() <= '1'::double precision))::integer) + -> Foreign Scan on public.fdw137_t1 + Output: (c4 * ((random() <= '1'::double precision))::integer), c4 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(9 rows) + +SELECT avg(c4) FROM fdw137_t1 GROUP BY c4 * (random() <= 1)::int ORDER BY 1; + avg +----------------------- + 400.0000000000000000 + 600.0000000000000000 + 700.0000000000000000 + 800.0000000000000000 + 900.0000000000000000 + 1300.0000000000000000 + +(7 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, sum(c1) FROM fdw137_t1 GROUP BY c1 HAVING min(c1 * 3) > 500 ORDER BY c1; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c1, (sum(c1)) + Sort Key: fdw137_t1.c1 + -> HashAggregate + Output: c1, sum(c1) + Group Key: fdw137_t1.c1 + Filter: (min((fdw137_t1.c1 * 3)) > 500) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT c1, sum(c1) FROM fdw137_t1 GROUP BY c1 HAVING min(c1 * 3) > 500 ORDER BY c1; + c1 | sum +------+------ + 200 | 200 + 300 | 300 + 400 | 400 + 500 | 500 + 600 | 600 + 700 | 700 + 800 | 800 + 900 | 900 + 1000 | 1000 + 1100 | 1100 + 1200 | 1200 + 1300 | 1300 + 1400 | 1400 + 1500 | 1500 + 1600 | 1600 +(15 rows) + +-- FDW-134: Test ORDER BY with COLLATE. 
Shouldn't push-down +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY c2 COLLATE "en_US" ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: c2, (sum(c1)), ((c2)::text), c1 + Sort Key: fdw137_t1.c2 COLLATE "en_US" NULLS FIRST + -> Foreign Scan + Output: c2, (sum(c1)), c2, c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY c2 COLLATE "en_US" ASC NULLS FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +-- Using expressions in HAVING clause. Pushed down. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c3, count(*) FROM fdw137_t1 GROUP BY c3 HAVING abs(max(c8)) = abs(10) ORDER BY 1, 2; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: c3, (count(*)) + Sort Key: fdw137_t1.c3, (count(*)) + -> Foreign Scan + Output: c3, (count(*)) + Filter: (abs((max(fdw137_t1.c8))) = 10) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(7 rows) + +SELECT c3, count(*) FROM fdw137_t1 GROUP BY c3 HAVING abs(max(c8)) = abs(10) ORDER BY 1, 2; + c3 | count +-----------+------- + HEAD | 1 +(1 row) + +-- Unshippable HAVING clause will be evaluated locally, and other qual in HAVING clause is pushed down +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*) FROM (SELECT c3, count(c1) FROM fdw137_t1 GROUP BY c3 HAVING (avg(c1) / avg(c1)) * random() <= 1 and min(c1) > 100) x; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------- + Aggregate + Output: count(*) + -> Foreign Scan + Output: fdw137_t1.c3, NULL::bigint + Filter: (((((avg(fdw137_t1.c1)) / (avg(fdw137_t1.c1))))::double precision * random()) <= '1'::double precision) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT count(*) FROM (SELECT c3, count(c1) FROM fdw137_t1 GROUP BY c3 HAVING (avg(c1) / avg(c1)) * random() <= 1 and min(c1) > 100) x; + count +------- + 0 +(1 row) + +-- Aggregate over join query +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t1.c8), avg(t2.c1) FROM fdw137_t1 t1 INNER JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8%2 = 0 ORDER BY 1 DESC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------ + Sort + Output: (sum(t1.c8)), (avg(t2.c1)) + Sort Key: (sum(t1.c8)) DESC NULLS LAST + -> Foreign Scan + Output: (sum(t1.c8)), (avg(t2.c1)) + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2)) +(6 rows) + +SELECT sum(t1.c8), avg(t2.c1) FROM fdw137_t1 t1 INNER JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8%2 = 0 ORDER BY 1 DESC NULLS LAST; + sum | avg +-----+------------------ + 310 | 22.1428571428571 +(1 row) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.c1, count(*), t2.c4 FROM fdw137_t2 t1 INNER JOIN fdw137_t1 t2 ON (t1.c1 = t2.c8) GROUP BY t1.c1, t2.c4 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Foreign Scan + Output: t1.c1, (count(*)), t2.c4 + Foreign Namespace: Aggregate on 
((mongo_fdw_regress.test_tbl2 t1) INNER JOIN (mongo_fdw_regress.test_tbl1 t2)) +(3 rows) + +SELECT t1.c1, count(*), t2.c4 FROM fdw137_t2 t1 INNER JOIN fdw137_t1 t2 ON (t1.c1 = t2.c8) GROUP BY t1.c1, t2.c4 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | count | c4 +----+-------+------ + 10 | 1 | + 10 | 1 | 700 + 10 | 1 | 900 + 20 | 2 | 400 + 20 | 1 | 800 + 20 | 1 | 900 + 20 | 1 | 1300 + 30 | 5 | 600 + 30 | 1 | 900 +(9 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Foreign Scan + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(3 rows) + +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 ORDER BY 2 ASC NULLS FIRST; + sum | c8 | avg +-----+----+----- + 100 | 20 | 20 + 180 | 30 | 30 + 0 | 60 | 60 +(3 rows) + +-- With ORDER BY pushdown disabled. +SET mongo_fdw.enable_order_by_pushdown TO OFF; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Sort + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Sort Key: t1.c8 NULLS FIRST + -> Foreign Scan + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(6 rows) + +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 ORDER BY 2 ASC NULLS FIRST; + sum | c8 | avg +-----+----+----- + 100 | 20 | 20 + 180 | 30 | 30 + 0 | 60 | 60 +(3 rows) + +SET mongo_fdw.enable_order_by_pushdown TO ON; +-- Aggregate is not pushed down as aggregation contains random() +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1 * (random() <= 1)::int) AS sum, avg(c1) FROM fdw137_t1 ORDER BY 1; + QUERY PLAN +------------------------------------------------------------------------------------- + Sort + Output: (sum((c1 * ((random() <= '1'::double precision))::integer))), (avg(c1)) + Sort Key: (sum((fdw137_t1.c1 * ((random() <= '1'::double precision))::integer))) + -> Aggregate + Output: sum((c1 * ((random() <= '1'::double precision))::integer)), avg(c1) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(8 rows) + +SELECT sum(c1 * (random() <= 1)::int) AS sum, avg(c1) FROM fdw137_t1 ORDER BY 1; + sum | avg +-------+---------------------- + 13600 | 850.0000000000000000 +(1 row) + +-- Not pushed down due to local conditions present in underneath input rel +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t1.c8) FROM fdw137_t1 t1 INNER JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE ((t1.c8 * t2.c1)/(t1.c8 * t2.c1)) * random() <= 1 ORDER BY 1; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Sort + Output: (sum(t1.c8)) + Sort Key: 
(sum(t1.c8)) + -> Aggregate + Output: sum(t1.c8) + -> Foreign Scan + Output: t1.c8 + Filter: (((((t1.c8 * t2.c1) / (t1.c8 * t2.c1)))::double precision * random()) <= '1'::double precision) + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(9 rows) + +SELECT sum(t1.c8) FROM fdw137_t1 t1 INNER JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE ((t1.c8 * t2.c1)/(t1.c8 * t2.c1)) * random() <= 1 ORDER BY 1; + sum +----- + 310 +(1 row) + +-- Aggregates in subquery are pushed down. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(x.a), sum(x.a) FROM (SELECT c8 a, sum(c1) b FROM fdw137_t1 GROUP BY c8 ORDER BY 1, 2) x; + QUERY PLAN +--------------------------------------------------------------------------------------- + Aggregate + Output: count(fdw137_t1.c8), sum(fdw137_t1.c8) + -> Sort + Output: fdw137_t1.c8, (sum(fdw137_t1.c1)) + Sort Key: fdw137_t1.c8, (sum(fdw137_t1.c1)) + -> Foreign Scan + Output: fdw137_t1.c8, (sum(fdw137_t1.c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(8 rows) + +SELECT count(x.a), sum(x.a) FROM (SELECT c8 a, sum(c1) b FROM fdw137_t1 GROUP BY c8 ORDER BY 1, 2) x; + count | sum +-------+----- + 4 | 120 +(1 row) + +-- Aggregate is still pushed down by taking unshippable expression out +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c4 * (random() <= 1)::int AS sum1, sum(c1) AS sum2 FROM fdw137_t1 GROUP BY c4 ORDER BY 1, 2; + QUERY PLAN +---------------------------------------------------------------------------------------------------- + Sort + Output: ((c4 * ((random() <= '1'::double precision))::integer)), (sum(c1)), c4 + Sort Key: ((fdw137_t1.c4 * ((random() <= '1'::double precision))::integer)), (sum(fdw137_t1.c1)) + -> Foreign Scan + Output: (c4 * ((random() <= '1'::double precision))::integer), (sum(c1)), c4 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT c4 * (random() <= 1)::int AS sum1, sum(c1) AS sum2 FROM fdw137_t1 GROUP BY c4 ORDER BY 1, 2; + sum1 | sum2 +------+------ + 400 | 2100 + 600 | 4800 + 700 | 1400 + 800 | 1100 + 900 | 1700 + 1300 | 1600 + | 900 +(7 rows) + +-- Testing ORDER BY, DISTINCT, FILTER and Ordered-sets within aggregates +-- ORDER BY within aggregates (same column used to order) are not pushed +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1 ORDER BY c1) FROM fdw137_t1 WHERE c1 < 500 GROUP BY c2 ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Output: (sum(c1 ORDER BY c1)), c2 + Sort Key: (sum(fdw137_t1.c1 ORDER BY fdw137_t1.c1)) + -> GroupAggregate + Output: sum(c1 ORDER BY c1), c2 + Group Key: fdw137_t1.c2 + -> Sort + Output: c2, c1 + Sort Key: fdw137_t1.c2, fdw137_t1.c1 + -> Foreign Scan on public.fdw137_t1 + Output: c2, c1 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(12 rows) + +SELECT sum(c1 ORDER BY c1) FROM fdw137_t1 WHERE c1 < 500 GROUP BY c2 ORDER BY 1; + sum +----- + 100 + 200 + 300 + 400 +(4 rows) + +-- ORDER BY within aggregate (different column used to order also using DESC) +-- are not pushed. 
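+-- Note: the same pattern holds for the tests that follow; aggregates that carry
+-- their own ORDER BY, DISTINCT, or FILTER clause are computed locally, with
+-- only a plain foreign scan pushed to MongoDB.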
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c8 ORDER BY c1 desc) FROM fdw137_t1 WHERE c1 > 1000 and c8 > 20; + QUERY PLAN +-------------------------------------------------------------- + Aggregate + Output: sum(c8 ORDER BY c1 DESC) + -> Sort + Output: c8, c1 + Sort Key: fdw137_t1.c1 DESC + -> Foreign Scan on public.fdw137_t1 + Output: c8, c1 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(8 rows) + +SELECT sum(c8 ORDER BY c1 desc) FROM fdw137_t1 WHERE c1 > 1000 and c8 > 20; + sum +----- + 90 +(1 row) + +-- DISTINCT within aggregate. Don't push down. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(DISTINCT (c1)) FROM fdw137_t1 WHERE c4 = 600 and c1 < 500; + QUERY PLAN +-------------------------------------------------------------- + Aggregate + Output: sum(DISTINCT c1) + -> Sort + Output: c1 + Sort Key: fdw137_t1.c1 + -> Foreign Scan on public.fdw137_t1 + Output: c1 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(8 rows) + +SELECT sum(DISTINCT (c1)) FROM fdw137_t1 WHERE c4 = 600 and c1 < 500; + sum +----- + 500 +(1 row) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(DISTINCT (t1.c1)) FROM fdw137_t1 t1 join fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 < 30 GROUP BY (t2.c1) ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Sort + Output: (sum(DISTINCT t1.c1)), t2.c1 + Sort Key: (sum(DISTINCT t1.c1)) + -> GroupAggregate + Output: sum(DISTINCT t1.c1), t2.c1 + Group Key: t2.c1 + -> Sort + Output: t2.c1, t1.c1 + Sort Key: t2.c1, t1.c1 + -> Foreign Scan + Output: t2.c1, t1.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(12 rows) + +SELECT sum(DISTINCT (t1.c1)) FROM fdw137_t1 t1 join fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 < 30 GROUP BY (t2.c1) ORDER BY 1; + sum +------ + 3000 + 3700 +(2 rows) + +-- DISTINCT, ORDER BY and FILTER within aggregate, not pushed down. 
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1), sum(DISTINCT c1 ORDER BY c1) filter (WHERE c1%3 < 2), c4 FROM fdw137_t1 WHERE c4 = 600 GROUP BY c4; + QUERY PLAN +----------------------------------------------------------------------------------- + GroupAggregate + Output: sum(c1), sum(DISTINCT c1 ORDER BY c1) FILTER (WHERE ((c1 % 3) < 2)), c4 + -> Sort + Output: c1, c4 + Sort Key: fdw137_t1.c1 + -> Foreign Scan on public.fdw137_t1 + Output: c1, c4 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(8 rows) + +SELECT sum(c1), sum(DISTINCT c1 ORDER BY c1) filter (WHERE c1%3 < 2), c4 FROM fdw137_t1 WHERE c4 = 600 GROUP BY c4; + sum | sum | c4 +------+------+----- + 4800 | 4100 | 600 +(1 row) + +-- FILTER within aggregate, not pushed +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1) filter (WHERE c1 < 1000 and c4 > 500) FROM fdw137_t1 GROUP BY c4 ORDER BY 1 nulls last; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Sort + Output: (sum(c1) FILTER (WHERE ((c1 < 1000) AND (c4 > 500)))), c4 + Sort Key: (sum(fdw137_t1.c1) FILTER (WHERE ((fdw137_t1.c1 < 1000) AND (fdw137_t1.c4 > 500)))) + -> HashAggregate + Output: sum(c1) FILTER (WHERE ((c1 < 1000) AND (c4 > 500))), c4 + Group Key: fdw137_t1.c4 + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(9 rows) + +SELECT sum(c1) filter (WHERE c1 < 1000 and c4 > 500) FROM fdw137_t1 GROUP BY c4 ORDER BY 1 nulls last; + sum +------ + 100 + 1000 + 1700 + + + + +(7 rows) + +-- Outer query is aggregation query +EXPLAIN (VERBOSE, COSTS OFF) +SELECT DISTINCT (SELECT count(*) filter (WHERE t2.c1 = 20 and t2.c1 < 30) FROM fdw137_t1 t1 WHERE t1.c1 = 500) FROM fdw137_t2 t2 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------------- + Unique + Output: ((SubPlan 1)) + -> Sort + Output: ((SubPlan 1)) + Sort Key: ((SubPlan 1)) + -> Aggregate + Output: (SubPlan 1) + -> Foreign Scan on public.fdw137_t2 t2 + Output: t2._id, t2.c1, t2.c2, t2.c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + SubPlan 1 + -> Foreign Scan on public.fdw137_t1 t1 + Output: count(*) FILTER (WHERE ((t2.c1 = 20) AND (t2.c1 < 30))) + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(14 rows) + +SELECT DISTINCT (SELECT count(*) filter (WHERE t2.c1 = 20 and t2.c1 < 30) FROM fdw137_t1 t1 WHERE t1.c1 = 500) FROM fdw137_t2 t2 ORDER BY 1; + count +------- + 1 +(1 row) + +-- Inner query is aggregation query +EXPLAIN (VERBOSE, COSTS OFF) +SELECT DISTINCT (SELECT count(t1.c1) filter (WHERE t2.c1 = 20 and t2.c1 < 30) FROM fdw137_t1 t1 WHERE t1.c1 > 600) FROM fdw137_t2 t2 ORDER BY 1; + QUERY PLAN +---------------------------------------------------------------------------------------------------- + Unique + Output: ((SubPlan 1)) + -> Sort + Output: ((SubPlan 1)) + Sort Key: ((SubPlan 1)) + -> Foreign Scan on public.fdw137_t2 t2 + Output: (SubPlan 1) + Foreign Namespace: mongo_fdw_regress.test_tbl2 + SubPlan 1 + -> Aggregate + Output: count(t1.c1) FILTER (WHERE ((t2.c1 = 20) AND (t2.c1 < 30))) + -> Foreign Scan on public.fdw137_t1 t1 + Output: t1._id, t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(14 rows) + +SELECT DISTINCT (SELECT count(t1.c1) filter (WHERE t2.c1 = 20 and t2.c1 < 30) FROM fdw137_t1 t1 WHERE t1.c1 > 600) FROM fdw137_t2 t2 ORDER BY 1; + count +------- + 0 + 10 +(2 rows) + +-- Ordered-sets within aggregate, not pushed down. 
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, rank('10'::varchar) within group (ORDER BY c3), percentile_cont(c8/200::numeric) within group (ORDER BY c1) FROM fdw137_t1 GROUP BY c8 HAVING percentile_cont(c8/200::numeric) within group (ORDER BY c1) < 500 ORDER BY c8; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: c8, rank('10'::bpchar) WITHIN GROUP (ORDER BY c3), percentile_cont((((c8)::numeric / '200'::numeric))::double precision) WITHIN GROUP (ORDER BY ((c1)::double precision)) + Group Key: fdw137_t1.c8 + Filter: (percentile_cont((((fdw137_t1.c8)::numeric / '200'::numeric))::double precision) WITHIN GROUP (ORDER BY ((fdw137_t1.c1)::double precision)) < '500'::double precision) + -> Sort + Output: c8, c3, c1 + Sort Key: fdw137_t1.c8 + -> Foreign Scan on public.fdw137_t1 + Output: c8, c3, c1 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT c8, rank('10'::varchar) within group (ORDER BY c3), percentile_cont(c8/200::numeric) within group (ORDER BY c1) FROM fdw137_t1 GROUP BY c8 HAVING percentile_cont(c8/200::numeric) within group (ORDER BY c1) < 500 ORDER BY c8; + c8 | rank | percentile_cont +----+------+----------------- + 20 | 1 | 220 + 30 | 1 | 275 +(2 rows) + +-- Subquery in FROM clause HAVING aggregate +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*), x.b FROM fdw137_t1, (SELECT c1 a, sum(c1) b FROM fdw137_t2 GROUP BY c1) x WHERE fdw137_t1.c8 = x.a GROUP BY x.b ORDER BY 1, 2; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: (count(*)), x.b + Sort Key: (count(*)), x.b + -> HashAggregate + Output: count(*), x.b + Group Key: x.b + -> Hash Join + Output: x.b + Inner Unique: true + Hash Cond: (fdw137_t1.c8 = x.a) + -> Foreign Scan on public.fdw137_t1 + Output: fdw137_t1._id, fdw137_t1.c1, fdw137_t1.c2, fdw137_t1.c3, fdw137_t1.c4, fdw137_t1.c5, fdw137_t1.c6, fdw137_t1.c7, fdw137_t1.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + Output: x.b, x.a + -> Subquery Scan on x + Output: x.b, x.a + -> Foreign Scan + Output: fdw137_t2.c1, (sum(fdw137_t2.c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(20 rows) + +SELECT count(*), x.b FROM fdw137_t1, (SELECT c1 a, sum(c1) b FROM fdw137_t2 GROUP BY c1) x WHERE fdw137_t1.c8 = x.a GROUP BY x.b ORDER BY 1, 2; + count | b +-------+---- + 3 | 10 + 5 | 20 + 6 | 30 +(3 rows) + +-- Join with IS NULL check in HAVING +EXPLAIN (VERBOSE, COSTS OFF) +SELECT avg(t1.c1), sum(t2.c1) FROM fdw137_t1 t1 join fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t2.c1 HAVING avg(t1.c1) is null ORDER BY 1 nulls last, 2; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------ + Sort + Output: (avg(t1.c1)), (sum(t2.c1)), t2.c1 + Sort Key: (avg(t1.c1)), (sum(t2.c1)) + -> Foreign Scan + Output: (avg(t1.c1)), (sum(t2.c1)), t2.c1 + Filter: ((avg(t1.c1)) IS NULL) + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2)) +(7 rows) + +SELECT avg(t1.c1), sum(t2.c1) FROM fdw137_t1 t1 join fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t2.c1 HAVING avg(t1.c1) is null ORDER BY 1 nulls last, 2; + avg | sum +-----+----- +(0 rows) + +-- ORDER BY expression is part of the target list but not pushed down to +-- 
foreign server. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1) * (random() <= 1)::int AS sum FROM fdw137_t1 ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------------------------------- + Sort + Output: (((sum(c1)) * ((random() <= '1'::double precision))::integer)) + Sort Key: (((sum(fdw137_t1.c1)) * ((random() <= '1'::double precision))::integer)) + -> Foreign Scan + Output: ((sum(c1)) * ((random() <= '1'::double precision))::integer) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT sum(c1) * (random() <= 1)::int AS sum FROM fdw137_t1 ORDER BY 1; + sum +------- + 13600 +(1 row) + +-- LATERAL join, with parameterization +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, sum FROM fdw137_t1 t1, lateral (SELECT sum(t2.c1) sum FROM fdw137_t2 t2 GROUP BY t2.c1) qry WHERE t1.c8 * 2 = qry.sum ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------------------------------------- + Sort + Output: t1.c8, qry.sum + Sort Key: t1.c8 + -> Hash Join + Output: t1.c8, qry.sum + Hash Cond: ((t1.c8 * 2) = qry.sum) + -> Foreign Scan on public.fdw137_t1 t1 + Output: t1._id, t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + Output: qry.sum + -> Subquery Scan on qry + Output: qry.sum + -> Foreign Scan + Output: (sum(t2.c1)), t2.c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 t2) +(16 rows) + +-- Check with placeHolderVars +EXPLAIN (VERBOSE, COSTS OFF) +SELECT q.b, count(fdw137_t1.c1), sum(q.a) FROM fdw137_t1 left join (SELECT min(13), avg(fdw137_t1.c1), sum(fdw137_t2.c1) FROM fdw137_t1 right join fdw137_t2 ON (fdw137_t1.c8 = fdw137_t2.c1) WHERE fdw137_t1.c8 = 20) q(a, b, c) ON (fdw137_t1.c8 = q.b) WHERE fdw137_t1.c1 between 100 and 500 GROUP BY q.b ORDER BY 1 nulls last, 2; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------- + Incremental Sort + Output: q.b, (count(fdw137_t1.c1)), (sum(q.a)) + Sort Key: q.b, (count(fdw137_t1.c1)) + Presorted Key: q.b + -> GroupAggregate + Output: q.b, count(fdw137_t1.c1), sum(q.a) + Group Key: q.b + -> Sort + Output: q.b, fdw137_t1.c1, q.a + Sort Key: q.b + -> Hash Left Join + Output: q.b, fdw137_t1.c1, q.a + Inner Unique: true + Hash Cond: ((fdw137_t1.c8)::numeric = q.b) + -> Foreign Scan on public.fdw137_t1 + Output: fdw137_t1._id, fdw137_t1.c1, fdw137_t1.c2, fdw137_t1.c3, fdw137_t1.c4, fdw137_t1.c5, fdw137_t1.c6, fdw137_t1.c7, fdw137_t1.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + Output: q.b, q.a + -> Subquery Scan on q + Output: q.b, q.a + -> Aggregate + Output: min(13), avg(fdw137_t1_1.c1), NULL::bigint + -> Foreign Scan + Output: fdw137_t1_1.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 fdw137_t1) INNER JOIN (mongo_fdw_regress.test_tbl2 fdw137_t2) +(26 rows) + +SELECT q.b, count(fdw137_t1.c1), sum(q.a) FROM fdw137_t1 left join (SELECT min(13), avg(fdw137_t1.c1), sum(fdw137_t2.c1) FROM fdw137_t1 right join fdw137_t2 ON (fdw137_t1.c8 = fdw137_t2.c1) WHERE fdw137_t1.c8 = 20) q(a, b, c) ON (fdw137_t1.c8 = q.b) WHERE fdw137_t1.c1 between 100 and 500 GROUP BY q.b ORDER BY 1 nulls last, 2; + b | count | sum +---+-------+----- + | 5 | +(1 row) + +-- Not supported cases +-- The COUNT of column +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(c8) FROM fdw137_t1 ; + QUERY PLAN +-------------------------------------------------------- + Aggregate + Output: 
count(c8) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(5 rows) + +SELECT count(c8) FROM fdw137_t1 ; + count +------- + 15 +(1 row) + +-- Grouping sets +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, sum(c1) FROM fdw137_t1 WHERE c8 > 10 GROUP BY rollup(c8) ORDER BY 1 nulls last; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c8, (sum(c1)) + Sort Key: fdw137_t1.c8 + -> MixedAggregate + Output: c8, sum(c1) + Hash Key: fdw137_t1.c8 + Group Key: () + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT c8, sum(c1) FROM fdw137_t1 WHERE c8 > 10 GROUP BY rollup(c8) ORDER BY 1 nulls last; + c8 | sum +----+------ + 20 | 3700 + 30 | 3800 + 60 | 1500 + | 9000 +(4 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, sum(c1) FROM fdw137_t1 WHERE c8 > 3 GROUP BY cube(c8) ORDER BY 1 nulls last; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c8, (sum(c1)) + Sort Key: fdw137_t1.c8 + -> MixedAggregate + Output: c8, sum(c1) + Hash Key: fdw137_t1.c8 + Group Key: () + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT c8, sum(c1) FROM fdw137_t1 WHERE c8 > 3 GROUP BY cube(c8) ORDER BY 1 nulls last; + c8 | sum +----+------- + 10 | 3000 + 20 | 3700 + 30 | 3800 + 60 | 1500 + | 12000 +(5 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, c4, sum(c1) FROM fdw137_t1 WHERE c8 > 20 GROUP BY grouping sets(c8, c4) ORDER BY 1 nulls last, 2 nulls last; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c8, c4, (sum(c1)) + Sort Key: fdw137_t1.c8, fdw137_t1.c4 + -> HashAggregate + Output: c8, c4, sum(c1) + Hash Key: fdw137_t1.c8 + Hash Key: fdw137_t1.c4 + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT c8, c4, sum(c1) FROM fdw137_t1 WHERE c8 > 20 GROUP BY grouping sets(c8, c4) ORDER BY 1 nulls last, 2 nulls last; + c8 | c4 | sum +----+------+------ + 30 | | 3800 + 60 | | 1500 + | 600 | 3200 + | 900 | 600 + | 1300 | 1500 +(5 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, sum(c1), grouping(c8) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1 nulls last; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c8, (sum(c1)), (GROUPING(c8)) + Sort Key: fdw137_t1.c8 + -> HashAggregate + Output: c8, sum(c1), GROUPING(c8) + Group Key: fdw137_t1.c8 + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(9 rows) + +SELECT c8, sum(c1), grouping(c8) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1 nulls last; + c8 | sum | grouping +----+------+---------- + 20 | 3700 | 0 + 30 | 3800 | 0 + 60 | 1500 | 0 +(3 rows) + +-- DISTINCT itself is not pushed down, whereas underneath aggregate is pushed +EXPLAIN (VERBOSE, COSTS OFF) +SELECT DISTINCT sum(c1) s FROM fdw137_t1 WHERE c1 > 1000 GROUP BY c1 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------------- + Unique + Output: (sum(c1)), c1 + -> Sort + Output: (sum(c1)), c1 + Sort Key: (sum(fdw137_t1.c1)) + -> Foreign Scan + Output: (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 
fdw137_t1) +(8 rows) + +SELECT DISTINCT sum(c1) s FROM fdw137_t1 WHERE c1 > 1000 GROUP BY c1 ORDER BY 1; + s +------ + 1100 + 1200 + 1300 + 1400 + 1500 + 1600 +(6 rows) + +-- WindowAgg +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, sum(c8), count(c8) over (partition by c8%2) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------------------- + Sort + Output: c8, (sum(c8)), (count(c8) OVER (?)), ((c8 % 2)) + Sort Key: fdw137_t1.c8 + -> WindowAgg + Output: c8, (sum(c8)), count(c8) OVER (?), ((c8 % 2)) + -> Sort + Output: c8, ((c8 % 2)), (sum(c8)) + Sort Key: ((fdw137_t1.c8 % 2)) + -> Foreign Scan + Output: c8, (c8 % 2), (sum(c8)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(11 rows) + +SELECT c8, sum(c8), count(c8) over (partition by c8%2) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; + c8 | sum | count +----+-----+------- + 20 | 100 | 3 + 30 | 180 | 3 + 60 | 60 | 3 +(3 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, array_agg(c8) over (partition by c8%2 ORDER BY c8 desc) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------------------- + Sort + Output: c8, (array_agg(c8) OVER (?)), ((c8 % 2)) + Sort Key: fdw137_t1.c8 + -> WindowAgg + Output: c8, array_agg(c8) OVER (?), ((c8 % 2)) + -> Sort + Output: c8, ((c8 % 2)) + Sort Key: ((fdw137_t1.c8 % 2)), fdw137_t1.c8 DESC + -> Foreign Scan + Output: c8, (c8 % 2) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(11 rows) + +SELECT c8, array_agg(c8) over (partition by c8%2 ORDER BY c8 desc) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; + c8 | array_agg +----+------------ + 20 | {60,30,20} + 30 | {60,30} + 60 | {60} +(3 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, array_agg(c8) over (partition by c8%2 ORDER BY c8 range between current row and unbounded following) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------------------- + Sort + Output: c8, (array_agg(c8) OVER (?)), ((c8 % 2)) + Sort Key: fdw137_t1.c8 + -> WindowAgg + Output: c8, array_agg(c8) OVER (?), ((c8 % 2)) + -> Sort + Output: c8, ((c8 % 2)) + Sort Key: ((fdw137_t1.c8 % 2)), fdw137_t1.c8 + -> Foreign Scan + Output: c8, (c8 % 2) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(11 rows) + +SELECT c8, array_agg(c8) over (partition by c8%2 ORDER BY c8 range between current row and unbounded following) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; + c8 | array_agg +----+------------ + 20 | {20,30,60} + 30 | {30,60} + 60 | {60} +(3 rows) + +-- User defined function for user defined aggregate, VARIADIC +CREATE FUNCTION least_accum(anyelement, variadic anyarray) +returns anyelement language sql AS + 'SELECT least($1, min($2[i])) FROM generate_subscripts($2,2) g(i)'; +CREATE aggregate least_agg(variadic items anyarray) ( + stype = anyelement, sfunc = least_accum +); +-- Not pushed down due to user defined aggregate +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, least_agg(c1) FROM fdw137_t1 GROUP BY c2 ORDER BY c2; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c2, (least_agg(VARIADIC ARRAY[c1])) + Sort Key: fdw137_t1.c2 + -> HashAggregate + Output: c2, least_agg(VARIADIC ARRAY[c1]) + Group Key: fdw137_t1.c2 + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, 
c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(9 rows) + +SELECT c2, least_agg(c1) FROM fdw137_t1 GROUP BY c2 ORDER BY c2; + c2 | least_agg +-------+----------- + EMP1 | + EMP10 | + EMP11 | + EMP12 | + EMP13 | + EMP14 | + EMP15 | + EMP16 | + EMP2 | + EMP3 | + EMP4 | + EMP5 | + EMP6 | + EMP7 | + EMP8 | + EMP9 | +(16 rows) + +-- Test partition-wise aggregate +SET enable_partitionwise_aggregate TO ON; +-- Create the partition tables +CREATE TABLE fprt1 (_id NAME, c1 INTEGER, c2 INTEGER, c3 TEXT) PARTITION BY RANGE(c1); +CREATE FOREIGN TABLE ftprt1_p1 PARTITION OF fprt1 FOR VALUES FROM (1) TO (4) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test1'); +CREATE FOREIGN TABLE ftprt1_p2 PARTITION OF fprt1 FOR VALUES FROM (5) TO (8) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test2'); +-- Plan with partitionwise aggregates is enabled +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, sum(c1) FROM fprt1 GROUP BY c1 ORDER BY 2; + QUERY PLAN +------------------------------------------------------------------------------- + Sort + Output: fprt1.c1, (sum(fprt1.c1)) + Sort Key: (sum(fprt1.c1)) + -> Append + -> Foreign Scan + Output: fprt1.c1, (sum(fprt1.c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test1 fprt1) + -> Foreign Scan + Output: fprt1_1.c1, (sum(fprt1_1.c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test2 fprt1) +(10 rows) + +SELECT c1, sum(c1) FROM fprt1 GROUP BY c1 ORDER BY 2; + c1 | sum +----+----- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + 8 | 8 +(8 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, sum(c2), min(c2), count(*) FROM fprt1 GROUP BY c1 HAVING avg(c2) < 22 ORDER BY 2; + QUERY PLAN +------------------------------------------------------------------------------------ + Sort + Output: fprt1.c1, (sum(fprt1.c2)), (min(fprt1.c2)), (count(*)) + Sort Key: (sum(fprt1.c2)) + -> Append + -> Foreign Scan + Output: fprt1.c1, (sum(fprt1.c2)), (min(fprt1.c2)), (count(*)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test1 fprt1) + -> Foreign Scan + Output: fprt1_1.c1, (sum(fprt1_1.c2)), (min(fprt1_1.c2)), (count(*)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test2 fprt1) +(10 rows) + +SELECT c1, sum(c2), min(c2), count(*) FROM fprt1 GROUP BY c1 HAVING avg(c2) < 22 ORDER BY 2; + c1 | sum | min | count +----+-----+-----+------- + 1 | 1 | 1 | 1 + 2 | 2 | 2 | 1 + 3 | 3 | 3 | 1 + 4 | 4 | 4 | 1 + 5 | 5 | 5 | 1 + 6 | 6 | 6 | 1 + 7 | 7 | 7 | 1 + 8 | 8 | 8 | 1 +(8 rows) + +-- Check with whole-row reference +-- Should have all the columns in the target list for the given relation +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, count(t1) FROM fprt1 t1 GROUP BY c1 HAVING avg(c2) < 22 ORDER BY 1; + QUERY PLAN +---------------------------------------------------------------- + Sort + Output: t1.c1, (count(((t1.*)::fprt1))) + Sort Key: t1.c1 + -> Append + -> HashAggregate + Output: t1.c1, count(((t1.*)::fprt1)) + Group Key: t1.c1 + Filter: (avg(t1.c2) < '22'::numeric) + -> Foreign Scan on public.ftprt1_p1 t1 + Output: t1.c1, t1.*, t1.c2 + Foreign Namespace: mongo_fdw_regress.test1 + -> HashAggregate + Output: t1_1.c1, count(((t1_1.*)::fprt1)) + Group Key: t1_1.c1 + Filter: (avg(t1_1.c2) < '22'::numeric) + -> Foreign Scan on public.ftprt1_p2 t1_1 + Output: t1_1.c1, t1_1.*, t1_1.c2 + Foreign Namespace: mongo_fdw_regress.test2 +(18 rows) + +SELECT c1, count(t1) FROM fprt1 t1 GROUP BY c1 HAVING avg(c2) < 22 ORDER BY 1; + c1 | count +----+------- + 1 | 1 + 2 | 1 + 3 | 1 + 4 | 1 + 5 | 1 + 6 | 1 + 7 
| 1 + 8 | 1 +(8 rows) + +SET enable_partitionwise_aggregate TO OFF; +-- Support enable_aggregate_pushdown option at server level and table level. +-- Check only boolean values are accepted. +ALTER SERVER mongo_server OPTIONS (ADD enable_aggregate_pushdown 'non-bolean'); +ERROR: enable_aggregate_pushdown requires a Boolean value +-- Test the option at server level. +ALTER SERVER mongo_server OPTIONS (ADD enable_aggregate_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: (count(*)), c1 + Sort Key: (count(*)) + -> HashAggregate + Output: count(*), c1 + Group Key: fdw137_t1.c1 + Filter: (min(fdw137_t1.c1) > 500) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: (count(*)), c1 + Sort Key: (count(*)) + -> Foreign Scan + Output: (count(*)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +-- Test the option at table level. Setting option at table level does not +-- affect the setting at server level. +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (ADD enable_aggregate_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: (count(*)), c1 + Sort Key: (count(*)) + -> HashAggregate + Output: count(*), c1 + Group Key: fdw137_t1.c1 + Filter: (min(fdw137_t1.c1) > 500) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'false'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: (count(*)), c1 + Sort Key: (count(*)) + -> Foreign Scan + Output: (count(*)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +-- Test option for aggregation over join. Allow aggregation only if enabled for +-- both the relations involved in the join. 
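+-- For illustration (same tables as above), a query such as
+--   SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1
+--     LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8;
+-- is eligible for aggregate pushdown only when enable_aggregate_pushdown
+-- resolves to 'true' for both fdw137_t1 and fdw137_t2; if either table disables
+-- it, the aggregation is performed locally above the pushed-down join.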
+ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'false'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (ADD enable_aggregate_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------- + Sort + Output: (sum(t2.c1)), t1.c8 + Sort Key: t1.c8 + -> HashAggregate + Output: sum(t2.c1), t1.c8 + Group Key: t1.c8 + -> Foreign Scan + Output: t1.c8, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2) +(9 rows) + +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (SET enable_aggregate_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------- + Sort + Output: (sum(t2.c1)), t1.c8 + Sort Key: t1.c8 + -> HashAggregate + Output: sum(t2.c1), t1.c8 + Group Key: t1.c8 + -> Foreign Scan + Output: t1.c8, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2) +(9 rows) + +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'false'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (SET enable_aggregate_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Foreign Scan + Output: (sum(t2.c1)), t1.c8 + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(3 rows) + +-- FDW-560: Aggregation over nested join. As nested join push down is not +-- supported, aggregation shouldn't get pushdown. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) INNER JOIN fdw137_t1 t3 ON (t3.c1 = t1.c1) GROUP BY t1.c8 ORDER BY 2; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: sum(t2.c1), t1.c8 + Group Key: t1.c8 + -> Merge Left Join + Output: t1.c8, t2.c1 + Merge Cond: (t1.c8 = t2.c1) + -> Sort + Output: t1.c8 + Sort Key: t1.c8 + -> Foreign Scan + Output: t1.c8 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl1 t3) + -> Sort + Output: t2.c1 + Sort Key: t2.c1 + -> Foreign Scan on public.fdw137_t2 t2 + Output: t2.c1 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(18 rows) + +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) INNER JOIN fdw137_t1 t3 ON (t3.c1 = t1.c1) GROUP BY t1.c8 ORDER BY 2; + sum | c8 +-----+---- + 30 | 10 + 100 | 20 + 180 | 30 + | 60 + | +(5 rows) + +-- Check when enable_join_pushdown is OFF and enable_aggregate_pushdown is ON. +-- Shouldn't push down join as well as aggregation. 
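+-- Note: aggregate pushdown over a join builds on join pushdown, so once
+-- enable_join_pushdown is turned off at the server level the join is executed
+-- locally and the aggregate cannot be pushed down either, as the plan below
+-- shows.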
+ALTER SERVER mongo_server OPTIONS (ADD enable_join_pushdown 'false'); +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2; + QUERY PLAN +-------------------------------------------------------------------- + GroupAggregate + Output: sum(t2.c1), t1.c8 + Group Key: t1.c8 + -> Merge Left Join + Output: t1.c8, t2.c1 + Merge Cond: (t1.c8 = t2.c1) + -> Sort + Output: t1.c8 + Sort Key: t1.c8 + -> Foreign Scan on public.fdw137_t1 t1 + Output: t1.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Sort + Output: t2.c1 + Sort Key: t2.c1 + -> Foreign Scan on public.fdw137_t2 t2 + Output: t2.c1 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(18 rows) + +-- FDW-134: Test with number of columns more than 32 +CREATE FOREIGN TABLE f_test_large (_id int, + a01 int, a02 int, a03 int, a04 int, a05 int, a06 int, a07 int, a08 int, a09 int, a10 int, + a11 int, a12 int, a13 int, a14 int, a15 int, a16 int, a17 int, a18 int, a19 int, a20 int, + a21 int, a22 int, a23 int, a24 int, a25 int, a26 int, a27 int, a28 int, a29 int, a30 int, + a31 int, a32 int, a33 int, a34 int, a35 int) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'mongo_test_large'); +-- Shouldn't pushdown ORDERBY clause due to exceeded number of path keys limit. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT a32, sum(a32) FROM f_test_large GROUP BY + a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, + a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, + a31, a32, a33, a34, a35 ORDER BY + a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST, + a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST, + a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST, + a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST, + a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST, + a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 ASC NULLS FIRST, + a31 ASC NULLS FIRST, a32 ASC NULLS FIRST, a33 ASC NULLS FIRST, a34 DESC NULLS LAST, a35 ASC NULLS FIRST; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: a32, (sum(a32)), 
a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, a33, a34, a35 + Sort Key: f_test_large.a01 NULLS FIRST, f_test_large.a02 NULLS FIRST, f_test_large.a03 NULLS FIRST, f_test_large.a04 NULLS FIRST, f_test_large.a05 NULLS FIRST, f_test_large.a06 NULLS FIRST, f_test_large.a07 NULLS FIRST, f_test_large.a08 NULLS FIRST, f_test_large.a09 NULLS FIRST, f_test_large.a10 NULLS FIRST, f_test_large.a11 NULLS FIRST, f_test_large.a12 NULLS FIRST, f_test_large.a13 NULLS FIRST, f_test_large.a14 NULLS FIRST, f_test_large.a15 NULLS FIRST, f_test_large.a16 NULLS FIRST, f_test_large.a17 NULLS FIRST, f_test_large.a18 NULLS FIRST, f_test_large.a19 NULLS FIRST, f_test_large.a20 NULLS FIRST, f_test_large.a21 NULLS FIRST, f_test_large.a22 NULLS FIRST, f_test_large.a23 NULLS FIRST, f_test_large.a24 NULLS FIRST, f_test_large.a25 NULLS FIRST, f_test_large.a26 NULLS FIRST, f_test_large.a27 NULLS FIRST, f_test_large.a28 NULLS FIRST, f_test_large.a29 NULLS FIRST, f_test_large.a30 NULLS FIRST, f_test_large.a31 NULLS FIRST, f_test_large.a32 NULLS FIRST, f_test_large.a33 NULLS FIRST, f_test_large.a34 DESC NULLS LAST, f_test_large.a35 NULLS FIRST + -> Foreign Scan + Output: a32, (sum(a32)), a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31, a33, a34, a35 + Foreign Namespace: Aggregate on (mongo_fdw_regress.mongo_test_large f_test_large) +(6 rows) + +SELECT a32, sum(a32) FROM f_test_large GROUP BY + a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, + a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, + a31, a32, a33, a34, a35 ORDER BY + a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST, + a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST, + a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST, + a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST, + a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST, + a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 ASC NULLS FIRST, + a31 ASC NULLS FIRST, a32 ASC NULLS FIRST, a33 ASC NULLS FIRST, a34 DESC NULLS LAST, a35 ASC NULLS FIRST; + a32 | sum +-----+----- + 2 | 2 + 32 | 32 + 32 | 32 + 32 | 32 + 132 | 132 +(5 rows) + +-- Should pushdown ORDERBY clause because number of path keys are in limit. 
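+-- Note: the 35-key ORDER BY above stays local while the 32-key variant below is
+-- pushed down, suggesting the wrapper pushes only a limited number of sort keys
+-- (at least 32, but fewer than 35).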
+EXPLAIN (VERBOSE, COSTS FALSE) +SELECT a32, sum(a32) FROM f_test_large GROUP BY + a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, + a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, + a31, a32 ORDER BY + a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST, + a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST, + a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST, + a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST, + a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST, + a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 ASC NULLS FIRST, + a31 ASC NULLS FIRST, a32 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Foreign Scan + Output: a32, (sum(a32)), a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, a31 + Foreign Namespace: Aggregate on (mongo_fdw_regress.mongo_test_large f_test_large) +(3 rows) + +SELECT a32, sum(a32) FROM f_test_large GROUP BY + a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, + a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, + a31, a32 ORDER BY + a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST, + a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST, + a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST, + a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST, + a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST, + a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 ASC NULLS FIRST, + a31 ASC NULLS FIRST, a32 ASC NULLS FIRST; + a32 | sum +-----+----- + 2 | 2 + 32 | 96 + 132 | 132 +(3 rows) + +-- FDW-131: Limit and offset pushdown with Aggregate pushdown. +SELECT avg(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1; + avg | c1 +-----+---- + 10 | 10 + 20 | 20 + 30 | 30 + 40 | 40 + 50 | 50 + | +(6 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT 1 OFFSET 1; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(3 rows) + +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT 1 OFFSET 1; + sum | c1 +-----+---- + 10 | 10 +(1 row) + +-- Limit 0, Offset 0 with aggregates. 
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT 0 OFFSET 0; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(3 rows) + +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT 0 OFFSET 0; + sum | c1 +-----+---- +(0 rows) + +-- Limit NULL +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT NULL OFFSET 2; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(3 rows) + +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT NULL OFFSET 2; + sum | c1 +-----+---- + 20 | 20 + 30 | 30 + 40 | 40 + 50 | 50 +(4 rows) + +-- Limit ALL +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT ALL OFFSET 2; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(3 rows) + +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT ALL OFFSET 2; + sum | c1 +-----+---- + 20 | 20 + 30 | 30 + 40 | 40 + 50 | 50 +(4 rows) + +-- Limit with -ve value. Shouldn't pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, sum(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY 1 ASC NULLS FIRST LIMIT -1; + QUERY PLAN +--------------------------------------------------------------------------------- + Limit + Output: c1, (sum(c1)) + -> Foreign Scan + Output: c1, (sum(c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(5 rows) + +-- Should throw an error. +SELECT c1, sum(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY 1 ASC NULLS FIRST LIMIT -1; +ERROR: LIMIT must not be negative +-- Offset with -ve value. Shouldn't pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, sum(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY 1 ASC NULLS FIRST OFFSET -2; + QUERY PLAN +--------------------------------------------------------------------------------- + Limit + Output: c1, (sum(c1)) + -> Foreign Scan + Output: c1, (sum(c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(5 rows) + +-- Should throw an error. +SELECT c1, sum(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY 1 ASC NULLS FIRST OFFSET -2; +ERROR: OFFSET must not be negative +-- Limit/Offset with -ve value. Shouldn't pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, avg(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT -1 OFFSET -2; + QUERY PLAN +--------------------------------------------------------------------------------- + Limit + Output: c1, (avg(c1)) + -> Foreign Scan + Output: c1, (avg(c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(5 rows) + +-- Should throw an error. +SELECT c1, avg(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT -1 OFFSET -2; +ERROR: OFFSET must not be negative +-- Limit with expression evaluating to -ve value. 
+EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, avg(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT (1 - (SELECT COUNT(*) FROM fdw137_t2)); + QUERY PLAN +----------------------------------------------------------------------------------- + Limit + Output: fdw137_t2.c1, (avg(fdw137_t2.c1)) + InitPlan 1 (returns $0) + -> Foreign Scan + Output: (count(*)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) + -> Foreign Scan + Output: fdw137_t2.c1, (avg(fdw137_t2.c1)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw137_t2) +(9 rows) + +SELECT c1, avg(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT (1 - (SELECT COUNT(*) FROM fdw137_t2)); +ERROR: LIMIT must not be negative +-- FDW-559: Test mongo_fdw.enable_aggregate_pushdown GUC. +-- Check default value. Should be ON. +SHOW mongo_fdw.enable_aggregate_pushdown; + mongo_fdw.enable_aggregate_pushdown +------------------------------------- + on +(1 row) + +-- Negative testing for GUC value. +SET mongo_fdw.enable_aggregate_pushdown to 'abc'; +ERROR: parameter "mongo_fdw.enable_aggregate_pushdown" requires a Boolean value +--Disable the GUC enable_aggregate_pushdown. +SET mongo_fdw.enable_aggregate_pushdown to false; +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +-- Shouldn't pushdown aggregate because GUC is OFF. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: (count(*)), c1 + Sort Key: (count(*)) + -> HashAggregate + Output: count(*), c1 + Group Key: fdw137_t1.c1 + Filter: (min(fdw137_t1.c1) > 500) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + count +------- + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +(11 rows) + +--Enable the GUC enable_aggregate_pushdown. +SET mongo_fdw.enable_aggregate_pushdown to on; +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +-- Should pushdown aggregate because GUC is ON. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: (count(*)), c1 + Sort Key: (count(*)) + -> Foreign Scan + Output: (count(*)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + count +------- + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +(11 rows) + +-- Test for aggregation over join when server and table options for both the +-- tables is true and guc is enabled. Should pushdown. 
+SET mongo_fdw.enable_aggregate_pushdown to on; +SET mongo_fdw.enable_join_pushdown to on; +ALTER SERVER mongo_server OPTIONS (SET enable_join_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (SET enable_aggregate_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Foreign Scan + Output: (count(*)), t1.c8 + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(3 rows) + +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + count | c8 +-------+---- + 1 | + 3 | 10 + 5 | 20 + 6 | 30 + 1 | 60 +(5 rows) + +--Disable the GUC enable_join_pushdown. Shouldn't pushdown aggregate. +SET mongo_fdw.enable_join_pushdown to off; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------------------------------- + GroupAggregate + Output: count(*), t1.c8 + Group Key: t1.c8 + -> Merge Left Join + Output: t1.c8 + Merge Cond: (t1.c8 = t2.c1) + -> Foreign Scan on public.fdw137_t1 t1 + Output: t1._id, t1.c1, t1.c2, t1.c3, t1.c4, t1.c5, t1.c6, t1.c7, t1.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Sort + Output: t2.c1 + Sort Key: t2.c1 NULLS FIRST + -> Foreign Scan on public.fdw137_t2 t2 + Output: t2.c1 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(15 rows) + +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + count | c8 +-------+---- + 1 | + 3 | 10 + 5 | 20 + 6 | 30 + 1 | 60 +(5 rows) + +SET mongo_fdw.enable_join_pushdown to on; +--Disable the GUC enable_aggregate_pushdown. Shouldn't pushdown. +SET mongo_fdw.enable_aggregate_pushdown to false; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + GroupAggregate + Output: count(*), t1.c8 + Group Key: t1.c8 + -> Foreign Scan + Output: t1.c8 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2) +(6 rows) + +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + count | c8 +-------+---- + 1 | + 3 | 10 + 5 | 20 + 6 | 30 + 1 | 60 +(5 rows) + +-- FDW-589: Test enable_order_by_pushdown option at server and table level. 
+SET mongo_fdw.enable_join_pushdown to true; +SET mongo_fdw.enable_aggregate_pushdown to true; +SET mongo_fdw.enable_order_by_pushdown to true; +ALTER SERVER mongo_server OPTIONS (ADD enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (ADD enable_join_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (ADD enable_order_by_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: c2, (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(3 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (ADD enable_join_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (ADD enable_order_by_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Foreign Scan + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(3 rows) + +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; + sum | c8 | avg +-----+----+----- + 100 | 20 | 20 + 180 | 30 | 30 + 0 | 60 | 60 +(3 rows) + +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_order_by_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------------- + Sort + Output: c2, (sum(c1)), c1 + Sort Key: fdw137_t1.c2 NULLS FIRST + -> Foreign Scan + Output: c2, (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(6 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Sort + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Sort Key: t1.c8 NULLS FIRST + -> Foreign Scan + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(6 rows) + 
+SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; + sum | c8 | avg +-----+----+----- + 100 | 20 | 20 + 180 | 30 | 30 + 0 | 60 | 60 +(3 rows) + +-- Test that setting option at table level does not affect the setting at +-- server level. +ALTER SERVER mongo_server OPTIONS (SET enable_order_by_pushdown 'false'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_order_by_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------------------------- + Foreign Scan + Output: c2, (sum(c1)), c1 + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 fdw137_t1) +(3 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (SET enable_order_by_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + Foreign Scan + Output: (sum(t2.c1)), t1.c8, (avg(t1.c8)) + Foreign Namespace: Aggregate on ((mongo_fdw_regress.test_tbl1 t1) LEFT JOIN (mongo_fdw_regress.test_tbl2 t2)) +(3 rows) + +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; + sum | c8 | avg +-----+----+----- + 100 | 20 | 20 + 180 | 30 | 30 + 0 | 60 | 60 +(3 rows) + +-- When option enable_aggregate_pushdown is disabled. Shouldn't pushdown +-- aggregate as well as ORDER BY too. 
+ALTER SERVER mongo_server OPTIONS (SET enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------- + Sort + Output: c2, (sum(c1)), c1 + Sort Key: fdw137_t1.c2 NULLS FIRST + -> HashAggregate + Output: c2, sum(c1), c1 + Group Key: fdw137_t1.c2, fdw137_t1.c1 + Filter: (min(fdw137_t1.c1) > 500) + -> Foreign Scan on public.fdw137_t1 + Output: _id, c1, c2, c3, c4, c5, c6, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; + c2 | sum +-------+------ + EMP10 | 1000 + EMP11 | 1100 + EMP12 | 1200 + EMP13 | 1300 + EMP14 | 1400 + EMP15 | 1500 + EMP16 | 1600 + EMP6 | 600 + EMP7 | 700 + EMP8 | 800 + EMP9 | 900 +(11 rows) + +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +-- Cleanup +DELETE FROM fdw137_t1 WHERE c8 IS NULL; +DELETE FROM fdw137_t1 WHERE c8 = 60; +DELETE FROM fdw137_t2 WHERE c1 IS NULL; +DELETE FROM fdw137_t2 WHERE c1 = 50; +DROP FOREIGN TABLE fdw137_t1; +DROP FOREIGN TABLE fdw137_t2; +DROP FOREIGN TABLE ftprt1_p1; +DROP FOREIGN TABLE ftprt1_p2; +DROP FOREIGN TABLE f_test_large; +DROP TABLE fprt1; +DROP USER MAPPING FOR public SERVER mongo_server; +DROP SERVER mongo_server; +DROP EXTENSION mongo_fdw; diff --git a/expected/connection_validation.out b/expected/connection_validation.out new file mode 100644 index 0000000..bcf665f --- /dev/null +++ b/expected/connection_validation.out @@ -0,0 +1,71 @@ +\set VERBOSITY terse +\set MONGO_HOST `echo \'"$MONGO_HOST"\'` +\set MONGO_PORT `echo \'"$MONGO_PORT"\'` +\set MONGO_USER_NAME `echo \'"$MONGO_USER_NAME"\'` +\set MONGO_PASS `echo \'"$MONGO_PWD"\'` +-- Before running this file user must create database mongo_fdw_regress on +-- MongoDB with all permission for MONGO_USER_NAME user with MONGO_PASS +-- password and ran mongodb_init.sh file to load collections. +\c contrib_regression +CREATE EXTENSION IF NOT EXISTS mongo_fdw; +CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw + OPTIONS (address :MONGO_HOST, port :MONGO_PORT); +CREATE USER MAPPING FOR public SERVER mongo_server; +-- Create foreign tables and validate +CREATE FOREIGN TABLE f_mongo_test (_id name, a int, b varchar) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'mongo_test'); +SELECT a, b FROM f_mongo_test ORDER BY 1, 2; + a | b +---+----------------------- + 0 | mongo_test collection +(1 row) + +-- +-- fdw-108: After a change to a pg_foreign_server or pg_user_mapping catalog +-- entry, connection should be invalidated. 
+-- +-- Alter one of the SERVER option +-- Set wrong address for mongo_server +ALTER SERVER mongo_server OPTIONS (SET address '127.0.0.10'); +ALTER SERVER mongo_server OPTIONS (SET port '9999'); +-- Should fail with an error +INSERT INTO f_mongo_test VALUES ('0', 2, 'RECORD INSERTED'); +ERROR: could not connect to server mongo_server +UPDATE f_mongo_test SET b = 'RECORD UPDATED' WHERE a = 2; +ERROR: could not connect to server mongo_server +DELETE FROM f_mongo_test WHERE a = 2; +ERROR: could not connect to server mongo_server +SELECT a, b FROM f_mongo_test ORDER BY 1, 2; +ERROR: could not connect to server mongo_server +-- Set correct address for mongo_server +ALTER SERVER mongo_server OPTIONS (SET address :MONGO_HOST); +ALTER SERVER mongo_server OPTIONS (SET port :MONGO_PORT); +-- Should able to insert the data +INSERT INTO f_mongo_test VALUES ('0', 2, 'RECORD INSERTED'); +DELETE FROM f_mongo_test WHERE a = 2; +-- Drop user mapping and create with invalid username and password for public +-- user mapping +DROP USER MAPPING FOR public SERVER mongo_server; +CREATE USER MAPPING FOR public SERVER mongo_server + OPTIONS (username 'wrong', password 'wrong'); +-- Should fail with an error +INSERT INTO f_mongo_test VALUES ('0', 3, 'RECORD INSERTED'); +ERROR: could not connect to server mongo_server +UPDATE f_mongo_test SET b = 'RECORD UPDATED' WHERE a = 3; +ERROR: could not connect to server mongo_server +DELETE FROM f_mongo_test WHERE a = 3; +ERROR: could not connect to server mongo_server +SELECT a, b FROM f_mongo_test ORDER BY 1, 2; +ERROR: could not connect to server mongo_server +-- Drop user mapping and create without username and password for public +-- user mapping +DROP USER MAPPING FOR public SERVER mongo_server; +CREATE USER MAPPING FOR public SERVER mongo_server; +-- Should able to insert the data +INSERT INTO f_mongo_test VALUES ('0', 3, 'RECORD INSERTED'); +DELETE FROM f_mongo_test WHERE a = 3; +-- Cleanup +DROP FOREIGN TABLE f_mongo_test; +DROP USER MAPPING FOR public SERVER mongo_server; +DROP SERVER mongo_server; +DROP EXTENSION mongo_fdw; diff --git a/expected/dml.out b/expected/dml.out new file mode 100644 index 0000000..cff659a --- /dev/null +++ b/expected/dml.out @@ -0,0 +1,358 @@ +\set MONGO_HOST `echo \'"$MONGO_HOST"\'` +\set MONGO_PORT `echo \'"$MONGO_PORT"\'` +\set MONGO_USER_NAME `echo \'"$MONGO_USER_NAME"\'` +\set MONGO_PASS `echo \'"$MONGO_PWD"\'` +-- Before running this file user must create database mongo_fdw_regress, +-- mongo_fdw_regress1 and mongo_fdw_regress2 databases on MongoDB with all +-- permission for MONGO_USER_NAME user with MONGO_PASS password and ran +-- mongodb_init.sh file to load collections. +\c contrib_regression +CREATE EXTENSION IF NOT EXISTS mongo_fdw; +CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw + OPTIONS (address :MONGO_HOST, port :MONGO_PORT); +CREATE USER MAPPING FOR public SERVER mongo_server; +-- Create foreign tables +CREATE FOREIGN TABLE f_mongo_test (_id name, a int, b varchar) SERVER mongo_server + OPTIONS (database 'mongo_fdw_regress', collection 'mongo_test'); +CREATE FOREIGN TABLE f_mongo_test1 (_id name, a int, b varchar) SERVER mongo_server + OPTIONS (database 'mongo_fdw_regress1', collection 'mongo_test1'); +CREATE FOREIGN TABLE f_mongo_test2 (_id name, a int, b varchar) SERVER mongo_server + OPTIONS (database 'mongo_fdw_regress2', collection 'mongo_test2'); +-- Creating foreign table without specifying database. 
+CREATE FOREIGN TABLE f_mongo_test3 (_id name, a int, b varchar) SERVER mongo_server + OPTIONS (collection 'mongo_test3'); +CREATE FOREIGN TABLE f_mongo_test6 (_id name, a int, b text[]) SERVER mongo_server + OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl6'); +-- Verify the INSERT/UPDATE/DELETE operations on a collection (mongo_test) +-- exist in a database (mongo_fdw_regress) in mongoDB. +SELECT a,b FROM f_mongo_test ORDER BY 1, 2; + a | b +---+----------------------- + 0 | mongo_test collection +(1 row) + +INSERT INTO f_mongo_test VALUES ('0', 10 , 'INSERT'); +SELECT a,b FROM f_mongo_test ORDER BY 1, 2; + a | b +----+----------------------- + 0 | mongo_test collection + 10 | INSERT +(2 rows) + +UPDATE f_mongo_test SET b = 'UPDATE' WHERE a = 10; +SELECT a,b FROM f_mongo_test ORDER BY 1, 2; + a | b +----+----------------------- + 0 | mongo_test collection + 10 | UPDATE +(2 rows) + +DELETE FROM f_mongo_test WHERE a = 10; +SELECT a,b FROM f_mongo_test ORDER BY 1, 2; + a | b +---+----------------------- + 0 | mongo_test collection +(1 row) + +-- Verify the INSERT/UPDATE/DELETE operations on a collection (mongo_test1) +-- not exist in a database (mongo_fdw_regress1) in mongoDB. +SELECT a,b FROM f_mongo_test1 ORDER BY 1, 2; + a | b +---+--- +(0 rows) + +INSERT INTO f_mongo_test1 VALUES ('0', 10 , 'INSERT'); +SELECT a,b FROM f_mongo_test1 ORDER BY 1, 2; + a | b +----+-------- + 10 | INSERT +(1 row) + +UPDATE f_mongo_test1 SET b = 'UPDATE' WHERE a = 10; +SELECT a,b FROM f_mongo_test1 ORDER BY 1, 2; + a | b +----+-------- + 10 | UPDATE +(1 row) + +DELETE FROM f_mongo_test1 WHERE a = 10; +SELECT a,b FROM f_mongo_test1 ORDER BY 1, 2; + a | b +---+--- +(0 rows) + +-- Verify the INSERT/UPDATE/DELETE operations on a collection (mongo_test2) +-- not exist in a non exist database (mongo_fdw_regress2) in mongoDB. +SELECT a,b FROM f_mongo_test2 ORDER BY 1, 2; + a | b +---+--- +(0 rows) + +INSERT INTO f_mongo_test2 VALUES ('0', 10 , 'INSERT'); +SELECT a,b FROM f_mongo_test2 ORDER BY 1, 2; + a | b +----+-------- + 10 | INSERT +(1 row) + +UPDATE f_mongo_test2 SET b = 'UPDATE' WHERE a = 10; +SELECT a,b FROM f_mongo_test2 ORDER BY 1, 2; + a | b +----+-------- + 10 | UPDATE +(1 row) + +DELETE FROM f_mongo_test2 WHERE a = 10; +SELECT a,b FROM f_mongo_test2 ORDER BY 1, 2; + a | b +---+--- +(0 rows) + +-- Verify the INSERT/UPDATE/DELETE operations on a collection (mongo_test) +-- when foreign table created without database option. +SELECT a,b FROM f_mongo_test3 ORDER BY 1, 2; + a | b +---+--- +(0 rows) + +INSERT INTO f_mongo_test3 VALUES ('0', 10 , 'INSERT'); +SELECT a,b FROM f_mongo_test3 ORDER BY 1, 2; + a | b +----+-------- + 10 | INSERT +(1 row) + +UPDATE f_mongo_test3 SET b = 'UPDATE' WHERE a = 10; +SELECT a,b FROM f_mongo_test3 ORDER BY 1, 2; + a | b +----+-------- + 10 | UPDATE +(1 row) + +DELETE FROM f_mongo_test3 WHERE a = 10; +SELECT a,b FROM f_mongo_test3 ORDER BY 1, 2; + a | b +---+--- +(0 rows) + +-- FDW-158: Fix server crash when analyzing a foreign table. +ANALYZE f_mongo_test; +-- Should give correct number of rows now. +SELECT reltuples FROM pg_class WHERE relname = 'f_mongo_test'; + reltuples +----------- + 1 +(1 row) + +-- Check count using select query on table. 
+SELECT count(*) FROM f_mongo_test; + count +------- + 1 +(1 row) + +-- Some more variants of vacuum and analyze +VACUUM f_mongo_test; +WARNING: skipping "f_mongo_test" --- cannot vacuum non-tables or special system tables +VACUUM FULL f_mongo_test; +WARNING: skipping "f_mongo_test" --- cannot vacuum non-tables or special system tables +VACUUM FREEZE f_mongo_test; +WARNING: skipping "f_mongo_test" --- cannot vacuum non-tables or special system tables +ANALYZE f_mongo_test; +ANALYZE f_mongo_test(a); +VACUUM ANALYZE f_mongo_test; +WARNING: skipping "f_mongo_test" --- cannot vacuum non-tables or special system tables +-- FDW-226: Fix COPY FROM and foreign partition routing results in a +-- server crash +-- Should fail as foreign table direct copy is not supported +COPY f_mongo_test TO '/tmp/data.txt' delimiter ','; +ERROR: cannot copy from foreign table "f_mongo_test" +HINT: Try the COPY (SELECT ...) TO variant. +COPY f_mongo_test (a) TO '/tmp/data.txt' delimiter ','; +ERROR: cannot copy from foreign table "f_mongo_test" +HINT: Try the COPY (SELECT ...) TO variant. +COPY f_mongo_test (b) TO '/tmp/data.txt' delimiter ','; +ERROR: cannot copy from foreign table "f_mongo_test" +HINT: Try the COPY (SELECT ...) TO variant. +-- Should pass +COPY (SELECT * FROM f_mongo_test) TO '/tmp/data.txt' delimiter ','; +COPY (SELECT a, b FROM f_mongo_test) TO '/tmp/data.txt' delimiter ','; +COPY (SELECT a FROM f_mongo_test) TO '/tmp/data.txt' delimiter ','; +COPY (SELECT b FROM f_mongo_test) TO '/tmp/data.txt' delimiter ','; +-- Should throw an error as copy to foreign table is not supported +DO +$$ +BEGIN + COPY f_mongo_test FROM '/tmp/data.txt' delimiter ','; +EXCEPTION WHEN others THEN + IF SQLERRM = 'COPY and foreign partition routing not supported in mongo_fdw' OR + SQLERRM = 'cannot copy to foreign table "f_mongo_test"' THEN + RAISE NOTICE 'ERROR: COPY and foreign partition routing not supported in mongo_fdw'; + ELSE + RAISE NOTICE '%', SQLERRM; + END IF; +END; +$$ +LANGUAGE plpgsql; +NOTICE: ERROR: COPY and foreign partition routing not supported in mongo_fdw +DO +$$ +BEGIN + COPY f_mongo_test(a, b) FROM '/tmp/data.txt' delimiter ','; +EXCEPTION WHEN others THEN + IF SQLERRM = 'COPY and foreign partition routing not supported in mongo_fdw' OR + SQLERRM = 'cannot copy to foreign table "f_mongo_test"' THEN + RAISE NOTICE 'ERROR: COPY and foreign partition routing not supported in mongo_fdw'; + ELSE + RAISE NOTICE '%', SQLERRM; + END IF; +END; +$$ +LANGUAGE plpgsql; +NOTICE: ERROR: COPY and foreign partition routing not supported in mongo_fdw +DO +$$ +BEGIN + COPY f_mongo_test(a) FROM '/tmp/data.txt' delimiter ','; +EXCEPTION WHEN others THEN + IF SQLERRM = 'COPY and foreign partition routing not supported in mongo_fdw' OR + SQLERRM = 'cannot copy to foreign table "f_mongo_test"' THEN + RAISE NOTICE 'ERROR: COPY and foreign partition routing not supported in mongo_fdw'; + ELSE + RAISE NOTICE '%', SQLERRM; + END IF; +END; +$$ +LANGUAGE plpgsql; +NOTICE: ERROR: COPY and foreign partition routing not supported in mongo_fdw +DO +$$ +BEGIN + COPY f_mongo_test(b) FROM '/tmp/data.txt' delimiter ','; +EXCEPTION WHEN others THEN + IF SQLERRM = 'COPY and foreign partition routing not supported in mongo_fdw' OR + SQLERRM = 'cannot copy to foreign table "f_mongo_test"' THEN + RAISE NOTICE 'ERROR: COPY and foreign partition routing not supported in mongo_fdw'; + ELSE + RAISE NOTICE '%', SQLERRM; + END IF; +END; +$$ +LANGUAGE plpgsql; +NOTICE: ERROR: COPY and foreign partition routing not supported in mongo_fdw 
+--FDW-466: Document update for array elements shouldn't lead to the crash +INSERT INTO f_mongo_test6 VALUES (0, 1, ARRAY ['INSERT', 'DELETE']); +SELECT a, b FROM f_mongo_test6 ORDER BY a; + a | b +---+----------------- + 1 | {INSERT,DELETE} +(1 row) + +UPDATE f_mongo_test6 SET b[1] = 'UPDATE' WHERE a = 1; +SELECT a, b FROM f_mongo_test6 ORDER BY a; + a | b +---+----------------- + 1 | {UPDATE,DELETE} +(1 row) + +DELETE FROM f_mongo_test6 WHERE b[2] = 'DELETE'; +SELECT a, b FROM f_mongo_test6 ORDER BY a; + a | b +---+--- +(0 rows) + +--FDW-481: UPDATE/DELETE shouldn't lead to crash when _id is NULL. +-- If first column type is not NAME then UPDATE/DELETE should result into an error. +CREATE FOREIGN TABLE f_mongo_test7 (_id text, a int, b text) SERVER mongo_server + OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl7'); +SELECT a, b FROM f_mongo_test7 ORDER BY 1; + a | b +----+------ + 10 | ROW1 + 20 | ROW2 +(2 rows) + +UPDATE f_mongo_test7 SET b = 'UPDATED' WHERE a = 10; +ERROR: type of first column of MongoDB's foreign table must be "NAME" +DELETE FROM f_mongo_test7 WHERE a = 10; +ERROR: type of first column of MongoDB's foreign table must be "NAME" +DROP FOREIGN TABLE f_mongo_test7; +-- If first column name is not _id then UPDATE/DELETE should result into an error. +CREATE FOREIGN TABLE f_mongo_test7 (id1 NAME, a int, b text) SERVER mongo_server + OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl7'); +SELECT a, b FROM f_mongo_test7 ORDER BY 1; + a | b +----+------ + 10 | ROW1 + 20 | ROW2 +(2 rows) + +UPDATE f_mongo_test7 SET b = 'UPDATED' WHERE a = 10; +ERROR: first column of MongoDB's foreign table must be "_id" +DELETE FROM f_mongo_test7 WHERE a = 10; +ERROR: first column of MongoDB's foreign table must be "_id" +DROP FOREIGN TABLE f_mongo_test7; +-- UPDATE/DELETE when _id is NULL. Shouldn't crash. +CREATE FOREIGN TABLE f_mongo_test7 (_id NAME, a int, b text) SERVER mongo_server + OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl7'); +SELECT a, b FROM f_mongo_test7 ORDER BY 1; + a | b +----+------ + 10 | ROW1 + 20 | ROW2 +(2 rows) + +SELECT * FROM f_mongo_test7 WHERE a = 10 ORDER BY 1; + _id | a | b +-----+----+------ + | 10 | ROW1 +(1 row) + +UPDATE f_mongo_test7 SET b = 'UPDATED' WHERE _id IS NULL; +SELECT a, b FROM f_mongo_test7 ORDER BY 1; + a | b +----+--------- + 10 | UPDATED + 20 | ROW2 +(2 rows) + +DELETE FROM f_mongo_test7 WHERE a = 20; +SELECT a, b FROM f_mongo_test7 ORDER BY 1; + a | b +----+--------- + 10 | UPDATED +(1 row) + +-- Retain original data of test_tbl7 +UPDATE f_mongo_test7 SET b = 'ROW1' WHERE a = 10; +INSERT INTO f_mongo_test7 VALUES(0, 20, 'ROW2'); +-- When _id is non-objectId type on MongoDB. Should result into an error. +CREATE FOREIGN TABLE f_mongo_test8 (_id NAME, a int, b text) SERVER mongo_server + OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl8'); +SELECT * FROM f_mongo_test8 ORDER BY 1; +ERROR: cannot convert BSON type to column type +HINT: Column type "NAME" is compatible only with BSON type "ObjectId". +UPDATE f_mongo_test8 SET b = 'UPDATED' WHERE a = 2; +ERROR: cannot convert BSON type to column type +HINT: Column type "NAME" is compatible only with BSON type "ObjectId". +DELETE FROM f_mongo_test8 WHERE a = 2; +ERROR: cannot convert BSON type to column type +HINT: Column type "NAME" is compatible only with BSON type "ObjectId". 
+SELECT a, b FROM f_mongo_test8 ORDER BY 1; + a | b +---+------ + 2 | ROW1 + 3 | ROW2 +(2 rows) + +-- Cleanup +DROP FOREIGN TABLE f_mongo_test; +DROP FOREIGN TABLE f_mongo_test1; +DROP FOREIGN TABLE f_mongo_test2; +DROP FOREIGN TABLE f_mongo_test3; +DROP FOREIGN TABLE f_mongo_test6; +DROP FOREIGN TABLE f_mongo_test7; +DROP FOREIGN TABLE f_mongo_test8; +DROP USER MAPPING FOR public SERVER mongo_server; +DROP SERVER mongo_server; +DROP EXTENSION mongo_fdw; diff --git a/expected/join_pushdown.out b/expected/join_pushdown.out new file mode 100644 index 0000000..3fa1e39 --- /dev/null +++ b/expected/join_pushdown.out @@ -0,0 +1,2123 @@ +\set MONGO_HOST `echo \'"$MONGO_HOST"\'` +\set MONGO_PORT `echo \'"$MONGO_PORT"\'` +\set MONGO_USER_NAME `echo \'"$MONGO_USER_NAME"\'` +\set MONGO_PASS `echo \'"$MONGO_PWD"\'` +-- Before running this file user must create database mongo_fdw_regress on +-- MongoDB with all permission for MONGO_USER_NAME user with MONGO_PASS +-- password and ran mongodb_init.sh file to load collections. +\c contrib_regression +CREATE EXTENSION IF NOT EXISTS mongo_fdw; +CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw + OPTIONS (address :MONGO_HOST, port :MONGO_PORT); +CREATE USER MAPPING FOR public SERVER mongo_server; +CREATE SERVER mongo_server1 FOREIGN DATA WRAPPER mongo_fdw + OPTIONS (address :MONGO_HOST, port :MONGO_PORT); +CREATE USER MAPPING FOR public SERVER mongo_server1; +-- Create foreign tables. +CREATE FOREIGN TABLE f_test_tbl1 (_id NAME, c1 INTEGER, c2 TEXT, c3 CHAR(9), c4 INTEGER, c5 pg_catalog.Date, c6 DECIMAL, c7 INTEGER, c8 INTEGER) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl1'); +CREATE FOREIGN TABLE f_test_tbl2 (_id NAME, c1 INTEGER, c2 TEXT, c3 TEXT) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl2'); +CREATE FOREIGN TABLE f_test_tbl3 (_id NAME, c1 INTEGER, c2 TEXT, c3 TEXT) + SERVER mongo_server1 OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl2'); +CREATE FOREIGN TABLE test_text ( __doc text) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'warehouse'); +CREATE FOREIGN TABLE test_varchar ( __doc varchar) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'warehouse'); +CREATE FOREIGN TABLE f_test_tbl4 (_id NAME, c1 INTEGER, c2 TEXT, c3 CHAR(9), c4 INTEGER, c5 pg_catalog.Date, c6 DECIMAL, c7 INTEGER, c8 INTEGER) + SERVER mongo_server1 OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl1'); +INSERT INTO f_test_tbl1 VALUES (0, 1500, 'EMP15', 'FINANCE', 1300, '2000-12-25', 950.0, 400, 60); +INSERT INTO f_test_tbl1 VALUES (0, 1600, 'EMP16', 'ADMIN', 600); +INSERT INTO f_test_tbl2 VALUES (0, 50, 'TESTING', 'NASHIK'); +INSERT INTO f_test_tbl2 VALUES (0); +-- Create local table. +CREATE TABLE l_test_tbl1 AS + SELECT c1, c2, c3, c4, c5, c6, c7, c8 FROM f_test_tbl1; +-- Push down LEFT OUTER JOIN. 
+EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(17 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl1 e LEFT OUTER JOIN f_test_tbl2 d ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 e) LEFT JOIN (mongo_fdw_regress.test_tbl2 d) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl1 e LEFT OUTER JOIN f_test_tbl2 d ON e.c8 = d.c1 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + | | 1500 | EMP15 | 950 | 60 + | | 1600 | EMP16 | | +(16 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 OR e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST OFFSET 50; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 OR e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST OFFSET 50; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+---------+---- + 40 | HR | 1300 | EMP13 | 3000 | 20 + 40 | HR | 1400 | EMP14 | 1300 | 10 + 40 | HR | 1500 | EMP15 | 950 | 60 + 40 | HR | 1600 | EMP16 | | + 50 | TESTING | 100 | EMP1 | 800.3 
| 20 + 50 | TESTING | 200 | EMP2 | 1600 | 30 + 50 | TESTING | 300 | EMP3 | 1250 | 30 + 50 | TESTING | 400 | EMP4 | 2975 | 20 + 50 | TESTING | 500 | EMP5 | 1250.23 | 30 + 50 | TESTING | 600 | EMP6 | 2850 | 30 + 50 | TESTING | 700 | EMP7 | 2450.34 | 10 + 50 | TESTING | 800 | EMP8 | 3000 | 20 + 50 | TESTING | 900 | EMP9 | 5000 | 10 + 50 | TESTING | 1000 | EMP10 | 1500 | 30 + 50 | TESTING | 1100 | EMP11 | 1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | +(20 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | | | | + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(12 rows) + +-- With ORDER BY pushdown disabled. +SET mongo_fdw.enable_order_by_pushdown TO OFF; +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Sort + Sort Key: d.c1 NULLS FIRST, e.c1 NULLS FIRST + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | | | | + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(12 rows) + +SET mongo_fdw.enable_order_by_pushdown TO ON; +-- Column comparing with 'Constant' pushed down. 
+EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = 20 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = 20 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | | | | + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 200 | EMP2 | 1600 | 30 + 20 | ADMINISTRATION | 300 | EMP3 | 1250 | 30 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 500 | EMP5 | 1250.23 | 30 + 20 | ADMINISTRATION | 600 | EMP6 | 2850 | 30 + 20 | ADMINISTRATION | 700 | EMP7 | 2450.34 | 10 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 900 | EMP9 | 5000 | 10 + 20 | ADMINISTRATION | 1000 | EMP10 | 1500 | 30 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1200 | EMP12 | 950 | 30 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 20 | ADMINISTRATION | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 1500 | EMP15 | 950 | 60 + 20 | ADMINISTRATION | 1600 | EMP16 | | + 30 | SALES | | | | + 40 | HR | | | | + 50 | TESTING | | | | +(21 rows) + +-- Push down RIGHT OUTER JOIN. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 e) LEFT JOIN (mongo_fdw_regress.test_tbl2 d) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + | | 1500 | EMP15 | 950 | 60 + | | 1600 | EMP16 | | +(16 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl1 e RIGHT OUTER JOIN f_test_tbl2 d ON e.c8 = d.c1 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl1 e RIGHT OUTER JOIN f_test_tbl2 d ON e.c8 = d.c1 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | 
DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(17 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c4 > d.c1 OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Limit + -> Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 e) LEFT JOIN (mongo_fdw_regress.test_tbl2 d) +(5 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c4 > d.c1 OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+---------+---- + 40 | HR | 1400 | EMP14 | 1300 | 10 + 40 | HR | 1500 | EMP15 | 950 | 60 + 40 | HR | 1600 | EMP16 | | + 50 | TESTING | 100 | EMP1 | 800.3 | 20 + 50 | TESTING | 200 | EMP2 | 1600 | 30 + 50 | TESTING | 300 | EMP3 | 1250 | 30 + 50 | TESTING | 400 | EMP4 | 2975 | 20 + 50 | TESTING | 500 | EMP5 | 1250.23 | 30 + 50 | TESTING | 600 | EMP6 | 2850 | 30 + 50 | TESTING | 700 | EMP7 | 2450.34 | 10 + 50 | TESTING | 800 | EMP8 | 3000 | 20 + 50 | TESTING | 900 | EMP9 | 5000 | 10 + 50 | TESTING | 1000 | EMP10 | 1500 | 30 + 50 | TESTING | 1100 | EMP11 | 1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | +(19 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON ((d.c1 = e.c8 OR e.c4 > d.c1) AND e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Limit + -> Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(5 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON ((d.c1 = e.c8 OR e.c4 > d.c1) OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+---------+---- + 40 | HR | 1400 | EMP14 | 1300 | 10 + 40 | HR | 1500 | EMP15 | 950 | 60 + 40 | HR | 1600 | EMP16 | | + 50 | TESTING | 100 | EMP1 | 800.3 | 20 + 50 | TESTING | 200 | EMP2 | 1600 | 30 + 50 | TESTING | 300 | EMP3 | 1250 | 30 + 50 | TESTING | 400 | EMP4 | 2975 | 20 + 50 | TESTING | 500 | EMP5 | 1250.23 | 30 + 50 | TESTING | 600 | EMP6 | 2850 | 30 + 50 | TESTING | 700 | EMP7 | 2450.34 | 10 + 50 | TESTING | 800 | EMP8 | 3000 | 20 + 50 | TESTING | 900 | EMP9 | 5000 | 10 + 50 | TESTING | 1000 | EMP10 | 1500 | 30 + 50 | TESTING | 1100 | EMP11 | 1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | 
TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | + | | | | | +(20 rows) + +-- Column comparing with 'Constant' pushed down. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON d.c1 = 20 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 e) LEFT JOIN (mongo_fdw_regress.test_tbl2 d) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON (d.c1 = 20 AND e.c2 = 'EMP1') ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + | | 200 | EMP2 | 1600 | 30 + | | 300 | EMP3 | 1250 | 30 + | | 400 | EMP4 | 2975 | 20 + | | 500 | EMP5 | 1250.23 | 30 + | | 600 | EMP6 | 2850 | 30 + | | 700 | EMP7 | 2450.34 | 10 + | | 800 | EMP8 | 3000 | 20 + | | 900 | EMP9 | 5000 | 10 + | | 1000 | EMP10 | 1500 | 30 + | | 1100 | EMP11 | 1100 | 20 + | | 1200 | EMP12 | 950 | 30 + | | 1300 | EMP13 | 3000 | 20 + | | 1400 | EMP14 | 1300 | 10 + | | 1500 | EMP15 | 950 | 60 + | | 1600 | EMP16 | | +(16 rows) + +-- Push INNER JOIN. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 +(14 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON ((d.c1 = e.c8 OR e.c4 > d.c1) AND e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(5 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON ((d.c1 = e.c8 OR e.c4 > d.c1) OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+---------+---- + 40 | HR | 1400 | EMP14 | 1300 | 10 + 40 | HR | 1500 | EMP15 | 950 | 60 + 40 | HR | 1600 | EMP16 | | + 50 | TESTING | 100 | EMP1 | 800.3 | 20 + 50 | TESTING | 200 | EMP2 | 1600 | 30 + 50 | TESTING | 300 | EMP3 | 1250 | 30 + 50 | TESTING | 400 | EMP4 | 2975 | 20 
+ 50 | TESTING | 500 | EMP5 | 1250.23 | 30 + 50 | TESTING | 600 | EMP6 | 2850 | 30 + 50 | TESTING | 700 | EMP7 | 2450.34 | 10 + 50 | TESTING | 800 | EMP8 | 3000 | 20 + 50 | TESTING | 900 | EMP9 | 5000 | 10 + 50 | TESTING | 1000 | EMP10 | 1500 | 30 + 50 | TESTING | 1100 | EMP11 | 1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | +(19 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(5 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+------+---- + 50 | TESTING | 800 | EMP8 | 3000 | 20 + 50 | TESTING | 900 | EMP9 | 5000 | 10 + 50 | TESTING | 1000 | EMP10 | 1500 | 30 + 50 | TESTING | 1100 | EMP11 | 1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | +(9 rows) + +-- Column comparing with 'Constant' pushed down. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+-------+---- + 10 | DEVELOPMENT | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 100 | EMP1 | 800.3 | 20 + 40 | HR | 100 | EMP1 | 800.3 | 20 + 50 | TESTING | 100 | EMP1 | 800.3 | 20 + | | 100 | EMP1 | 800.3 | 20 +(10 rows) + +-- INNER JOIN with WHERE clause. Should execute where condition separately +-- (NOT added into join clauses) on remote side. 
+EXPLAIN (COSTS OFF) +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (d.c8 = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl2 e) +(2 rows) + +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (d.c8 = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; + c1 | c1 +-----+---- + 100 | 20 +(1 row) + +-- INNER JOIN in which join clause is not pushable but WHERE condition is +-- pushable with join clause 'TRUE'. +EXPLAIN (COSTS OFF) +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (abs(d.c8) = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Foreign Scan + Filter: (abs(c8) = c1) + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl2 e) +(3 rows) + +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (abs(d.c8) = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; + c1 | c1 +-----+---- + 100 | 20 +(1 row) + +-- With ORDER BY pushdown disabled. +SET mongo_fdw.enable_order_by_pushdown TO OFF; +EXPLAIN (COSTS OFF) +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (abs(d.c8) = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: e.c3 DESC NULLS LAST + -> Foreign Scan + Filter: (abs(c8) = c1) + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl2 e) +(5 rows) + +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (abs(d.c8) = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; + c1 | c1 +-----+---- + 100 | 20 +(1 row) + +SET mongo_fdw.enable_order_by_pushdown TO ON; +SET enable_mergejoin TO OFF; +SET enable_nestloop TO OFF; +-- Local-Foreign table joins. 
+EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN l_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------- + Sort + Sort Key: d.c1 NULLS FIRST, e.c1 NULLS FIRST + -> Hash Left Join + Hash Cond: (d.c1 = e.c8) + -> Foreign Scan on f_test_tbl2 d + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Hash + -> Seq Scan on l_test_tbl1 e +(8 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN l_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(17 rows) + +RESET enable_mergejoin; +RESET enable_nestloop; +-- JOIN in sub-query, should be pushed down. +EXPLAIN (COSTS OFF) +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 IN (SELECT f1.c1 FROM f_test_tbl1 f1 LEFT JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1)) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: l.c1 NULLS FIRST, l.c8 NULLS FIRST + -> Hash Join + Hash Cond: (l.c1 = f1.c1) + -> Seq Scan on l_test_tbl1 l + -> Hash + -> HashAggregate + Group Key: f1.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 f1) LEFT JOIN (mongo_fdw_regress.test_tbl2 f2) +(10 rows) + +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 IN (SELECT f1.c1 FROM f_test_tbl1 f1 LEFT JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1)) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c6 | c8 +------+---------+---- + 100 | 800.3 | 20 + 200 | 1600 | 30 + 300 | 1250 | 30 + 400 | 2975 | 20 + 500 | 1250.23 | 30 + 600 | 2850 | 30 + 700 | 2450.34 | 10 + 800 | 3000 | 20 + 900 | 5000 | 10 + 1000 | 1500 | 30 + 1100 | 1100 | 20 + 1200 | 950 | 30 + 1300 | 3000 | 20 + 1400 | 1300 | 10 + 1500 | 950 | 60 + 1600 | | +(16 rows) + +EXPLAIN (COSTS OFF) +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 = (SELECT f1.c1 FROM f_test_tbl1 f1 LEFT JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1) LIMIT 1) ORDER BY 1, 3; + QUERY PLAN +---------------------------------------------------------------------------------------------------------- + Sort + Sort Key: l.c8 + InitPlan 1 (returns $0) + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 f1) LEFT JOIN (mongo_fdw_regress.test_tbl2 f2) + -> Seq Scan on l_test_tbl1 l + Filter: (c1 = $0) +(7 rows) + +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 = (SELECT f1.c1 FROM f_test_tbl1 f1 LEFT JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1) LIMIT 1) ORDER BY 1, 3; + c1 | c6 | c8 +-----+-------+---- + 100 | 800.3 | 20 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 = (SELECT f1.c1 FROM 
f_test_tbl1 f1 INNER JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1) LIMIT 1) ORDER BY 1, 3; + QUERY PLAN +----------------------------------------------------------------------------------------------------------- + Sort + Sort Key: l.c8 + InitPlan 1 (returns $0) + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 f1) INNER JOIN (mongo_fdw_regress.test_tbl2 f2) + -> Seq Scan on l_test_tbl1 l + Filter: (c1 = $0) +(7 rows) + +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 = (SELECT f1.c1 FROM f_test_tbl1 f1 INNER JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1) LIMIT 1) ORDER BY 1, 3; + c1 | c6 | c8 +-----+-------+---- + 100 | 800.3 | 20 +(1 row) + +-- Execute JOIN through PREPARE statement. +PREPARE pre_stmt_left_join AS +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c4 > d.c1) ORDER BY 1, 3 OFFSET 70; +EXPLAIN (COSTS OFF) +EXECUTE pre_stmt_left_join; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Limit + -> Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(5 rows) + +EXECUTE pre_stmt_left_join; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+------+---- + 50 | TESTING | 1100 | EMP11 | 1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | + | | | | | +(7 rows) + +PREPARE pre_stmt_inner_join AS +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c4 > d.c1) ORDER BY 1, 3 OFFSET 70; +EXPLAIN (COSTS OFF) +EXECUTE pre_stmt_inner_join; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(5 rows) + +EXECUTE pre_stmt_inner_join; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+------+---- + 50 | TESTING | 1100 | EMP11 | 1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | +(6 rows) + +-- join + WHERE clause push-down. 
+EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE d.c1 = 10 ORDER BY 1 DESC NULLS LAST, 3 DESC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE d.c1 = 10 ORDER BY 1 DESC NULLS LAST, 3 DESC NULLS LAST; + c1 | c2 | c1 | c2 | c6 | c8 +----+-------------+------+-------+---------+---- + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 +(3 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE e.c8 = 10 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 e) LEFT JOIN (mongo_fdw_regress.test_tbl2 d) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE e.c8 = 10 ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+-------------+------+-------+---------+---- + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 +(3 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE d.c2 = 'SALES' ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE d.c2 = 'SALES' ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+-------+------+-------+---------+---- + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 +(6 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE e.c2 = 'EMP2' ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE e.c2 = 'EMP2' ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+-------+-----+------+------+---- + 30 | SALES | 200 | EMP2 | 1600 | 30 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') WHERE d.c1 = 10 OR e.c8 = 30 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort 
Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') WHERE d.c1 = 10 OR e.c8 = 30 ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+-------------+-----+------+-------+---- + 10 | DEVELOPMENT | 100 | EMP1 | 800.3 | 20 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, d.c6, d.c8 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8 AND (e.c1 = 20 OR d.c2 = 'EMP1')) WHERE e.c1 = 20 AND d.c8 = 20 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl2 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, d.c6, d.c8 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8 AND (e.c1 = 20 OR d.c2 = 'EMP1')) WHERE e.c1 = 20 AND d.c8 = 20 ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +------+-------+----+----------------+-------+---- + 100 | EMP1 | 20 | ADMINISTRATION | 800.3 | 20 + 400 | EMP4 | 20 | ADMINISTRATION | 2975 | 20 + 800 | EMP8 | 20 | ADMINISTRATION | 3000 | 20 + 1100 | EMP11 | 20 | ADMINISTRATION | 1100 | 20 + 1300 | EMP13 | 20 | ADMINISTRATION | 3000 | 20 +(5 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8 AND (d.c5 = '02-22-1981' OR d.c5 = '12-17-1980')) ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Sort + Sort Key: d.c1, d.c5 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) LEFT JOIN (mongo_fdw_regress.test_tbl2 e) +(4 rows) + +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8 AND (d.c5 = '02-22-1981' OR d.c5 = '12-17-1980')) ORDER BY 1, 3; + c1 | c2 | c5 | c1 | c2 +------+-------+------------+----+---------------- + 100 | EMP1 | 12-17-1980 | 20 | ADMINISTRATION + 200 | EMP2 | 02-20-1981 | | + 300 | EMP3 | 02-22-1981 | 30 | SALES + 400 | EMP4 | 04-02-1981 | | + 500 | EMP5 | 09-28-1981 | | + 600 | EMP6 | 05-01-1981 | | + 700 | EMP7 | 06-09-1981 | | + 800 | EMP8 | 04-19-1987 | | + 900 | EMP9 | 11-17-1981 | | + 1000 | EMP10 | 09-08-1980 | | + 1100 | EMP11 | 05-23-1987 | | + 1200 | EMP12 | 12-03-1981 | | + 1300 | EMP13 | 12-03-1981 | | + 1400 | EMP14 | 01-23-1982 | | + 1500 | EMP15 | 12-25-2000 | | + 1600 | EMP16 | | | +(16 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8) WHERE d.c5 = '02-22-1981' ORDER BY 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Sort + Sort Key: d.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) LEFT JOIN (mongo_fdw_regress.test_tbl2 e) +(4 rows) + +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8) WHERE d.c5 = '02-22-1981' ORDER BY 1; + c1 | c2 | c5 | c1 | c2 +-----+------+------------+----+------- + 300 | EMP3 | 02-22-1981 | 30 | SALES +(1 row) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') WHERE d.c1 = 10 OR e.c8 = 30 ORDER BY 1 DESC NULLS LAST, 3 
DESC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Filter: ((c1 = 10) OR (c8 = 30)) + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(3 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') WHERE d.c1 = 10 OR e.c8 = 30 ORDER BY 1 DESC NULLS LAST, 3 DESC NULLS LAST; + c1 | c2 | c1 | c2 | c6 | c8 +----+-------------+-----+------+-------+---- + 10 | DEVELOPMENT | 100 | EMP1 | 800.3 | 20 +(1 row) + +-- Natural join, should push-down. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d NATURAL JOIN f_test_tbl1 e WHERE e.c1 > d.c8 ORDER BY 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d NATURAL JOIN f_test_tbl1 e WHERE e.c1 > d.c8 ORDER BY 1; + c1 | c2 | c5 | c1 | c2 +------+-------+------------+------+------- + 100 | EMP1 | 12-17-1980 | 100 | EMP1 + 200 | EMP2 | 02-20-1981 | 200 | EMP2 + 300 | EMP3 | 02-22-1981 | 300 | EMP3 + 400 | EMP4 | 04-02-1981 | 400 | EMP4 + 500 | EMP5 | 09-28-1981 | 500 | EMP5 + 600 | EMP6 | 05-01-1981 | 600 | EMP6 + 700 | EMP7 | 06-09-1981 | 700 | EMP7 + 800 | EMP8 | 04-19-1987 | 800 | EMP8 + 1000 | EMP10 | 09-08-1980 | 1000 | EMP10 + 1100 | EMP11 | 05-23-1987 | 1100 | EMP11 + 1200 | EMP12 | 12-03-1981 | 1200 | EMP12 + 1300 | EMP13 | 12-03-1981 | 1300 | EMP13 + 1400 | EMP14 | 01-23-1982 | 1400 | EMP14 + 1500 | EMP15 | 12-25-2000 | 1500 | EMP15 +(14 rows) + +-- Self join, should push-down. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d INNER JOIN f_test_tbl1 e ON e.c8 = d.c8 ORDER BY 1 OFFSET 65; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(5 rows) + +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d INNER JOIN f_test_tbl1 e ON e.c8 = d.c8 ORDER BY 1 OFFSET 65; + c1 | c2 | c5 | c1 | c2 +------+-------+------------+------+------- + 1300 | EMP13 | 12-03-1981 | 1100 | EMP11 + 1300 | EMP13 | 12-03-1981 | 1300 | EMP13 + 1400 | EMP14 | 01-23-1982 | 700 | EMP7 + 1400 | EMP14 | 01-23-1982 | 900 | EMP9 + 1400 | EMP14 | 01-23-1982 | 1400 | EMP14 + 1500 | EMP15 | 12-25-2000 | 1500 | EMP15 +(6 rows) + +-- Join in CTE. +-- Explain plan difference between v11 (or pre) and later. 
+EXPLAIN (COSTS false, VERBOSE) +WITH t (c1_1, c1_3, c2_1) AS ( + SELECT d.c1, d.c3, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (d.c8 = e.c1) +) SELECT c1_1, c2_1 FROM t ORDER BY c1_3, c1_1; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: d.c1, e.c1, d.c3 + Sort Key: d.c3, d.c1 + -> Foreign Scan + Output: d.c1, e.c1, d.c3 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl2 e) +(6 rows) + +WITH t (c1_1, c1_3, c2_1) AS ( + SELECT d.c1, d.c3, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (d.c8 = e.c1) +) SELECT c1_1, c2_1 FROM t ORDER BY c1_3, c1_1; + c1_1 | c2_1 +------+------ + 100 | 20 + 1100 | 20 + 1200 | 30 + 1400 | 10 + 800 | 20 + 1300 | 20 + 900 | 10 + 400 | 20 + 600 | 30 + 700 | 10 + 200 | 30 + 300 | 30 + 500 | 30 + 1000 | 30 +(14 rows) + +-- WHERE with boolean expression. Should push-down. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl2 e LEFT JOIN f_test_tbl1 d ON (e.c1 = d.c8) WHERE d.c5 = '02-22-1981' OR d.c5 = '12-17-1980' ORDER BY 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 e) INNER JOIN (mongo_fdw_regress.test_tbl1 d) +(4 rows) + +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl2 e LEFT JOIN f_test_tbl1 d ON (e.c1 = d.c8) WHERE d.c5 = '02-22-1981' OR d.c5 = '12-17-1980' ORDER BY 1; + c1 | c2 | c5 | c1 | c2 +-----+------+------------+----+---------------- + 100 | EMP1 | 12-17-1980 | 20 | ADMINISTRATION + 300 | EMP3 | 02-22-1981 | 30 | SALES +(2 rows) + +-- Nested joins(Don't push-down nested join) +SET enable_mergejoin TO OFF; +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8) LEFT JOIN f_test_tbl1 f ON (f.c8 = e.c1) ORDER BY d.c1 OFFSET 65 ; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Limit + -> Sort + Sort Key: d.c1 + -> Hash Left Join + Hash Cond: (e.c1 = f.c8) + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) LEFT JOIN (mongo_fdw_regress.test_tbl2 e) + -> Hash + -> Foreign Scan on f_test_tbl1 f + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8) LEFT JOIN f_test_tbl1 f ON (f.c8 = e.c1) ORDER BY d.c1 OFFSET 65; + c1 | c2 | c5 | c1 | c2 +------+-------+------------+----+---------------- + 1300 | EMP13 | 12-03-1981 | 20 | ADMINISTRATION + 1300 | EMP13 | 12-03-1981 | 20 | ADMINISTRATION + 1400 | EMP14 | 01-23-1982 | 10 | DEVELOPMENT + 1400 | EMP14 | 01-23-1982 | 10 | DEVELOPMENT + 1400 | EMP14 | 01-23-1982 | 10 | DEVELOPMENT + 1500 | EMP15 | 12-25-2000 | | + 1600 | EMP16 | | | +(7 rows) + +RESET enable_mergejoin; +-- Not supported expressions won't push-down(e.g. function expression, etc.) 
+EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (ABS(d.c1) = e.c8) ORDER BY 1, 3; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: d.c1, e.c1 + -> Merge Left Join + Merge Cond: ((abs(d.c1)) = e.c8) + -> Sort + Sort Key: (abs(d.c1)) + -> Foreign Scan on f_test_tbl2 d + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Sort Key: e.c8 + -> Foreign Scan on f_test_tbl1 e + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(12 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (ABS(d.c1) = e.c8) ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | + | | | | | +(17 rows) + +-- Don't pushdown when whole row reference is involved. +EXPLAIN (COSTS OFF) +SELECT d, e + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8) LEFT JOIN f_test_tbl1 f ON (f.c8 = e.c1) ORDER BY e.c1 OFFSET 65; + QUERY PLAN +-------------------------------------------------------------------------------- + Limit + -> Merge Left Join + Merge Cond: (e.c1 = f.c8) + -> Sort + Sort Key: e.c1 + -> Hash Left Join + Hash Cond: (d.c8 = e.c1) + -> Foreign Scan on f_test_tbl1 d + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + -> Foreign Scan on f_test_tbl2 e + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Sort Key: f.c8 + -> Foreign Scan on f_test_tbl1 f + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(16 rows) + +-- Don't pushdown when full document retrieval is involved. 
+EXPLAIN (COSTS OFF) +SELECT json_data.key AS key1, json_data.value AS value1 + FROM test_text, test_varchar, json_each_text(test_text.__doc::json) AS json_data WHERE key NOT IN ('_id') ORDER BY json_data.key COLLATE "C"; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: json_data.key COLLATE "C" + -> Nested Loop + -> Nested Loop + -> Foreign Scan on test_text + Foreign Namespace: mongo_fdw_regress.warehouse + -> Function Scan on json_each_text json_data + Filter: (key <> '_id'::text) + -> Materialize + -> Foreign Scan on test_varchar + Foreign Namespace: mongo_fdw_regress.warehouse +(11 rows) + +SELECT json_data.key AS key1, json_data.value AS value1 + FROM test_text, test_varchar, json_each_text(test_text.__doc::json) AS json_data WHERE key NOT IN ('_id') ORDER BY json_data.key COLLATE "C"; + key1 | value1 +-------------------+----------------------------- + warehouse_created | { "$date" : 1418368330000 } + warehouse_created | { "$date" : 1418368330000 } + warehouse_created | { "$date" : 1447229590000 } + warehouse_created | { "$date" : 1447229590000 } + warehouse_id | 2 + warehouse_id | 1 + warehouse_id | 1 + warehouse_id | 2 + warehouse_name | Laptop + warehouse_name | Laptop + warehouse_name | UPS + warehouse_name | UPS +(12 rows) + +-- Join two tables from two different foreign servers. +EXPLAIN (COSTS OFF) +SELECT d.c1, e.c1 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl3 e ON d.c1 = e.c1 ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------- + Merge Left Join + Merge Cond: (d.c1 = e.c1) + -> Sort + Sort Key: d.c1 + -> Foreign Scan on f_test_tbl2 d + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Sort Key: e.c1 + -> Foreign Scan on f_test_tbl3 e + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(10 rows) + +-- SEMI JOIN, not pushed down +EXPLAIN (COSTS OFF) +SELECT d.c2 + FROM f_test_tbl1 d WHERE EXISTS (SELECT 1 FROM f_test_tbl2 e WHERE d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c2 + -> Hash Join + Hash Cond: (d.c8 = e.c1) + -> Foreign Scan on f_test_tbl1 d + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + -> HashAggregate + Group Key: e.c1 + -> Foreign Scan on f_test_tbl2 e + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(12 rows) + +SELECT d.c2 + FROM f_test_tbl1 d WHERE EXISTS (SELECT 1 FROM f_test_tbl2 e WHERE d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; + c2 +------- + EMP1 + EMP10 + EMP11 + EMP12 + EMP13 + EMP14 + EMP2 + EMP3 + EMP4 + EMP5 +(10 rows) + +-- ANTI JOIN, not pushed down +EXPLAIN (COSTS OFF) +SELECT d.c2 + FROM f_test_tbl1 d WHERE NOT EXISTS (SELECT 1 FROM f_test_tbl2 e WHERE d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c2 + -> Hash Anti Join + Hash Cond: (d.c8 = e.c1) + -> Foreign Scan on f_test_tbl1 d + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + -> Foreign Scan on f_test_tbl2 e + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(10 rows) + +SELECT d.c2 + FROM f_test_tbl1 d WHERE NOT EXISTS (SELECT 1 FROM f_test_tbl2 e WHERE d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; + c2 +------- + EMP15 + EMP16 +(2 rows) + +-- FULL OUTER JOIN, should not pushdown. 
+EXPLAIN (COSTS OFF) +SELECT d.c1, e.c1 + FROM f_test_tbl1 d FULL JOIN f_test_tbl2 e ON (d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c2 + -> Hash Full Join + Hash Cond: (d.c8 = e.c1) + -> Foreign Scan on f_test_tbl1 d + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + -> Foreign Scan on f_test_tbl2 e + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(10 rows) + +SELECT d.c1, e.c1 + FROM f_test_tbl1 d FULL JOIN f_test_tbl2 e ON (d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; + c1 | c1 +------+---- + 100 | 20 + 1000 | 30 + 1100 | 20 + 1200 | 30 + 1300 | 20 + 1400 | 10 + 1500 | + 1600 | + 200 | 30 + 300 | 30 +(10 rows) + +-- CROSS JOIN can be pushed down +EXPLAIN (COSTS OFF) +SELECT e.c1, d.c2 + FROM f_test_tbl1 d CROSS JOIN f_test_tbl2 e ORDER BY e.c1, d.c2 LIMIT 10; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: e.c1, d.c2 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl2 e) +(5 rows) + +SELECT e.c1, d.c2 + FROM f_test_tbl1 d CROSS JOIN f_test_tbl2 e ORDER BY e.c1, d.c2 LIMIT 10; + c1 | c2 +----+------- + 10 | EMP1 + 10 | EMP10 + 10 | EMP11 + 10 | EMP12 + 10 | EMP13 + 10 | EMP14 + 10 | EMP15 + 10 | EMP16 + 10 | EMP2 + 10 | EMP3 +(10 rows) + +-- FDW-131: Limit and offset pushdown with join pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT round(2.2) OFFSET 2; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(3 rows) + +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT round(2.2) OFFSET 2; + c1 | c1 +-----+---- + 100 | 10 + 100 | 30 +(2 rows) + +-- Limit as NULL, no LIMIT/OFFSET pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (t1.c8 = t2.c1) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT NULL OFFSET 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(3 rows) + +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (t1.c8 = t2.c1) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT NULL OFFSET 1; + c1 | c1 +------+---- + 200 | 30 + 300 | 30 + 400 | 20 + 500 | 30 + 600 | 30 + 700 | 10 + 800 | 20 + 900 | 10 + 1000 | 30 + 1100 | 20 + 1200 | 30 + 1300 | 20 + 1400 | 10 +(13 rows) + +-- Limit as ALL, no LIMIT/OFFSET pushdown. 
+EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (t1.c8 = t2.c1) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT ALL OFFSET 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(3 rows) + +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (t1.c8 = t2.c1) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT ALL OFFSET 1; + c1 | c1 +------+---- + 200 | 30 + 300 | 30 + 400 | 20 + 500 | 30 + 600 | 30 + 700 | 10 + 800 | 20 + 900 | 10 + 1000 | 30 + 1100 | 20 + 1200 | 30 + 1300 | 20 + 1400 | 10 +(13 rows) + +-- Offset as NULL, no LIMIT/OFFSET pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT 3 OFFSET NULL; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(3 rows) + +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT 3 OFFSET NULL; + c1 | c1 +-----+---- + 100 | 10 + 100 | 20 + 100 | 30 +(3 rows) + +-- Limit with -ve value. Shouldn't pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT -2; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Limit + Output: t1.c1, t2.c1 + -> Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(5 rows) + +-- Should throw an error. +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT -2; +ERROR: LIMIT must not be negative +-- Offset with -ve value. Shouldn't pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST OFFSET -1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Limit + Output: t1.c1, t2.c1 + -> Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(5 rows) + +-- Should throw an error. +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST OFFSET -1; +ERROR: OFFSET must not be negative +-- Limit/Offset with -ve value. Shouldn't pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT -3 OFFSET -1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Limit + Output: t1.c1, t2.c1 + -> Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(5 rows) + +-- Should throw an error. 
+SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT -3 OFFSET -1; +ERROR: OFFSET must not be negative +-- Limit with expression evaluating to -ve value. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT (1 - (SELECT COUNT(*) FROM f_test_tbl1)); + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Limit + Output: t1.c1, t2.c1 + InitPlan 1 (returns $0) + -> Foreign Scan + Output: (count(*)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 f_test_tbl1) + -> Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(9 rows) + +-- Should throw an error. +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT (1 - (SELECT COUNT(*) FROM f_test_tbl1)); +ERROR: LIMIT must not be negative +-- Test partition-wise join +SET enable_partitionwise_join TO on; +-- Create the partition tables +CREATE TABLE fprt1 (_id NAME, c1 INTEGER, c2 INTEGER, c3 TEXT) PARTITION BY RANGE(c1); +CREATE FOREIGN TABLE ftprt1_p1 PARTITION OF fprt1 FOR VALUES FROM (1) TO (4) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test1'); +CREATE FOREIGN TABLE ftprt1_p2 PARTITION OF fprt1 FOR VALUES FROM (5) TO (8) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test2'); +CREATE TABLE fprt2 (_id NAME, c1 INTEGER, c2 INTEGER, c3 TEXT) PARTITION BY RANGE(c2); +CREATE FOREIGN TABLE ftprt2_p1 PARTITION OF fprt2 FOR VALUES FROM (1) TO (4) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test3'); +CREATE FOREIGN TABLE ftprt2_p2 PARTITION OF fprt2 FOR VALUES FROM (5) TO (8) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test4'); +-- Inner join two tables +-- Different explain plan on v10 as partition-wise join is not supported there. +SET enable_mergejoin TO OFF; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.c1, t2.c2 + FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.c1 = t2.c2) ORDER BY 1,2; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: t1.c1, t2.c2 + Sort Key: t1.c1 + -> Append + -> Foreign Scan + Output: t1_1.c1, t2_1.c2 + Foreign Namespace: (mongo_fdw_regress.test1 t1) INNER JOIN (mongo_fdw_regress.test3 t2) + -> Foreign Scan + Output: t1_2.c1, t2_2.c2 + Foreign Namespace: (mongo_fdw_regress.test2 t1) INNER JOIN (mongo_fdw_regress.test4 t2) +(10 rows) + +SELECT t1.c1, t2.c2 + FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.c1 = t2.c2) ORDER BY 1,2; + c1 | c2 +----+---- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + 8 | 8 +(8 rows) + +-- Inner join three tables +-- Different explain plan on v10 as partition-wise join is not supported there. 
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.c1, t2.c2, t3.c2 + FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.c1 = t2.c2) INNER JOIN fprt1 t3 ON (t3.c1 = t2.c2) ORDER BY 1,2; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Sort + Output: t1.c1, t2.c2, t3.c2 + Sort Key: t1.c1 + -> Append + -> Hash Join + Output: t1_1.c1, t2_1.c2, t3_1.c2 + Hash Cond: (t1_1.c1 = t3_1.c1) + -> Foreign Scan + Output: t1_1.c1, t2_1.c2 + Foreign Namespace: (mongo_fdw_regress.test1 t1) INNER JOIN (mongo_fdw_regress.test3 t2) + -> Hash + Output: t3_1.c2, t3_1.c1 + -> Foreign Scan on public.ftprt1_p1 t3_1 + Output: t3_1.c2, t3_1.c1 + Foreign Namespace: mongo_fdw_regress.test1 + -> Hash Join + Output: t1_2.c1, t2_2.c2, t3_2.c2 + Hash Cond: (t1_2.c1 = t3_2.c1) + -> Foreign Scan + Output: t1_2.c1, t2_2.c2 + Foreign Namespace: (mongo_fdw_regress.test2 t1) INNER JOIN (mongo_fdw_regress.test4 t2) + -> Hash + Output: t3_2.c2, t3_2.c1 + -> Foreign Scan on public.ftprt1_p2 t3_2 + Output: t3_2.c2, t3_2.c1 + Foreign Namespace: mongo_fdw_regress.test2 +(26 rows) + +SELECT t1.c1, t2.c2, t3.c2 + FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.c1 = t2.c2) INNER JOIN fprt1 t3 ON (t3.c1 = t2.c2) ORDER BY 1,2; + c1 | c2 | c2 +----+----+---- + 1 | 1 | 1 + 2 | 2 | 2 + 3 | 3 | 3 + 4 | 4 | 4 + 5 | 5 | 5 + 6 | 6 | 6 + 7 | 7 | 7 + 8 | 8 | 8 +(8 rows) + +RESET enable_mergejoin; +-- Join with lateral reference +-- Different explain plan on v10 as partition-wise join is not supported there. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.c1, t1.c2 + FROM fprt1 t1, LATERAL (SELECT t2.c1, t2.c2 FROM fprt2 t2 + WHERE t1.c1 = t2.c2 AND t1.c2 = t2.c1) q WHERE t1.c1 % 2 = 0 ORDER BY 1,2; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: t1.c1, t1.c2 + Sort Key: t1.c1, t1.c2 + -> Append + -> Foreign Scan + Output: t1_1.c1, t1_1.c2 + Foreign Namespace: (mongo_fdw_regress.test1 t1) INNER JOIN (mongo_fdw_regress.test3 t2) + -> Foreign Scan + Output: t1_2.c1, t1_2.c2 + Foreign Namespace: (mongo_fdw_regress.test2 t1) INNER JOIN (mongo_fdw_regress.test4 t2) +(10 rows) + +SELECT t1.c1, t1.c2 + FROM fprt1 t1, LATERAL (SELECT t2.c1, t2.c2 FROM fprt2 t2 + WHERE t1.c1 = t2.c2 AND t1.c2 = t2.c1) q WHERE t1.c1 % 2 = 0 ORDER BY 1,2; + c1 | c2 +----+---- + 2 | 2 + 4 | 4 + 6 | 6 + 8 | 8 +(4 rows) + +-- With PHVs, partitionwise join selected but no join pushdown +-- Table alias in foreign scan is different for v12, v11 and v10. 
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.c1, t1.phv, t2.c2, t2.phv + FROM (SELECT 't1_phv' phv, * FROM fprt1 WHERE c1 % 2 = 0) t1 LEFT JOIN + (SELECT 't2_phv' phv, * FROM fprt2 WHERE c2 % 2 = 0) t2 ON (t1.c1 = t2.c2) + ORDER BY t1.c1, t2.c2; + QUERY PLAN +-------------------------------------------------------------------------------- + Incremental Sort + Output: fprt1.c1, 't1_phv'::text, fprt2.c2, ('t2_phv'::text) + Sort Key: fprt1.c1, fprt2.c2 + Presorted Key: fprt1.c1 + -> Merge Append + Sort Key: fprt1.c1 + -> Merge Left Join + Output: fprt1_1.c1, 't1_phv'::text, fprt2_1.c2, ('t2_phv'::text) + Merge Cond: (fprt1_1.c1 = fprt2_1.c2) + -> Sort + Output: fprt1_1.c1 + Sort Key: fprt1_1.c1 + -> Foreign Scan on public.ftprt1_p1 fprt1_1 + Output: fprt1_1.c1 + Foreign Namespace: mongo_fdw_regress.test1 + -> Sort + Output: fprt2_1.c2, ('t2_phv'::text) + Sort Key: fprt2_1.c2 + -> Foreign Scan on public.ftprt2_p1 fprt2_1 + Output: fprt2_1.c2, 't2_phv'::text + Foreign Namespace: mongo_fdw_regress.test3 + -> Merge Left Join + Output: fprt1_2.c1, 't1_phv'::text, fprt2_2.c2, ('t2_phv'::text) + Merge Cond: (fprt1_2.c1 = fprt2_2.c2) + -> Sort + Output: fprt1_2.c1 + Sort Key: fprt1_2.c1 + -> Foreign Scan on public.ftprt1_p2 fprt1_2 + Output: fprt1_2.c1 + Foreign Namespace: mongo_fdw_regress.test2 + -> Sort + Output: fprt2_2.c2, ('t2_phv'::text) + Sort Key: fprt2_2.c2 + -> Foreign Scan on public.ftprt2_p2 fprt2_2 + Output: fprt2_2.c2, 't2_phv'::text + Foreign Namespace: mongo_fdw_regress.test4 +(36 rows) + +SELECT t1.c1, t1.phv, t2.c2, t2.phv + FROM (SELECT 't1_phv' phv, * FROM fprt1 WHERE c1 % 2 = 0) t1 LEFT JOIN + (SELECT 't2_phv' phv, * FROM fprt2 WHERE c2 % 2 = 0) t2 ON (t1.c1 = t2.c2) + ORDER BY t1.c1, t2.c2; + c1 | phv | c2 | phv +----+--------+----+-------- + 2 | t1_phv | 2 | t2_phv + 4 | t1_phv | 4 | t2_phv + 6 | t1_phv | 6 | t2_phv + 8 | t1_phv | 8 | t2_phv +(4 rows) + +RESET enable_partitionwise_join; +-- FDW-445: Support enable_join_pushdown option at server level and table level. +-- Check only boolean values are accepted. +ALTER SERVER mongo_server OPTIONS (ADD enable_join_pushdown 'abc11'); +ERROR: enable_join_pushdown requires a Boolean value +-- Test the option at server level. 
+ALTER SERVER mongo_server OPTIONS (ADD enable_join_pushdown 'false'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +-------------------------------------------------------------------- + Incremental Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + Presorted Key: d.c1 + -> Merge Join + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Merge Cond: (d.c1 = e.c8) + -> Sort + Output: d.c1, d.c2 + Sort Key: d.c1 + -> Foreign Scan on public.f_test_tbl2 d + Output: d.c1, d.c2 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Output: e.c1, e.c2, e.c6, e.c8 + Sort Key: e.c8 + -> Foreign Scan on public.f_test_tbl1 e + Output: e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(19 rows) + +ALTER SERVER mongo_server OPTIONS (SET enable_join_pushdown 'true'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + -> Foreign Scan + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(6 rows) + +-- Test the option with outer rel. +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (ADD enable_join_pushdown 'false'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +-------------------------------------------------------------------- + Incremental Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + Presorted Key: d.c1 + -> Merge Join + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Merge Cond: (d.c1 = e.c8) + -> Sort + Output: d.c1, d.c2 + Sort Key: d.c1 + -> Foreign Scan on public.f_test_tbl2 d + Output: d.c1, d.c2 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Output: e.c1, e.c2, e.c6, e.c8 + Sort Key: e.c8 + -> Foreign Scan on public.f_test_tbl1 e + Output: e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(19 rows) + +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'true'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + -> Foreign Scan + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(6 rows) + +-- Test the option with inner rel. 
+ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (ADD enable_join_pushdown 'false'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +-------------------------------------------------------------------- + Incremental Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + Presorted Key: d.c1 + -> Merge Join + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Merge Cond: (d.c1 = e.c8) + -> Sort + Output: d.c1, d.c2 + Sort Key: d.c1 + -> Foreign Scan on public.f_test_tbl2 d + Output: d.c1, d.c2 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Output: e.c1, e.c2, e.c6, e.c8 + Sort Key: e.c8 + -> Foreign Scan on public.f_test_tbl1 e + Output: e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(19 rows) + +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'true'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + -> Foreign Scan + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(6 rows) + +-- Test that setting option at table level does not affect the setting at +-- server level. +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'false'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'false'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +-------------------------------------------------------------------- + Incremental Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + Presorted Key: d.c1 + -> Merge Join + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Merge Cond: (d.c1 = e.c8) + -> Sort + Output: d.c1, d.c2 + Sort Key: d.c1 + -> Foreign Scan on public.f_test_tbl2 d + Output: d.c1, d.c2 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Output: e.c1, e.c2, e.c6, e.c8 + Sort Key: e.c8 + -> Foreign Scan on public.f_test_tbl1 e + Output: e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(19 rows) + +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT t1.c1, t2.c2 + FROM f_test_tbl3 t1 JOIN f_test_tbl4 t2 ON (t1.c1 = t2.c8) ORDER BY 1, 2; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Sort + Output: t1.c1, t2.c2 + Sort Key: t1.c1, t2.c2 + -> Foreign Scan + Output: t1.c1, t2.c2 + Foreign Namespace: (mongo_fdw_regress.test_tbl2 t1) INNER JOIN (mongo_fdw_regress.test_tbl1 t2) +(6 rows) + +-- FDW-558: Test mongo_fdw.enable_join_pushdown GUC. +-- Negative testing for GUC value. +SET mongo_fdw.enable_join_pushdown to 'abc'; +ERROR: parameter "mongo_fdw.enable_join_pushdown" requires a Boolean value +-- Check default value. Should be ON. +SHOW mongo_fdw.enable_join_pushdown; + mongo_fdw.enable_join_pushdown +-------------------------------- + on +(1 row) + +-- Join pushdown should happen as the GUC enable_join_pushdown is true. 
+ALTER SERVER mongo_server OPTIONS (SET enable_join_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'true'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON (d.c1 = e.c8) ORDER BY 1, 2; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: d.c1, e.c8 + Sort Key: d.c1 + -> Foreign Scan + Output: d.c1, e.c8 + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(6 rows) + +--Disable the GUC enable_join_pushdown. +SET mongo_fdw.enable_join_pushdown to false; +-- Join pushdown shouldn't happen as the GUC enable_join_pushdown is false. +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON (d.c1 = e.c8) ORDER BY 1, 2; + QUERY PLAN +-------------------------------------------------------------- + Merge Join + Output: d.c1, e.c8 + Merge Cond: (d.c1 = e.c8) + -> Sort + Output: d.c1 + Sort Key: d.c1 + -> Foreign Scan on public.f_test_tbl2 d + Output: d.c1 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Output: e.c8 + Sort Key: e.c8 + -> Foreign Scan on public.f_test_tbl1 e + Output: e.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(15 rows) + +-- Enable the GUC and table level option is set to false, should not pushdown. +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'false'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'false'); +SET mongo_fdw.enable_join_pushdown to true; +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON (d.c1 = e.c8) ORDER BY 1, 2; + QUERY PLAN +-------------------------------------------------------------- + Merge Join + Output: d.c1, e.c8 + Merge Cond: (d.c1 = e.c8) + -> Sort + Output: d.c1 + Sort Key: d.c1 + -> Foreign Scan on public.f_test_tbl2 d + Output: d.c1 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Output: e.c8 + Sort Key: e.c8 + -> Foreign Scan on public.f_test_tbl1 e + Output: e.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(15 rows) + +-- FDW-589: Test enable_order_by_pushdown option at server and table level. 
+SET mongo_fdw.enable_join_pushdown to true; +SET mongo_fdw.enable_order_by_pushdown to true; +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'true'); +ALTER SERVER mongo_server OPTIONS (ADD enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (ADD enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (ADD enable_order_by_pushdown 'true'); +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | | | | + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(12 rows) + +-- One table level option is OFF. Shouldn't pushdown ORDER BY. +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_order_by_pushdown 'false'); +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Sort + Sort Key: d.c1 NULLS FIRST, e.c1 NULLS FIRST + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | | | | + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(12 rows) + +-- Test that setting option at table level does not affect the setting at +-- server level. 
+ALTER SERVER mongo_server OPTIONS (SET enable_order_by_pushdown 'false'); +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_order_by_pushdown 'true'); +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | | | | + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(12 rows) + +ALTER SERVER mongo_server OPTIONS (SET enable_order_by_pushdown 'true'); +-- When enable_join_pushdown option is disabled. Shouldn't pushdown join and +-- hence, ORDER BY too. +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'false'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'false'); +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: d.c1 NULLS FIRST, e.c1 NULLS FIRST + -> Hash Left Join + Hash Cond: (d.c1 = e.c8) + Join Filter: ((e.c4 > d.c1) AND (e.c2 < d.c3)) + -> Foreign Scan on f_test_tbl2 d + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Hash + -> Foreign Scan on f_test_tbl1 e + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | | | | + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(12 rows) + +DELETE FROM f_test_tbl1 WHERE c8 IS NULL; +DELETE FROM f_test_tbl1 WHERE c8 = 60; +DELETE FROM f_test_tbl2 WHERE c1 IS NULL; +DELETE FROM f_test_tbl2 WHERE c1 = 50; +DROP FOREIGN TABLE f_test_tbl1; +DROP FOREIGN TABLE f_test_tbl2; +DROP FOREIGN TABLE f_test_tbl3; +DROP FOREIGN TABLE f_test_tbl4; +DROP FOREIGN TABLE test_text; +DROP FOREIGN TABLE test_varchar; +DROP TABLE l_test_tbl1; +DROP FOREIGN TABLE ftprt1_p1; +DROP FOREIGN TABLE ftprt1_p2; +DROP FOREIGN TABLE ftprt2_p1; +DROP FOREIGN TABLE ftprt2_p2; +DROP 
TABLE IF EXISTS fprt1; +DROP TABLE IF EXISTS fprt2; +DROP USER MAPPING FOR public SERVER mongo_server1; +DROP SERVER mongo_server1; +DROP USER MAPPING FOR public SERVER mongo_server; +DROP SERVER mongo_server; +DROP EXTENSION mongo_fdw; diff --git a/expected/join_pushdown_1.out b/expected/join_pushdown_1.out new file mode 100644 index 0000000..5115d36 --- /dev/null +++ b/expected/join_pushdown_1.out @@ -0,0 +1,2093 @@ +\set MONGO_HOST `echo \'"$MONGO_HOST"\'` +\set MONGO_PORT `echo \'"$MONGO_PORT"\'` +\set MONGO_USER_NAME `echo \'"$MONGO_USER_NAME"\'` +\set MONGO_PASS `echo \'"$MONGO_PWD"\'` +-- Before running this file user must create database mongo_fdw_regress on +-- MongoDB with all permission for MONGO_USER_NAME user with MONGO_PASS +-- password and ran mongodb_init.sh file to load collections. +\c contrib_regression +CREATE EXTENSION IF NOT EXISTS mongo_fdw; +CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw + OPTIONS (address :MONGO_HOST, port :MONGO_PORT); +CREATE USER MAPPING FOR public SERVER mongo_server; +CREATE SERVER mongo_server1 FOREIGN DATA WRAPPER mongo_fdw + OPTIONS (address :MONGO_HOST, port :MONGO_PORT); +CREATE USER MAPPING FOR public SERVER mongo_server1; +-- Create foreign tables. +CREATE FOREIGN TABLE f_test_tbl1 (_id NAME, c1 INTEGER, c2 TEXT, c3 CHAR(9), c4 INTEGER, c5 pg_catalog.Date, c6 DECIMAL, c7 INTEGER, c8 INTEGER) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl1'); +CREATE FOREIGN TABLE f_test_tbl2 (_id NAME, c1 INTEGER, c2 TEXT, c3 TEXT) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl2'); +CREATE FOREIGN TABLE f_test_tbl3 (_id NAME, c1 INTEGER, c2 TEXT, c3 TEXT) + SERVER mongo_server1 OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl2'); +CREATE FOREIGN TABLE test_text ( __doc text) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'warehouse'); +CREATE FOREIGN TABLE test_varchar ( __doc varchar) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'warehouse'); +CREATE FOREIGN TABLE f_test_tbl4 (_id NAME, c1 INTEGER, c2 TEXT, c3 CHAR(9), c4 INTEGER, c5 pg_catalog.Date, c6 DECIMAL, c7 INTEGER, c8 INTEGER) + SERVER mongo_server1 OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl1'); +INSERT INTO f_test_tbl1 VALUES (0, 1500, 'EMP15', 'FINANCE', 1300, '2000-12-25', 950.0, 400, 60); +INSERT INTO f_test_tbl1 VALUES (0, 1600, 'EMP16', 'ADMIN', 600); +INSERT INTO f_test_tbl2 VALUES (0, 50, 'TESTING', 'NASHIK'); +INSERT INTO f_test_tbl2 VALUES (0); +-- Create local table. +CREATE TABLE l_test_tbl1 AS + SELECT c1, c2, c3, c4, c5, c6, c7, c8 FROM f_test_tbl1; +-- Push down LEFT OUTER JOIN. 
+EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(17 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl1 e LEFT OUTER JOIN f_test_tbl2 d ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 e) LEFT JOIN (mongo_fdw_regress.test_tbl2 d) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl1 e LEFT OUTER JOIN f_test_tbl2 d ON e.c8 = d.c1 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + | | 1500 | EMP15 | 950 | 60 + | | 1600 | EMP16 | | +(16 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 OR e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST OFFSET 50; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 OR e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST OFFSET 50; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+---------+---- + 40 | HR | 1300 | EMP13 | 3000 | 20 + 40 | HR | 1400 | EMP14 | 1300 | 10 + 40 | HR | 1500 | EMP15 | 950 | 60 + 40 | HR | 1600 | EMP16 | | + 50 | TESTING | 100 | EMP1 | 800.3 
| 20 + 50 | TESTING | 200 | EMP2 | 1600 | 30 + 50 | TESTING | 300 | EMP3 | 1250 | 30 + 50 | TESTING | 400 | EMP4 | 2975 | 20 + 50 | TESTING | 500 | EMP5 | 1250.23 | 30 + 50 | TESTING | 600 | EMP6 | 2850 | 30 + 50 | TESTING | 700 | EMP7 | 2450.34 | 10 + 50 | TESTING | 800 | EMP8 | 3000 | 20 + 50 | TESTING | 900 | EMP9 | 5000 | 10 + 50 | TESTING | 1000 | EMP10 | 1500 | 30 + 50 | TESTING | 1100 | EMP11 | 1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | +(20 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | | | | + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(12 rows) + +-- With ORDER BY pushdown disabled. +SET mongo_fdw.enable_order_by_pushdown TO OFF; +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Sort + Sort Key: d.c1 NULLS FIRST, e.c1 NULLS FIRST + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | | | | + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(12 rows) + +SET mongo_fdw.enable_order_by_pushdown TO ON; +-- Column comparing with 'Constant' pushed down. 
+EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = 20 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = 20 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | | | | + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 200 | EMP2 | 1600 | 30 + 20 | ADMINISTRATION | 300 | EMP3 | 1250 | 30 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 500 | EMP5 | 1250.23 | 30 + 20 | ADMINISTRATION | 600 | EMP6 | 2850 | 30 + 20 | ADMINISTRATION | 700 | EMP7 | 2450.34 | 10 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 900 | EMP9 | 5000 | 10 + 20 | ADMINISTRATION | 1000 | EMP10 | 1500 | 30 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1200 | EMP12 | 950 | 30 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 20 | ADMINISTRATION | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 1500 | EMP15 | 950 | 60 + 20 | ADMINISTRATION | 1600 | EMP16 | | + 30 | SALES | | | | + 40 | HR | | | | + 50 | TESTING | | | | +(21 rows) + +-- Push down RIGHT OUTER JOIN. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 e) LEFT JOIN (mongo_fdw_regress.test_tbl2 d) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + | | 1500 | EMP15 | 950 | 60 + | | 1600 | EMP16 | | +(16 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl1 e RIGHT OUTER JOIN f_test_tbl2 d ON e.c8 = d.c1 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl1 e RIGHT OUTER JOIN f_test_tbl2 d ON e.c8 = d.c1 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | 
DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(17 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c4 > d.c1 OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Limit + -> Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 e) LEFT JOIN (mongo_fdw_regress.test_tbl2 d) +(5 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c4 > d.c1 OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+---------+---- + 40 | HR | 1400 | EMP14 | 1300 | 10 + 40 | HR | 1500 | EMP15 | 950 | 60 + 40 | HR | 1600 | EMP16 | | + 50 | TESTING | 100 | EMP1 | 800.3 | 20 + 50 | TESTING | 200 | EMP2 | 1600 | 30 + 50 | TESTING | 300 | EMP3 | 1250 | 30 + 50 | TESTING | 400 | EMP4 | 2975 | 20 + 50 | TESTING | 500 | EMP5 | 1250.23 | 30 + 50 | TESTING | 600 | EMP6 | 2850 | 30 + 50 | TESTING | 700 | EMP7 | 2450.34 | 10 + 50 | TESTING | 800 | EMP8 | 3000 | 20 + 50 | TESTING | 900 | EMP9 | 5000 | 10 + 50 | TESTING | 1000 | EMP10 | 1500 | 30 + 50 | TESTING | 1100 | EMP11 | 1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | +(19 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON ((d.c1 = e.c8 OR e.c4 > d.c1) AND e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Limit + -> Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(5 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON ((d.c1 = e.c8 OR e.c4 > d.c1) OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+---------+---- + 40 | HR | 1400 | EMP14 | 1300 | 10 + 40 | HR | 1500 | EMP15 | 950 | 60 + 40 | HR | 1600 | EMP16 | | + 50 | TESTING | 100 | EMP1 | 800.3 | 20 + 50 | TESTING | 200 | EMP2 | 1600 | 30 + 50 | TESTING | 300 | EMP3 | 1250 | 30 + 50 | TESTING | 400 | EMP4 | 2975 | 20 + 50 | TESTING | 500 | EMP5 | 1250.23 | 30 + 50 | TESTING | 600 | EMP6 | 2850 | 30 + 50 | TESTING | 700 | EMP7 | 2450.34 | 10 + 50 | TESTING | 800 | EMP8 | 3000 | 20 + 50 | TESTING | 900 | EMP9 | 5000 | 10 + 50 | TESTING | 1000 | EMP10 | 1500 | 30 + 50 | TESTING | 1100 | EMP11 | 1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | 
TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | + | | | | | +(20 rows) + +-- Column comparing with 'Constant' pushed down. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON d.c1 = 20 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 e) LEFT JOIN (mongo_fdw_regress.test_tbl2 d) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON (d.c1 = 20 AND e.c2 = 'EMP1') ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + | | 200 | EMP2 | 1600 | 30 + | | 300 | EMP3 | 1250 | 30 + | | 400 | EMP4 | 2975 | 20 + | | 500 | EMP5 | 1250.23 | 30 + | | 600 | EMP6 | 2850 | 30 + | | 700 | EMP7 | 2450.34 | 10 + | | 800 | EMP8 | 3000 | 20 + | | 900 | EMP9 | 5000 | 10 + | | 1000 | EMP10 | 1500 | 30 + | | 1100 | EMP11 | 1100 | 20 + | | 1200 | EMP12 | 950 | 30 + | | 1300 | EMP13 | 3000 | 20 + | | 1400 | EMP14 | 1300 | 10 + | | 1500 | EMP15 | 950 | 60 + | | 1600 | EMP16 | | +(16 rows) + +-- Push INNER JOIN. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 +(14 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON ((d.c1 = e.c8 OR e.c4 > d.c1) AND e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(5 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON ((d.c1 = e.c8 OR e.c4 > d.c1) OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+---------+---- + 40 | HR | 1400 | EMP14 | 1300 | 10 + 40 | HR | 1500 | EMP15 | 950 | 60 + 40 | HR | 1600 | EMP16 | | + 50 | TESTING | 100 | EMP1 | 800.3 | 20 + 50 | TESTING | 200 | EMP2 | 1600 | 30 + 50 | TESTING | 300 | EMP3 | 1250 | 30 + 50 | TESTING | 400 | EMP4 | 2975 | 20 
+ 50 | TESTING | 500 | EMP5 | 1250.23 | 30 + 50 | TESTING | 600 | EMP6 | 2850 | 30 + 50 | TESTING | 700 | EMP7 | 2450.34 | 10 + 50 | TESTING | 800 | EMP8 | 3000 | 20 + 50 | TESTING | 900 | EMP9 | 5000 | 10 + 50 | TESTING | 1000 | EMP10 | 1500 | 30 + 50 | TESTING | 1100 | EMP11 | 1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | +(19 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(5 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+------+---- + 50 | TESTING | 800 | EMP8 | 3000 | 20 + 50 | TESTING | 900 | EMP9 | 5000 | 10 + 50 | TESTING | 1000 | EMP10 | 1500 | 30 + 50 | TESTING | 1100 | EMP11 | 1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | +(9 rows) + +-- Column comparing with 'Constant' pushed down. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+-------+---- + 10 | DEVELOPMENT | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 100 | EMP1 | 800.3 | 20 + 40 | HR | 100 | EMP1 | 800.3 | 20 + 50 | TESTING | 100 | EMP1 | 800.3 | 20 + | | 100 | EMP1 | 800.3 | 20 +(10 rows) + +-- INNER JOIN with WHERE clause. Should execute where condition separately +-- (NOT added into join clauses) on remote side. 
+EXPLAIN (COSTS OFF) +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (d.c8 = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl2 e) +(2 rows) + +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (d.c8 = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; + c1 | c1 +-----+---- + 100 | 20 +(1 row) + +-- INNER JOIN in which join clause is not pushable but WHERE condition is +-- pushable with join clause 'TRUE'. +EXPLAIN (COSTS OFF) +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (abs(d.c8) = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Foreign Scan + Filter: (abs(c8) = c1) + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl2 e) +(3 rows) + +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (abs(d.c8) = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; + c1 | c1 +-----+---- + 100 | 20 +(1 row) + +-- With ORDER BY pushdown disabled. +SET mongo_fdw.enable_order_by_pushdown TO OFF; +EXPLAIN (COSTS OFF) +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (abs(d.c8) = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: e.c3 DESC NULLS LAST + -> Foreign Scan + Filter: (abs(c8) = c1) + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl2 e) +(5 rows) + +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (abs(d.c8) = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; + c1 | c1 +-----+---- + 100 | 20 +(1 row) + +SET mongo_fdw.enable_order_by_pushdown TO ON; +SET enable_mergejoin TO OFF; +SET enable_nestloop TO OFF; +-- Local-Foreign table joins. 
+EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN l_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------- + Sort + Sort Key: d.c1 NULLS FIRST, e.c1 NULLS FIRST + -> Hash Left Join + Hash Cond: (d.c1 = e.c8) + -> Foreign Scan on f_test_tbl2 d + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Hash + -> Seq Scan on l_test_tbl1 e +(8 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN l_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(17 rows) + +RESET enable_mergejoin; +RESET enable_nestloop; +-- JOIN in sub-query, should be pushed down. +EXPLAIN (COSTS OFF) +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 IN (SELECT f1.c1 FROM f_test_tbl1 f1 LEFT JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1)) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: l.c1 NULLS FIRST, l.c8 NULLS FIRST + -> Hash Join + Hash Cond: (l.c1 = f1.c1) + -> Seq Scan on l_test_tbl1 l + -> Hash + -> HashAggregate + Group Key: f1.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 f1) LEFT JOIN (mongo_fdw_regress.test_tbl2 f2) +(10 rows) + +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 IN (SELECT f1.c1 FROM f_test_tbl1 f1 LEFT JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1)) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c6 | c8 +------+---------+---- + 100 | 800.3 | 20 + 200 | 1600 | 30 + 300 | 1250 | 30 + 400 | 2975 | 20 + 500 | 1250.23 | 30 + 600 | 2850 | 30 + 700 | 2450.34 | 10 + 800 | 3000 | 20 + 900 | 5000 | 10 + 1000 | 1500 | 30 + 1100 | 1100 | 20 + 1200 | 950 | 30 + 1300 | 3000 | 20 + 1400 | 1300 | 10 + 1500 | 950 | 60 + 1600 | | +(16 rows) + +EXPLAIN (COSTS OFF) +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 = (SELECT f1.c1 FROM f_test_tbl1 f1 LEFT JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1) LIMIT 1) ORDER BY 1, 3; + QUERY PLAN +---------------------------------------------------------------------------------------------------------- + Sort + Sort Key: l.c8 + InitPlan 1 (returns $0) + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 f1) LEFT JOIN (mongo_fdw_regress.test_tbl2 f2) + -> Seq Scan on l_test_tbl1 l + Filter: (c1 = $0) +(7 rows) + +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 = (SELECT f1.c1 FROM f_test_tbl1 f1 LEFT JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1) LIMIT 1) ORDER BY 1, 3; + c1 | c6 | c8 +-----+-------+---- + 100 | 800.3 | 20 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 = (SELECT f1.c1 FROM 
f_test_tbl1 f1 INNER JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1) LIMIT 1) ORDER BY 1, 3; + QUERY PLAN +----------------------------------------------------------------------------------------------------------- + Sort + Sort Key: l.c8 + InitPlan 1 (returns $0) + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 f1) INNER JOIN (mongo_fdw_regress.test_tbl2 f2) + -> Seq Scan on l_test_tbl1 l + Filter: (c1 = $0) +(7 rows) + +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 = (SELECT f1.c1 FROM f_test_tbl1 f1 INNER JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1) LIMIT 1) ORDER BY 1, 3; + c1 | c6 | c8 +-----+-------+---- + 100 | 800.3 | 20 +(1 row) + +-- Execute JOIN through PREPARE statement. +PREPARE pre_stmt_left_join AS +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c4 > d.c1) ORDER BY 1, 3 OFFSET 70; +EXPLAIN (COSTS OFF) +EXECUTE pre_stmt_left_join; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Limit + -> Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(5 rows) + +EXECUTE pre_stmt_left_join; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+------+---- + 50 | TESTING | 1100 | EMP11 | 1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | + | | | | | +(7 rows) + +PREPARE pre_stmt_inner_join AS +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c4 > d.c1) ORDER BY 1, 3 OFFSET 70; +EXPLAIN (COSTS OFF) +EXECUTE pre_stmt_inner_join; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(5 rows) + +EXECUTE pre_stmt_inner_join; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+------+---- + 50 | TESTING | 1100 | EMP11 | 1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | +(6 rows) + +-- join + WHERE clause push-down. 
+EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE d.c1 = 10 ORDER BY 1 DESC NULLS LAST, 3 DESC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE d.c1 = 10 ORDER BY 1 DESC NULLS LAST, 3 DESC NULLS LAST; + c1 | c2 | c1 | c2 | c6 | c8 +----+-------------+------+-------+---------+---- + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 +(3 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE e.c8 = 10 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 e) LEFT JOIN (mongo_fdw_regress.test_tbl2 d) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE e.c8 = 10 ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+-------------+------+-------+---------+---- + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 +(3 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE d.c2 = 'SALES' ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE d.c2 = 'SALES' ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+-------+------+-------+---------+---- + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 +(6 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE e.c2 = 'EMP2' ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE e.c2 = 'EMP2' ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+-------+-----+------+------+---- + 30 | SALES | 200 | EMP2 | 1600 | 30 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') WHERE d.c1 = 10 OR e.c8 = 30 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort 
Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') WHERE d.c1 = 10 OR e.c8 = 30 ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+-------------+-----+------+-------+---- + 10 | DEVELOPMENT | 100 | EMP1 | 800.3 | 20 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, d.c6, d.c8 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8 AND (e.c1 = 20 OR d.c2 = 'EMP1')) WHERE e.c1 = 20 AND d.c8 = 20 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl2 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, d.c6, d.c8 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8 AND (e.c1 = 20 OR d.c2 = 'EMP1')) WHERE e.c1 = 20 AND d.c8 = 20 ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +------+-------+----+----------------+-------+---- + 100 | EMP1 | 20 | ADMINISTRATION | 800.3 | 20 + 400 | EMP4 | 20 | ADMINISTRATION | 2975 | 20 + 800 | EMP8 | 20 | ADMINISTRATION | 3000 | 20 + 1100 | EMP11 | 20 | ADMINISTRATION | 1100 | 20 + 1300 | EMP13 | 20 | ADMINISTRATION | 3000 | 20 +(5 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8 AND (d.c5 = '02-22-1981' OR d.c5 = '12-17-1980')) ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Sort + Sort Key: d.c1, d.c5 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) LEFT JOIN (mongo_fdw_regress.test_tbl2 e) +(4 rows) + +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8 AND (d.c5 = '02-22-1981' OR d.c5 = '12-17-1980')) ORDER BY 1, 3; + c1 | c2 | c5 | c1 | c2 +------+-------+------------+----+---------------- + 100 | EMP1 | 12-17-1980 | 20 | ADMINISTRATION + 200 | EMP2 | 02-20-1981 | | + 300 | EMP3 | 02-22-1981 | 30 | SALES + 400 | EMP4 | 04-02-1981 | | + 500 | EMP5 | 09-28-1981 | | + 600 | EMP6 | 05-01-1981 | | + 700 | EMP7 | 06-09-1981 | | + 800 | EMP8 | 04-19-1987 | | + 900 | EMP9 | 11-17-1981 | | + 1000 | EMP10 | 09-08-1980 | | + 1100 | EMP11 | 05-23-1987 | | + 1200 | EMP12 | 12-03-1981 | | + 1300 | EMP13 | 12-03-1981 | | + 1400 | EMP14 | 01-23-1982 | | + 1500 | EMP15 | 12-25-2000 | | + 1600 | EMP16 | | | +(16 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8) WHERE d.c5 = '02-22-1981' ORDER BY 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Sort + Sort Key: d.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) LEFT JOIN (mongo_fdw_regress.test_tbl2 e) +(4 rows) + +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8) WHERE d.c5 = '02-22-1981' ORDER BY 1; + c1 | c2 | c5 | c1 | c2 +-----+------+------------+----+------- + 300 | EMP3 | 02-22-1981 | 30 | SALES +(1 row) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') WHERE d.c1 = 10 OR e.c8 = 30 ORDER BY 1 DESC NULLS LAST, 3 
DESC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Filter: ((c1 = 10) OR (c8 = 30)) + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(3 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') WHERE d.c1 = 10 OR e.c8 = 30 ORDER BY 1 DESC NULLS LAST, 3 DESC NULLS LAST; + c1 | c2 | c1 | c2 | c6 | c8 +----+-------------+-----+------+-------+---- + 10 | DEVELOPMENT | 100 | EMP1 | 800.3 | 20 +(1 row) + +-- Natural join, should push-down. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d NATURAL JOIN f_test_tbl1 e WHERE e.c1 > d.c8 ORDER BY 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d NATURAL JOIN f_test_tbl1 e WHERE e.c1 > d.c8 ORDER BY 1; + c1 | c2 | c5 | c1 | c2 +------+-------+------------+------+------- + 100 | EMP1 | 12-17-1980 | 100 | EMP1 + 200 | EMP2 | 02-20-1981 | 200 | EMP2 + 300 | EMP3 | 02-22-1981 | 300 | EMP3 + 400 | EMP4 | 04-02-1981 | 400 | EMP4 + 500 | EMP5 | 09-28-1981 | 500 | EMP5 + 600 | EMP6 | 05-01-1981 | 600 | EMP6 + 700 | EMP7 | 06-09-1981 | 700 | EMP7 + 800 | EMP8 | 04-19-1987 | 800 | EMP8 + 1000 | EMP10 | 09-08-1980 | 1000 | EMP10 + 1100 | EMP11 | 05-23-1987 | 1100 | EMP11 + 1200 | EMP12 | 12-03-1981 | 1200 | EMP12 + 1300 | EMP13 | 12-03-1981 | 1300 | EMP13 + 1400 | EMP14 | 01-23-1982 | 1400 | EMP14 + 1500 | EMP15 | 12-25-2000 | 1500 | EMP15 +(14 rows) + +-- Self join, should push-down. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d INNER JOIN f_test_tbl1 e ON e.c8 = d.c8 ORDER BY 1 OFFSET 65; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(5 rows) + +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d INNER JOIN f_test_tbl1 e ON e.c8 = d.c8 ORDER BY 1 OFFSET 65; + c1 | c2 | c5 | c1 | c2 +------+-------+------------+------+------- + 1300 | EMP13 | 12-03-1981 | 1100 | EMP11 + 1300 | EMP13 | 12-03-1981 | 1300 | EMP13 + 1400 | EMP14 | 01-23-1982 | 700 | EMP7 + 1400 | EMP14 | 01-23-1982 | 900 | EMP9 + 1400 | EMP14 | 01-23-1982 | 1400 | EMP14 + 1500 | EMP15 | 12-25-2000 | 1500 | EMP15 +(6 rows) + +-- Join in CTE. +-- Explain plan difference between v11 (or pre) and later. 
+EXPLAIN (COSTS false, VERBOSE) +WITH t (c1_1, c1_3, c2_1) AS ( + SELECT d.c1, d.c3, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (d.c8 = e.c1) +) SELECT c1_1, c2_1 FROM t ORDER BY c1_3, c1_1; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: d.c1, e.c1, d.c3 + Sort Key: d.c3, d.c1 + -> Foreign Scan + Output: d.c1, e.c1, d.c3 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl2 e) +(6 rows) + +WITH t (c1_1, c1_3, c2_1) AS ( + SELECT d.c1, d.c3, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (d.c8 = e.c1) +) SELECT c1_1, c2_1 FROM t ORDER BY c1_3, c1_1; + c1_1 | c2_1 +------+------ + 100 | 20 + 1100 | 20 + 1200 | 30 + 1400 | 10 + 800 | 20 + 1300 | 20 + 900 | 10 + 400 | 20 + 600 | 30 + 700 | 10 + 200 | 30 + 300 | 30 + 500 | 30 + 1000 | 30 +(14 rows) + +-- WHERE with boolean expression. Should push-down. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl2 e LEFT JOIN f_test_tbl1 d ON (e.c1 = d.c8) WHERE d.c5 = '02-22-1981' OR d.c5 = '12-17-1980' ORDER BY 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 e) INNER JOIN (mongo_fdw_regress.test_tbl1 d) +(4 rows) + +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl2 e LEFT JOIN f_test_tbl1 d ON (e.c1 = d.c8) WHERE d.c5 = '02-22-1981' OR d.c5 = '12-17-1980' ORDER BY 1; + c1 | c2 | c5 | c1 | c2 +-----+------+------------+----+---------------- + 100 | EMP1 | 12-17-1980 | 20 | ADMINISTRATION + 300 | EMP3 | 02-22-1981 | 30 | SALES +(2 rows) + +-- Nested joins(Don't push-down nested join) +SET enable_mergejoin TO OFF; +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8) LEFT JOIN f_test_tbl1 f ON (f.c8 = e.c1) ORDER BY d.c1 OFFSET 65 ; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Limit + -> Sort + Sort Key: d.c1 + -> Hash Left Join + Hash Cond: (e.c1 = f.c8) + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) LEFT JOIN (mongo_fdw_regress.test_tbl2 e) + -> Hash + -> Foreign Scan on f_test_tbl1 f + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8) LEFT JOIN f_test_tbl1 f ON (f.c8 = e.c1) ORDER BY d.c1 OFFSET 65; + c1 | c2 | c5 | c1 | c2 +------+-------+------------+----+---------------- + 1300 | EMP13 | 12-03-1981 | 20 | ADMINISTRATION + 1300 | EMP13 | 12-03-1981 | 20 | ADMINISTRATION + 1400 | EMP14 | 01-23-1982 | 10 | DEVELOPMENT + 1400 | EMP14 | 01-23-1982 | 10 | DEVELOPMENT + 1400 | EMP14 | 01-23-1982 | 10 | DEVELOPMENT + 1500 | EMP15 | 12-25-2000 | | + 1600 | EMP16 | | | +(7 rows) + +RESET enable_mergejoin; +-- Not supported expressions won't push-down(e.g. function expression, etc.) 
+EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (ABS(d.c1) = e.c8) ORDER BY 1, 3; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: d.c1, e.c1 + -> Merge Left Join + Merge Cond: ((abs(d.c1)) = e.c8) + -> Sort + Sort Key: (abs(d.c1)) + -> Foreign Scan on f_test_tbl2 d + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Sort Key: e.c8 + -> Foreign Scan on f_test_tbl1 e + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(12 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (ABS(d.c1) = e.c8) ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | + | | | | | +(17 rows) + +-- Don't pushdown when whole row reference is involved. +EXPLAIN (COSTS OFF) +SELECT d, e + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8) LEFT JOIN f_test_tbl1 f ON (f.c8 = e.c1) ORDER BY e.c1 OFFSET 65; + QUERY PLAN +-------------------------------------------------------------------------------- + Limit + -> Merge Left Join + Merge Cond: (e.c1 = f.c8) + -> Sort + Sort Key: e.c1 + -> Hash Left Join + Hash Cond: (d.c8 = e.c1) + -> Foreign Scan on f_test_tbl1 d + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + -> Foreign Scan on f_test_tbl2 e + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Sort Key: f.c8 + -> Foreign Scan on f_test_tbl1 f + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(16 rows) + +-- Don't pushdown when full document retrieval is involved. 
+EXPLAIN (COSTS OFF) +SELECT json_data.key AS key1, json_data.value AS value1 + FROM test_text, test_varchar, json_each_text(test_text.__doc::json) AS json_data WHERE key NOT IN ('_id') ORDER BY json_data.key COLLATE "C"; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: json_data.key COLLATE "C" + -> Nested Loop + -> Nested Loop + -> Foreign Scan on test_text + Foreign Namespace: mongo_fdw_regress.warehouse + -> Function Scan on json_each_text json_data + Filter: (key <> '_id'::text) + -> Materialize + -> Foreign Scan on test_varchar + Foreign Namespace: mongo_fdw_regress.warehouse +(11 rows) + +SELECT json_data.key AS key1, json_data.value AS value1 + FROM test_text, test_varchar, json_each_text(test_text.__doc::json) AS json_data WHERE key NOT IN ('_id') ORDER BY json_data.key COLLATE "C"; + key1 | value1 +-------------------+----------------------------- + warehouse_created | { "$date" : 1418368330000 } + warehouse_created | { "$date" : 1418368330000 } + warehouse_created | { "$date" : 1447229590000 } + warehouse_created | { "$date" : 1447229590000 } + warehouse_id | 2 + warehouse_id | 1 + warehouse_id | 1 + warehouse_id | 2 + warehouse_name | Laptop + warehouse_name | Laptop + warehouse_name | UPS + warehouse_name | UPS +(12 rows) + +-- Join two tables from two different foreign servers. +EXPLAIN (COSTS OFF) +SELECT d.c1, e.c1 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl3 e ON d.c1 = e.c1 ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------- + Merge Left Join + Merge Cond: (d.c1 = e.c1) + -> Sort + Sort Key: d.c1 + -> Foreign Scan on f_test_tbl2 d + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Sort Key: e.c1 + -> Foreign Scan on f_test_tbl3 e + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(10 rows) + +-- SEMI JOIN, not pushed down +EXPLAIN (COSTS OFF) +SELECT d.c2 + FROM f_test_tbl1 d WHERE EXISTS (SELECT 1 FROM f_test_tbl2 e WHERE d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c2 + -> Hash Join + Hash Cond: (d.c8 = e.c1) + -> Foreign Scan on f_test_tbl1 d + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + -> HashAggregate + Group Key: e.c1 + -> Foreign Scan on f_test_tbl2 e + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(12 rows) + +SELECT d.c2 + FROM f_test_tbl1 d WHERE EXISTS (SELECT 1 FROM f_test_tbl2 e WHERE d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; + c2 +------- + EMP1 + EMP10 + EMP11 + EMP12 + EMP13 + EMP14 + EMP2 + EMP3 + EMP4 + EMP5 +(10 rows) + +-- ANTI JOIN, not pushed down +EXPLAIN (COSTS OFF) +SELECT d.c2 + FROM f_test_tbl1 d WHERE NOT EXISTS (SELECT 1 FROM f_test_tbl2 e WHERE d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c2 + -> Hash Anti Join + Hash Cond: (d.c8 = e.c1) + -> Foreign Scan on f_test_tbl1 d + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + -> Foreign Scan on f_test_tbl2 e + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(10 rows) + +SELECT d.c2 + FROM f_test_tbl1 d WHERE NOT EXISTS (SELECT 1 FROM f_test_tbl2 e WHERE d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; + c2 +------- + EMP15 + EMP16 +(2 rows) + +-- FULL OUTER JOIN, should not pushdown. 
+EXPLAIN (COSTS OFF) +SELECT d.c1, e.c1 + FROM f_test_tbl1 d FULL JOIN f_test_tbl2 e ON (d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c2 + -> Hash Full Join + Hash Cond: (d.c8 = e.c1) + -> Foreign Scan on f_test_tbl1 d + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + -> Foreign Scan on f_test_tbl2 e + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(10 rows) + +SELECT d.c1, e.c1 + FROM f_test_tbl1 d FULL JOIN f_test_tbl2 e ON (d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; + c1 | c1 +------+---- + 100 | 20 + 1000 | 30 + 1100 | 20 + 1200 | 30 + 1300 | 20 + 1400 | 10 + 1500 | + 1600 | + 200 | 30 + 300 | 30 +(10 rows) + +-- CROSS JOIN can be pushed down +EXPLAIN (COSTS OFF) +SELECT e.c1, d.c2 + FROM f_test_tbl1 d CROSS JOIN f_test_tbl2 e ORDER BY e.c1, d.c2 LIMIT 10; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: e.c1, d.c2 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl2 e) +(5 rows) + +SELECT e.c1, d.c2 + FROM f_test_tbl1 d CROSS JOIN f_test_tbl2 e ORDER BY e.c1, d.c2 LIMIT 10; + c1 | c2 +----+------- + 10 | EMP1 + 10 | EMP10 + 10 | EMP11 + 10 | EMP12 + 10 | EMP13 + 10 | EMP14 + 10 | EMP15 + 10 | EMP16 + 10 | EMP2 + 10 | EMP3 +(10 rows) + +-- FDW-131: Limit and offset pushdown with join pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT round(2.2) OFFSET 2; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(3 rows) + +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT round(2.2) OFFSET 2; + c1 | c1 +-----+---- + 100 | 10 + 100 | 30 +(2 rows) + +-- Limit as NULL, no LIMIT/OFFSET pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (t1.c8 = t2.c1) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT NULL OFFSET 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(3 rows) + +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (t1.c8 = t2.c1) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT NULL OFFSET 1; + c1 | c1 +------+---- + 200 | 30 + 300 | 30 + 400 | 20 + 500 | 30 + 600 | 30 + 700 | 10 + 800 | 20 + 900 | 10 + 1000 | 30 + 1100 | 20 + 1200 | 30 + 1300 | 20 + 1400 | 10 +(13 rows) + +-- Limit as ALL, no LIMIT/OFFSET pushdown. 
+EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (t1.c8 = t2.c1) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT ALL OFFSET 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(3 rows) + +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (t1.c8 = t2.c1) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT ALL OFFSET 1; + c1 | c1 +------+---- + 200 | 30 + 300 | 30 + 400 | 20 + 500 | 30 + 600 | 30 + 700 | 10 + 800 | 20 + 900 | 10 + 1000 | 30 + 1100 | 20 + 1200 | 30 + 1300 | 20 + 1400 | 10 +(13 rows) + +-- Offset as NULL, no LIMIT/OFFSET pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT 3 OFFSET NULL; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(3 rows) + +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT 3 OFFSET NULL; + c1 | c1 +-----+---- + 100 | 10 + 100 | 20 + 100 | 30 +(3 rows) + +-- Limit with -ve value. Shouldn't pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT -2; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Limit + Output: t1.c1, t2.c1 + -> Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(5 rows) + +-- Should throw an error. +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT -2; +ERROR: LIMIT must not be negative +-- Offset with -ve value. Shouldn't pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST OFFSET -1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Limit + Output: t1.c1, t2.c1 + -> Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(5 rows) + +-- Should throw an error. +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST OFFSET -1; +ERROR: OFFSET must not be negative +-- Limit/Offset with -ve value. Shouldn't pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT -3 OFFSET -1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Limit + Output: t1.c1, t2.c1 + -> Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(5 rows) + +-- Should throw an error. 
+SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT -3 OFFSET -1; +ERROR: OFFSET must not be negative +-- Limit with expression evaluating to -ve value. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT (1 - (SELECT COUNT(*) FROM f_test_tbl1)); + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Limit + Output: t1.c1, t2.c1 + InitPlan 1 (returns $0) + -> Foreign Scan + Output: (count(*)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 f_test_tbl1) + -> Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(9 rows) + +-- Should throw an error. +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT (1 - (SELECT COUNT(*) FROM f_test_tbl1)); +ERROR: LIMIT must not be negative +-- Test partition-wise join +SET enable_partitionwise_join TO on; +-- Create the partition tables +CREATE TABLE fprt1 (_id NAME, c1 INTEGER, c2 INTEGER, c3 TEXT) PARTITION BY RANGE(c1); +CREATE FOREIGN TABLE ftprt1_p1 PARTITION OF fprt1 FOR VALUES FROM (1) TO (4) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test1'); +CREATE FOREIGN TABLE ftprt1_p2 PARTITION OF fprt1 FOR VALUES FROM (5) TO (8) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test2'); +CREATE TABLE fprt2 (_id NAME, c1 INTEGER, c2 INTEGER, c3 TEXT) PARTITION BY RANGE(c2); +CREATE FOREIGN TABLE ftprt2_p1 PARTITION OF fprt2 FOR VALUES FROM (1) TO (4) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test3'); +CREATE FOREIGN TABLE ftprt2_p2 PARTITION OF fprt2 FOR VALUES FROM (5) TO (8) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test4'); +-- Inner join two tables +-- Different explain plan on v10 as partition-wise join is not supported there. +SET enable_mergejoin TO OFF; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.c1, t2.c2 + FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.c1 = t2.c2) ORDER BY 1,2; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: t1.c1, t2.c2 + Sort Key: t1.c1 + -> Append + -> Foreign Scan + Output: t1.c1, t2.c2 + Foreign Namespace: (mongo_fdw_regress.test1 t1) INNER JOIN (mongo_fdw_regress.test3 t2) + -> Foreign Scan + Output: t1_1.c1, t2_1.c2 + Foreign Namespace: (mongo_fdw_regress.test2 t1) INNER JOIN (mongo_fdw_regress.test4 t2) +(10 rows) + +SELECT t1.c1, t2.c2 + FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.c1 = t2.c2) ORDER BY 1,2; + c1 | c2 +----+---- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + 8 | 8 +(8 rows) + +-- Inner join three tables +-- Different explain plan on v10 as partition-wise join is not supported there. 
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.c1, t2.c2, t3.c2 + FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.c1 = t2.c2) INNER JOIN fprt1 t3 ON (t3.c1 = t2.c2) ORDER BY 1,2; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Sort + Output: t1.c1, t2.c2, t3.c2 + Sort Key: t1.c1 + -> Append + -> Hash Join + Output: t1.c1, t2.c2, t3.c2 + Hash Cond: (t1.c1 = t3.c1) + -> Foreign Scan + Output: t1.c1, t2.c2 + Foreign Namespace: (mongo_fdw_regress.test1 t1) INNER JOIN (mongo_fdw_regress.test3 t2) + -> Hash + Output: t3.c2, t3.c1 + -> Foreign Scan on public.ftprt1_p1 t3 + Output: t3.c2, t3.c1 + Foreign Namespace: mongo_fdw_regress.test1 + -> Hash Join + Output: t1_1.c1, t2_1.c2, t3_1.c2 + Hash Cond: (t1_1.c1 = t3_1.c1) + -> Foreign Scan + Output: t1_1.c1, t2_1.c2 + Foreign Namespace: (mongo_fdw_regress.test2 t1) INNER JOIN (mongo_fdw_regress.test4 t2) + -> Hash + Output: t3_1.c2, t3_1.c1 + -> Foreign Scan on public.ftprt1_p2 t3_1 + Output: t3_1.c2, t3_1.c1 + Foreign Namespace: mongo_fdw_regress.test2 +(26 rows) + +SELECT t1.c1, t2.c2, t3.c2 + FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.c1 = t2.c2) INNER JOIN fprt1 t3 ON (t3.c1 = t2.c2) ORDER BY 1,2; + c1 | c2 | c2 +----+----+---- + 1 | 1 | 1 + 2 | 2 | 2 + 3 | 3 | 3 + 4 | 4 | 4 + 5 | 5 | 5 + 6 | 6 | 6 + 7 | 7 | 7 + 8 | 8 | 8 +(8 rows) + +RESET enable_mergejoin; +-- Join with lateral reference +-- Different explain plan on v10 as partition-wise join is not supported there. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.c1, t1.c2 + FROM fprt1 t1, LATERAL (SELECT t2.c1, t2.c2 FROM fprt2 t2 + WHERE t1.c1 = t2.c2 AND t1.c2 = t2.c1) q WHERE t1.c1 % 2 = 0 ORDER BY 1,2; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: t1.c1, t1.c2 + Sort Key: t1.c1, t1.c2 + -> Append + -> Foreign Scan + Output: t1.c1, t1.c2 + Foreign Namespace: (mongo_fdw_regress.test1 t1) INNER JOIN (mongo_fdw_regress.test3 t2) + -> Foreign Scan + Output: t1_1.c1, t1_1.c2 + Foreign Namespace: (mongo_fdw_regress.test2 t1) INNER JOIN (mongo_fdw_regress.test4 t2) +(10 rows) + +SELECT t1.c1, t1.c2 + FROM fprt1 t1, LATERAL (SELECT t2.c1, t2.c2 FROM fprt2 t2 + WHERE t1.c1 = t2.c2 AND t1.c2 = t2.c1) q WHERE t1.c1 % 2 = 0 ORDER BY 1,2; + c1 | c2 +----+---- + 2 | 2 + 4 | 4 + 6 | 6 + 8 | 8 +(4 rows) + +-- With PHVs, partitionwise join selected but no join pushdown +-- Table alias in foreign scan is different for v12, v11 and v10. 
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.c1, t1.phv, t2.c2, t2.phv + FROM (SELECT 't1_phv' phv, * FROM fprt1 WHERE c1 % 2 = 0) t1 LEFT JOIN + (SELECT 't2_phv' phv, * FROM fprt2 WHERE c2 % 2 = 0) t2 ON (t1.c1 = t2.c2) + ORDER BY t1.c1, t2.c2; + QUERY PLAN +------------------------------------------------------------------------------------ + Sort + Output: ftprt1_p1.c1, 't1_phv'::text, ftprt2_p1.c2, ('t2_phv'::text) + Sort Key: ftprt1_p1.c1, ftprt2_p1.c2 + -> Append + -> Hash Left Join + Output: ftprt1_p1.c1, 't1_phv'::text, ftprt2_p1.c2, ('t2_phv'::text) + Hash Cond: (ftprt1_p1.c1 = ftprt2_p1.c2) + -> Foreign Scan on public.ftprt1_p1 + Output: ftprt1_p1.c1 + Foreign Namespace: mongo_fdw_regress.test1 + -> Hash + Output: ftprt2_p1.c2, ('t2_phv'::text) + -> Foreign Scan on public.ftprt2_p1 + Output: ftprt2_p1.c2, 't2_phv'::text + Foreign Namespace: mongo_fdw_regress.test3 + -> Hash Left Join + Output: ftprt1_p2.c1, 't1_phv'::text, ftprt2_p2.c2, ('t2_phv'::text) + Hash Cond: (ftprt1_p2.c1 = ftprt2_p2.c2) + -> Foreign Scan on public.ftprt1_p2 + Output: ftprt1_p2.c1 + Foreign Namespace: mongo_fdw_regress.test2 + -> Hash + Output: ftprt2_p2.c2, ('t2_phv'::text) + -> Foreign Scan on public.ftprt2_p2 + Output: ftprt2_p2.c2, 't2_phv'::text + Foreign Namespace: mongo_fdw_regress.test4 +(26 rows) + +SELECT t1.c1, t1.phv, t2.c2, t2.phv + FROM (SELECT 't1_phv' phv, * FROM fprt1 WHERE c1 % 2 = 0) t1 LEFT JOIN + (SELECT 't2_phv' phv, * FROM fprt2 WHERE c2 % 2 = 0) t2 ON (t1.c1 = t2.c2) + ORDER BY t1.c1, t2.c2; + c1 | phv | c2 | phv +----+--------+----+-------- + 2 | t1_phv | 2 | t2_phv + 4 | t1_phv | 4 | t2_phv + 6 | t1_phv | 6 | t2_phv + 8 | t1_phv | 8 | t2_phv +(4 rows) + +RESET enable_partitionwise_join; +-- FDW-445: Support enable_join_pushdown option at server level and table level. +-- Check only boolean values are accepted. +ALTER SERVER mongo_server OPTIONS (ADD enable_join_pushdown 'abc11'); +ERROR: enable_join_pushdown requires a Boolean value +-- Test the option at server level. +ALTER SERVER mongo_server OPTIONS (ADD enable_join_pushdown 'false'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + -> Hash Join + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Hash Cond: (d.c1 = e.c8) + -> Foreign Scan on public.f_test_tbl2 d + Output: d._id, d.c1, d.c2, d.c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Hash + Output: e.c1, e.c2, e.c6, e.c8 + -> Foreign Scan on public.f_test_tbl1 e + Output: e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(14 rows) + +ALTER SERVER mongo_server OPTIONS (SET enable_join_pushdown 'true'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + -> Foreign Scan + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(6 rows) + +-- Test the option with outer rel. 
+ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (ADD enable_join_pushdown 'false'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + -> Hash Join + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Hash Cond: (d.c1 = e.c8) + -> Foreign Scan on public.f_test_tbl2 d + Output: d._id, d.c1, d.c2, d.c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Hash + Output: e.c1, e.c2, e.c6, e.c8 + -> Foreign Scan on public.f_test_tbl1 e + Output: e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(14 rows) + +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'true'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + -> Foreign Scan + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(6 rows) + +-- Test the option with inner rel. +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (ADD enable_join_pushdown 'false'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + -> Hash Join + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Hash Cond: (d.c1 = e.c8) + -> Foreign Scan on public.f_test_tbl2 d + Output: d._id, d.c1, d.c2, d.c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Hash + Output: e.c1, e.c2, e.c6, e.c8 + -> Foreign Scan on public.f_test_tbl1 e + Output: e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(14 rows) + +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'true'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + -> Foreign Scan + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(6 rows) + +-- Test that setting option at table level does not affect the setting at +-- server level. 
+ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'false'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'false'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + -> Hash Join + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Hash Cond: (d.c1 = e.c8) + -> Foreign Scan on public.f_test_tbl2 d + Output: d._id, d.c1, d.c2, d.c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Hash + Output: e.c1, e.c2, e.c6, e.c8 + -> Foreign Scan on public.f_test_tbl1 e + Output: e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(14 rows) + +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT t1.c1, t2.c2 + FROM f_test_tbl3 t1 JOIN f_test_tbl4 t2 ON (t1.c1 = t2.c8) ORDER BY 1, 2; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Sort + Output: t1.c1, t2.c2 + Sort Key: t1.c1, t2.c2 + -> Foreign Scan + Output: t1.c1, t2.c2 + Foreign Namespace: (mongo_fdw_regress.test_tbl2 t1) INNER JOIN (mongo_fdw_regress.test_tbl1 t2) +(6 rows) + +-- FDW-558: Test mongo_fdw.enable_join_pushdown GUC. +-- Negative testing for GUC value. +SET mongo_fdw.enable_join_pushdown to 'abc'; +ERROR: parameter "mongo_fdw.enable_join_pushdown" requires a Boolean value +-- Check default value. Should be ON. +SHOW mongo_fdw.enable_join_pushdown; + mongo_fdw.enable_join_pushdown +-------------------------------- + on +(1 row) + +-- Join pushdown should happen as the GUC enable_join_pushdown is true. +ALTER SERVER mongo_server OPTIONS (SET enable_join_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'true'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON (d.c1 = e.c8) ORDER BY 1, 2; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: d.c1, e.c8 + Sort Key: d.c1 + -> Foreign Scan + Output: d.c1, e.c8 + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(6 rows) + +--Disable the GUC enable_join_pushdown. +SET mongo_fdw.enable_join_pushdown to false; +-- Join pushdown shouldn't happen as the GUC enable_join_pushdown is false. +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON (d.c1 = e.c8) ORDER BY 1, 2; + QUERY PLAN +-------------------------------------------------------------- + Merge Join + Output: d.c1, e.c8 + Merge Cond: (d.c1 = e.c8) + -> Sort + Output: d.c1 + Sort Key: d.c1 + -> Foreign Scan on public.f_test_tbl2 d + Output: d.c1 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Output: e.c8 + Sort Key: e.c8 + -> Foreign Scan on public.f_test_tbl1 e + Output: e.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(15 rows) + +-- Enable the GUC and table level option is set to false, should not pushdown. 
+ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'false'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'false'); +SET mongo_fdw.enable_join_pushdown to true; +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON (d.c1 = e.c8) ORDER BY 1, 2; + QUERY PLAN +-------------------------------------------------------------- + Merge Join + Output: d.c1, e.c8 + Merge Cond: (d.c1 = e.c8) + -> Sort + Output: d.c1 + Sort Key: d.c1 + -> Foreign Scan on public.f_test_tbl2 d + Output: d.c1 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Output: e.c8 + Sort Key: e.c8 + -> Foreign Scan on public.f_test_tbl1 e + Output: e.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(15 rows) + +-- FDW-589: Test enable_order_by_pushdown option at server and table level. +SET mongo_fdw.enable_join_pushdown to true; +SET mongo_fdw.enable_order_by_pushdown to true; +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'true'); +ALTER SERVER mongo_server OPTIONS (ADD enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (ADD enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (ADD enable_order_by_pushdown 'true'); +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | | | | + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(12 rows) + +-- One table level option is OFF. Shouldn't pushdown ORDER BY. 
+ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_order_by_pushdown 'false'); +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Sort + Sort Key: d.c1 NULLS FIRST, e.c1 NULLS FIRST + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | | | | + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(12 rows) + +-- Test that setting option at table level does not affect the setting at +-- server level. +ALTER SERVER mongo_server OPTIONS (SET enable_order_by_pushdown 'false'); +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_order_by_pushdown 'true'); +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | | | | + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(12 rows) + +ALTER SERVER mongo_server OPTIONS (SET enable_order_by_pushdown 'true'); +-- When enable_join_pushdown option is disabled. Shouldn't pushdown join and +-- hence, ORDER BY too. 
+ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'false'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'false'); +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: d.c1 NULLS FIRST, e.c1 NULLS FIRST + -> Hash Left Join + Hash Cond: (d.c1 = e.c8) + Join Filter: ((e.c4 > d.c1) AND (e.c2 < d.c3)) + -> Foreign Scan on f_test_tbl2 d + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Hash + -> Foreign Scan on f_test_tbl1 e + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | | | | + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(12 rows) + +DELETE FROM f_test_tbl1 WHERE c8 IS NULL; +DELETE FROM f_test_tbl1 WHERE c8 = 60; +DELETE FROM f_test_tbl2 WHERE c1 IS NULL; +DELETE FROM f_test_tbl2 WHERE c1 = 50; +DROP FOREIGN TABLE f_test_tbl1; +DROP FOREIGN TABLE f_test_tbl2; +DROP FOREIGN TABLE f_test_tbl3; +DROP FOREIGN TABLE f_test_tbl4; +DROP FOREIGN TABLE test_text; +DROP FOREIGN TABLE test_varchar; +DROP TABLE l_test_tbl1; +DROP FOREIGN TABLE ftprt1_p1; +DROP FOREIGN TABLE ftprt1_p2; +DROP FOREIGN TABLE ftprt2_p1; +DROP FOREIGN TABLE ftprt2_p2; +DROP TABLE IF EXISTS fprt1; +DROP TABLE IF EXISTS fprt2; +DROP USER MAPPING FOR public SERVER mongo_server1; +DROP SERVER mongo_server1; +DROP USER MAPPING FOR public SERVER mongo_server; +DROP SERVER mongo_server; +DROP EXTENSION mongo_fdw; diff --git a/expected/join_pushdown_2.out b/expected/join_pushdown_2.out new file mode 100644 index 0000000..c664f77 --- /dev/null +++ b/expected/join_pushdown_2.out @@ -0,0 +1,2127 @@ +\set MONGO_HOST `echo \'"$MONGO_HOST"\'` +\set MONGO_PORT `echo \'"$MONGO_PORT"\'` +\set MONGO_USER_NAME `echo \'"$MONGO_USER_NAME"\'` +\set MONGO_PASS `echo \'"$MONGO_PWD"\'` +-- Before running this file user must create database mongo_fdw_regress on +-- MongoDB with all permission for MONGO_USER_NAME user with MONGO_PASS +-- password and ran mongodb_init.sh file to load collections. +\c contrib_regression +CREATE EXTENSION IF NOT EXISTS mongo_fdw; +CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw + OPTIONS (address :MONGO_HOST, port :MONGO_PORT); +CREATE USER MAPPING FOR public SERVER mongo_server; +CREATE SERVER mongo_server1 FOREIGN DATA WRAPPER mongo_fdw + OPTIONS (address :MONGO_HOST, port :MONGO_PORT); +CREATE USER MAPPING FOR public SERVER mongo_server1; +-- Create foreign tables. 
+CREATE FOREIGN TABLE f_test_tbl1 (_id NAME, c1 INTEGER, c2 TEXT, c3 CHAR(9), c4 INTEGER, c5 pg_catalog.Date, c6 DECIMAL, c7 INTEGER, c8 INTEGER) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl1'); +CREATE FOREIGN TABLE f_test_tbl2 (_id NAME, c1 INTEGER, c2 TEXT, c3 TEXT) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl2'); +CREATE FOREIGN TABLE f_test_tbl3 (_id NAME, c1 INTEGER, c2 TEXT, c3 TEXT) + SERVER mongo_server1 OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl2'); +CREATE FOREIGN TABLE test_text ( __doc text) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'warehouse'); +CREATE FOREIGN TABLE test_varchar ( __doc varchar) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'warehouse'); +CREATE FOREIGN TABLE f_test_tbl4 (_id NAME, c1 INTEGER, c2 TEXT, c3 CHAR(9), c4 INTEGER, c5 pg_catalog.Date, c6 DECIMAL, c7 INTEGER, c8 INTEGER) + SERVER mongo_server1 OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl1'); +INSERT INTO f_test_tbl1 VALUES (0, 1500, 'EMP15', 'FINANCE', 1300, '2000-12-25', 950.0, 400, 60); +INSERT INTO f_test_tbl1 VALUES (0, 1600, 'EMP16', 'ADMIN', 600); +INSERT INTO f_test_tbl2 VALUES (0, 50, 'TESTING', 'NASHIK'); +INSERT INTO f_test_tbl2 VALUES (0); +-- Create local table. +CREATE TABLE l_test_tbl1 AS + SELECT c1, c2, c3, c4, c5, c6, c7, c8 FROM f_test_tbl1; +-- Push down LEFT OUTER JOIN. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(17 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl1 e LEFT OUTER JOIN f_test_tbl2 d ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 e) LEFT JOIN (mongo_fdw_regress.test_tbl2 d) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl1 e LEFT OUTER JOIN f_test_tbl2 d ON e.c8 = d.c1 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 
1250 | 30 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + | | 1500 | EMP15 | 950 | 60 + | | 1600 | EMP16 | | +(16 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 OR e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST OFFSET 50; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 OR e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST OFFSET 50; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+---------+---- + 40 | HR | 1300 | EMP13 | 3000 | 20 + 40 | HR | 1400 | EMP14 | 1300 | 10 + 40 | HR | 1500 | EMP15 | 950 | 60 + 40 | HR | 1600 | EMP16 | | + 50 | TESTING | 100 | EMP1 | 800.3 | 20 + 50 | TESTING | 200 | EMP2 | 1600 | 30 + 50 | TESTING | 300 | EMP3 | 1250 | 30 + 50 | TESTING | 400 | EMP4 | 2975 | 20 + 50 | TESTING | 500 | EMP5 | 1250.23 | 30 + 50 | TESTING | 600 | EMP6 | 2850 | 30 + 50 | TESTING | 700 | EMP7 | 2450.34 | 10 + 50 | TESTING | 800 | EMP8 | 3000 | 20 + 50 | TESTING | 900 | EMP9 | 5000 | 10 + 50 | TESTING | 1000 | EMP10 | 1500 | 30 + 50 | TESTING | 1100 | EMP11 | 1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | +(20 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | | | | + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(12 rows) + +-- With ORDER BY pushdown disabled. 
+SET mongo_fdw.enable_order_by_pushdown TO OFF; +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Sort + Sort Key: d.c1 NULLS FIRST, e.c1 NULLS FIRST + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | | | | + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(12 rows) + +SET mongo_fdw.enable_order_by_pushdown TO ON; +-- Column comparing with 'Constant' pushed down. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = 20 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = 20 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | | | | + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 200 | EMP2 | 1600 | 30 + 20 | ADMINISTRATION | 300 | EMP3 | 1250 | 30 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 500 | EMP5 | 1250.23 | 30 + 20 | ADMINISTRATION | 600 | EMP6 | 2850 | 30 + 20 | ADMINISTRATION | 700 | EMP7 | 2450.34 | 10 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 900 | EMP9 | 5000 | 10 + 20 | ADMINISTRATION | 1000 | EMP10 | 1500 | 30 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1200 | EMP12 | 950 | 30 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 20 | ADMINISTRATION | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 1500 | EMP15 | 950 | 60 + 20 | ADMINISTRATION | 1600 | EMP16 | | + 30 | SALES | | | | + 40 | HR | | | | + 50 | TESTING | | | | +(21 rows) + +-- Push down RIGHT OUTER JOIN. 
+EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 e) LEFT JOIN (mongo_fdw_regress.test_tbl2 d) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + | | 1500 | EMP15 | 950 | 60 + | | 1600 | EMP16 | | +(16 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl1 e RIGHT OUTER JOIN f_test_tbl2 d ON e.c8 = d.c1 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl1 e RIGHT OUTER JOIN f_test_tbl2 d ON e.c8 = d.c1 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(17 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c4 > d.c1 OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Limit + -> Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 e) LEFT JOIN (mongo_fdw_regress.test_tbl2 d) +(5 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c4 > d.c1 OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+---------+---- + 40 | HR | 1400 | EMP14 | 1300 | 10 + 40 | HR | 1500 | EMP15 | 950 | 60 + 40 | HR | 1600 | EMP16 | | + 50 | TESTING | 100 | EMP1 | 800.3 | 20 + 50 | TESTING | 200 | EMP2 | 1600 
| 30 + 50 | TESTING | 300 | EMP3 | 1250 | 30 + 50 | TESTING | 400 | EMP4 | 2975 | 20 + 50 | TESTING | 500 | EMP5 | 1250.23 | 30 + 50 | TESTING | 600 | EMP6 | 2850 | 30 + 50 | TESTING | 700 | EMP7 | 2450.34 | 10 + 50 | TESTING | 800 | EMP8 | 3000 | 20 + 50 | TESTING | 900 | EMP9 | 5000 | 10 + 50 | TESTING | 1000 | EMP10 | 1500 | 30 + 50 | TESTING | 1100 | EMP11 | 1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | +(19 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON ((d.c1 = e.c8 OR e.c4 > d.c1) AND e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Limit + -> Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(5 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON ((d.c1 = e.c8 OR e.c4 > d.c1) OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+---------+---- + 40 | HR | 1400 | EMP14 | 1300 | 10 + 40 | HR | 1500 | EMP15 | 950 | 60 + 40 | HR | 1600 | EMP16 | | + 50 | TESTING | 100 | EMP1 | 800.3 | 20 + 50 | TESTING | 200 | EMP2 | 1600 | 30 + 50 | TESTING | 300 | EMP3 | 1250 | 30 + 50 | TESTING | 400 | EMP4 | 2975 | 20 + 50 | TESTING | 500 | EMP5 | 1250.23 | 30 + 50 | TESTING | 600 | EMP6 | 2850 | 30 + 50 | TESTING | 700 | EMP7 | 2450.34 | 10 + 50 | TESTING | 800 | EMP8 | 3000 | 20 + 50 | TESTING | 900 | EMP9 | 5000 | 10 + 50 | TESTING | 1000 | EMP10 | 1500 | 30 + 50 | TESTING | 1100 | EMP11 | 1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | + | | | | | +(20 rows) + +-- Column comparing with 'Constant' pushed down. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON d.c1 = 20 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 e) LEFT JOIN (mongo_fdw_regress.test_tbl2 d) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON (d.c1 = 20 AND e.c2 = 'EMP1') ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + | | 200 | EMP2 | 1600 | 30 + | | 300 | EMP3 | 1250 | 30 + | | 400 | EMP4 | 2975 | 20 + | | 500 | EMP5 | 1250.23 | 30 + | | 600 | EMP6 | 2850 | 30 + | | 700 | EMP7 | 2450.34 | 10 + | | 800 | EMP8 | 3000 | 20 + | | 900 | EMP9 | 5000 | 10 + | | 1000 | EMP10 | 1500 | 30 + | | 1100 | EMP11 | 1100 | 20 + | | 1200 | EMP12 | 950 | 30 + | | 1300 | EMP13 | 3000 | 20 + | | 1400 | EMP14 | 1300 | 10 + | | 1500 | EMP15 | 950 | 60 + | | 1600 | EMP16 | | +(16 rows) + +-- Push INNER JOIN. 
+EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 +(14 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON ((d.c1 = e.c8 OR e.c4 > d.c1) AND e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(5 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON ((d.c1 = e.c8 OR e.c4 > d.c1) OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+---------+---- + 40 | HR | 1400 | EMP14 | 1300 | 10 + 40 | HR | 1500 | EMP15 | 950 | 60 + 40 | HR | 1600 | EMP16 | | + 50 | TESTING | 100 | EMP1 | 800.3 | 20 + 50 | TESTING | 200 | EMP2 | 1600 | 30 + 50 | TESTING | 300 | EMP3 | 1250 | 30 + 50 | TESTING | 400 | EMP4 | 2975 | 20 + 50 | TESTING | 500 | EMP5 | 1250.23 | 30 + 50 | TESTING | 600 | EMP6 | 2850 | 30 + 50 | TESTING | 700 | EMP7 | 2450.34 | 10 + 50 | TESTING | 800 | EMP8 | 3000 | 20 + 50 | TESTING | 900 | EMP9 | 5000 | 10 + 50 | TESTING | 1000 | EMP10 | 1500 | 30 + 50 | TESTING | 1100 | EMP11 | 1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | +(19 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(5 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+------+---- + 50 | TESTING | 800 | EMP8 | 3000 | 20 + 50 | TESTING | 900 | EMP9 | 5000 | 10 + 50 | TESTING | 1000 | EMP10 | 1500 | 30 + 50 | TESTING | 1100 | EMP11 | 
1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | +(9 rows) + +-- Column comparing with 'Constant' pushed down. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+-------+---- + 10 | DEVELOPMENT | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 100 | EMP1 | 800.3 | 20 + 40 | HR | 100 | EMP1 | 800.3 | 20 + 50 | TESTING | 100 | EMP1 | 800.3 | 20 + | | 100 | EMP1 | 800.3 | 20 +(10 rows) + +-- INNER JOIN with WHERE clause. Should execute where condition separately +-- (NOT added into join clauses) on remote side. +EXPLAIN (COSTS OFF) +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (d.c8 = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl2 e) +(2 rows) + +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (d.c8 = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; + c1 | c1 +-----+---- + 100 | 20 +(1 row) + +-- INNER JOIN in which join clause is not pushable but WHERE condition is +-- pushable with join clause 'TRUE'. +EXPLAIN (COSTS OFF) +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (abs(d.c8) = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Foreign Scan + Filter: (abs(c8) = c1) + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl2 e) +(3 rows) + +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (abs(d.c8) = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; + c1 | c1 +-----+---- + 100 | 20 +(1 row) + +-- With ORDER BY pushdown disabled. 
+SET mongo_fdw.enable_order_by_pushdown TO OFF; +EXPLAIN (COSTS OFF) +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (abs(d.c8) = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: e.c3 DESC NULLS LAST + -> Foreign Scan + Filter: (abs(c8) = c1) + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl2 e) +(5 rows) + +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (abs(d.c8) = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; + c1 | c1 +-----+---- + 100 | 20 +(1 row) + +SET mongo_fdw.enable_order_by_pushdown TO ON; +SET enable_mergejoin TO OFF; +SET enable_nestloop TO OFF; +-- Local-Foreign table joins. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN l_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------- + Sort + Sort Key: d.c1 NULLS FIRST, e.c1 NULLS FIRST + -> Hash Left Join + Hash Cond: (d.c1 = e.c8) + -> Foreign Scan on f_test_tbl2 d + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Hash + -> Seq Scan on l_test_tbl1 e +(8 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN l_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(17 rows) + +RESET enable_mergejoin; +RESET enable_nestloop; +-- JOIN in sub-query, should be pushed down. 
+EXPLAIN (COSTS OFF) +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 IN (SELECT f1.c1 FROM f_test_tbl1 f1 LEFT JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1)) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: l.c1 NULLS FIRST, l.c8 NULLS FIRST + -> Hash Join + Hash Cond: (l.c1 = f1.c1) + -> Seq Scan on l_test_tbl1 l + -> Hash + -> HashAggregate + Group Key: f1.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 f1) LEFT JOIN (mongo_fdw_regress.test_tbl2 f2) +(10 rows) + +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 IN (SELECT f1.c1 FROM f_test_tbl1 f1 LEFT JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1)) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c6 | c8 +------+---------+---- + 100 | 800.3 | 20 + 200 | 1600 | 30 + 300 | 1250 | 30 + 400 | 2975 | 20 + 500 | 1250.23 | 30 + 600 | 2850 | 30 + 700 | 2450.34 | 10 + 800 | 3000 | 20 + 900 | 5000 | 10 + 1000 | 1500 | 30 + 1100 | 1100 | 20 + 1200 | 950 | 30 + 1300 | 3000 | 20 + 1400 | 1300 | 10 + 1500 | 950 | 60 + 1600 | | +(16 rows) + +EXPLAIN (COSTS OFF) +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 = (SELECT f1.c1 FROM f_test_tbl1 f1 LEFT JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1) LIMIT 1) ORDER BY 1, 3; + QUERY PLAN +---------------------------------------------------------------------------------------------------------- + Sort + Sort Key: l.c8 + InitPlan 1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 f1) LEFT JOIN (mongo_fdw_regress.test_tbl2 f2) + -> Seq Scan on l_test_tbl1 l + Filter: (c1 = (InitPlan 1).col1) +(7 rows) + +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 = (SELECT f1.c1 FROM f_test_tbl1 f1 LEFT JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1) LIMIT 1) ORDER BY 1, 3; + c1 | c6 | c8 +-----+-------+---- + 100 | 800.3 | 20 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 = (SELECT f1.c1 FROM f_test_tbl1 f1 INNER JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1) LIMIT 1) ORDER BY 1, 3; + QUERY PLAN +----------------------------------------------------------------------------------------------------------- + Sort + Sort Key: l.c8 + InitPlan 1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 f1) INNER JOIN (mongo_fdw_regress.test_tbl2 f2) + -> Seq Scan on l_test_tbl1 l + Filter: (c1 = (InitPlan 1).col1) +(7 rows) + +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 = (SELECT f1.c1 FROM f_test_tbl1 f1 INNER JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1) LIMIT 1) ORDER BY 1, 3; + c1 | c6 | c8 +-----+-------+---- + 100 | 800.3 | 20 +(1 row) + +-- Execute JOIN through PREPARE statement. 
+PREPARE pre_stmt_left_join AS +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c4 > d.c1) ORDER BY 1, 3 OFFSET 70; +EXPLAIN (COSTS OFF) +EXECUTE pre_stmt_left_join; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Limit + -> Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(5 rows) + +EXECUTE pre_stmt_left_join; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+------+---- + 50 | TESTING | 1100 | EMP11 | 1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | + | | | | | +(7 rows) + +PREPARE pre_stmt_inner_join AS +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c4 > d.c1) ORDER BY 1, 3 OFFSET 70; +EXPLAIN (COSTS OFF) +EXECUTE pre_stmt_inner_join; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(5 rows) + +EXECUTE pre_stmt_inner_join; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+------+---- + 50 | TESTING | 1100 | EMP11 | 1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | +(6 rows) + +-- join + WHERE clause push-down. 
+EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE d.c1 = 10 ORDER BY 1 DESC NULLS LAST, 3 DESC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE d.c1 = 10 ORDER BY 1 DESC NULLS LAST, 3 DESC NULLS LAST; + c1 | c2 | c1 | c2 | c6 | c8 +----+-------------+------+-------+---------+---- + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 +(3 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE e.c8 = 10 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 e) LEFT JOIN (mongo_fdw_regress.test_tbl2 d) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE e.c8 = 10 ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+-------------+------+-------+---------+---- + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 +(3 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE d.c2 = 'SALES' ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE d.c2 = 'SALES' ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+-------+------+-------+---------+---- + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 +(6 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE e.c2 = 'EMP2' ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE e.c2 = 'EMP2' ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+-------+-----+------+------+---- + 30 | SALES | 200 | EMP2 | 1600 | 30 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') WHERE d.c1 = 10 OR e.c8 = 30 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort 
Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') WHERE d.c1 = 10 OR e.c8 = 30 ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+-------------+-----+------+-------+---- + 10 | DEVELOPMENT | 100 | EMP1 | 800.3 | 20 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, d.c6, d.c8 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8 AND (e.c1 = 20 OR d.c2 = 'EMP1')) WHERE e.c1 = 20 AND d.c8 = 20 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl2 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, d.c6, d.c8 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8 AND (e.c1 = 20 OR d.c2 = 'EMP1')) WHERE e.c1 = 20 AND d.c8 = 20 ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +------+-------+----+----------------+-------+---- + 100 | EMP1 | 20 | ADMINISTRATION | 800.3 | 20 + 400 | EMP4 | 20 | ADMINISTRATION | 2975 | 20 + 800 | EMP8 | 20 | ADMINISTRATION | 3000 | 20 + 1100 | EMP11 | 20 | ADMINISTRATION | 1100 | 20 + 1300 | EMP13 | 20 | ADMINISTRATION | 3000 | 20 +(5 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8 AND (d.c5 = '02-22-1981' OR d.c5 = '12-17-1980')) ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Sort + Sort Key: d.c1, d.c5 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) LEFT JOIN (mongo_fdw_regress.test_tbl2 e) +(4 rows) + +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8 AND (d.c5 = '02-22-1981' OR d.c5 = '12-17-1980')) ORDER BY 1, 3; + c1 | c2 | c5 | c1 | c2 +------+-------+------------+----+---------------- + 100 | EMP1 | 12-17-1980 | 20 | ADMINISTRATION + 200 | EMP2 | 02-20-1981 | | + 300 | EMP3 | 02-22-1981 | 30 | SALES + 400 | EMP4 | 04-02-1981 | | + 500 | EMP5 | 09-28-1981 | | + 600 | EMP6 | 05-01-1981 | | + 700 | EMP7 | 06-09-1981 | | + 800 | EMP8 | 04-19-1987 | | + 900 | EMP9 | 11-17-1981 | | + 1000 | EMP10 | 09-08-1980 | | + 1100 | EMP11 | 05-23-1987 | | + 1200 | EMP12 | 12-03-1981 | | + 1300 | EMP13 | 12-03-1981 | | + 1400 | EMP14 | 01-23-1982 | | + 1500 | EMP15 | 12-25-2000 | | + 1600 | EMP16 | | | +(16 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8) WHERE d.c5 = '02-22-1981' ORDER BY 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Sort + Sort Key: d.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) LEFT JOIN (mongo_fdw_regress.test_tbl2 e) +(4 rows) + +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8) WHERE d.c5 = '02-22-1981' ORDER BY 1; + c1 | c2 | c5 | c1 | c2 +-----+------+------------+----+------- + 300 | EMP3 | 02-22-1981 | 30 | SALES +(1 row) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') WHERE d.c1 = 10 OR e.c8 = 30 ORDER BY 1 DESC NULLS LAST, 3 
DESC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Filter: ((c1 = 10) OR (c8 = 30)) + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(3 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') WHERE d.c1 = 10 OR e.c8 = 30 ORDER BY 1 DESC NULLS LAST, 3 DESC NULLS LAST; + c1 | c2 | c1 | c2 | c6 | c8 +----+-------------+-----+------+-------+---- + 10 | DEVELOPMENT | 100 | EMP1 | 800.3 | 20 +(1 row) + +-- Natural join, should push-down. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d NATURAL JOIN f_test_tbl1 e WHERE e.c1 > d.c8 ORDER BY 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d NATURAL JOIN f_test_tbl1 e WHERE e.c1 > d.c8 ORDER BY 1; + c1 | c2 | c5 | c1 | c2 +------+-------+------------+------+------- + 100 | EMP1 | 12-17-1980 | 100 | EMP1 + 200 | EMP2 | 02-20-1981 | 200 | EMP2 + 300 | EMP3 | 02-22-1981 | 300 | EMP3 + 400 | EMP4 | 04-02-1981 | 400 | EMP4 + 500 | EMP5 | 09-28-1981 | 500 | EMP5 + 600 | EMP6 | 05-01-1981 | 600 | EMP6 + 700 | EMP7 | 06-09-1981 | 700 | EMP7 + 800 | EMP8 | 04-19-1987 | 800 | EMP8 + 1000 | EMP10 | 09-08-1980 | 1000 | EMP10 + 1100 | EMP11 | 05-23-1987 | 1100 | EMP11 + 1200 | EMP12 | 12-03-1981 | 1200 | EMP12 + 1300 | EMP13 | 12-03-1981 | 1300 | EMP13 + 1400 | EMP14 | 01-23-1982 | 1400 | EMP14 + 1500 | EMP15 | 12-25-2000 | 1500 | EMP15 +(14 rows) + +-- Self join, should push-down. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d INNER JOIN f_test_tbl1 e ON e.c8 = d.c8 ORDER BY 1 OFFSET 65; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(5 rows) + +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d INNER JOIN f_test_tbl1 e ON e.c8 = d.c8 ORDER BY 1 OFFSET 65; + c1 | c2 | c5 | c1 | c2 +------+-------+------------+------+------- + 1300 | EMP13 | 12-03-1981 | 1100 | EMP11 + 1300 | EMP13 | 12-03-1981 | 1300 | EMP13 + 1400 | EMP14 | 01-23-1982 | 700 | EMP7 + 1400 | EMP14 | 01-23-1982 | 900 | EMP9 + 1400 | EMP14 | 01-23-1982 | 1400 | EMP14 + 1500 | EMP15 | 12-25-2000 | 1500 | EMP15 +(6 rows) + +-- Join in CTE. +-- Explain plan difference between v11 (or pre) and later. 
+EXPLAIN (COSTS false, VERBOSE) +WITH t (c1_1, c1_3, c2_1) AS ( + SELECT d.c1, d.c3, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (d.c8 = e.c1) +) SELECT c1_1, c2_1 FROM t ORDER BY c1_3, c1_1; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: d.c1, e.c1, d.c3 + Sort Key: d.c3, d.c1 + -> Foreign Scan + Output: d.c1, e.c1, d.c3 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl2 e) +(6 rows) + +WITH t (c1_1, c1_3, c2_1) AS ( + SELECT d.c1, d.c3, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (d.c8 = e.c1) +) SELECT c1_1, c2_1 FROM t ORDER BY c1_3, c1_1; + c1_1 | c2_1 +------+------ + 100 | 20 + 1100 | 20 + 1200 | 30 + 1400 | 10 + 800 | 20 + 1300 | 20 + 900 | 10 + 400 | 20 + 600 | 30 + 700 | 10 + 200 | 30 + 300 | 30 + 500 | 30 + 1000 | 30 +(14 rows) + +-- WHERE with boolean expression. Should push-down. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl2 e LEFT JOIN f_test_tbl1 d ON (e.c1 = d.c8) WHERE d.c5 = '02-22-1981' OR d.c5 = '12-17-1980' ORDER BY 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 e) INNER JOIN (mongo_fdw_regress.test_tbl1 d) +(4 rows) + +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl2 e LEFT JOIN f_test_tbl1 d ON (e.c1 = d.c8) WHERE d.c5 = '02-22-1981' OR d.c5 = '12-17-1980' ORDER BY 1; + c1 | c2 | c5 | c1 | c2 +-----+------+------------+----+---------------- + 100 | EMP1 | 12-17-1980 | 20 | ADMINISTRATION + 300 | EMP3 | 02-22-1981 | 30 | SALES +(2 rows) + +-- Nested joins(Don't push-down nested join) +SET enable_mergejoin TO OFF; +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8) LEFT JOIN f_test_tbl1 f ON (f.c8 = e.c1) ORDER BY d.c1 OFFSET 65 ; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Limit + -> Sort + Sort Key: d.c1 + -> Hash Left Join + Hash Cond: (e.c1 = f.c8) + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) LEFT JOIN (mongo_fdw_regress.test_tbl2 e) + -> Hash + -> Foreign Scan on f_test_tbl1 f + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8) LEFT JOIN f_test_tbl1 f ON (f.c8 = e.c1) ORDER BY d.c1 OFFSET 65; + c1 | c2 | c5 | c1 | c2 +------+-------+------------+----+---------------- + 1300 | EMP13 | 12-03-1981 | 20 | ADMINISTRATION + 1300 | EMP13 | 12-03-1981 | 20 | ADMINISTRATION + 1400 | EMP14 | 01-23-1982 | 10 | DEVELOPMENT + 1400 | EMP14 | 01-23-1982 | 10 | DEVELOPMENT + 1400 | EMP14 | 01-23-1982 | 10 | DEVELOPMENT + 1500 | EMP15 | 12-25-2000 | | + 1600 | EMP16 | | | +(7 rows) + +RESET enable_mergejoin; +-- Not supported expressions won't push-down(e.g. function expression, etc.) 
+EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (ABS(d.c1) = e.c8) ORDER BY 1, 3; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: d.c1, e.c1 + -> Merge Left Join + Merge Cond: ((abs(d.c1)) = e.c8) + -> Sort + Sort Key: (abs(d.c1)) + -> Foreign Scan on f_test_tbl2 d + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Sort Key: e.c8 + -> Foreign Scan on f_test_tbl1 e + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(12 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (ABS(d.c1) = e.c8) ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | + | | | | | +(17 rows) + +-- Don't pushdown when whole row reference is involved. +EXPLAIN (COSTS OFF) +SELECT d, e + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8) LEFT JOIN f_test_tbl1 f ON (f.c8 = e.c1) ORDER BY e.c1 OFFSET 65; + QUERY PLAN +-------------------------------------------------------------------------------- + Limit + -> Merge Left Join + Merge Cond: (e.c1 = f.c8) + -> Sort + Sort Key: e.c1 + -> Hash Left Join + Hash Cond: (d.c8 = e.c1) + -> Foreign Scan on f_test_tbl1 d + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + -> Foreign Scan on f_test_tbl2 e + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Sort Key: f.c8 + -> Foreign Scan on f_test_tbl1 f + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(16 rows) + +-- Don't pushdown when full document retrieval is involved. 
+EXPLAIN (COSTS OFF) +SELECT json_data.key AS key1, json_data.value AS value1 + FROM test_text, test_varchar, json_each_text(test_text.__doc::json) AS json_data WHERE key NOT IN ('_id') ORDER BY json_data.key COLLATE "C"; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: json_data.key COLLATE "C" + -> Nested Loop + -> Nested Loop + -> Foreign Scan on test_text + Foreign Namespace: mongo_fdw_regress.warehouse + -> Function Scan on json_each_text json_data + Filter: (key <> '_id'::text) + -> Materialize + -> Foreign Scan on test_varchar + Foreign Namespace: mongo_fdw_regress.warehouse +(11 rows) + +SELECT json_data.key AS key1, json_data.value AS value1 + FROM test_text, test_varchar, json_each_text(test_text.__doc::json) AS json_data WHERE key NOT IN ('_id') ORDER BY json_data.key COLLATE "C"; + key1 | value1 +-------------------+----------------------------- + warehouse_created | { "$date" : 1418368330000 } + warehouse_created | { "$date" : 1418368330000 } + warehouse_created | { "$date" : 1447229590000 } + warehouse_created | { "$date" : 1447229590000 } + warehouse_id | 2 + warehouse_id | 1 + warehouse_id | 1 + warehouse_id | 2 + warehouse_name | Laptop + warehouse_name | Laptop + warehouse_name | UPS + warehouse_name | UPS +(12 rows) + +-- Join two tables from two different foreign servers. +EXPLAIN (COSTS OFF) +SELECT d.c1, e.c1 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl3 e ON d.c1 = e.c1 ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------- + Merge Left Join + Merge Cond: (d.c1 = e.c1) + -> Sort + Sort Key: d.c1 + -> Foreign Scan on f_test_tbl2 d + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Sort Key: e.c1 + -> Foreign Scan on f_test_tbl3 e + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(10 rows) + +-- SEMI JOIN, not pushed down +EXPLAIN (COSTS OFF) +SELECT d.c2 + FROM f_test_tbl1 d WHERE EXISTS (SELECT 1 FROM f_test_tbl2 e WHERE d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c2 + -> Hash Join + Hash Cond: (d.c8 = e.c1) + -> Foreign Scan on f_test_tbl1 d + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + -> HashAggregate + Group Key: e.c1 + -> Foreign Scan on f_test_tbl2 e + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(12 rows) + +SELECT d.c2 + FROM f_test_tbl1 d WHERE EXISTS (SELECT 1 FROM f_test_tbl2 e WHERE d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; + c2 +------- + EMP1 + EMP10 + EMP11 + EMP12 + EMP13 + EMP14 + EMP2 + EMP3 + EMP4 + EMP5 +(10 rows) + +-- ANTI JOIN, not pushed down +EXPLAIN (COSTS OFF) +SELECT d.c2 + FROM f_test_tbl1 d WHERE NOT EXISTS (SELECT 1 FROM f_test_tbl2 e WHERE d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c2 + -> Hash Anti Join + Hash Cond: (d.c8 = e.c1) + -> Foreign Scan on f_test_tbl1 d + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + -> Foreign Scan on f_test_tbl2 e + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(10 rows) + +SELECT d.c2 + FROM f_test_tbl1 d WHERE NOT EXISTS (SELECT 1 FROM f_test_tbl2 e WHERE d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; + c2 +------- + EMP15 + EMP16 +(2 rows) + +-- FULL OUTER JOIN, should not pushdown. 
+EXPLAIN (COSTS OFF) +SELECT d.c1, e.c1 + FROM f_test_tbl1 d FULL JOIN f_test_tbl2 e ON (d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c2 + -> Hash Full Join + Hash Cond: (d.c8 = e.c1) + -> Foreign Scan on f_test_tbl1 d + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + -> Foreign Scan on f_test_tbl2 e + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(10 rows) + +SELECT d.c1, e.c1 + FROM f_test_tbl1 d FULL JOIN f_test_tbl2 e ON (d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; + c1 | c1 +------+---- + 100 | 20 + 1000 | 30 + 1100 | 20 + 1200 | 30 + 1300 | 20 + 1400 | 10 + 1500 | + 1600 | + 200 | 30 + 300 | 30 +(10 rows) + +-- CROSS JOIN can be pushed down +EXPLAIN (COSTS OFF) +SELECT e.c1, d.c2 + FROM f_test_tbl1 d CROSS JOIN f_test_tbl2 e ORDER BY e.c1, d.c2 LIMIT 10; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: e.c1, d.c2 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl2 e) +(5 rows) + +SELECT e.c1, d.c2 + FROM f_test_tbl1 d CROSS JOIN f_test_tbl2 e ORDER BY e.c1, d.c2 LIMIT 10; + c1 | c2 +----+------- + 10 | EMP1 + 10 | EMP10 + 10 | EMP11 + 10 | EMP12 + 10 | EMP13 + 10 | EMP14 + 10 | EMP15 + 10 | EMP16 + 10 | EMP2 + 10 | EMP3 +(10 rows) + +-- FDW-131: Limit and offset pushdown with join pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT round(2.2) OFFSET 2; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(3 rows) + +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT round(2.2) OFFSET 2; + c1 | c1 +-----+---- + 100 | 10 + 100 | 30 +(2 rows) + +-- Limit as NULL, no LIMIT/OFFSET pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (t1.c8 = t2.c1) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT NULL OFFSET 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(3 rows) + +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (t1.c8 = t2.c1) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT NULL OFFSET 1; + c1 | c1 +------+---- + 200 | 30 + 300 | 30 + 400 | 20 + 500 | 30 + 600 | 30 + 700 | 10 + 800 | 20 + 900 | 10 + 1000 | 30 + 1100 | 20 + 1200 | 30 + 1300 | 20 + 1400 | 10 +(13 rows) + +-- Limit as ALL, no LIMIT/OFFSET pushdown. 
+EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (t1.c8 = t2.c1) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT ALL OFFSET 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(3 rows) + +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (t1.c8 = t2.c1) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT ALL OFFSET 1; + c1 | c1 +------+---- + 200 | 30 + 300 | 30 + 400 | 20 + 500 | 30 + 600 | 30 + 700 | 10 + 800 | 20 + 900 | 10 + 1000 | 30 + 1100 | 20 + 1200 | 30 + 1300 | 20 + 1400 | 10 +(13 rows) + +-- Offset as NULL, no LIMIT/OFFSET pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT 3 OFFSET NULL; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(3 rows) + +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT 3 OFFSET NULL; + c1 | c1 +-----+---- + 100 | 10 + 100 | 20 + 100 | 30 +(3 rows) + +-- Limit with -ve value. Shouldn't pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT -2; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Limit + Output: t1.c1, t2.c1 + -> Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(5 rows) + +-- Should throw an error. +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT -2; +ERROR: LIMIT must not be negative +-- Offset with -ve value. Shouldn't pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST OFFSET -1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Limit + Output: t1.c1, t2.c1 + -> Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(5 rows) + +-- Should throw an error. +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST OFFSET -1; +ERROR: OFFSET must not be negative +-- Limit/Offset with -ve value. Shouldn't pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT -3 OFFSET -1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Limit + Output: t1.c1, t2.c1 + -> Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(5 rows) + +-- Should throw an error. 
+SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT -3 OFFSET -1; +ERROR: OFFSET must not be negative +-- Limit with expression evaluating to -ve value. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT (1 - (SELECT COUNT(*) FROM f_test_tbl1)); + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Limit + Output: t1.c1, t2.c1 + InitPlan 1 + -> Foreign Scan + Output: (count(*)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 f_test_tbl1) + -> Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(9 rows) + +-- Should throw an error. +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT (1 - (SELECT COUNT(*) FROM f_test_tbl1)); +ERROR: LIMIT must not be negative +-- Test partition-wise join +SET enable_partitionwise_join TO on; +-- Create the partition tables +CREATE TABLE fprt1 (_id NAME, c1 INTEGER, c2 INTEGER, c3 TEXT) PARTITION BY RANGE(c1); +CREATE FOREIGN TABLE ftprt1_p1 PARTITION OF fprt1 FOR VALUES FROM (1) TO (4) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test1'); +CREATE FOREIGN TABLE ftprt1_p2 PARTITION OF fprt1 FOR VALUES FROM (5) TO (8) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test2'); +CREATE TABLE fprt2 (_id NAME, c1 INTEGER, c2 INTEGER, c3 TEXT) PARTITION BY RANGE(c2); +CREATE FOREIGN TABLE ftprt2_p1 PARTITION OF fprt2 FOR VALUES FROM (1) TO (4) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test3'); +CREATE FOREIGN TABLE ftprt2_p2 PARTITION OF fprt2 FOR VALUES FROM (5) TO (8) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test4'); +-- Inner join two tables +-- Different explain plan on v10 as partition-wise join is not supported there. +SET enable_mergejoin TO OFF; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.c1, t2.c2 + FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.c1 = t2.c2) ORDER BY 1,2; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: t1.c1, t2.c2 + Sort Key: t1.c1 + -> Append + -> Foreign Scan + Output: t1_1.c1, t2_1.c2 + Foreign Namespace: (mongo_fdw_regress.test1 t1) INNER JOIN (mongo_fdw_regress.test3 t2) + -> Foreign Scan + Output: t1_2.c1, t2_2.c2 + Foreign Namespace: (mongo_fdw_regress.test2 t1) INNER JOIN (mongo_fdw_regress.test4 t2) +(10 rows) + +SELECT t1.c1, t2.c2 + FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.c1 = t2.c2) ORDER BY 1,2; + c1 | c2 +----+---- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + 8 | 8 +(8 rows) + +-- Inner join three tables +-- Different explain plan on v10 as partition-wise join is not supported there. 
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.c1, t2.c2, t3.c2 + FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.c1 = t2.c2) INNER JOIN fprt1 t3 ON (t3.c1 = t2.c2) ORDER BY 1,2; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Sort + Output: t1.c1, t2.c2, t3.c2 + Sort Key: t1.c1 + -> Append + -> Hash Join + Output: t1_1.c1, t2_1.c2, t3_1.c2 + Hash Cond: (t1_1.c1 = t3_1.c1) + -> Foreign Scan + Output: t1_1.c1, t2_1.c2 + Foreign Namespace: (mongo_fdw_regress.test1 t1) INNER JOIN (mongo_fdw_regress.test3 t2) + -> Hash + Output: t3_1.c2, t3_1.c1 + -> Foreign Scan on public.ftprt1_p1 t3_1 + Output: t3_1.c2, t3_1.c1 + Foreign Namespace: mongo_fdw_regress.test1 + -> Hash Join + Output: t1_2.c1, t2_2.c2, t3_2.c2 + Hash Cond: (t1_2.c1 = t3_2.c1) + -> Foreign Scan + Output: t1_2.c1, t2_2.c2 + Foreign Namespace: (mongo_fdw_regress.test2 t1) INNER JOIN (mongo_fdw_regress.test4 t2) + -> Hash + Output: t3_2.c2, t3_2.c1 + -> Foreign Scan on public.ftprt1_p2 t3_2 + Output: t3_2.c2, t3_2.c1 + Foreign Namespace: mongo_fdw_regress.test2 +(26 rows) + +SELECT t1.c1, t2.c2, t3.c2 + FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.c1 = t2.c2) INNER JOIN fprt1 t3 ON (t3.c1 = t2.c2) ORDER BY 1,2; + c1 | c2 | c2 +----+----+---- + 1 | 1 | 1 + 2 | 2 | 2 + 3 | 3 | 3 + 4 | 4 | 4 + 5 | 5 | 5 + 6 | 6 | 6 + 7 | 7 | 7 + 8 | 8 | 8 +(8 rows) + +RESET enable_mergejoin; +-- Join with lateral reference +-- Different explain plan on v10 as partition-wise join is not supported there. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.c1, t1.c2 + FROM fprt1 t1, LATERAL (SELECT t2.c1, t2.c2 FROM fprt2 t2 + WHERE t1.c1 = t2.c2 AND t1.c2 = t2.c1) q WHERE t1.c1 % 2 = 0 ORDER BY 1,2; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: t1.c1, t1.c2 + Sort Key: t1.c1, t1.c2 + -> Append + -> Foreign Scan + Output: t1_1.c1, t1_1.c2 + Foreign Namespace: (mongo_fdw_regress.test1 t1) INNER JOIN (mongo_fdw_regress.test3 t2) + -> Foreign Scan + Output: t1_2.c1, t1_2.c2 + Foreign Namespace: (mongo_fdw_regress.test2 t1) INNER JOIN (mongo_fdw_regress.test4 t2) +(10 rows) + +SELECT t1.c1, t1.c2 + FROM fprt1 t1, LATERAL (SELECT t2.c1, t2.c2 FROM fprt2 t2 + WHERE t1.c1 = t2.c2 AND t1.c2 = t2.c1) q WHERE t1.c1 % 2 = 0 ORDER BY 1,2; + c1 | c2 +----+---- + 2 | 2 + 4 | 4 + 6 | 6 + 8 | 8 +(4 rows) + +-- With PHVs, partitionwise join selected but no join pushdown +-- Table alias in foreign scan is different for v12, v11 and v10. 
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.c1, t1.phv, t2.c2, t2.phv + FROM (SELECT 't1_phv' phv, * FROM fprt1 WHERE c1 % 2 = 0) t1 LEFT JOIN + (SELECT 't2_phv' phv, * FROM fprt2 WHERE c2 % 2 = 0) t2 ON (t1.c1 = t2.c2) + ORDER BY t1.c1, t2.c2; + QUERY PLAN +-------------------------------------------------------------------------------- + Incremental Sort + Output: fprt1.c1, 't1_phv'::text, fprt2.c2, ('t2_phv'::text) + Sort Key: fprt1.c1, fprt2.c2 + Presorted Key: fprt1.c1 + -> Merge Append + Sort Key: fprt1.c1 + -> Merge Left Join + Output: fprt1_1.c1, 't1_phv'::text, fprt2_1.c2, ('t2_phv'::text) + Merge Cond: (fprt1_1.c1 = fprt2_1.c2) + -> Sort + Output: fprt1_1.c1 + Sort Key: fprt1_1.c1 + -> Foreign Scan on public.ftprt1_p1 fprt1_1 + Output: fprt1_1.c1 + Foreign Namespace: mongo_fdw_regress.test1 + -> Sort + Output: fprt2_1.c2, ('t2_phv'::text) + Sort Key: fprt2_1.c2 + -> Foreign Scan on public.ftprt2_p1 fprt2_1 + Output: fprt2_1.c2, 't2_phv'::text + Foreign Namespace: mongo_fdw_regress.test3 + -> Merge Left Join + Output: fprt1_2.c1, 't1_phv'::text, fprt2_2.c2, ('t2_phv'::text) + Merge Cond: (fprt1_2.c1 = fprt2_2.c2) + -> Sort + Output: fprt1_2.c1 + Sort Key: fprt1_2.c1 + -> Foreign Scan on public.ftprt1_p2 fprt1_2 + Output: fprt1_2.c1 + Foreign Namespace: mongo_fdw_regress.test2 + -> Sort + Output: fprt2_2.c2, ('t2_phv'::text) + Sort Key: fprt2_2.c2 + -> Foreign Scan on public.ftprt2_p2 fprt2_2 + Output: fprt2_2.c2, 't2_phv'::text + Foreign Namespace: mongo_fdw_regress.test4 +(36 rows) + +SELECT t1.c1, t1.phv, t2.c2, t2.phv + FROM (SELECT 't1_phv' phv, * FROM fprt1 WHERE c1 % 2 = 0) t1 LEFT JOIN + (SELECT 't2_phv' phv, * FROM fprt2 WHERE c2 % 2 = 0) t2 ON (t1.c1 = t2.c2) + ORDER BY t1.c1, t2.c2; + c1 | phv | c2 | phv +----+--------+----+-------- + 2 | t1_phv | 2 | t2_phv + 4 | t1_phv | 4 | t2_phv + 6 | t1_phv | 6 | t2_phv + 8 | t1_phv | 8 | t2_phv +(4 rows) + +RESET enable_partitionwise_join; +-- FDW-445: Support enable_join_pushdown option at server level and table level. +-- Check only boolean values are accepted. +ALTER SERVER mongo_server OPTIONS (ADD enable_join_pushdown 'abc11'); +ERROR: enable_join_pushdown requires a Boolean value +-- Test the option at server level. 
+ALTER SERVER mongo_server OPTIONS (ADD enable_join_pushdown 'false'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +-------------------------------------------------------------------- + Incremental Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + Presorted Key: d.c1 + -> Merge Join + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Merge Cond: (d.c1 = e.c8) + -> Sort + Output: d.c1, d.c2 + Sort Key: d.c1 + -> Foreign Scan on public.f_test_tbl2 d + Output: d.c1, d.c2 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Output: e.c1, e.c2, e.c6, e.c8 + Sort Key: e.c8 + -> Foreign Scan on public.f_test_tbl1 e + Output: e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(19 rows) + +ALTER SERVER mongo_server OPTIONS (SET enable_join_pushdown 'true'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + -> Foreign Scan + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(6 rows) + +-- Test the option with outer rel. +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (ADD enable_join_pushdown 'false'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +-------------------------------------------------------------------- + Incremental Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + Presorted Key: d.c1 + -> Merge Join + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Merge Cond: (d.c1 = e.c8) + -> Sort + Output: d.c1, d.c2 + Sort Key: d.c1 + -> Foreign Scan on public.f_test_tbl2 d + Output: d.c1, d.c2 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Output: e.c1, e.c2, e.c6, e.c8 + Sort Key: e.c8 + -> Foreign Scan on public.f_test_tbl1 e + Output: e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(19 rows) + +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'true'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + -> Foreign Scan + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(6 rows) + +-- Test the option with inner rel. 
+ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (ADD enable_join_pushdown 'false'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +-------------------------------------------------------------------- + Incremental Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + Presorted Key: d.c1 + -> Merge Join + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Merge Cond: (d.c1 = e.c8) + -> Sort + Output: d.c1, d.c2 + Sort Key: d.c1 + -> Foreign Scan on public.f_test_tbl2 d + Output: d.c1, d.c2 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Output: e.c1, e.c2, e.c6, e.c8 + Sort Key: e.c8 + -> Foreign Scan on public.f_test_tbl1 e + Output: e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(19 rows) + +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'true'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + -> Foreign Scan + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(6 rows) + +-- Test that setting option at table level does not affect the setting at +-- server level. +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'false'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'false'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +-------------------------------------------------------------------- + Incremental Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + Presorted Key: d.c1 + -> Merge Join + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Merge Cond: (d.c1 = e.c8) + -> Sort + Output: d.c1, d.c2 + Sort Key: d.c1 + -> Foreign Scan on public.f_test_tbl2 d + Output: d.c1, d.c2 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Output: e.c1, e.c2, e.c6, e.c8 + Sort Key: e.c8 + -> Foreign Scan on public.f_test_tbl1 e + Output: e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(19 rows) + +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT t1.c1, t2.c2 + FROM f_test_tbl3 t1 JOIN f_test_tbl4 t2 ON (t1.c1 = t2.c8) ORDER BY 1, 2; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Sort + Output: t1.c1, t2.c2 + Sort Key: t1.c1, t2.c2 + -> Foreign Scan + Output: t1.c1, t2.c2 + Foreign Namespace: (mongo_fdw_regress.test_tbl2 t1) INNER JOIN (mongo_fdw_regress.test_tbl1 t2) +(6 rows) + +-- FDW-558: Test mongo_fdw.enable_join_pushdown GUC. +-- Negative testing for GUC value. +SET mongo_fdw.enable_join_pushdown to 'abc'; +ERROR: parameter "mongo_fdw.enable_join_pushdown" requires a Boolean value +-- Check default value. Should be ON. +SHOW mongo_fdw.enable_join_pushdown; + mongo_fdw.enable_join_pushdown +-------------------------------- + on +(1 row) + +-- Join pushdown should happen as the GUC enable_join_pushdown is true. 
+ALTER SERVER mongo_server OPTIONS (SET enable_join_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'true'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON (d.c1 = e.c8) ORDER BY 1, 2; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: d.c1, e.c8 + Sort Key: d.c1 + -> Foreign Scan + Output: d.c1, e.c8 + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(6 rows) + +--Disable the GUC enable_join_pushdown. +SET mongo_fdw.enable_join_pushdown to false; +-- Join pushdown shouldn't happen as the GUC enable_join_pushdown is false. +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON (d.c1 = e.c8) ORDER BY 1, 2; + QUERY PLAN +-------------------------------------------------------------- + Merge Join + Output: d.c1, e.c8 + Merge Cond: (d.c1 = e.c8) + -> Sort + Output: d.c1 + Sort Key: d.c1 + -> Foreign Scan on public.f_test_tbl2 d + Output: d.c1 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Output: e.c8 + Sort Key: e.c8 + -> Foreign Scan on public.f_test_tbl1 e + Output: e.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(15 rows) + +-- Enable the GUC and table level option is set to false, should not pushdown. +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'false'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'false'); +SET mongo_fdw.enable_join_pushdown to true; +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON (d.c1 = e.c8) ORDER BY 1, 2; + QUERY PLAN +-------------------------------------------------------------- + Merge Join + Output: d.c1, e.c8 + Merge Cond: (d.c1 = e.c8) + -> Sort + Output: d.c1 + Sort Key: d.c1 + -> Foreign Scan on public.f_test_tbl2 d + Output: d.c1 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Output: e.c8 + Sort Key: e.c8 + -> Foreign Scan on public.f_test_tbl1 e + Output: e.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(15 rows) + +-- FDW-589: Test enable_order_by_pushdown option at server and table level. 
+SET mongo_fdw.enable_join_pushdown to true; +SET mongo_fdw.enable_order_by_pushdown to true; +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'true'); +ALTER SERVER mongo_server OPTIONS (ADD enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (ADD enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (ADD enable_order_by_pushdown 'true'); +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | | | | + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(12 rows) + +-- One table level option is OFF. Shouldn't pushdown ORDER BY. +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_order_by_pushdown 'false'); +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Sort + Sort Key: d.c1 NULLS FIRST, e.c1 NULLS FIRST + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | | | | + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(12 rows) + +-- Test that setting option at table level does not affect the setting at +-- server level. 
+ALTER SERVER mongo_server OPTIONS (SET enable_order_by_pushdown 'false'); +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_order_by_pushdown 'true'); +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | | | | + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(12 rows) + +ALTER SERVER mongo_server OPTIONS (SET enable_order_by_pushdown 'true'); +-- When enable_join_pushdown option is disabled. Shouldn't pushdown join and +-- hence, ORDER BY too. +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'false'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'false'); +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------------- + Incremental Sort + Sort Key: d.c1 NULLS FIRST, e.c1 NULLS FIRST + Presorted Key: d.c1 + -> Merge Left Join + Merge Cond: (d.c1 = e.c8) + Join Filter: ((e.c4 > d.c1) AND (e.c2 < d.c3)) + -> Sort + Sort Key: d.c1 NULLS FIRST + -> Foreign Scan on f_test_tbl2 d + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Sort Key: e.c8 NULLS FIRST + -> Foreign Scan on f_test_tbl1 e + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(14 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | | | | + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(12 rows) + +DELETE FROM f_test_tbl1 WHERE c8 IS NULL; +DELETE FROM f_test_tbl1 WHERE c8 = 60; +DELETE FROM f_test_tbl2 WHERE c1 IS NULL; +DELETE FROM f_test_tbl2 WHERE c1 = 50; +DROP FOREIGN TABLE f_test_tbl1; +DROP FOREIGN TABLE f_test_tbl2; +DROP FOREIGN TABLE f_test_tbl3; +DROP FOREIGN TABLE f_test_tbl4; +DROP FOREIGN TABLE test_text; +DROP FOREIGN TABLE test_varchar; +DROP TABLE l_test_tbl1; +DROP FOREIGN TABLE 
ftprt1_p1; +DROP FOREIGN TABLE ftprt1_p2; +DROP FOREIGN TABLE ftprt2_p1; +DROP FOREIGN TABLE ftprt2_p2; +DROP TABLE IF EXISTS fprt1; +DROP TABLE IF EXISTS fprt2; +DROP USER MAPPING FOR public SERVER mongo_server1; +DROP SERVER mongo_server1; +DROP USER MAPPING FOR public SERVER mongo_server; +DROP SERVER mongo_server; +DROP EXTENSION mongo_fdw; diff --git a/expected/join_pushdown_3.out b/expected/join_pushdown_3.out new file mode 100644 index 0000000..096ef91 --- /dev/null +++ b/expected/join_pushdown_3.out @@ -0,0 +1,2127 @@ +\set MONGO_HOST `echo \'"$MONGO_HOST"\'` +\set MONGO_PORT `echo \'"$MONGO_PORT"\'` +\set MONGO_USER_NAME `echo \'"$MONGO_USER_NAME"\'` +\set MONGO_PASS `echo \'"$MONGO_PWD"\'` +-- Before running this file user must create database mongo_fdw_regress on +-- MongoDB with all permission for MONGO_USER_NAME user with MONGO_PASS +-- password and ran mongodb_init.sh file to load collections. +\c contrib_regression +CREATE EXTENSION IF NOT EXISTS mongo_fdw; +CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw + OPTIONS (address :MONGO_HOST, port :MONGO_PORT); +CREATE USER MAPPING FOR public SERVER mongo_server; +CREATE SERVER mongo_server1 FOREIGN DATA WRAPPER mongo_fdw + OPTIONS (address :MONGO_HOST, port :MONGO_PORT); +CREATE USER MAPPING FOR public SERVER mongo_server1; +-- Create foreign tables. +CREATE FOREIGN TABLE f_test_tbl1 (_id NAME, c1 INTEGER, c2 TEXT, c3 CHAR(9), c4 INTEGER, c5 pg_catalog.Date, c6 DECIMAL, c7 INTEGER, c8 INTEGER) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl1'); +CREATE FOREIGN TABLE f_test_tbl2 (_id NAME, c1 INTEGER, c2 TEXT, c3 TEXT) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl2'); +CREATE FOREIGN TABLE f_test_tbl3 (_id NAME, c1 INTEGER, c2 TEXT, c3 TEXT) + SERVER mongo_server1 OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl2'); +CREATE FOREIGN TABLE test_text ( __doc text) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'warehouse'); +CREATE FOREIGN TABLE test_varchar ( __doc varchar) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'warehouse'); +CREATE FOREIGN TABLE f_test_tbl4 (_id NAME, c1 INTEGER, c2 TEXT, c3 CHAR(9), c4 INTEGER, c5 pg_catalog.Date, c6 DECIMAL, c7 INTEGER, c8 INTEGER) + SERVER mongo_server1 OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl1'); +INSERT INTO f_test_tbl1 VALUES (0, 1500, 'EMP15', 'FINANCE', 1300, '2000-12-25', 950.0, 400, 60); +INSERT INTO f_test_tbl1 VALUES (0, 1600, 'EMP16', 'ADMIN', 600); +INSERT INTO f_test_tbl2 VALUES (0, 50, 'TESTING', 'NASHIK'); +INSERT INTO f_test_tbl2 VALUES (0); +-- Create local table. +CREATE TABLE l_test_tbl1 AS + SELECT c1, c2, c3, c4, c5, c6, c7, c8 FROM f_test_tbl1; +-- Push down LEFT OUTER JOIN. 
+EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(17 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl1 e LEFT OUTER JOIN f_test_tbl2 d ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 e) LEFT JOIN (mongo_fdw_regress.test_tbl2 d) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl1 e LEFT OUTER JOIN f_test_tbl2 d ON e.c8 = d.c1 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + | | 1500 | EMP15 | 950 | 60 + | | 1600 | EMP16 | | +(16 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 OR e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST OFFSET 50; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 OR e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST OFFSET 50; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+---------+---- + 40 | HR | 1300 | EMP13 | 3000 | 20 + 40 | HR | 1400 | EMP14 | 1300 | 10 + 40 | HR | 1500 | EMP15 | 950 | 60 + 40 | HR | 1600 | EMP16 | | + 50 | TESTING | 100 | EMP1 | 800.3 
| 20 + 50 | TESTING | 200 | EMP2 | 1600 | 30 + 50 | TESTING | 300 | EMP3 | 1250 | 30 + 50 | TESTING | 400 | EMP4 | 2975 | 20 + 50 | TESTING | 500 | EMP5 | 1250.23 | 30 + 50 | TESTING | 600 | EMP6 | 2850 | 30 + 50 | TESTING | 700 | EMP7 | 2450.34 | 10 + 50 | TESTING | 800 | EMP8 | 3000 | 20 + 50 | TESTING | 900 | EMP9 | 5000 | 10 + 50 | TESTING | 1000 | EMP10 | 1500 | 30 + 50 | TESTING | 1100 | EMP11 | 1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | +(20 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | | | | + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(12 rows) + +-- With ORDER BY pushdown disabled. +SET mongo_fdw.enable_order_by_pushdown TO OFF; +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Sort + Sort Key: d.c1 NULLS FIRST, e.c1 NULLS FIRST + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | | | | + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(12 rows) + +SET mongo_fdw.enable_order_by_pushdown TO ON; +-- Column comparing with 'Constant' pushed down. 
+EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = 20 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = 20 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | | | | + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 200 | EMP2 | 1600 | 30 + 20 | ADMINISTRATION | 300 | EMP3 | 1250 | 30 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 500 | EMP5 | 1250.23 | 30 + 20 | ADMINISTRATION | 600 | EMP6 | 2850 | 30 + 20 | ADMINISTRATION | 700 | EMP7 | 2450.34 | 10 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 900 | EMP9 | 5000 | 10 + 20 | ADMINISTRATION | 1000 | EMP10 | 1500 | 30 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1200 | EMP12 | 950 | 30 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 20 | ADMINISTRATION | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 1500 | EMP15 | 950 | 60 + 20 | ADMINISTRATION | 1600 | EMP16 | | + 30 | SALES | | | | + 40 | HR | | | | + 50 | TESTING | | | | +(21 rows) + +-- Push down RIGHT OUTER JOIN. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 e) LEFT JOIN (mongo_fdw_regress.test_tbl2 d) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + | | 1500 | EMP15 | 950 | 60 + | | 1600 | EMP16 | | +(16 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl1 e RIGHT OUTER JOIN f_test_tbl2 d ON e.c8 = d.c1 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl1 e RIGHT OUTER JOIN f_test_tbl2 d ON e.c8 = d.c1 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | 
DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(17 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c4 > d.c1 OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Limit + -> Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 e) LEFT JOIN (mongo_fdw_regress.test_tbl2 d) +(5 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c4 > d.c1 OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+---------+---- + 40 | HR | 1400 | EMP14 | 1300 | 10 + 40 | HR | 1500 | EMP15 | 950 | 60 + 40 | HR | 1600 | EMP16 | | + 50 | TESTING | 100 | EMP1 | 800.3 | 20 + 50 | TESTING | 200 | EMP2 | 1600 | 30 + 50 | TESTING | 300 | EMP3 | 1250 | 30 + 50 | TESTING | 400 | EMP4 | 2975 | 20 + 50 | TESTING | 500 | EMP5 | 1250.23 | 30 + 50 | TESTING | 600 | EMP6 | 2850 | 30 + 50 | TESTING | 700 | EMP7 | 2450.34 | 10 + 50 | TESTING | 800 | EMP8 | 3000 | 20 + 50 | TESTING | 900 | EMP9 | 5000 | 10 + 50 | TESTING | 1000 | EMP10 | 1500 | 30 + 50 | TESTING | 1100 | EMP11 | 1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | +(19 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON ((d.c1 = e.c8 OR e.c4 > d.c1) AND e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Limit + -> Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(5 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON ((d.c1 = e.c8 OR e.c4 > d.c1) OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+---------+---- + 40 | HR | 1400 | EMP14 | 1300 | 10 + 40 | HR | 1500 | EMP15 | 950 | 60 + 40 | HR | 1600 | EMP16 | | + 50 | TESTING | 100 | EMP1 | 800.3 | 20 + 50 | TESTING | 200 | EMP2 | 1600 | 30 + 50 | TESTING | 300 | EMP3 | 1250 | 30 + 50 | TESTING | 400 | EMP4 | 2975 | 20 + 50 | TESTING | 500 | EMP5 | 1250.23 | 30 + 50 | TESTING | 600 | EMP6 | 2850 | 30 + 50 | TESTING | 700 | EMP7 | 2450.34 | 10 + 50 | TESTING | 800 | EMP8 | 3000 | 20 + 50 | TESTING | 900 | EMP9 | 5000 | 10 + 50 | TESTING | 1000 | EMP10 | 1500 | 30 + 50 | TESTING | 1100 | EMP11 | 1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | 
TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | + | | | | | +(20 rows) + +-- Column comparing with 'Constant' pushed down. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON d.c1 = 20 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 e) LEFT JOIN (mongo_fdw_regress.test_tbl2 d) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON (d.c1 = 20 AND e.c2 = 'EMP1') ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + | | 200 | EMP2 | 1600 | 30 + | | 300 | EMP3 | 1250 | 30 + | | 400 | EMP4 | 2975 | 20 + | | 500 | EMP5 | 1250.23 | 30 + | | 600 | EMP6 | 2850 | 30 + | | 700 | EMP7 | 2450.34 | 10 + | | 800 | EMP8 | 3000 | 20 + | | 900 | EMP9 | 5000 | 10 + | | 1000 | EMP10 | 1500 | 30 + | | 1100 | EMP11 | 1100 | 20 + | | 1200 | EMP12 | 950 | 30 + | | 1300 | EMP13 | 3000 | 20 + | | 1400 | EMP14 | 1300 | 10 + | | 1500 | EMP15 | 950 | 60 + | | 1600 | EMP16 | | +(16 rows) + +-- Push INNER JOIN. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 +(14 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON ((d.c1 = e.c8 OR e.c4 > d.c1) AND e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(5 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON ((d.c1 = e.c8 OR e.c4 > d.c1) OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+---------+---- + 40 | HR | 1400 | EMP14 | 1300 | 10 + 40 | HR | 1500 | EMP15 | 950 | 60 + 40 | HR | 1600 | EMP16 | | + 50 | TESTING | 100 | EMP1 | 800.3 | 20 + 50 | TESTING | 200 | EMP2 | 1600 | 30 + 50 | TESTING | 300 | EMP3 | 1250 | 30 + 50 | TESTING | 400 | EMP4 | 2975 | 20 
+ 50 | TESTING | 500 | EMP5 | 1250.23 | 30 + 50 | TESTING | 600 | EMP6 | 2850 | 30 + 50 | TESTING | 700 | EMP7 | 2450.34 | 10 + 50 | TESTING | 800 | EMP8 | 3000 | 20 + 50 | TESTING | 900 | EMP9 | 5000 | 10 + 50 | TESTING | 1000 | EMP10 | 1500 | 30 + 50 | TESTING | 1100 | EMP11 | 1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | +(19 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(5 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+------+---- + 50 | TESTING | 800 | EMP8 | 3000 | 20 + 50 | TESTING | 900 | EMP9 | 5000 | 10 + 50 | TESTING | 1000 | EMP10 | 1500 | 30 + 50 | TESTING | 1100 | EMP11 | 1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | +(9 rows) + +-- Column comparing with 'Constant' pushed down. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+-------+---- + 10 | DEVELOPMENT | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 100 | EMP1 | 800.3 | 20 + 40 | HR | 100 | EMP1 | 800.3 | 20 + 50 | TESTING | 100 | EMP1 | 800.3 | 20 + | | 100 | EMP1 | 800.3 | 20 +(10 rows) + +-- INNER JOIN with WHERE clause. Should execute where condition separately +-- (NOT added into join clauses) on remote side. 
+EXPLAIN (COSTS OFF) +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (d.c8 = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl2 e) +(2 rows) + +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (d.c8 = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; + c1 | c1 +-----+---- + 100 | 20 +(1 row) + +-- INNER JOIN in which join clause is not pushable but WHERE condition is +-- pushable with join clause 'TRUE'. +EXPLAIN (COSTS OFF) +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (abs(d.c8) = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Foreign Scan + Filter: (abs(c8) = c1) + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl2 e) +(3 rows) + +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (abs(d.c8) = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; + c1 | c1 +-----+---- + 100 | 20 +(1 row) + +-- With ORDER BY pushdown disabled. +SET mongo_fdw.enable_order_by_pushdown TO OFF; +EXPLAIN (COSTS OFF) +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (abs(d.c8) = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: e.c3 DESC NULLS LAST + -> Foreign Scan + Filter: (abs(c8) = c1) + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl2 e) +(5 rows) + +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (abs(d.c8) = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; + c1 | c1 +-----+---- + 100 | 20 +(1 row) + +SET mongo_fdw.enable_order_by_pushdown TO ON; +SET enable_mergejoin TO OFF; +SET enable_nestloop TO OFF; +-- Local-Foreign table joins. 
+EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN l_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------- + Sort + Sort Key: d.c1 NULLS FIRST, e.c1 NULLS FIRST + -> Hash Left Join + Hash Cond: (d.c1 = e.c8) + -> Foreign Scan on f_test_tbl2 d + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Hash + -> Seq Scan on l_test_tbl1 e +(8 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN l_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(17 rows) + +RESET enable_mergejoin; +RESET enable_nestloop; +-- JOIN in sub-query, should be pushed down. +EXPLAIN (COSTS OFF) +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 IN (SELECT f1.c1 FROM f_test_tbl1 f1 LEFT JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1)) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------- + Sort + Sort Key: l.c1 NULLS FIRST, l.c8 NULLS FIRST + -> Hash Join + Hash Cond: (l.c1 = f1.c1) + -> Seq Scan on l_test_tbl1 l + -> Hash + -> HashAggregate + Group Key: f1.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 f1) LEFT JOIN (mongo_fdw_regress.test_tbl2 f2) +(10 rows) + +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 IN (SELECT f1.c1 FROM f_test_tbl1 f1 LEFT JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1)) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c6 | c8 +------+---------+---- + 100 | 800.3 | 20 + 200 | 1600 | 30 + 300 | 1250 | 30 + 400 | 2975 | 20 + 500 | 1250.23 | 30 + 600 | 2850 | 30 + 700 | 2450.34 | 10 + 800 | 3000 | 20 + 900 | 5000 | 10 + 1000 | 1500 | 30 + 1100 | 1100 | 20 + 1200 | 950 | 30 + 1300 | 3000 | 20 + 1400 | 1300 | 10 + 1500 | 950 | 60 + 1600 | | +(16 rows) + +EXPLAIN (COSTS OFF) +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 = (SELECT f1.c1 FROM f_test_tbl1 f1 LEFT JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1) LIMIT 1) ORDER BY 1, 3; + QUERY PLAN +---------------------------------------------------------------------------------------------------------- + Sort + Sort Key: l.c8 + InitPlan 1 (returns $0) + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 f1) LEFT JOIN (mongo_fdw_regress.test_tbl2 f2) + -> Seq Scan on l_test_tbl1 l + Filter: (c1 = $0) +(7 rows) + +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 = (SELECT f1.c1 FROM f_test_tbl1 f1 LEFT JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1) LIMIT 1) ORDER BY 1, 3; + c1 | c6 | c8 +-----+-------+---- + 100 | 800.3 | 20 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 = (SELECT f1.c1 FROM 
f_test_tbl1 f1 INNER JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1) LIMIT 1) ORDER BY 1, 3; + QUERY PLAN +----------------------------------------------------------------------------------------------------------- + Sort + Sort Key: l.c8 + InitPlan 1 (returns $0) + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 f1) INNER JOIN (mongo_fdw_regress.test_tbl2 f2) + -> Seq Scan on l_test_tbl1 l + Filter: (c1 = $0) +(7 rows) + +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 = (SELECT f1.c1 FROM f_test_tbl1 f1 INNER JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1) LIMIT 1) ORDER BY 1, 3; + c1 | c6 | c8 +-----+-------+---- + 100 | 800.3 | 20 +(1 row) + +-- Execute JOIN through PREPARE statement. +PREPARE pre_stmt_left_join AS +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c4 > d.c1) ORDER BY 1, 3 OFFSET 70; +EXPLAIN (COSTS OFF) +EXECUTE pre_stmt_left_join; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Limit + -> Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(5 rows) + +EXECUTE pre_stmt_left_join; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+------+---- + 50 | TESTING | 1100 | EMP11 | 1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | + | | | | | +(7 rows) + +PREPARE pre_stmt_inner_join AS +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c4 > d.c1) ORDER BY 1, 3 OFFSET 70; +EXPLAIN (COSTS OFF) +EXECUTE pre_stmt_inner_join; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(5 rows) + +EXECUTE pre_stmt_inner_join; + c1 | c2 | c1 | c2 | c6 | c8 +----+---------+------+-------+------+---- + 50 | TESTING | 1100 | EMP11 | 1100 | 20 + 50 | TESTING | 1200 | EMP12 | 950 | 30 + 50 | TESTING | 1300 | EMP13 | 3000 | 20 + 50 | TESTING | 1400 | EMP14 | 1300 | 10 + 50 | TESTING | 1500 | EMP15 | 950 | 60 + 50 | TESTING | 1600 | EMP16 | | +(6 rows) + +-- join + WHERE clause push-down. 
+EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE d.c1 = 10 ORDER BY 1 DESC NULLS LAST, 3 DESC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE d.c1 = 10 ORDER BY 1 DESC NULLS LAST, 3 DESC NULLS LAST; + c1 | c2 | c1 | c2 | c6 | c8 +----+-------------+------+-------+---------+---- + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 +(3 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE e.c8 = 10 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 e) LEFT JOIN (mongo_fdw_regress.test_tbl2 d) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE e.c8 = 10 ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+-------------+------+-------+---------+---- + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 +(3 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE d.c2 = 'SALES' ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE d.c2 = 'SALES' ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+-------+------+-------+---------+---- + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 +(6 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE e.c2 = 'EMP2' ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE e.c2 = 'EMP2' ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+-------+-----+------+------+---- + 30 | SALES | 200 | EMP2 | 1600 | 30 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') WHERE d.c1 = 10 OR e.c8 = 30 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort 
Key: d.c1, e.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') WHERE d.c1 = 10 OR e.c8 = 30 ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+-------------+-----+------+-------+---- + 10 | DEVELOPMENT | 100 | EMP1 | 800.3 | 20 +(1 row) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, d.c6, d.c8 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8 AND (e.c1 = 20 OR d.c2 = 'EMP1')) WHERE e.c1 = 20 AND d.c8 = 20 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl2 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, d.c6, d.c8 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8 AND (e.c1 = 20 OR d.c2 = 'EMP1')) WHERE e.c1 = 20 AND d.c8 = 20 ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +------+-------+----+----------------+-------+---- + 100 | EMP1 | 20 | ADMINISTRATION | 800.3 | 20 + 400 | EMP4 | 20 | ADMINISTRATION | 2975 | 20 + 800 | EMP8 | 20 | ADMINISTRATION | 3000 | 20 + 1100 | EMP11 | 20 | ADMINISTRATION | 1100 | 20 + 1300 | EMP13 | 20 | ADMINISTRATION | 3000 | 20 +(5 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8 AND (d.c5 = '02-22-1981' OR d.c5 = '12-17-1980')) ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Sort + Sort Key: d.c1, d.c5 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) LEFT JOIN (mongo_fdw_regress.test_tbl2 e) +(4 rows) + +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8 AND (d.c5 = '02-22-1981' OR d.c5 = '12-17-1980')) ORDER BY 1, 3; + c1 | c2 | c5 | c1 | c2 +------+-------+------------+----+---------------- + 100 | EMP1 | 12-17-1980 | 20 | ADMINISTRATION + 200 | EMP2 | 02-20-1981 | | + 300 | EMP3 | 02-22-1981 | 30 | SALES + 400 | EMP4 | 04-02-1981 | | + 500 | EMP5 | 09-28-1981 | | + 600 | EMP6 | 05-01-1981 | | + 700 | EMP7 | 06-09-1981 | | + 800 | EMP8 | 04-19-1987 | | + 900 | EMP9 | 11-17-1981 | | + 1000 | EMP10 | 09-08-1980 | | + 1100 | EMP11 | 05-23-1987 | | + 1200 | EMP12 | 12-03-1981 | | + 1300 | EMP13 | 12-03-1981 | | + 1400 | EMP14 | 01-23-1982 | | + 1500 | EMP15 | 12-25-2000 | | + 1600 | EMP16 | | | +(16 rows) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8) WHERE d.c5 = '02-22-1981' ORDER BY 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Sort + Sort Key: d.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) LEFT JOIN (mongo_fdw_regress.test_tbl2 e) +(4 rows) + +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8) WHERE d.c5 = '02-22-1981' ORDER BY 1; + c1 | c2 | c5 | c1 | c2 +-----+------+------------+----+------- + 300 | EMP3 | 02-22-1981 | 30 | SALES +(1 row) + +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') WHERE d.c1 = 10 OR e.c8 = 30 ORDER BY 1 DESC NULLS LAST, 3 
DESC NULLS LAST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Filter: ((c1 = 10) OR (c8 = 30)) + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(3 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') WHERE d.c1 = 10 OR e.c8 = 30 ORDER BY 1 DESC NULLS LAST, 3 DESC NULLS LAST; + c1 | c2 | c1 | c2 | c6 | c8 +----+-------------+-----+------+-------+---- + 10 | DEVELOPMENT | 100 | EMP1 | 800.3 | 20 +(1 row) + +-- Natural join, should push-down. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d NATURAL JOIN f_test_tbl1 e WHERE e.c1 > d.c8 ORDER BY 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d NATURAL JOIN f_test_tbl1 e WHERE e.c1 > d.c8 ORDER BY 1; + c1 | c2 | c5 | c1 | c2 +------+-------+------------+------+------- + 100 | EMP1 | 12-17-1980 | 100 | EMP1 + 200 | EMP2 | 02-20-1981 | 200 | EMP2 + 300 | EMP3 | 02-22-1981 | 300 | EMP3 + 400 | EMP4 | 04-02-1981 | 400 | EMP4 + 500 | EMP5 | 09-28-1981 | 500 | EMP5 + 600 | EMP6 | 05-01-1981 | 600 | EMP6 + 700 | EMP7 | 06-09-1981 | 700 | EMP7 + 800 | EMP8 | 04-19-1987 | 800 | EMP8 + 1000 | EMP10 | 09-08-1980 | 1000 | EMP10 + 1100 | EMP11 | 05-23-1987 | 1100 | EMP11 + 1200 | EMP12 | 12-03-1981 | 1200 | EMP12 + 1300 | EMP13 | 12-03-1981 | 1300 | EMP13 + 1400 | EMP14 | 01-23-1982 | 1400 | EMP14 + 1500 | EMP15 | 12-25-2000 | 1500 | EMP15 +(14 rows) + +-- Self join, should push-down. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d INNER JOIN f_test_tbl1 e ON e.c8 = d.c8 ORDER BY 1 OFFSET 65; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(5 rows) + +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d INNER JOIN f_test_tbl1 e ON e.c8 = d.c8 ORDER BY 1 OFFSET 65; + c1 | c2 | c5 | c1 | c2 +------+-------+------------+------+------- + 1300 | EMP13 | 12-03-1981 | 1100 | EMP11 + 1300 | EMP13 | 12-03-1981 | 1300 | EMP13 + 1400 | EMP14 | 01-23-1982 | 700 | EMP7 + 1400 | EMP14 | 01-23-1982 | 900 | EMP9 + 1400 | EMP14 | 01-23-1982 | 1400 | EMP14 + 1500 | EMP15 | 12-25-2000 | 1500 | EMP15 +(6 rows) + +-- Join in CTE. +-- Explain plan difference between v11 (or pre) and later. 
+EXPLAIN (COSTS false, VERBOSE) +WITH t (c1_1, c1_3, c2_1) AS ( + SELECT d.c1, d.c3, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (d.c8 = e.c1) +) SELECT c1_1, c2_1 FROM t ORDER BY c1_3, c1_1; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: d.c1, e.c1, d.c3 + Sort Key: d.c3, d.c1 + -> Foreign Scan + Output: d.c1, e.c1, d.c3 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl2 e) +(6 rows) + +WITH t (c1_1, c1_3, c2_1) AS ( + SELECT d.c1, d.c3, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (d.c8 = e.c1) +) SELECT c1_1, c2_1 FROM t ORDER BY c1_3, c1_1; + c1_1 | c2_1 +------+------ + 100 | 20 + 1100 | 20 + 1200 | 30 + 1400 | 10 + 800 | 20 + 1300 | 20 + 900 | 10 + 400 | 20 + 600 | 30 + 700 | 10 + 200 | 30 + 300 | 30 + 500 | 30 + 1000 | 30 +(14 rows) + +-- WHERE with boolean expression. Should push-down. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl2 e LEFT JOIN f_test_tbl1 d ON (e.c1 = d.c8) WHERE d.c5 = '02-22-1981' OR d.c5 = '12-17-1980' ORDER BY 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Sort Key: d.c1 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 e) INNER JOIN (mongo_fdw_regress.test_tbl1 d) +(4 rows) + +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl2 e LEFT JOIN f_test_tbl1 d ON (e.c1 = d.c8) WHERE d.c5 = '02-22-1981' OR d.c5 = '12-17-1980' ORDER BY 1; + c1 | c2 | c5 | c1 | c2 +-----+------+------------+----+---------------- + 100 | EMP1 | 12-17-1980 | 20 | ADMINISTRATION + 300 | EMP3 | 02-22-1981 | 30 | SALES +(2 rows) + +-- Nested joins(Don't push-down nested join) +SET enable_mergejoin TO OFF; +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8) LEFT JOIN f_test_tbl1 f ON (f.c8 = e.c1) ORDER BY d.c1 OFFSET 65 ; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Limit + -> Sort + Sort Key: d.c1 + -> Hash Left Join + Hash Cond: (e.c1 = f.c8) + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) LEFT JOIN (mongo_fdw_regress.test_tbl2 e) + -> Hash + -> Foreign Scan on f_test_tbl1 f + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(10 rows) + +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8) LEFT JOIN f_test_tbl1 f ON (f.c8 = e.c1) ORDER BY d.c1 OFFSET 65; + c1 | c2 | c5 | c1 | c2 +------+-------+------------+----+---------------- + 1300 | EMP13 | 12-03-1981 | 20 | ADMINISTRATION + 1300 | EMP13 | 12-03-1981 | 20 | ADMINISTRATION + 1400 | EMP14 | 01-23-1982 | 10 | DEVELOPMENT + 1400 | EMP14 | 01-23-1982 | 10 | DEVELOPMENT + 1400 | EMP14 | 01-23-1982 | 10 | DEVELOPMENT + 1500 | EMP15 | 12-25-2000 | | + 1600 | EMP16 | | | +(7 rows) + +RESET enable_mergejoin; +-- Not supported expressions won't push-down(e.g. function expression, etc.) 
+EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (ABS(d.c1) = e.c8) ORDER BY 1, 3; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: d.c1, e.c1 + -> Merge Left Join + Merge Cond: ((abs(d.c1)) = e.c8) + -> Sort + Sort Key: (abs(d.c1)) + -> Foreign Scan on f_test_tbl2 d + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Sort Key: e.c8 + -> Foreign Scan on f_test_tbl1 e + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(12 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (ABS(d.c1) = e.c8) ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | + | | | | | +(17 rows) + +-- Don't pushdown when whole row reference is involved. +EXPLAIN (COSTS OFF) +SELECT d, e + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8) LEFT JOIN f_test_tbl1 f ON (f.c8 = e.c1) ORDER BY e.c1 OFFSET 65; + QUERY PLAN +-------------------------------------------------------------------------------- + Limit + -> Merge Left Join + Merge Cond: (e.c1 = f.c8) + -> Sort + Sort Key: e.c1 + -> Hash Left Join + Hash Cond: (d.c8 = e.c1) + -> Foreign Scan on f_test_tbl1 d + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + -> Foreign Scan on f_test_tbl2 e + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Sort Key: f.c8 + -> Foreign Scan on f_test_tbl1 f + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(16 rows) + +-- Don't pushdown when full document retrieval is involved. 
+EXPLAIN (COSTS OFF) +SELECT json_data.key AS key1, json_data.value AS value1 + FROM test_text, test_varchar, json_each_text(test_text.__doc::json) AS json_data WHERE key NOT IN ('_id') ORDER BY json_data.key COLLATE "C"; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Sort Key: json_data.key COLLATE "C" + -> Nested Loop + -> Nested Loop + -> Foreign Scan on test_text + Foreign Namespace: mongo_fdw_regress.warehouse + -> Function Scan on json_each_text json_data + Filter: (key <> '_id'::text) + -> Materialize + -> Foreign Scan on test_varchar + Foreign Namespace: mongo_fdw_regress.warehouse +(11 rows) + +SELECT json_data.key AS key1, json_data.value AS value1 + FROM test_text, test_varchar, json_each_text(test_text.__doc::json) AS json_data WHERE key NOT IN ('_id') ORDER BY json_data.key COLLATE "C"; + key1 | value1 +-------------------+----------------------------- + warehouse_created | { "$date" : 1418368330000 } + warehouse_created | { "$date" : 1418368330000 } + warehouse_created | { "$date" : 1447229590000 } + warehouse_created | { "$date" : 1447229590000 } + warehouse_id | 2 + warehouse_id | 1 + warehouse_id | 1 + warehouse_id | 2 + warehouse_name | Laptop + warehouse_name | Laptop + warehouse_name | UPS + warehouse_name | UPS +(12 rows) + +-- Join two tables from two different foreign servers. +EXPLAIN (COSTS OFF) +SELECT d.c1, e.c1 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl3 e ON d.c1 = e.c1 ORDER BY 1; + QUERY PLAN +-------------------------------------------------------------- + Merge Left Join + Merge Cond: (d.c1 = e.c1) + -> Sort + Sort Key: d.c1 + -> Foreign Scan on f_test_tbl2 d + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Sort Key: e.c1 + -> Foreign Scan on f_test_tbl3 e + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(10 rows) + +-- SEMI JOIN, not pushed down +EXPLAIN (COSTS OFF) +SELECT d.c2 + FROM f_test_tbl1 d WHERE EXISTS (SELECT 1 FROM f_test_tbl2 e WHERE d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c2 + -> Hash Join + Hash Cond: (d.c8 = e.c1) + -> Foreign Scan on f_test_tbl1 d + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + -> HashAggregate + Group Key: e.c1 + -> Foreign Scan on f_test_tbl2 e + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(12 rows) + +SELECT d.c2 + FROM f_test_tbl1 d WHERE EXISTS (SELECT 1 FROM f_test_tbl2 e WHERE d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; + c2 +------- + EMP1 + EMP10 + EMP11 + EMP12 + EMP13 + EMP14 + EMP2 + EMP3 + EMP4 + EMP5 +(10 rows) + +-- ANTI JOIN, not pushed down +EXPLAIN (COSTS OFF) +SELECT d.c2 + FROM f_test_tbl1 d WHERE NOT EXISTS (SELECT 1 FROM f_test_tbl2 e WHERE d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c2 + -> Hash Anti Join + Hash Cond: (d.c8 = e.c1) + -> Foreign Scan on f_test_tbl1 d + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + -> Foreign Scan on f_test_tbl2 e + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(10 rows) + +SELECT d.c2 + FROM f_test_tbl1 d WHERE NOT EXISTS (SELECT 1 FROM f_test_tbl2 e WHERE d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; + c2 +------- + EMP15 + EMP16 +(2 rows) + +-- FULL OUTER JOIN, should not pushdown. 
+EXPLAIN (COSTS OFF) +SELECT d.c1, e.c1 + FROM f_test_tbl1 d FULL JOIN f_test_tbl2 e ON (d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; + QUERY PLAN +-------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: d.c2 + -> Hash Full Join + Hash Cond: (d.c8 = e.c1) + -> Foreign Scan on f_test_tbl1 d + Foreign Namespace: mongo_fdw_regress.test_tbl1 + -> Hash + -> Foreign Scan on f_test_tbl2 e + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(10 rows) + +SELECT d.c1, e.c1 + FROM f_test_tbl1 d FULL JOIN f_test_tbl2 e ON (d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; + c1 | c1 +------+---- + 100 | 20 + 1000 | 30 + 1100 | 20 + 1200 | 30 + 1300 | 20 + 1400 | 10 + 1500 | + 1600 | + 200 | 30 + 300 | 30 +(10 rows) + +-- CROSS JOIN can be pushed down +EXPLAIN (COSTS OFF) +SELECT e.c1, d.c2 + FROM f_test_tbl1 d CROSS JOIN f_test_tbl2 e ORDER BY e.c1, d.c2 LIMIT 10; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Limit + -> Sort + Sort Key: e.c1, d.c2 + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl1 d) INNER JOIN (mongo_fdw_regress.test_tbl2 e) +(5 rows) + +SELECT e.c1, d.c2 + FROM f_test_tbl1 d CROSS JOIN f_test_tbl2 e ORDER BY e.c1, d.c2 LIMIT 10; + c1 | c2 +----+------- + 10 | EMP1 + 10 | EMP10 + 10 | EMP11 + 10 | EMP12 + 10 | EMP13 + 10 | EMP14 + 10 | EMP15 + 10 | EMP16 + 10 | EMP2 + 10 | EMP3 +(10 rows) + +-- FDW-131: Limit and offset pushdown with join pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT round(2.2) OFFSET 2; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(3 rows) + +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT round(2.2) OFFSET 2; + c1 | c1 +-----+---- + 100 | 10 + 100 | 30 +(2 rows) + +-- Limit as NULL, no LIMIT/OFFSET pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (t1.c8 = t2.c1) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT NULL OFFSET 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(3 rows) + +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (t1.c8 = t2.c1) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT NULL OFFSET 1; + c1 | c1 +------+---- + 200 | 30 + 300 | 30 + 400 | 20 + 500 | 30 + 600 | 30 + 700 | 10 + 800 | 20 + 900 | 10 + 1000 | 30 + 1100 | 20 + 1200 | 30 + 1300 | 20 + 1400 | 10 +(13 rows) + +-- Limit as ALL, no LIMIT/OFFSET pushdown. 
+EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (t1.c8 = t2.c1) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT ALL OFFSET 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(3 rows) + +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (t1.c8 = t2.c1) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT ALL OFFSET 1; + c1 | c1 +------+---- + 200 | 30 + 300 | 30 + 400 | 20 + 500 | 30 + 600 | 30 + 700 | 10 + 800 | 20 + 900 | 10 + 1000 | 30 + 1100 | 20 + 1200 | 30 + 1300 | 20 + 1400 | 10 +(13 rows) + +-- Offset as NULL, no LIMIT/OFFSET pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT 3 OFFSET NULL; + QUERY PLAN +--------------------------------------------------------------------------------------------------- + Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(3 rows) + +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT 3 OFFSET NULL; + c1 | c1 +-----+---- + 100 | 10 + 100 | 20 + 100 | 30 +(3 rows) + +-- Limit with -ve value. Shouldn't pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT -2; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Limit + Output: t1.c1, t2.c1 + -> Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(5 rows) + +-- Should throw an error. +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT -2; +ERROR: LIMIT must not be negative +-- Offset with -ve value. Shouldn't pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST OFFSET -1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Limit + Output: t1.c1, t2.c1 + -> Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(5 rows) + +-- Should throw an error. +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST OFFSET -1; +ERROR: OFFSET must not be negative +-- Limit/Offset with -ve value. Shouldn't pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT -3 OFFSET -1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Limit + Output: t1.c1, t2.c1 + -> Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(5 rows) + +-- Should throw an error. 
+SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT -3 OFFSET -1; +ERROR: OFFSET must not be negative +-- Limit with expression evaluating to -ve value. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT (1 - (SELECT COUNT(*) FROM f_test_tbl1)); + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Limit + Output: t1.c1, t2.c1 + InitPlan 1 (returns $0) + -> Foreign Scan + Output: (count(*)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl1 f_test_tbl1) + -> Foreign Scan + Output: t1.c1, t2.c1 + Foreign Namespace: (mongo_fdw_regress.test_tbl1 t1) INNER JOIN (mongo_fdw_regress.test_tbl2 t2) +(9 rows) + +-- Should throw an error. +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT (1 - (SELECT COUNT(*) FROM f_test_tbl1)); +ERROR: LIMIT must not be negative +-- Test partition-wise join +SET enable_partitionwise_join TO on; +-- Create the partition tables +CREATE TABLE fprt1 (_id NAME, c1 INTEGER, c2 INTEGER, c3 TEXT) PARTITION BY RANGE(c1); +CREATE FOREIGN TABLE ftprt1_p1 PARTITION OF fprt1 FOR VALUES FROM (1) TO (4) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test1'); +CREATE FOREIGN TABLE ftprt1_p2 PARTITION OF fprt1 FOR VALUES FROM (5) TO (8) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test2'); +CREATE TABLE fprt2 (_id NAME, c1 INTEGER, c2 INTEGER, c3 TEXT) PARTITION BY RANGE(c2); +CREATE FOREIGN TABLE ftprt2_p1 PARTITION OF fprt2 FOR VALUES FROM (1) TO (4) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test3'); +CREATE FOREIGN TABLE ftprt2_p2 PARTITION OF fprt2 FOR VALUES FROM (5) TO (8) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test4'); +-- Inner join two tables +-- Different explain plan on v10 as partition-wise join is not supported there. +SET enable_mergejoin TO OFF; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.c1, t2.c2 + FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.c1 = t2.c2) ORDER BY 1,2; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: t1.c1, t2.c2 + Sort Key: t1.c1 + -> Append + -> Foreign Scan + Output: t1_1.c1, t2_1.c2 + Foreign Namespace: (mongo_fdw_regress.test1 t1) INNER JOIN (mongo_fdw_regress.test3 t2) + -> Foreign Scan + Output: t1_2.c1, t2_2.c2 + Foreign Namespace: (mongo_fdw_regress.test2 t1) INNER JOIN (mongo_fdw_regress.test4 t2) +(10 rows) + +SELECT t1.c1, t2.c2 + FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.c1 = t2.c2) ORDER BY 1,2; + c1 | c2 +----+---- + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + 8 | 8 +(8 rows) + +-- Inner join three tables +-- Different explain plan on v10 as partition-wise join is not supported there. 
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.c1, t2.c2, t3.c2 + FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.c1 = t2.c2) INNER JOIN fprt1 t3 ON (t3.c1 = t2.c2) ORDER BY 1,2; + QUERY PLAN +------------------------------------------------------------------------------------------------------------- + Sort + Output: t1.c1, t2.c2, t3.c2 + Sort Key: t1.c1 + -> Append + -> Hash Join + Output: t1_1.c1, t2_1.c2, t3_1.c2 + Hash Cond: (t1_1.c1 = t3_1.c1) + -> Foreign Scan + Output: t1_1.c1, t2_1.c2 + Foreign Namespace: (mongo_fdw_regress.test1 t1) INNER JOIN (mongo_fdw_regress.test3 t2) + -> Hash + Output: t3_1.c2, t3_1.c1 + -> Foreign Scan on public.ftprt1_p1 t3_1 + Output: t3_1.c2, t3_1.c1 + Foreign Namespace: mongo_fdw_regress.test1 + -> Hash Join + Output: t1_2.c1, t2_2.c2, t3_2.c2 + Hash Cond: (t1_2.c1 = t3_2.c1) + -> Foreign Scan + Output: t1_2.c1, t2_2.c2 + Foreign Namespace: (mongo_fdw_regress.test2 t1) INNER JOIN (mongo_fdw_regress.test4 t2) + -> Hash + Output: t3_2.c2, t3_2.c1 + -> Foreign Scan on public.ftprt1_p2 t3_2 + Output: t3_2.c2, t3_2.c1 + Foreign Namespace: mongo_fdw_regress.test2 +(26 rows) + +SELECT t1.c1, t2.c2, t3.c2 + FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.c1 = t2.c2) INNER JOIN fprt1 t3 ON (t3.c1 = t2.c2) ORDER BY 1,2; + c1 | c2 | c2 +----+----+---- + 1 | 1 | 1 + 2 | 2 | 2 + 3 | 3 | 3 + 4 | 4 | 4 + 5 | 5 | 5 + 6 | 6 | 6 + 7 | 7 | 7 + 8 | 8 | 8 +(8 rows) + +RESET enable_mergejoin; +-- Join with lateral reference +-- Different explain plan on v10 as partition-wise join is not supported there. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.c1, t1.c2 + FROM fprt1 t1, LATERAL (SELECT t2.c1, t2.c2 FROM fprt2 t2 + WHERE t1.c1 = t2.c2 AND t1.c2 = t2.c1) q WHERE t1.c1 % 2 = 0 ORDER BY 1,2; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: t1.c1, t1.c2 + Sort Key: t1.c1, t1.c2 + -> Append + -> Foreign Scan + Output: t1_1.c1, t1_1.c2 + Foreign Namespace: (mongo_fdw_regress.test1 t1) INNER JOIN (mongo_fdw_regress.test3 t2) + -> Foreign Scan + Output: t1_2.c1, t1_2.c2 + Foreign Namespace: (mongo_fdw_regress.test2 t1) INNER JOIN (mongo_fdw_regress.test4 t2) +(10 rows) + +SELECT t1.c1, t1.c2 + FROM fprt1 t1, LATERAL (SELECT t2.c1, t2.c2 FROM fprt2 t2 + WHERE t1.c1 = t2.c2 AND t1.c2 = t2.c1) q WHERE t1.c1 % 2 = 0 ORDER BY 1,2; + c1 | c2 +----+---- + 2 | 2 + 4 | 4 + 6 | 6 + 8 | 8 +(4 rows) + +-- With PHVs, partitionwise join selected but no join pushdown +-- Table alias in foreign scan is different for v12, v11 and v10. 
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.c1, t1.phv, t2.c2, t2.phv + FROM (SELECT 't1_phv' phv, * FROM fprt1 WHERE c1 % 2 = 0) t1 LEFT JOIN + (SELECT 't2_phv' phv, * FROM fprt2 WHERE c2 % 2 = 0) t2 ON (t1.c1 = t2.c2) + ORDER BY t1.c1, t2.c2; + QUERY PLAN +-------------------------------------------------------------------------------- + Incremental Sort + Output: fprt1.c1, 't1_phv'::text, fprt2.c2, ('t2_phv'::text) + Sort Key: fprt1.c1, fprt2.c2 + Presorted Key: fprt1.c1 + -> Merge Append + Sort Key: fprt1.c1 + -> Merge Left Join + Output: fprt1_1.c1, 't1_phv'::text, fprt2_1.c2, ('t2_phv'::text) + Merge Cond: (fprt1_1.c1 = fprt2_1.c2) + -> Sort + Output: fprt1_1.c1 + Sort Key: fprt1_1.c1 + -> Foreign Scan on public.ftprt1_p1 fprt1_1 + Output: fprt1_1.c1 + Foreign Namespace: mongo_fdw_regress.test1 + -> Sort + Output: fprt2_1.c2, ('t2_phv'::text) + Sort Key: fprt2_1.c2 + -> Foreign Scan on public.ftprt2_p1 fprt2_1 + Output: fprt2_1.c2, 't2_phv'::text + Foreign Namespace: mongo_fdw_regress.test3 + -> Merge Left Join + Output: fprt1_2.c1, 't1_phv'::text, fprt2_2.c2, ('t2_phv'::text) + Merge Cond: (fprt1_2.c1 = fprt2_2.c2) + -> Sort + Output: fprt1_2.c1 + Sort Key: fprt1_2.c1 + -> Foreign Scan on public.ftprt1_p2 fprt1_2 + Output: fprt1_2.c1 + Foreign Namespace: mongo_fdw_regress.test2 + -> Sort + Output: fprt2_2.c2, ('t2_phv'::text) + Sort Key: fprt2_2.c2 + -> Foreign Scan on public.ftprt2_p2 fprt2_2 + Output: fprt2_2.c2, 't2_phv'::text + Foreign Namespace: mongo_fdw_regress.test4 +(36 rows) + +SELECT t1.c1, t1.phv, t2.c2, t2.phv + FROM (SELECT 't1_phv' phv, * FROM fprt1 WHERE c1 % 2 = 0) t1 LEFT JOIN + (SELECT 't2_phv' phv, * FROM fprt2 WHERE c2 % 2 = 0) t2 ON (t1.c1 = t2.c2) + ORDER BY t1.c1, t2.c2; + c1 | phv | c2 | phv +----+--------+----+-------- + 2 | t1_phv | 2 | t2_phv + 4 | t1_phv | 4 | t2_phv + 6 | t1_phv | 6 | t2_phv + 8 | t1_phv | 8 | t2_phv +(4 rows) + +RESET enable_partitionwise_join; +-- FDW-445: Support enable_join_pushdown option at server level and table level. +-- Check only boolean values are accepted. +ALTER SERVER mongo_server OPTIONS (ADD enable_join_pushdown 'abc11'); +ERROR: enable_join_pushdown requires a Boolean value +-- Test the option at server level. 
+ALTER SERVER mongo_server OPTIONS (ADD enable_join_pushdown 'false'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +-------------------------------------------------------------------- + Incremental Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + Presorted Key: d.c1 + -> Merge Join + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Merge Cond: (d.c1 = e.c8) + -> Sort + Output: d.c1, d.c2 + Sort Key: d.c1 + -> Foreign Scan on public.f_test_tbl2 d + Output: d.c1, d.c2 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Output: e.c1, e.c2, e.c6, e.c8 + Sort Key: e.c8 + -> Foreign Scan on public.f_test_tbl1 e + Output: e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(19 rows) + +ALTER SERVER mongo_server OPTIONS (SET enable_join_pushdown 'true'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + -> Foreign Scan + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(6 rows) + +-- Test the option with outer rel. +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (ADD enable_join_pushdown 'false'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +-------------------------------------------------------------------- + Incremental Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + Presorted Key: d.c1 + -> Merge Join + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Merge Cond: (d.c1 = e.c8) + -> Sort + Output: d.c1, d.c2 + Sort Key: d.c1 + -> Foreign Scan on public.f_test_tbl2 d + Output: d.c1, d.c2 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Output: e.c1, e.c2, e.c6, e.c8 + Sort Key: e.c8 + -> Foreign Scan on public.f_test_tbl1 e + Output: e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(19 rows) + +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'true'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + -> Foreign Scan + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(6 rows) + +-- Test the option with inner rel. 
+ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (ADD enable_join_pushdown 'false'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +-------------------------------------------------------------------- + Incremental Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + Presorted Key: d.c1 + -> Merge Join + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Merge Cond: (d.c1 = e.c8) + -> Sort + Output: d.c1, d.c2 + Sort Key: d.c1 + -> Foreign Scan on public.f_test_tbl2 d + Output: d.c1, d.c2 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Output: e.c1, e.c2, e.c6, e.c8 + Sort Key: e.c8 + -> Foreign Scan on public.f_test_tbl1 e + Output: e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(19 rows) + +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'true'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + -> Foreign Scan + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(6 rows) + +-- Test that setting option at table level does not affect the setting at +-- server level. +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'false'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'false'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +-------------------------------------------------------------------- + Incremental Sort + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Sort Key: d.c1, e.c1 + Presorted Key: d.c1 + -> Merge Join + Output: d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + Merge Cond: (d.c1 = e.c8) + -> Sort + Output: d.c1, d.c2 + Sort Key: d.c1 + -> Foreign Scan on public.f_test_tbl2 d + Output: d.c1, d.c2 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Output: e.c1, e.c2, e.c6, e.c8 + Sort Key: e.c8 + -> Foreign Scan on public.f_test_tbl1 e + Output: e.c1, e.c2, e.c6, e.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(19 rows) + +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT t1.c1, t2.c2 + FROM f_test_tbl3 t1 JOIN f_test_tbl4 t2 ON (t1.c1 = t2.c8) ORDER BY 1, 2; + QUERY PLAN +--------------------------------------------------------------------------------------------------------- + Sort + Output: t1.c1, t2.c2 + Sort Key: t1.c1, t2.c2 + -> Foreign Scan + Output: t1.c1, t2.c2 + Foreign Namespace: (mongo_fdw_regress.test_tbl2 t1) INNER JOIN (mongo_fdw_regress.test_tbl1 t2) +(6 rows) + +-- FDW-558: Test mongo_fdw.enable_join_pushdown GUC. +-- Negative testing for GUC value. +SET mongo_fdw.enable_join_pushdown to 'abc'; +ERROR: parameter "mongo_fdw.enable_join_pushdown" requires a Boolean value +-- Check default value. Should be ON. +SHOW mongo_fdw.enable_join_pushdown; + mongo_fdw.enable_join_pushdown +-------------------------------- + on +(1 row) + +-- Join pushdown should happen as the GUC enable_join_pushdown is true. 
+ALTER SERVER mongo_server OPTIONS (SET enable_join_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'true'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON (d.c1 = e.c8) ORDER BY 1, 2; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Sort + Output: d.c1, e.c8 + Sort Key: d.c1 + -> Foreign Scan + Output: d.c1, e.c8 + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) INNER JOIN (mongo_fdw_regress.test_tbl1 e) +(6 rows) + +--Disable the GUC enable_join_pushdown. +SET mongo_fdw.enable_join_pushdown to false; +-- Join pushdown shouldn't happen as the GUC enable_join_pushdown is false. +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON (d.c1 = e.c8) ORDER BY 1, 2; + QUERY PLAN +-------------------------------------------------------------- + Merge Join + Output: d.c1, e.c8 + Merge Cond: (d.c1 = e.c8) + -> Sort + Output: d.c1 + Sort Key: d.c1 + -> Foreign Scan on public.f_test_tbl2 d + Output: d.c1 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Output: e.c8 + Sort Key: e.c8 + -> Foreign Scan on public.f_test_tbl1 e + Output: e.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(15 rows) + +-- Enable the GUC and table level option is set to false, should not pushdown. +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'false'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'false'); +SET mongo_fdw.enable_join_pushdown to true; +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON (d.c1 = e.c8) ORDER BY 1, 2; + QUERY PLAN +-------------------------------------------------------------- + Merge Join + Output: d.c1, e.c8 + Merge Cond: (d.c1 = e.c8) + -> Sort + Output: d.c1 + Sort Key: d.c1 + -> Foreign Scan on public.f_test_tbl2 d + Output: d.c1 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Output: e.c8 + Sort Key: e.c8 + -> Foreign Scan on public.f_test_tbl1 e + Output: e.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(15 rows) + +-- FDW-589: Test enable_order_by_pushdown option at server and table level. 
+SET mongo_fdw.enable_join_pushdown to true; +SET mongo_fdw.enable_order_by_pushdown to true; +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'true'); +ALTER SERVER mongo_server OPTIONS (ADD enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (ADD enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (ADD enable_order_by_pushdown 'true'); +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | | | | + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(12 rows) + +-- One table level option is OFF. Shouldn't pushdown ORDER BY. +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_order_by_pushdown 'false'); +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------------ + Sort + Sort Key: d.c1 NULLS FIRST, e.c1 NULLS FIRST + -> Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(4 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | | | | + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(12 rows) + +-- Test that setting option at table level does not affect the setting at +-- server level. 
+ALTER SERVER mongo_server OPTIONS (SET enable_order_by_pushdown 'false'); +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_order_by_pushdown 'true'); +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +------------------------------------------------------------------------------------------------ + Foreign Scan + Foreign Namespace: (mongo_fdw_regress.test_tbl2 d) LEFT JOIN (mongo_fdw_regress.test_tbl1 e) +(2 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | | | | + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(12 rows) + +ALTER SERVER mongo_server OPTIONS (SET enable_order_by_pushdown 'true'); +-- When enable_join_pushdown option is disabled. Shouldn't pushdown join and +-- hence, ORDER BY too. +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'false'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'false'); +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------------------- + Incremental Sort + Sort Key: d.c1 NULLS FIRST, e.c1 NULLS FIRST + Presorted Key: d.c1 + -> Merge Left Join + Merge Cond: (d.c1 = e.c8) + Join Filter: ((e.c4 > d.c1) AND (e.c2 < d.c3)) + -> Sort + Sort Key: d.c1 NULLS FIRST + -> Foreign Scan on f_test_tbl2 d + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Sort + Sort Key: e.c8 NULLS FIRST + -> Foreign Scan on f_test_tbl1 e + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(14 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + | | | | | + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | | | | + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | + 50 | TESTING | | | | +(12 rows) + +DELETE FROM f_test_tbl1 WHERE c8 IS NULL; +DELETE FROM f_test_tbl1 WHERE c8 = 60; +DELETE FROM f_test_tbl2 WHERE c1 IS NULL; +DELETE FROM f_test_tbl2 WHERE c1 = 50; +DROP FOREIGN TABLE f_test_tbl1; +DROP FOREIGN TABLE f_test_tbl2; +DROP FOREIGN TABLE f_test_tbl3; +DROP FOREIGN TABLE f_test_tbl4; +DROP FOREIGN TABLE test_text; +DROP FOREIGN TABLE test_varchar; +DROP TABLE l_test_tbl1; +DROP FOREIGN TABLE 
ftprt1_p1; +DROP FOREIGN TABLE ftprt1_p2; +DROP FOREIGN TABLE ftprt2_p1; +DROP FOREIGN TABLE ftprt2_p2; +DROP TABLE IF EXISTS fprt1; +DROP TABLE IF EXISTS fprt2; +DROP USER MAPPING FOR public SERVER mongo_server1; +DROP SERVER mongo_server1; +DROP USER MAPPING FOR public SERVER mongo_server; +DROP SERVER mongo_server; +DROP EXTENSION mongo_fdw; diff --git a/expected/limit_offset_pushdown.out b/expected/limit_offset_pushdown.out new file mode 100644 index 0000000..0227f28 --- /dev/null +++ b/expected/limit_offset_pushdown.out @@ -0,0 +1,380 @@ +\set MONGO_HOST `echo \'"$MONGO_HOST"\'` +\set MONGO_PORT `echo \'"$MONGO_PORT"\'` +\set MONGO_USER_NAME `echo \'"$MONGO_USER_NAME"\'` +\set MONGO_PASS `echo \'"$MONGO_PWD"\'` +-- Before running this file user must create database mongo_fdw_regress, +-- mongo_fdw_regress1 and mongo_fdw_regress2 databases on MongoDB with all +-- permission for MONGO_USER_NAME user with MONGO_PASS password and ran +-- mongodb_init.sh file to load collections. +\c contrib_regression +CREATE EXTENSION IF NOT EXISTS mongo_fdw; +CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw + OPTIONS (address :MONGO_HOST, port :MONGO_PORT); +CREATE USER MAPPING FOR public SERVER mongo_server; +CREATE FOREIGN TABLE fdw131_t1 (_id NAME, c1 INTEGER, c2 TEXT, c3 TEXT) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl2'); +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1; + c1 | c2 | c3 +----+----------------+---------- + 10 | DEVELOPMENT | PUNE + 20 | ADMINISTRATION | BANGLORE + 30 | SALES | MUMBAI + 40 | HR | NAGPUR +(4 rows) + +-- LIMIT/OFFSET pushdown. +-- Limit with Offset should get pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT 2 OFFSET 2; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(3 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT 2 OFFSET 2; + c1 | c2 | c3 +----+-------+-------- + 30 | SALES | MUMBAI + 40 | HR | NAGPUR +(2 rows) + +-- If ORDER BY is not pushable then limit/Offset shouldn't get pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 LIMIT 2 OFFSET 2; + QUERY PLAN +-------------------------------------------------------------- + Limit + Output: c1, c2, c3 + -> Sort + Output: c1, c2, c3 + Sort Key: fdw131_t1.c1 + -> Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(8 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 LIMIT 2 OFFSET 2; + c1 | c2 | c3 +----+-------+-------- + 30 | SALES | MUMBAI + 40 | HR | NAGPUR +(2 rows) + +-- With ORDER BY pushdown disabled, limit shouldn't get pushdown. +SET mongo_fdw.enable_order_by_pushdown TO OFF; +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT 2 OFFSET 2; + QUERY PLAN +-------------------------------------------------------------- + Limit + Output: c1, c2, c3 + -> Sort + Output: c1, c2, c3 + Sort Key: fdw131_t1.c1 NULLS FIRST + -> Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(8 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT 2 OFFSET 2; + c1 | c2 | c3 +----+-------+-------- + 30 | SALES | MUMBAI + 40 | HR | NAGPUR +(2 rows) + +SET mongo_fdw.enable_order_by_pushdown TO ON; +-- Only limit should get pushdown. 
+EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 2 DESC NULLS LAST LIMIT 3; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(3 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 2 DESC NULLS LAST LIMIT 3; + c1 | c2 | c3 +----+-------------+-------- + 30 | SALES | MUMBAI + 40 | HR | NAGPUR + 10 | DEVELOPMENT | PUNE +(3 rows) + +-- Expression in limit clause. Should get pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 3 DESC NULLS LAST LIMIT round(3.2) OFFSET 2; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(3 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 3 DESC NULLS LAST LIMIT round(3.2) OFFSET 2; + c1 | c2 | c3 +----+----------------+---------- + 30 | SALES | MUMBAI + 20 | ADMINISTRATION | BANGLORE +(2 rows) + +-- Only Offset without limit should get pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 3 ASC NULLS FIRST OFFSET 2; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(3 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 3 ASC NULLS FIRST OFFSET 2; + c1 | c2 | c3 +----+-------------+-------- + 40 | HR | NAGPUR + 10 | DEVELOPMENT | PUNE +(2 rows) + +-- Limit ALL +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT ALL; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(3 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT ALL; + c1 | c2 | c3 +----+----------------+---------- + 10 | DEVELOPMENT | PUNE + 20 | ADMINISTRATION | BANGLORE + 30 | SALES | MUMBAI + 40 | HR | NAGPUR +(4 rows) + +-- Limit ALL with OFFSET +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT ALL OFFSET 1; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(3 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT ALL OFFSET 1; + c1 | c2 | c3 +----+----------------+---------- + 20 | ADMINISTRATION | BANGLORE + 30 | SALES | MUMBAI + 40 | HR | NAGPUR +(3 rows) + +-- Limit NULL +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 3 ASC NULLS FIRST LIMIT NULL; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(3 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 3 ASC NULLS FIRST LIMIT NULL; + c1 | c2 | c3 +----+----------------+---------- + 20 | ADMINISTRATION | BANGLORE + 30 | SALES | MUMBAI + 40 | HR | NAGPUR + 10 | DEVELOPMENT | PUNE +(4 rows) + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 3 ASC NULLS FIRST LIMIT NULL OFFSET 2; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(3 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 3 ASC NULLS FIRST LIMIT NULL OFFSET 2; + c1 | c2 | c3 
+----+-------------+-------- + 40 | HR | NAGPUR + 10 | DEVELOPMENT | PUNE +(2 rows) + +-- Limit 0 and Offset 0 +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT 0; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(3 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT 0; + c1 | c2 | c3 +----+----+---- +(0 rows) + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT 0 OFFSET 0; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(3 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT 0 OFFSET 0; + c1 | c2 | c3 +----+----+---- +(0 rows) + +-- Offset NULL. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 2 ASC NULLS FIRST LIMIT 5 OFFSET NULL; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(3 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 2 ASC NULLS FIRST LIMIT 5 OFFSET NULL; + c1 | c2 | c3 +----+----------------+---------- + 20 | ADMINISTRATION | BANGLORE + 10 | DEVELOPMENT | PUNE + 40 | HR | NAGPUR + 30 | SALES | MUMBAI +(4 rows) + +-- Limit with placeholder. Shouldn't get pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 2 LIMIT (SELECT COUNT(*) FROM fdw131_t1); + QUERY PLAN +----------------------------------------------------------------------------------- + Limit + Output: fdw131_t1.c1, fdw131_t1.c2, fdw131_t1.c3 + InitPlan 1 (returns $0) + -> Foreign Scan + Output: (count(*)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw131_t1) + -> Sort + Output: fdw131_t1.c1, fdw131_t1.c2, fdw131_t1.c3 + Sort Key: fdw131_t1.c2 + -> Foreign Scan on public.fdw131_t1 + Output: fdw131_t1.c1, fdw131_t1.c2, fdw131_t1.c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(12 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 2 LIMIT (SELECT COUNT(*) FROM fdw131_t1); + c1 | c2 | c3 +----+----------------+---------- + 20 | ADMINISTRATION | BANGLORE + 10 | DEVELOPMENT | PUNE + 40 | HR | NAGPUR + 30 | SALES | MUMBAI +(4 rows) + +-- Limit with expression, shouldn't pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT (10 - (SELECT COUNT(*) FROM fdw131_t1)); + QUERY PLAN +----------------------------------------------------------------------------------- + Limit + Output: fdw131_t1.c1, fdw131_t1.c2, fdw131_t1.c3 + InitPlan 1 (returns $0) + -> Foreign Scan + Output: (count(*)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw131_t1) + -> Foreign Scan on public.fdw131_t1 + Output: fdw131_t1.c1, fdw131_t1.c2, fdw131_t1.c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(9 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT (10 - (SELECT COUNT(*) FROM fdw131_t1)); + c1 | c2 | c3 +----+----------------+---------- + 10 | DEVELOPMENT | PUNE + 20 | ADMINISTRATION | BANGLORE + 30 | SALES | MUMBAI + 40 | HR | NAGPUR +(4 rows) + +-- Limit with -ve value. Shouldn't pushdown. 
+EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT -1; + QUERY PLAN +-------------------------------------------------------- + Limit + Output: c1, c2, c3 + -> Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(5 rows) + +-- Should throw an error. +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT -1; +ERROR: LIMIT must not be negative +-- Offset with -ve value. Shouldn't pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST OFFSET -2; + QUERY PLAN +-------------------------------------------------------- + Limit + Output: c1, c2, c3 + -> Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(5 rows) + +-- Should throw an error. +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST OFFSET -2; +ERROR: OFFSET must not be negative +-- Limit/Offset with -ve value. Shouldn't pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT -1 OFFSET -2; + QUERY PLAN +-------------------------------------------------------- + Limit + Output: c1, c2, c3 + -> Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(5 rows) + +-- Should throw an error. +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT -1 OFFSET -2; +ERROR: OFFSET must not be negative +-- Limit with expression evaluating to -ve value. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT (1 - (SELECT COUNT(*) FROM fdw131_t1)); + QUERY PLAN +----------------------------------------------------------------------------------- + Limit + Output: fdw131_t1.c1, fdw131_t1.c2, fdw131_t1.c3 + InitPlan 1 (returns $0) + -> Foreign Scan + Output: (count(*)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw131_t1) + -> Foreign Scan on public.fdw131_t1 + Output: fdw131_t1.c1, fdw131_t1.c2, fdw131_t1.c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(9 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT (1 - (SELECT COUNT(*) FROM fdw131_t1)); +ERROR: LIMIT must not be negative +DROP FOREIGN TABLE fdw131_t1; +DROP USER MAPPING FOR public SERVER mongo_server; +DROP SERVER mongo_server; +DROP EXTENSION mongo_fdw; diff --git a/expected/limit_offset_pushdown_1.out b/expected/limit_offset_pushdown_1.out new file mode 100644 index 0000000..e349f50 --- /dev/null +++ b/expected/limit_offset_pushdown_1.out @@ -0,0 +1,380 @@ +\set MONGO_HOST `echo \'"$MONGO_HOST"\'` +\set MONGO_PORT `echo \'"$MONGO_PORT"\'` +\set MONGO_USER_NAME `echo \'"$MONGO_USER_NAME"\'` +\set MONGO_PASS `echo \'"$MONGO_PWD"\'` +-- Before running this file user must create database mongo_fdw_regress, +-- mongo_fdw_regress1 and mongo_fdw_regress2 databases on MongoDB with all +-- permission for MONGO_USER_NAME user with MONGO_PASS password and ran +-- mongodb_init.sh file to load collections. 
+\c contrib_regression +CREATE EXTENSION IF NOT EXISTS mongo_fdw; +CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw + OPTIONS (address :MONGO_HOST, port :MONGO_PORT); +CREATE USER MAPPING FOR public SERVER mongo_server; +CREATE FOREIGN TABLE fdw131_t1 (_id NAME, c1 INTEGER, c2 TEXT, c3 TEXT) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl2'); +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1; + c1 | c2 | c3 +----+----------------+---------- + 10 | DEVELOPMENT | PUNE + 20 | ADMINISTRATION | BANGLORE + 30 | SALES | MUMBAI + 40 | HR | NAGPUR +(4 rows) + +-- LIMIT/OFFSET pushdown. +-- Limit with Offset should get pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT 2 OFFSET 2; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(3 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT 2 OFFSET 2; + c1 | c2 | c3 +----+-------+-------- + 30 | SALES | MUMBAI + 40 | HR | NAGPUR +(2 rows) + +-- If ORDER BY is not pushable then limit/Offset shouldn't get pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 LIMIT 2 OFFSET 2; + QUERY PLAN +-------------------------------------------------------------- + Limit + Output: c1, c2, c3 + -> Sort + Output: c1, c2, c3 + Sort Key: fdw131_t1.c1 + -> Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(8 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 LIMIT 2 OFFSET 2; + c1 | c2 | c3 +----+-------+-------- + 30 | SALES | MUMBAI + 40 | HR | NAGPUR +(2 rows) + +-- With ORDER BY pushdown disabled, limit shouldn't get pushdown. +SET mongo_fdw.enable_order_by_pushdown TO OFF; +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT 2 OFFSET 2; + QUERY PLAN +-------------------------------------------------------------- + Limit + Output: c1, c2, c3 + -> Sort + Output: c1, c2, c3 + Sort Key: fdw131_t1.c1 NULLS FIRST + -> Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(8 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT 2 OFFSET 2; + c1 | c2 | c3 +----+-------+-------- + 30 | SALES | MUMBAI + 40 | HR | NAGPUR +(2 rows) + +SET mongo_fdw.enable_order_by_pushdown TO ON; +-- Only limit should get pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 2 DESC NULLS LAST LIMIT 3; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(3 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 2 DESC NULLS LAST LIMIT 3; + c1 | c2 | c3 +----+-------------+-------- + 30 | SALES | MUMBAI + 40 | HR | NAGPUR + 10 | DEVELOPMENT | PUNE +(3 rows) + +-- Expression in limit clause. Should get pushdown. 
+EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 3 DESC NULLS LAST LIMIT round(3.2) OFFSET 2; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(3 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 3 DESC NULLS LAST LIMIT round(3.2) OFFSET 2; + c1 | c2 | c3 +----+----------------+---------- + 30 | SALES | MUMBAI + 20 | ADMINISTRATION | BANGLORE +(2 rows) + +-- Only Offset without limit should get pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 3 ASC NULLS FIRST OFFSET 2; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(3 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 3 ASC NULLS FIRST OFFSET 2; + c1 | c2 | c3 +----+-------------+-------- + 40 | HR | NAGPUR + 10 | DEVELOPMENT | PUNE +(2 rows) + +-- Limit ALL +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT ALL; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(3 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT ALL; + c1 | c2 | c3 +----+----------------+---------- + 10 | DEVELOPMENT | PUNE + 20 | ADMINISTRATION | BANGLORE + 30 | SALES | MUMBAI + 40 | HR | NAGPUR +(4 rows) + +-- Limit ALL with OFFSET +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT ALL OFFSET 1; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(3 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT ALL OFFSET 1; + c1 | c2 | c3 +----+----------------+---------- + 20 | ADMINISTRATION | BANGLORE + 30 | SALES | MUMBAI + 40 | HR | NAGPUR +(3 rows) + +-- Limit NULL +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 3 ASC NULLS FIRST LIMIT NULL; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(3 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 3 ASC NULLS FIRST LIMIT NULL; + c1 | c2 | c3 +----+----------------+---------- + 20 | ADMINISTRATION | BANGLORE + 30 | SALES | MUMBAI + 40 | HR | NAGPUR + 10 | DEVELOPMENT | PUNE +(4 rows) + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 3 ASC NULLS FIRST LIMIT NULL OFFSET 2; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(3 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 3 ASC NULLS FIRST LIMIT NULL OFFSET 2; + c1 | c2 | c3 +----+-------------+-------- + 40 | HR | NAGPUR + 10 | DEVELOPMENT | PUNE +(2 rows) + +-- Limit 0 and Offset 0 +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT 0; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(3 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT 0; + c1 | c2 | c3 +----+----+---- +(0 rows) + +EXPLAIN (VERBOSE, COSTS FALSE) 
+SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT 0 OFFSET 0; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(3 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT 0 OFFSET 0; + c1 | c2 | c3 +----+----+---- +(0 rows) + +-- Offset NULL. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 2 ASC NULLS FIRST LIMIT 5 OFFSET NULL; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(3 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 2 ASC NULLS FIRST LIMIT 5 OFFSET NULL; + c1 | c2 | c3 +----+----------------+---------- + 20 | ADMINISTRATION | BANGLORE + 10 | DEVELOPMENT | PUNE + 40 | HR | NAGPUR + 30 | SALES | MUMBAI +(4 rows) + +-- Limit with placeholder. Shouldn't get pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 2 LIMIT (SELECT COUNT(*) FROM fdw131_t1); + QUERY PLAN +----------------------------------------------------------------------------------- + Limit + Output: fdw131_t1.c1, fdw131_t1.c2, fdw131_t1.c3 + InitPlan 1 + -> Foreign Scan + Output: (count(*)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw131_t1) + -> Sort + Output: fdw131_t1.c1, fdw131_t1.c2, fdw131_t1.c3 + Sort Key: fdw131_t1.c2 + -> Foreign Scan on public.fdw131_t1 + Output: fdw131_t1.c1, fdw131_t1.c2, fdw131_t1.c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(12 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 2 LIMIT (SELECT COUNT(*) FROM fdw131_t1); + c1 | c2 | c3 +----+----------------+---------- + 20 | ADMINISTRATION | BANGLORE + 10 | DEVELOPMENT | PUNE + 40 | HR | NAGPUR + 30 | SALES | MUMBAI +(4 rows) + +-- Limit with expression, shouldn't pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT (10 - (SELECT COUNT(*) FROM fdw131_t1)); + QUERY PLAN +----------------------------------------------------------------------------------- + Limit + Output: fdw131_t1.c1, fdw131_t1.c2, fdw131_t1.c3 + InitPlan 1 + -> Foreign Scan + Output: (count(*)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw131_t1) + -> Foreign Scan on public.fdw131_t1 + Output: fdw131_t1.c1, fdw131_t1.c2, fdw131_t1.c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(9 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT (10 - (SELECT COUNT(*) FROM fdw131_t1)); + c1 | c2 | c3 +----+----------------+---------- + 10 | DEVELOPMENT | PUNE + 20 | ADMINISTRATION | BANGLORE + 30 | SALES | MUMBAI + 40 | HR | NAGPUR +(4 rows) + +-- Limit with -ve value. Shouldn't pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT -1; + QUERY PLAN +-------------------------------------------------------- + Limit + Output: c1, c2, c3 + -> Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(5 rows) + +-- Should throw an error. +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT -1; +ERROR: LIMIT must not be negative +-- Offset with -ve value. Shouldn't pushdown. 
+EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST OFFSET -2; + QUERY PLAN +-------------------------------------------------------- + Limit + Output: c1, c2, c3 + -> Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(5 rows) + +-- Should throw an error. +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST OFFSET -2; +ERROR: OFFSET must not be negative +-- Limit/Offset with -ve value. Shouldn't pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT -1 OFFSET -2; + QUERY PLAN +-------------------------------------------------------- + Limit + Output: c1, c2, c3 + -> Foreign Scan on public.fdw131_t1 + Output: c1, c2, c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(5 rows) + +-- Should throw an error. +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT -1 OFFSET -2; +ERROR: OFFSET must not be negative +-- Limit with expression evaluating to -ve value. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT (1 - (SELECT COUNT(*) FROM fdw131_t1)); + QUERY PLAN +----------------------------------------------------------------------------------- + Limit + Output: fdw131_t1.c1, fdw131_t1.c2, fdw131_t1.c3 + InitPlan 1 + -> Foreign Scan + Output: (count(*)) + Foreign Namespace: Aggregate on (mongo_fdw_regress.test_tbl2 fdw131_t1) + -> Foreign Scan on public.fdw131_t1 + Output: fdw131_t1.c1, fdw131_t1.c2, fdw131_t1.c3 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(9 rows) + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT (1 - (SELECT COUNT(*) FROM fdw131_t1)); +ERROR: LIMIT must not be negative +DROP FOREIGN TABLE fdw131_t1; +DROP USER MAPPING FOR public SERVER mongo_server; +DROP SERVER mongo_server; +DROP EXTENSION mongo_fdw; diff --git a/expected/pushdown.out b/expected/pushdown.out new file mode 100644 index 0000000..3abbd3c --- /dev/null +++ b/expected/pushdown.out @@ -0,0 +1,896 @@ +\set MONGO_HOST `echo \'"$MONGO_HOST"\'` +\set MONGO_PORT `echo \'"$MONGO_PORT"\'` +\set MONGO_USER_NAME `echo \'"$MONGO_USER_NAME"\'` +\set MONGO_PASS `echo \'"$MONGO_PWD"\'` +-- Before running this file user must create database mongo_fdw_regress on +-- MongoDB with all permission for MONGO_USER_NAME user with MONGO_PASS +-- password and ran mongodb_init.sh file to load collections. +\c contrib_regression +CREATE EXTENSION IF NOT EXISTS mongo_fdw; +CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw + OPTIONS (address :MONGO_HOST, port :MONGO_PORT); +CREATE USER MAPPING FOR public SERVER mongo_server; +-- Create foreign tables +CREATE FOREIGN TABLE f_mongo_test (_id name, a int, b varchar) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'mongo_test'); +CREATE FOREIGN TABLE f_test_tbl1 (_id name, c1 INTEGER, c2 VARCHAR(10), c3 CHAR(9), c4 INTEGER, c5 pg_catalog.Date, c6 DECIMAL, c7 INTEGER, c8 INTEGER) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl1'); +CREATE FOREIGN TABLE f_test_tbl2 (_id name, c1 INTEGER, c2 VARCHAR(14), c3 VARCHAR(13)) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl2'); +CREATE FOREIGN TABLE f_test_tbl3 (_id name, name TEXT, marks FLOAT ARRAY, pass BOOLEAN) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl3'); +-- Inserts some values in mongo_test collection. 
+INSERT INTO f_mongo_test VALUES ('0', 1, 'One'); +INSERT INTO f_mongo_test VALUES ('0', 2, 'Two'); +INSERT INTO f_mongo_test VALUES ('0', 3, 'Three'); +SET datestyle TO ISO; +-- Sample data +SELECT c1, c2, c3, c4, c5, c6, c7, c8 FROM f_test_tbl1 ORDER BY c1; + c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 +------+-------+-----------+------+------------+---------+------+---- + 100 | EMP1 | ADMIN | 1300 | 1980-12-17 | 800.3 | 0 | 20 + 200 | EMP2 | SALESMAN | 600 | 1981-02-20 | 1600 | 300 | 30 + 300 | EMP3 | SALESMAN | 600 | 1981-02-22 | 1250 | 500 | 30 + 400 | EMP4 | MANAGER | 900 | 1981-04-02 | 2975 | 0 | 20 + 500 | EMP5 | SALESMAN | 600 | 1981-09-28 | 1250.23 | 1400 | 30 + 600 | EMP6 | MANAGER | 900 | 1981-05-01 | 2850 | 0 | 30 + 700 | EMP7 | MANAGER | 900 | 1981-06-09 | 2450.34 | 0 | 10 + 800 | EMP8 | FINANCE | 400 | 1987-04-19 | 3000 | 0 | 20 + 900 | EMP9 | HEAD | | 1981-11-17 | 5000 | 0 | 10 + 1000 | EMP10 | SALESMAN | 600 | 1980-09-08 | 1500 | 0 | 30 + 1100 | EMP11 | ADMIN | 800 | 1987-05-23 | 1100 | 0 | 20 + 1200 | EMP12 | ADMIN | 600 | 1981-12-03 | 950 | 0 | 30 + 1300 | EMP13 | FINANCE | 400 | 1981-12-03 | 3000 | 0 | 20 + 1400 | EMP14 | ADMIN | 700 | 1982-01-23 | 1300 | 0 | 10 +(14 rows) + +-- WHERE clause pushdown +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c6 AS "salary", c8 FROM f_test_tbl1 e + WHERE c6 IN (1600, 2450) + ORDER BY c1 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------- + Foreign Scan on public.f_test_tbl1 e + Output: c1, c2, c6, c8 + Filter: (e.c6 = ANY ('{1600,2450}'::numeric[])) + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(4 rows) + +SELECT c1, c2, c6 AS "salary", c8 FROM f_test_tbl1 e + WHERE c6 IN (1600, 2450) + ORDER BY c1; + c1 | c2 | salary | c8 +-----+------+--------+---- + 200 | EMP2 | 1600 | 30 +(1 row) + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c6 FROM f_test_tbl1 e + WHERE c6 > 3000 + ORDER BY c1 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.f_test_tbl1 e + Output: c1, c2, c6 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(3 rows) + +SELECT c1, c2, c6 FROM f_test_tbl1 e + WHERE c6 > 3000 + ORDER BY c1 ASC NULLS FIRST; + c1 | c2 | c6 +-----+------+------ + 900 | EMP9 | 5000 +(1 row) + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c6, c8 FROM f_test_tbl1 e + WHERE c6 = 1500 + ORDER BY c1 DESC NULLS LAST; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.f_test_tbl1 e + Output: c1, c2, c6, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(3 rows) + +SELECT c1, c2, c6, c8 FROM f_test_tbl1 e + WHERE c6 = 1500 + ORDER BY c1 DESC NULLS LAST; + c1 | c2 | c6 | c8 +------+-------+------+---- + 1000 | EMP10 | 1500 | 30 +(1 row) + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c6, c8 FROM f_test_tbl1 e + WHERE c6 BETWEEN 1000 AND 4000 + ORDER BY c1 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.f_test_tbl1 e + Output: c1, c2, c6, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(3 rows) + +SELECT c1, c2, c6, c8 FROM f_test_tbl1 e + WHERE c6 BETWEEN 1000 AND 4000 + ORDER BY c1 ASC NULLS FIRST; + c1 | c2 | c6 | c8 +------+-------+---------+---- + 200 | EMP2 | 1600 | 30 + 300 | EMP3 | 1250 | 30 + 400 | EMP4 | 2975 | 20 + 500 | EMP5 | 1250.23 | 30 + 600 | EMP6 | 2850 | 30 + 700 | EMP7 | 2450.34 | 10 + 800 | EMP8 | 3000 | 20 + 1000 | EMP10 | 1500 | 30 + 1100 | EMP11 | 1100 | 20 + 1300 | EMP13 | 3000 | 20 + 1400 | EMP14 | 1300 | 10 +(11 rows) + +EXPLAIN 
(VERBOSE, COSTS FALSE) +SELECT c1, c2, c4, c6, c8 FROM f_test_tbl1 e + WHERE c4 IS NOT NULL + ORDER BY c1; + QUERY PLAN +-------------------------------------------------------- + Sort + Output: c1, c2, c4, c6, c8 + Sort Key: e.c1 + -> Foreign Scan on public.f_test_tbl1 e + Output: c1, c2, c4, c6, c8 + Filter: (e.c4 IS NOT NULL) + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(7 rows) + +SELECT c1, c2, c4, c6, c8 FROM f_test_tbl1 e + WHERE c4 IS NOT NULL + ORDER BY c1; + c1 | c2 | c4 | c6 | c8 +------+-------+------+---------+---- + 100 | EMP1 | 1300 | 800.3 | 20 + 200 | EMP2 | 600 | 1600 | 30 + 300 | EMP3 | 600 | 1250 | 30 + 400 | EMP4 | 900 | 2975 | 20 + 500 | EMP5 | 600 | 1250.23 | 30 + 600 | EMP6 | 900 | 2850 | 30 + 700 | EMP7 | 900 | 2450.34 | 10 + 800 | EMP8 | 400 | 3000 | 20 + 1000 | EMP10 | 600 | 1500 | 30 + 1100 | EMP11 | 800 | 1100 | 20 + 1200 | EMP12 | 600 | 950 | 30 + 1300 | EMP13 | 400 | 3000 | 20 + 1400 | EMP14 | 700 | 1300 | 10 +(13 rows) + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c5 FROM f_test_tbl1 e + WHERE c5 <= '1980-12-17' + ORDER BY c1 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.f_test_tbl1 e + Output: c1, c2, c5 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(3 rows) + +SELECT c1, c2, c5 FROM f_test_tbl1 e + WHERE c5 <= '1980-12-17' + ORDER BY c1 ASC NULLS FIRST; + c1 | c2 | c5 +------+-------+------------ + 100 | EMP1 | 1980-12-17 + 1000 | EMP10 | 1980-09-08 +(2 rows) + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c6, c8 FROM f_test_tbl1 e + WHERE c2 IN ('EMP6', 'EMP12', 'EMP5') + ORDER BY c1; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Output: c1, c2, c6, c8 + Sort Key: e.c1 + -> Foreign Scan on public.f_test_tbl1 e + Output: c1, c2, c6, c8 + Filter: ((e.c2)::text = ANY ('{EMP6,EMP12,EMP5}'::text[])) + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(7 rows) + +SELECT c1, c2, c6, c8 FROM f_test_tbl1 e + WHERE c2 IN ('EMP6', 'EMP12', 'EMP5') + ORDER BY c1; + c1 | c2 | c6 | c8 +------+-------+---------+---- + 500 | EMP5 | 1250.23 | 30 + 600 | EMP6 | 2850 | 30 + 1200 | EMP12 | 950 | 30 +(3 rows) + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c6, c8 FROM f_test_tbl1 e + WHERE c3 LIKE 'SALESMAN' + ORDER BY c1; + QUERY PLAN +-------------------------------------------------------- + Sort + Output: c1, c2, c6, c8 + Sort Key: e.c1 + -> Foreign Scan on public.f_test_tbl1 e + Output: c1, c2, c6, c8 + Filter: (e.c3 ~~ 'SALESMAN'::text) + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(7 rows) + +SELECT c1, c2, c6, c8 FROM f_test_tbl1 e + WHERE c3 LIKE 'SALESMAN' + ORDER BY c1; + c1 | c2 | c6 | c8 +----+----+----+---- +(0 rows) + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c6, c8 FROM f_test_tbl1 e + WHERE c3 LIKE 'MANA%' + ORDER BY c1; + QUERY PLAN +-------------------------------------------------------- + Sort + Output: c1, c2, c6, c8 + Sort Key: e.c1 + -> Foreign Scan on public.f_test_tbl1 e + Output: c1, c2, c6, c8 + Filter: (e.c3 ~~ 'MANA%'::text) + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(7 rows) + +SELECT c1, c2, c6, c8 FROM f_test_tbl1 e + WHERE c3 LIKE 'MANA%' + ORDER BY c1; + c1 | c2 | c6 | c8 +-----+------+---------+---- + 400 | EMP4 | 2975 | 20 + 600 | EMP6 | 2850 | 30 + 700 | EMP7 | 2450.34 | 10 +(3 rows) + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT a FROM f_mongo_test + WHERE a%2 = 1 + ORDER BY a ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------- + Foreign Scan on public.f_mongo_test + Output: 
a + Foreign Namespace: mongo_fdw_regress.mongo_test +(3 rows) + +SELECT a FROM f_mongo_test + WHERE a%2 = 1 + ORDER BY a ASC NULLS FIRST; + a +--- + 1 + 3 +(2 rows) + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT a, b FROM f_mongo_test + WHERE a >= 1 AND b LIKE '%O%' + ORDER BY a; + QUERY PLAN +--------------------------------------------------------- + Sort + Output: a, b + Sort Key: f_mongo_test.a + -> Foreign Scan on public.f_mongo_test + Output: a, b + Filter: ((f_mongo_test.b)::text ~~ '%O%'::text) + Foreign Namespace: mongo_fdw_regress.mongo_test +(7 rows) + +SELECT a, b FROM f_mongo_test + WHERE a >= 1 AND b LIKE '%O%' + ORDER BY a; + a | b +---+----- + 1 | One +(1 row) + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c5 FROM f_test_tbl1 e + WHERE c5 <= '1980-12-17' AND c2 IN ('EMP1', 'EMP5', 'EMP10') AND c1 = 100 + ORDER BY c1; + QUERY PLAN +-------------------------------------------------------------- + Foreign Scan on public.f_test_tbl1 e + Output: c1, c2, c5 + Filter: ((e.c2)::text = ANY ('{EMP1,EMP5,EMP10}'::text[])) + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(4 rows) + +SELECT c1, c2, c5 FROM f_test_tbl1 e + WHERE c5 <= '1980-12-17' AND c2 IN ('EMP1', 'EMP5', 'EMP10') AND c1 = 100 + ORDER BY c1; + c1 | c2 | c5 +-----+------+------------ + 100 | EMP1 | 1980-12-17 +(1 row) + +-- The ORDER BY clause shouldn't push-down due to explicit COLLATE. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2 FROM f_test_tbl1 + WHERE c2 = 'EMP10' + ORDER BY c2 COLLATE "en_US" DESC NULLS LAST; + QUERY PLAN +------------------------------------------------------------ + Sort + Output: c1, c2, ((c2)::character varying(10)) + Sort Key: f_test_tbl1.c2 COLLATE "en_US" DESC NULLS LAST + -> Foreign Scan on public.f_test_tbl1 + Output: c1, c2, c2 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(6 rows) + +SELECT c1, c2 FROM f_test_tbl1 + WHERE c2 = 'EMP10' + ORDER BY c2 COLLATE "en_US" DESC NULLS LAST; + c1 | c2 +------+------- + 1000 | EMP10 +(1 row) + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2 FROM f_test_tbl1 + WHERE c2 < 'EMP10' + ORDER BY c2 DESC NULLS LAST; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.f_test_tbl1 + Output: c1, c2 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(3 rows) + +SELECT c1, c2 FROM f_test_tbl1 + WHERE c2 < 'EMP10' + ORDER BY c2 DESC NULLS LAST; + c1 | c2 +-----+------ + 100 | EMP1 +(1 row) + +-- Should push down if two columns of same table are +-- involved in single WHERE clause operator expression. 
+EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c4 FROM f_test_tbl1 + WHERE c1 > c4 + ORDER BY c1 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.f_test_tbl1 + Output: c1, c4 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(3 rows) + +SELECT c1, c4 FROM f_test_tbl1 + WHERE c1 > c4 + ORDER BY c1 ASC NULLS FIRST; + c1 | c4 +------+----- + 800 | 400 + 1000 | 600 + 1100 | 800 + 1200 | 600 + 1300 | 400 + 1400 | 700 +(6 rows) + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c4, c7, c8 FROM f_test_tbl1 + WHERE c1 < c4 AND c7 < c8 + ORDER BY c1; + QUERY PLAN +-------------------------------------------------------- + Sort + Output: c1, c4, c7, c8 + Sort Key: f_test_tbl1.c1 + -> Foreign Scan on public.f_test_tbl1 + Output: c1, c4, c7, c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(6 rows) + +SELECT c1, c4, c7, c8 FROM f_test_tbl1 + WHERE c1 < c4 AND c7 < c8 + ORDER BY c1; + c1 | c4 | c7 | c8 +-----+------+----+---- + 100 | 1300 | 0 | 20 + 400 | 900 | 0 | 20 + 600 | 900 | 0 | 30 + 700 | 900 | 0 | 10 +(4 rows) + +-- With ORDER BY pushdown disabled. +SET mongo_fdw.enable_order_by_pushdown TO OFF; +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c4 FROM f_test_tbl1 + WHERE c1 > c4 + ORDER BY c1 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------- + Sort + Output: c1, c4 + Sort Key: f_test_tbl1.c1 NULLS FIRST + -> Foreign Scan on public.f_test_tbl1 + Output: c1, c4 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(6 rows) + +SELECT c1, c4 FROM f_test_tbl1 + WHERE c1 > c4 + ORDER BY c1 ASC NULLS FIRST; + c1 | c4 +------+----- + 800 | 400 + 1000 | 600 + 1100 | 800 + 1200 | 600 + 1300 | 400 + 1400 | 700 +(6 rows) + +SET mongo_fdw.enable_order_by_pushdown TO ON; +-- Nested operator expression in WHERE clause. Should pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2 FROM f_test_tbl1 + WHERE (c1 > 1000) > FALSE + ORDER BY c1 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.f_test_tbl1 + Output: c1, c2 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(3 rows) + +SELECT c1, c2 FROM f_test_tbl1 + WHERE (c1 > 1000) > FALSE + ORDER BY c1 ASC NULLS FIRST; + c1 | c2 +------+------- + 1100 | EMP11 + 1200 | EMP12 + 1300 | EMP13 + 1400 | EMP14 +(4 rows) + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2 FROM f_test_tbl1 + WHERE (c1 > 1000) > 0::BOOLEAN + ORDER BY c1 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.f_test_tbl1 + Output: c1, c2 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(3 rows) + +SELECT c1, c2 FROM f_test_tbl1 + WHERE (c1 > 1000) > 0::BOOLEAN + ORDER BY c1 ASC NULLS FIRST; + c1 | c2 +------+------- + 1100 | EMP11 + 1200 | EMP12 + 1300 | EMP13 + 1400 | EMP14 +(4 rows) + +-- Shouldn't push down operators where the constant is an array. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT name, marks FROM f_test_tbl3 + WHERE marks = ARRAY[23::FLOAT, 24::FLOAT] + ORDER BY name; + QUERY PLAN +--------------------------------------------------------------------- + Sort + Output: name, marks + Sort Key: f_test_tbl3.name + -> Foreign Scan on public.f_test_tbl3 + Output: name, marks + Filter: (f_test_tbl3.marks = '{23,24}'::double precision[]) + Foreign Namespace: mongo_fdw_regress.test_tbl3 +(7 rows) + +SELECT name, marks FROM f_test_tbl3 + WHERE marks = ARRAY[23::FLOAT, 24::FLOAT] + ORDER BY name; + name | marks +------+--------- + dvd | {23,24} +(1 row) + +-- Pushdown in prepared statement. 
+PREPARE pre_stmt_f_mongo_test(int) AS + SELECT b FROM f_mongo_test WHERE a = $1 ORDER BY b; +EXPLAIN (VERBOSE, COSTS FALSE) +EXECUTE pre_stmt_f_mongo_test(1); + QUERY PLAN +--------------------------------------------------------- + Sort + Output: b + Sort Key: f_mongo_test.b + -> Foreign Scan on public.f_mongo_test + Output: b + Foreign Namespace: mongo_fdw_regress.mongo_test +(6 rows) + +EXECUTE pre_stmt_f_mongo_test(1); + b +----- + One +(1 row) + +EXPLAIN (VERBOSE, COSTS FALSE) +EXECUTE pre_stmt_f_mongo_test(2); + QUERY PLAN +--------------------------------------------------------- + Sort + Output: b + Sort Key: f_mongo_test.b + -> Foreign Scan on public.f_mongo_test + Output: b + Foreign Namespace: mongo_fdw_regress.mongo_test +(6 rows) + +EXECUTE pre_stmt_f_mongo_test(2); + b +----- + Two +(1 row) + +-- FDW-297: Only operator expressions should be pushed down in WHERE clause. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT name, marks FROM f_test_tbl3 + WHERE pass = true + ORDER BY name DESC NULLS LAST; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.f_test_tbl3 + Output: name, marks + Foreign Namespace: mongo_fdw_regress.test_tbl3 +(3 rows) + +SELECT name, marks FROM f_test_tbl3 + WHERE pass = true + ORDER BY name DESC NULLS LAST; + name | marks +------+--------- + vdd | {29,31} +(1 row) + +-- INSERT NULL values and check behaviour. +INSERT INTO f_test_tbl2 VALUES ('0', NULL, NULL, NULL); +-- Should pushdown and shouldn't result row with NULL VALUES. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1 FROM f_test_tbl2 WHERE c1 < 1; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.f_test_tbl2 + Output: c1 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(3 rows) + +SELECT c1 FROM f_test_tbl2 WHERE c1 < 1; + c1 +---- +(0 rows) + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1 FROM f_test_tbl2 WHERE c2 = c3; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.f_test_tbl2 + Output: c1 + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(3 rows) + +SELECT c1 FROM f_test_tbl2 WHERE c2 = c3; + c1 +---- +(0 rows) + +-- Test with IS NULL, shouldn't push down +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1 FROM f_test_tbl2 WHERE c2 IS NULL; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.f_test_tbl2 + Output: c1 + Filter: (f_test_tbl2.c2 IS NULL) + Foreign Namespace: mongo_fdw_regress.test_tbl2 +(4 rows) + +SELECT c1 FROM f_test_tbl2 WHERE c2 IS NULL; + c1 +---- + +(1 row) + +-- FDW-134: Test with number of columns more than 32 +CREATE FOREIGN TABLE f_test_large (_id int, + a01 int, a02 int, a03 int, a04 int, a05 int, a06 int, a07 int, a08 int, a09 int, a10 int, + a11 int, a12 int, a13 int, a14 int, a15 int, a16 int, a17 int, a18 int, a19 int, a20 int, + a21 int, a22 int, a23 int, a24 int, a25 int, a26 int, a27 int, a28 int, a29 int, a30 int, + a31 int, a32 int, a33 int, a34 int, a35 int) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'mongo_test_large'); +-- Shouldn't pushdown ORDERBY clause due to exceeded number of path keys limit. 
+EXPLAIN (VERBOSE, COSTS FALSE) +SELECT _id, a01, a31, a32, a33, a34, a35 FROM f_test_large ORDER BY + a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST, + a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST, + a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST, + a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST, + a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST, + a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 ASC NULLS FIRST, + a31 ASC NULLS FIRST, a32 ASC NULLS FIRST, a33 ASC NULLS FIRST, a34 DESC NULLS LAST, a35 ASC NULLS FIRST; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: _id, a01, a31, a32, a33, a34, a35, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30 + Sort Key: f_test_large.a01 NULLS FIRST, f_test_large.a02 NULLS FIRST, f_test_large.a03 NULLS FIRST, f_test_large.a04 NULLS FIRST, f_test_large.a05 NULLS FIRST, f_test_large.a06 NULLS FIRST, f_test_large.a07 NULLS FIRST, f_test_large.a08 NULLS FIRST, f_test_large.a09 NULLS FIRST, f_test_large.a10 NULLS FIRST, f_test_large.a11 NULLS FIRST, f_test_large.a12 NULLS FIRST, f_test_large.a13 NULLS FIRST, f_test_large.a14 NULLS FIRST, f_test_large.a15 NULLS FIRST, f_test_large.a16 NULLS FIRST, f_test_large.a17 NULLS FIRST, f_test_large.a18 NULLS FIRST, f_test_large.a19 NULLS FIRST, f_test_large.a20 NULLS FIRST, f_test_large.a21 NULLS FIRST, f_test_large.a22 NULLS FIRST, f_test_large.a23 NULLS FIRST, f_test_large.a24 NULLS FIRST, f_test_large.a25 NULLS FIRST, f_test_large.a26 NULLS FIRST, f_test_large.a27 NULLS FIRST, f_test_large.a28 NULLS FIRST, f_test_large.a29 NULLS FIRST, f_test_large.a30 NULLS FIRST, f_test_large.a31 NULLS FIRST, f_test_large.a32 NULLS FIRST, f_test_large.a33 NULLS FIRST, f_test_large.a34 DESC NULLS LAST, f_test_large.a35 NULLS FIRST + -> Foreign Scan on public.f_test_large + Output: _id, a01, a31, a32, a33, a34, a35, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30 + Foreign Namespace: mongo_fdw_regress.mongo_test_large +(6 rows) + +SELECT _id, a01, a31, a32, a33, a34, a35 FROM f_test_large 
ORDER BY + a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST, + a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST, + a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST, + a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST, + a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST, + a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 ASC NULLS FIRST, + a31 ASC NULLS FIRST, a32 ASC NULLS FIRST, a33 ASC NULLS FIRST, a34 DESC NULLS LAST, a35 ASC NULLS FIRST; + _id | a01 | a31 | a32 | a33 | a34 | a35 +-----+-----+-----+-----+-----+-----+----- + 1 | 1 | 31 | 2 | 3 | 4 | 5 + 3 | 1 | 31 | 32 | 3 | 34 | 35 + 0 | 1 | 31 | 32 | 33 | 134 | 35 + 4 | 1 | 31 | 32 | 33 | 34 | 35 + 2 | 1 | 31 | 132 | 133 | 134 | 135 +(5 rows) + +-- Should pushdown ORDERBY clause because number of path keys are in limit. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT _id, a01, a31, a32, a33, a34, a35 FROM f_test_large ORDER BY + a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST, + a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST, + a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST, + a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST, + a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST, + a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 ASC NULLS FIRST, + a31 ASC NULLS FIRST, a32 ASC NULLS FIRST; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Foreign Scan on public.f_test_large + Output: _id, a01, a31, a32, a33, a34, a35, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30 + Foreign Namespace: mongo_fdw_regress.mongo_test_large +(3 rows) + +SELECT _id, a01, a31, a32, a33, a34, a35 FROM f_test_large ORDER BY + a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST, + a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST, + a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST, + a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST, + a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST, + a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 ASC NULLS FIRST, + a31 ASC NULLS FIRST, a32 ASC NULLS FIRST; + _id | a01 | a31 | a32 | a33 | a34 | a35 +-----+-----+-----+-----+-----+-----+----- + 1 | 1 | 31 | 2 | 3 | 4 | 5 + 0 | 1 | 31 | 32 | 33 | 134 | 35 + 3 | 1 | 31 | 32 | 3 | 34 | 35 + 4 | 1 | 31 | 32 | 33 | 34 | 35 + 2 | 1 | 31 | 132 | 133 | 134 | 135 +(5 rows) + +-- FDW-564: Test ORDER BY with user defined operators. Create the operator +-- family required for the test. 
+CREATE OPERATOR PUBLIC.<^ ( + LEFTARG = INT4, + RIGHTARG = INT4, + PROCEDURE = INT4EQ +); +CREATE OPERATOR PUBLIC.=^ ( + LEFTARG = INT4, + RIGHTARG = INT4, + PROCEDURE = INT4LT +); +CREATE OPERATOR PUBLIC.>^ ( + LEFTARG = INT4, + RIGHTARG = INT4, + PROCEDURE = INT4GT +); +CREATE OPERATOR FAMILY my_op_family USING btree; +CREATE FUNCTION MY_OP_CMP(A INT, B INT) RETURNS INT AS + $$ BEGIN RETURN BTINT4CMP(A, B); END $$ LANGUAGE PLPGSQL; +CREATE OPERATOR CLASS my_op_class FOR TYPE INT USING btree FAMILY my_op_family AS + OPERATOR 1 PUBLIC.<^, + OPERATOR 3 PUBLIC.=^, + OPERATOR 5 PUBLIC.>^, + FUNCTION 1 my_op_cmp(INT, INT); +-- FDW-564: User defined operators are not pushed down. +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT * FROM f_mongo_test ORDER BY a USING OPERATOR(public.<^); + QUERY PLAN +--------------------------------------------------------- + Sort + Output: _id, a, b + Sort Key: f_mongo_test.a USING <^ + -> Foreign Scan on public.f_mongo_test + Output: _id, a, b + Foreign Namespace: mongo_fdw_regress.mongo_test +(6 rows) + +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT MIN(a) FROM f_mongo_test GROUP BY b ORDER BY 1 USING OPERATOR(public.<^); + QUERY PLAN +------------------------------------------------------------------------------------- + Sort + Output: (min(a)), b + Sort Key: (min(f_mongo_test.a)) USING <^ + -> Foreign Scan + Output: (min(a)), b + Foreign Namespace: Aggregate on (mongo_fdw_regress.mongo_test f_mongo_test) +(6 rows) + +-- FDW-589: Test enable_order_by_pushdown option at server and table level. +-- Test the option at server level. +-- Check only boolean values are accepted. +ALTER SERVER mongo_server OPTIONS (ADD enable_order_by_pushdown 'abc11'); +ERROR: enable_order_by_pushdown requires a Boolean value +ALTER SERVER mongo_server OPTIONS (ADD enable_order_by_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c4 FROM f_test_tbl1 + WHERE c1 > c4 + ORDER BY c1 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------- + Sort + Output: c1, c4 + Sort Key: f_test_tbl1.c1 NULLS FIRST + -> Foreign Scan on public.f_test_tbl1 + Output: c1, c4 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(6 rows) + +ALTER SERVER mongo_server OPTIONS (SET enable_order_by_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c4 FROM f_test_tbl1 + WHERE c1 > c4 + ORDER BY c1 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.f_test_tbl1 + Output: c1, c4 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(3 rows) + +-- Test that setting option at table level does not affect the setting at +-- server level. +ALTER SERVER mongo_server OPTIONS (SET enable_order_by_pushdown 'false'); +-- Test the option at table level. 
+ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (ADD enable_order_by_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c4 FROM f_test_tbl1 + WHERE c1 > c4 + ORDER BY c1 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------- + Foreign Scan on public.f_test_tbl1 + Output: c1, c4 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(3 rows) + +SELECT c1, c4 FROM f_test_tbl1 + WHERE c1 > c4 + ORDER BY c1 ASC NULLS FIRST; + c1 | c4 +------+----- + 800 | 400 + 1000 | 600 + 1100 | 800 + 1200 | 600 + 1300 | 400 + 1400 | 700 +(6 rows) + +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_order_by_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c4 FROM f_test_tbl1 + WHERE c1 > c4 + ORDER BY c1 ASC NULLS FIRST; + QUERY PLAN +-------------------------------------------------------- + Sort + Output: c1, c4 + Sort Key: f_test_tbl1.c1 NULLS FIRST + -> Foreign Scan on public.f_test_tbl1 + Output: c1, c4 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(6 rows) + +SELECT c1, c4 FROM f_test_tbl1 + WHERE c1 > c4 + ORDER BY c1 ASC NULLS FIRST; + c1 | c4 +------+----- + 800 | 400 + 1000 | 600 + 1100 | 800 + 1200 | 600 + 1300 | 400 + 1400 | 700 +(6 rows) + +ALTER SERVER mongo_server OPTIONS (SET enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_order_by_pushdown 'true'); +-- FDW-631: Test pushdown of boolean expression +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT name, pass FROM f_test_tbl3 WHERE pass = false ORDER BY name; + QUERY PLAN +-------------------------------------------------------- + Sort + Output: name, pass + Sort Key: f_test_tbl3.name + -> Foreign Scan on public.f_test_tbl3 + Output: name, pass + Foreign Namespace: mongo_fdw_regress.test_tbl3 +(6 rows) + +SELECT name, pass FROM f_test_tbl3 WHERE pass = false ORDER BY name; + name | pass +------+------ + dvd | f +(1 row) + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT name, pass FROM f_test_tbl3 WHERE pass = true ORDER BY name; + QUERY PLAN +-------------------------------------------------------- + Sort + Output: name, pass + Sort Key: f_test_tbl3.name + -> Foreign Scan on public.f_test_tbl3 + Output: name, pass + Foreign Namespace: mongo_fdw_regress.test_tbl3 +(6 rows) + +SELECT name, pass FROM f_test_tbl3 WHERE pass = true ORDER BY name; + name | pass +------+------ + vdd | t +(1 row) + +-- Cleanup +DELETE FROM f_mongo_test WHERE a != 0; +DELETE FROM f_test_tbl2 WHERE c1 IS NULL; +DROP FOREIGN TABLE f_mongo_test; +DROP FOREIGN TABLE f_test_tbl1; +DROP FOREIGN TABLE f_test_tbl2; +DROP FOREIGN TABLE f_test_tbl3; +DROP FOREIGN TABLE f_test_large; +DROP OPERATOR CLASS my_op_class USING btree; +DROP FUNCTION my_op_cmp(a INT, b INT); +DROP OPERATOR FAMILY my_op_family USING btree; +DROP OPERATOR public.>^(INT, INT); +DROP OPERATOR public.=^(INT, INT); +DROP OPERATOR public.<^(INT, INT); +DROP USER MAPPING FOR public SERVER mongo_server; +DROP SERVER mongo_server; +DROP EXTENSION mongo_fdw; diff --git a/expected/select.out b/expected/select.out new file mode 100644 index 0000000..073a77b --- /dev/null +++ b/expected/select.out @@ -0,0 +1,1428 @@ +\set MONGO_HOST `echo \'"$MONGO_HOST"\'` +\set MONGO_PORT `echo \'"$MONGO_PORT"\'` +\set MONGO_USER_NAME `echo \'"$MONGO_USER_NAME"\'` +\set MONGO_PASS `echo \'"$MONGO_PWD"\'` +-- Before running this file user must create database mongo_fdw_regress on +-- MongoDB with all permission for MONGO_USER_NAME user with MONGO_PASS +-- password and ran mongodb_init.sh file to load collections. 
+\c contrib_regression +CREATE EXTENSION IF NOT EXISTS mongo_fdw; +CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw + OPTIONS (address :MONGO_HOST, port :MONGO_PORT); +CREATE USER MAPPING FOR public SERVER mongo_server; +-- Check version +SELECT mongo_fdw_version(); + mongo_fdw_version +------------------- + 50502 +(1 row) + +-- Create foreign tables +CREATE FOREIGN TABLE f_mongo_test (_id name, a int, b text) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'mongo_test'); +CREATE FOREIGN TABLE f_test_tbl1 (_id NAME, c1 INTEGER, c2 VARCHAR(10), c3 CHAR(9),c4 INTEGER, c5 pg_catalog.Date, c6 DECIMAL, c7 INTEGER, c8 INTEGER) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl1'); +CREATE FOREIGN TABLE f_test_tbl2 (_id NAME, c1 INTEGER, c2 VARCHAR(14), c3 VARCHAR(13)) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl2'); +CREATE FOREIGN TABLE countries (_id NAME, name VARCHAR, population INTEGER, capital VARCHAR, hdi FLOAT) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'countries'); +CREATE FOREIGN TABLE country_elections (_id NAME, "lastElections.type" VARCHAR, "lastElections.date" pg_catalog.TIMESTAMP) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'countries'); +CREATE FOREIGN TABLE main_exports (_id NAME, "mainExports" TEXT[] ) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'countries'); +CREATE FOREIGN TABLE test_json ( __doc json) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'warehouse'); +CREATE FOREIGN TABLE test_jsonb ( __doc jsonb) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'warehouse'); +CREATE FOREIGN TABLE test_text ( __doc text) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'warehouse'); +CREATE FOREIGN TABLE test_varchar ( __doc varchar) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'warehouse'); +CREATE FOREIGN TABLE f_test5 (_id NAME, c1 NUMERIC) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test5'); +CREATE FOREIGN TABLE f_test_tbl4 (_id NAME, a NUMERIC(12, 2)) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl4'); +CREATE FOREIGN TABLE f_test_tbl5 (_id NAME, a BOOLEAN) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl4'); +CREATE FOREIGN TABLE f_test_tbl6 (_id NAME, a INTEGER) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl5'); +CREATE FOREIGN TABLE f_test_tbl7 (_id NAME, a INTEGER) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl4'); +CREATE FOREIGN TABLE testlog (_id NAME, log VARCHAR, "logMeta.logMac" VARCHAR, "logMeta.nestMore.level" INTEGER) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'testlog'); +CREATE FOREIGN TABLE testdevice (_id NAME, name VARCHAR, mac VARCHAR, level INTEGER) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'testdevice'); +SET datestyle TO ISO; +-- Retrieve data from foreign table using SELECT statement. 
+SELECT c1, c2, c3, c4, c5, c6, c7, c8 FROM f_test_tbl1 + ORDER BY c1 DESC, c8; + c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 +------+-------+-----------+------+------------+---------+------+---- + 1400 | EMP14 | ADMIN | 700 | 1982-01-23 | 1300 | 0 | 10 + 1300 | EMP13 | FINANCE | 400 | 1981-12-03 | 3000 | 0 | 20 + 1200 | EMP12 | ADMIN | 600 | 1981-12-03 | 950 | 0 | 30 + 1100 | EMP11 | ADMIN | 800 | 1987-05-23 | 1100 | 0 | 20 + 1000 | EMP10 | SALESMAN | 600 | 1980-09-08 | 1500 | 0 | 30 + 900 | EMP9 | HEAD | | 1981-11-17 | 5000 | 0 | 10 + 800 | EMP8 | FINANCE | 400 | 1987-04-19 | 3000 | 0 | 20 + 700 | EMP7 | MANAGER | 900 | 1981-06-09 | 2450.34 | 0 | 10 + 600 | EMP6 | MANAGER | 900 | 1981-05-01 | 2850 | 0 | 30 + 500 | EMP5 | SALESMAN | 600 | 1981-09-28 | 1250.23 | 1400 | 30 + 400 | EMP4 | MANAGER | 900 | 1981-04-02 | 2975 | 0 | 20 + 300 | EMP3 | SALESMAN | 600 | 1981-02-22 | 1250 | 500 | 30 + 200 | EMP2 | SALESMAN | 600 | 1981-02-20 | 1600 | 300 | 30 + 100 | EMP1 | ADMIN | 1300 | 1980-12-17 | 800.3 | 0 | 20 +(14 rows) + +SELECT DISTINCT c8 FROM f_test_tbl1 ORDER BY 1; + c8 +---- + 10 + 20 + 30 +(3 rows) + +SELECT c2 AS "Employee Name" FROM f_test_tbl1 ORDER BY c2 COLLATE "C"; + Employee Name +--------------- + EMP1 + EMP10 + EMP11 + EMP12 + EMP13 + EMP14 + EMP2 + EMP3 + EMP4 + EMP5 + EMP6 + EMP7 + EMP8 + EMP9 +(14 rows) + +SELECT c8, c6, c7 FROM f_test_tbl1 ORDER BY 1, 2, 3; + c8 | c6 | c7 +----+---------+------ + 10 | 1300 | 0 + 10 | 2450.34 | 0 + 10 | 5000 | 0 + 20 | 800.3 | 0 + 20 | 1100 | 0 + 20 | 2975 | 0 + 20 | 3000 | 0 + 20 | 3000 | 0 + 30 | 950 | 0 + 30 | 1250 | 500 + 30 | 1250.23 | 1400 + 30 | 1500 | 0 + 30 | 1600 | 300 + 30 | 2850 | 0 +(14 rows) + +SELECT c1, c2, c3, c4, c5, c6, c7, c8 FROM f_test_tbl1 + WHERE c1 = 100 ORDER BY 1; + c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 +-----+------+-----------+------+------------+-------+----+---- + 100 | EMP1 | ADMIN | 1300 | 1980-12-17 | 800.3 | 0 | 20 +(1 row) + +SELECT c1, c2, c3, c4, c5, c6, c7, c8 FROM f_test_tbl1 + WHERE c1 = 100 OR c1 = 700 ORDER BY 1; + c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 +-----+------+-----------+------+------------+---------+----+---- + 100 | EMP1 | ADMIN | 1300 | 1980-12-17 | 800.3 | 0 | 20 + 700 | EMP7 | MANAGER | 900 | 1981-06-09 | 2450.34 | 0 | 10 +(2 rows) + +SELECT c1, c2, c3 FROM f_test_tbl1 WHERE c3 like 'SALESMAN' ORDER BY 1; + c1 | c2 | c3 +----+----+---- +(0 rows) + +SELECT c1, c2, c3 FROM f_test_tbl1 WHERE c1 IN (100, 700) ORDER BY 1; + c1 | c2 | c3 +-----+------+----------- + 100 | EMP1 | ADMIN + 700 | EMP7 | MANAGER +(2 rows) + +SELECT c1, c2, c3 FROM f_test_tbl1 WHERE c1 NOT IN (100, 700) ORDER BY 1 LIMIT 5; + c1 | c2 | c3 +-----+------+----------- + 200 | EMP2 | SALESMAN + 300 | EMP3 | SALESMAN + 400 | EMP4 | MANAGER + 500 | EMP5 | SALESMAN + 600 | EMP6 | MANAGER +(5 rows) + +SELECT c1, c2, c8 FROM f_test_tbl1 WHERE c8 BETWEEN 10 AND 20 ORDER BY 1; + c1 | c2 | c8 +------+-------+---- + 100 | EMP1 | 20 + 400 | EMP4 | 20 + 700 | EMP7 | 10 + 800 | EMP8 | 20 + 900 | EMP9 | 10 + 1100 | EMP11 | 20 + 1300 | EMP13 | 20 + 1400 | EMP14 | 10 +(8 rows) + +SELECT c1, c2, c6 FROM f_test_tbl1 ORDER BY 1 OFFSET 5; + c1 | c2 | c6 +------+-------+--------- + 600 | EMP6 | 2850 + 700 | EMP7 | 2450.34 + 800 | EMP8 | 3000 + 900 | EMP9 | 5000 + 1000 | EMP10 | 1500 + 1100 | EMP11 | 1100 + 1200 | EMP12 | 950 + 1300 | EMP13 | 3000 + 1400 | EMP14 | 1300 +(9 rows) + +-- Retrieve data from foreign table using group by clause. 
+SELECT c8 "Department", COUNT(c1) "Total Employees" FROM f_test_tbl1 + GROUP BY c8 ORDER BY c8; + Department | Total Employees +------------+----------------- + 10 | 3 + 20 | 5 + 30 | 6 +(3 rows) + +SELECT c8, SUM(c6) FROM f_test_tbl1 + GROUP BY c8 HAVING c8 IN (10, 30) ORDER BY c8; + c8 | sum +----+--------- + 10 | 8750.34 + 30 | 9400.23 +(2 rows) + +SELECT c8, SUM(c6) FROM f_test_tbl1 + GROUP BY c8 HAVING SUM(c6) > 9400 ORDER BY c8; + c8 | sum +----+--------- + 20 | 10875.3 + 30 | 9400.23 +(2 rows) + +-- Retrieve data from foreign table using sub-queries. +SELECT c1, c2, c6 FROM f_test_tbl1 + WHERE c8 <> ALL (SELECT c1 FROM f_test_tbl2 WHERE c1 IN (10, 30, 40)) + ORDER BY c1; + c1 | c2 | c6 +------+-------+------- + 100 | EMP1 | 800.3 + 400 | EMP4 | 2975 + 800 | EMP8 | 3000 + 1100 | EMP11 | 1100 + 1300 | EMP13 | 3000 +(5 rows) + +SELECT c1, c2, c3 FROM f_test_tbl2 + WHERE EXISTS (SELECT 1 FROM f_test_tbl1 WHERE f_test_tbl2.c1 = f_test_tbl1.c8) + ORDER BY 1, 2; + c1 | c2 | c3 +----+----------------+---------- + 10 | DEVELOPMENT | PUNE + 20 | ADMINISTRATION | BANGLORE + 30 | SALES | MUMBAI +(3 rows) + +SELECT c1, c2, c3, c4, c5, c6, c7, c8 FROM f_test_tbl1 + WHERE c8 NOT IN (SELECT c1 FROM f_test_tbl2) ORDER BY c1; + c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 +----+----+----+----+----+----+----+---- +(0 rows) + +-- Retrieve data from foreign table using UNION operator. +SELECT c1, c2 FROM f_test_tbl2 UNION +SELECT c1, c2 FROM f_test_tbl1 ORDER BY c1; + c1 | c2 +------+---------------- + 10 | DEVELOPMENT + 20 | ADMINISTRATION + 30 | SALES + 40 | HR + 100 | EMP1 + 200 | EMP2 + 300 | EMP3 + 400 | EMP4 + 500 | EMP5 + 600 | EMP6 + 700 | EMP7 + 800 | EMP8 + 900 | EMP9 + 1000 | EMP10 + 1100 | EMP11 + 1200 | EMP12 + 1300 | EMP13 + 1400 | EMP14 +(18 rows) + +SELECT c1, c2 FROM f_test_tbl2 UNION ALL +SELECT c1, c2 FROM f_test_tbl1 ORDER BY c1; + c1 | c2 +------+---------------- + 10 | DEVELOPMENT + 20 | ADMINISTRATION + 30 | SALES + 40 | HR + 100 | EMP1 + 200 | EMP2 + 300 | EMP3 + 400 | EMP4 + 500 | EMP5 + 600 | EMP6 + 700 | EMP7 + 800 | EMP8 + 900 | EMP9 + 1000 | EMP10 + 1100 | EMP11 + 1200 | EMP12 + 1300 | EMP13 + 1400 | EMP14 +(18 rows) + +-- Retrieve data from foreign table using INTERSECT operator. +SELECT c1, c2 FROM f_test_tbl1 WHERE c1 >= 800 INTERSECT +SELECT c1, c2 FROM f_test_tbl1 WHERE c1 >= 400 ORDER BY c1; + c1 | c2 +------+------- + 800 | EMP8 + 900 | EMP9 + 1000 | EMP10 + 1100 | EMP11 + 1200 | EMP12 + 1300 | EMP13 + 1400 | EMP14 +(7 rows) + +SELECT c1, c2 FROM f_test_tbl1 WHERE c1 >= 800 INTERSECT ALL +SELECT c1, c2 FROM f_test_tbl1 WHERE c1 >= 400 ORDER BY c1; + c1 | c2 +------+------- + 800 | EMP8 + 900 | EMP9 + 1000 | EMP10 + 1100 | EMP11 + 1200 | EMP12 + 1300 | EMP13 + 1400 | EMP14 +(7 rows) + +-- Retrieve data from foreign table using EXCEPT operator. +SELECT c1, c2 FROM f_test_tbl1 EXCEPT +SELECT c1, c2 FROM f_test_tbl1 WHERE c1 > 900 ORDER BY c1; + c1 | c2 +-----+------ + 100 | EMP1 + 200 | EMP2 + 300 | EMP3 + 400 | EMP4 + 500 | EMP5 + 600 | EMP6 + 700 | EMP7 + 800 | EMP8 + 900 | EMP9 +(9 rows) + +SELECT c1, c2 FROM f_test_tbl1 EXCEPT ALL +SELECT c1, c2 FROM f_test_tbl1 WHERE c1 > 900 ORDER BY c1; + c1 | c2 +-----+------ + 100 | EMP1 + 200 | EMP2 + 300 | EMP3 + 400 | EMP4 + 500 | EMP5 + 600 | EMP6 + 700 | EMP7 + 800 | EMP8 + 900 | EMP9 +(9 rows) + +-- Retrieve data from foreign table using CTE (with clause). 
+WITH + with_qry AS (SELECT c1, c2, c3 FROM f_test_tbl2) +SELECT e.c2, e.c6, w.c1, w.c2 FROM f_test_tbl1 e, with_qry w + WHERE e.c8 = w.c1 ORDER BY e.c8, e.c2 COLLATE "C"; + c2 | c6 | c1 | c2 +-------+---------+----+---------------- + EMP14 | 1300 | 10 | DEVELOPMENT + EMP7 | 2450.34 | 10 | DEVELOPMENT + EMP9 | 5000 | 10 | DEVELOPMENT + EMP1 | 800.3 | 20 | ADMINISTRATION + EMP11 | 1100 | 20 | ADMINISTRATION + EMP13 | 3000 | 20 | ADMINISTRATION + EMP4 | 2975 | 20 | ADMINISTRATION + EMP8 | 3000 | 20 | ADMINISTRATION + EMP10 | 1500 | 30 | SALES + EMP12 | 950 | 30 | SALES + EMP2 | 1600 | 30 | SALES + EMP3 | 1250 | 30 | SALES + EMP5 | 1250.23 | 30 | SALES + EMP6 | 2850 | 30 | SALES +(14 rows) + +WITH + test_tbl2_costs AS (SELECT d.c2, SUM(c6) test_tbl2_total FROM f_test_tbl1 e, f_test_tbl2 d + WHERE e.c8 = d.c1 GROUP BY 1), + avg_cost AS (SELECT SUM(test_tbl2_total)/COUNT(*) avg FROM test_tbl2_costs) +SELECT * FROM test_tbl2_costs + WHERE test_tbl2_total > (SELECT avg FROM avg_cost) ORDER BY c2 COLLATE "C"; + c2 | test_tbl2_total +----------------+----------------- + ADMINISTRATION | 10875.3 +(1 row) + +-- Retrieve data from foreign table using window clause. +SELECT c8, c1, c6, AVG(c6) OVER (PARTITION BY c8) FROM f_test_tbl1 + ORDER BY c8, c1; + c8 | c1 | c6 | avg +----+------+---------+----------------------- + 10 | 700 | 2450.34 | 2916.7800000000000000 + 10 | 900 | 5000 | 2916.7800000000000000 + 10 | 1400 | 1300 | 2916.7800000000000000 + 20 | 100 | 800.3 | 2175.0600000000000000 + 20 | 400 | 2975 | 2175.0600000000000000 + 20 | 800 | 3000 | 2175.0600000000000000 + 20 | 1100 | 1100 | 2175.0600000000000000 + 20 | 1300 | 3000 | 2175.0600000000000000 + 30 | 200 | 1600 | 1566.7050000000000000 + 30 | 300 | 1250 | 1566.7050000000000000 + 30 | 500 | 1250.23 | 1566.7050000000000000 + 30 | 600 | 2850 | 1566.7050000000000000 + 30 | 1000 | 1500 | 1566.7050000000000000 + 30 | 1200 | 950 | 1566.7050000000000000 +(14 rows) + +SELECT c8, c1, c6, COUNT(c6) OVER (PARTITION BY c8) FROM f_test_tbl1 + WHERE c8 IN (10, 30, 40, 50, 60, 70) ORDER BY c8, c1; + c8 | c1 | c6 | count +----+------+---------+------- + 10 | 700 | 2450.34 | 3 + 10 | 900 | 5000 | 3 + 10 | 1400 | 1300 | 3 + 30 | 200 | 1600 | 6 + 30 | 300 | 1250 | 6 + 30 | 500 | 1250.23 | 6 + 30 | 600 | 2850 | 6 + 30 | 1000 | 1500 | 6 + 30 | 1200 | 950 | 6 +(9 rows) + +SELECT c8, c1, c6, SUM(c6) OVER (PARTITION BY c8) FROM f_test_tbl1 + ORDER BY c8, c1; + c8 | c1 | c6 | sum +----+------+---------+--------- + 10 | 700 | 2450.34 | 8750.34 + 10 | 900 | 5000 | 8750.34 + 10 | 1400 | 1300 | 8750.34 + 20 | 100 | 800.3 | 10875.3 + 20 | 400 | 2975 | 10875.3 + 20 | 800 | 3000 | 10875.3 + 20 | 1100 | 1100 | 10875.3 + 20 | 1300 | 3000 | 10875.3 + 30 | 200 | 1600 | 9400.23 + 30 | 300 | 1250 | 9400.23 + 30 | 500 | 1250.23 | 9400.23 + 30 | 600 | 2850 | 9400.23 + 30 | 1000 | 1500 | 9400.23 + 30 | 1200 | 950 | 9400.23 +(14 rows) + +-- Views +CREATE VIEW smpl_vw AS + SELECT c1, c2, c3, c4, c5, c6, c7, c8 FROM f_test_tbl1 ORDER BY c1; +SELECT * FROM smpl_vw ORDER BY 1; + c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 +------+-------+-----------+------+------------+---------+------+---- + 100 | EMP1 | ADMIN | 1300 | 1980-12-17 | 800.3 | 0 | 20 + 200 | EMP2 | SALESMAN | 600 | 1981-02-20 | 1600 | 300 | 30 + 300 | EMP3 | SALESMAN | 600 | 1981-02-22 | 1250 | 500 | 30 + 400 | EMP4 | MANAGER | 900 | 1981-04-02 | 2975 | 0 | 20 + 500 | EMP5 | SALESMAN | 600 | 1981-09-28 | 1250.23 | 1400 | 30 + 600 | EMP6 | MANAGER | 900 | 1981-05-01 | 2850 | 0 | 30 + 700 | EMP7 | MANAGER | 900 | 1981-06-09 | 
2450.34 | 0 | 10 + 800 | EMP8 | FINANCE | 400 | 1987-04-19 | 3000 | 0 | 20 + 900 | EMP9 | HEAD | | 1981-11-17 | 5000 | 0 | 10 + 1000 | EMP10 | SALESMAN | 600 | 1980-09-08 | 1500 | 0 | 30 + 1100 | EMP11 | ADMIN | 800 | 1987-05-23 | 1100 | 0 | 20 + 1200 | EMP12 | ADMIN | 600 | 1981-12-03 | 950 | 0 | 30 + 1300 | EMP13 | FINANCE | 400 | 1981-12-03 | 3000 | 0 | 20 + 1400 | EMP14 | ADMIN | 700 | 1982-01-23 | 1300 | 0 | 10 +(14 rows) + +CREATE VIEW comp_vw (s1, s2, s3, s6, s7, s8, d2) AS + SELECT s.c1, s.c2, s.c3, s.c6, s.c7, s.c8, d.c2 + FROM f_test_tbl2 d, f_test_tbl1 s WHERE d.c1 = s.c8 AND d.c1 = 10 + ORDER BY s.c1; +SELECT * FROM comp_vw ORDER BY 1; + s1 | s2 | s3 | s6 | s7 | s8 | d2 +------+-------+-----------+---------+----+----+------------- + 700 | EMP7 | MANAGER | 2450.34 | 0 | 10 | DEVELOPMENT + 900 | EMP9 | HEAD | 5000 | 0 | 10 | DEVELOPMENT + 1400 | EMP14 | ADMIN | 1300 | 0 | 10 | DEVELOPMENT +(3 rows) + +CREATE TEMPORARY VIEW temp_vw AS + SELECT c1, c2, c3 FROM f_test_tbl2; +SELECT * FROM temp_vw ORDER BY 1, 2; + c1 | c2 | c3 +----+----------------+---------- + 10 | DEVELOPMENT | PUNE + 20 | ADMINISTRATION | BANGLORE + 30 | SALES | MUMBAI + 40 | HR | NAGPUR +(4 rows) + +CREATE VIEW mul_tbl_view AS + SELECT d.c1 dc1, d.c2 dc2, e.c1 ec1, e.c2 ec2, e.c6 ec6 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY d.c1; +SELECT * FROM mul_tbl_view ORDER BY 1, 2, 3; + dc1 | dc2 | ec1 | ec2 | ec6 +-----+----------------+------+-------+--------- + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 + 30 | SALES | 200 | EMP2 | 1600 + 30 | SALES | 300 | EMP3 | 1250 + 30 | SALES | 500 | EMP5 | 1250.23 + 30 | SALES | 600 | EMP6 | 2850 + 30 | SALES | 1000 | EMP10 | 1500 + 30 | SALES | 1200 | EMP12 | 950 +(14 rows) + +-- Foreign-Foreign table joins +-- CROSS JOIN. +SELECT f_test_tbl2.c2, f_test_tbl1.c2 + FROM f_test_tbl2 CROSS JOIN f_test_tbl1 ORDER BY 1, 2; + c2 | c2 +----------------+------- + ADMINISTRATION | EMP1 + ADMINISTRATION | EMP10 + ADMINISTRATION | EMP11 + ADMINISTRATION | EMP12 + ADMINISTRATION | EMP13 + ADMINISTRATION | EMP14 + ADMINISTRATION | EMP2 + ADMINISTRATION | EMP3 + ADMINISTRATION | EMP4 + ADMINISTRATION | EMP5 + ADMINISTRATION | EMP6 + ADMINISTRATION | EMP7 + ADMINISTRATION | EMP8 + ADMINISTRATION | EMP9 + DEVELOPMENT | EMP1 + DEVELOPMENT | EMP10 + DEVELOPMENT | EMP11 + DEVELOPMENT | EMP12 + DEVELOPMENT | EMP13 + DEVELOPMENT | EMP14 + DEVELOPMENT | EMP2 + DEVELOPMENT | EMP3 + DEVELOPMENT | EMP4 + DEVELOPMENT | EMP5 + DEVELOPMENT | EMP6 + DEVELOPMENT | EMP7 + DEVELOPMENT | EMP8 + DEVELOPMENT | EMP9 + HR | EMP1 + HR | EMP10 + HR | EMP11 + HR | EMP12 + HR | EMP13 + HR | EMP14 + HR | EMP2 + HR | EMP3 + HR | EMP4 + HR | EMP5 + HR | EMP6 + HR | EMP7 + HR | EMP8 + HR | EMP9 + SALES | EMP1 + SALES | EMP10 + SALES | EMP11 + SALES | EMP12 + SALES | EMP13 + SALES | EMP14 + SALES | EMP2 + SALES | EMP3 + SALES | EMP4 + SALES | EMP5 + SALES | EMP6 + SALES | EMP7 + SALES | EMP8 + SALES | EMP9 +(56 rows) + +-- INNER JOIN. 
+SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d, f_test_tbl1 e WHERE d.c1 = e.c8 ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 +(14 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 +(14 rows) + +-- OUTER JOINS. +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | +(15 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 +(14 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d FULL OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 10 
| DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | +(15 rows) + +-- Local-Foreign table joins. +CREATE TABLE l_test_tbl1 AS + SELECT c1, c2, c3, c4, c5, c6, c7, c8 FROM f_test_tbl1; +CREATE TABLE l_test_tbl2 AS + SELECT c1, c2, c3 FROM f_test_tbl2; +-- CROSS JOIN. +SELECT f_test_tbl2.c2, l_test_tbl1.c2 FROM f_test_tbl2 CROSS JOIN l_test_tbl1 ORDER BY 1, 2; + c2 | c2 +----------------+------- + ADMINISTRATION | EMP1 + ADMINISTRATION | EMP10 + ADMINISTRATION | EMP11 + ADMINISTRATION | EMP12 + ADMINISTRATION | EMP13 + ADMINISTRATION | EMP14 + ADMINISTRATION | EMP2 + ADMINISTRATION | EMP3 + ADMINISTRATION | EMP4 + ADMINISTRATION | EMP5 + ADMINISTRATION | EMP6 + ADMINISTRATION | EMP7 + ADMINISTRATION | EMP8 + ADMINISTRATION | EMP9 + DEVELOPMENT | EMP1 + DEVELOPMENT | EMP10 + DEVELOPMENT | EMP11 + DEVELOPMENT | EMP12 + DEVELOPMENT | EMP13 + DEVELOPMENT | EMP14 + DEVELOPMENT | EMP2 + DEVELOPMENT | EMP3 + DEVELOPMENT | EMP4 + DEVELOPMENT | EMP5 + DEVELOPMENT | EMP6 + DEVELOPMENT | EMP7 + DEVELOPMENT | EMP8 + DEVELOPMENT | EMP9 + HR | EMP1 + HR | EMP10 + HR | EMP11 + HR | EMP12 + HR | EMP13 + HR | EMP14 + HR | EMP2 + HR | EMP3 + HR | EMP4 + HR | EMP5 + HR | EMP6 + HR | EMP7 + HR | EMP8 + HR | EMP9 + SALES | EMP1 + SALES | EMP10 + SALES | EMP11 + SALES | EMP12 + SALES | EMP13 + SALES | EMP14 + SALES | EMP2 + SALES | EMP3 + SALES | EMP4 + SALES | EMP5 + SALES | EMP6 + SALES | EMP7 + SALES | EMP8 + SALES | EMP9 +(56 rows) + +-- INNER JOIN. 
+SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM l_test_tbl2 d, f_test_tbl1 e WHERE d.c1 = e.c8 ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 +(14 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN l_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 +(14 rows) + +-- OUTER JOINS. +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN l_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | +(15 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN l_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 10 | DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 +(14 rows) + +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d FULL OUTER JOIN l_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + c1 | c2 | c1 | c2 | c6 | c8 +----+----------------+------+-------+---------+---- + 10 
| DEVELOPMENT | 700 | EMP7 | 2450.34 | 10 + 10 | DEVELOPMENT | 900 | EMP9 | 5000 | 10 + 10 | DEVELOPMENT | 1400 | EMP14 | 1300 | 10 + 20 | ADMINISTRATION | 100 | EMP1 | 800.3 | 20 + 20 | ADMINISTRATION | 400 | EMP4 | 2975 | 20 + 20 | ADMINISTRATION | 800 | EMP8 | 3000 | 20 + 20 | ADMINISTRATION | 1100 | EMP11 | 1100 | 20 + 20 | ADMINISTRATION | 1300 | EMP13 | 3000 | 20 + 30 | SALES | 200 | EMP2 | 1600 | 30 + 30 | SALES | 300 | EMP3 | 1250 | 30 + 30 | SALES | 500 | EMP5 | 1250.23 | 30 + 30 | SALES | 600 | EMP6 | 2850 | 30 + 30 | SALES | 1000 | EMP10 | 1500 | 30 + 30 | SALES | 1200 | EMP12 | 950 | 30 + 40 | HR | | | | +(15 rows) + +-- Retrieve complex data containing Sub-fields, dates, Arrays +SELECT * FROM countries ORDER BY _id; + _id | name | population | capital | hdi +--------------------------+---------+------------+----------+------- + 5381ccf9d6d81c8e8bf0434f | Ukraine | 45590000 | Kyiv | 0.74 + 5381ccf9d6d81c8e8bf04350 | Poland | 38540000 | Warsaw | 0.821 + 5381ccf9d6d81c8e8bf04351 | Moldova | 3560000 | Chișinău | 0.66 +(3 rows) + +SELECT * FROM country_elections ORDER BY _id; + _id | lastElections.type | lastElections.date +--------------------------+--------------------+--------------------- + 5381ccf9d6d81c8e8bf0434f | presidential | 2014-05-25 00:00:00 + 5381ccf9d6d81c8e8bf04350 | parliamentary | 2011-10-09 00:00:00 + 5381ccf9d6d81c8e8bf04351 | parliamentary | 2010-11-28 00:00:00 +(3 rows) + +SELECT * FROM main_exports ORDER BY _id; + _id | mainExports +--------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + 5381ccf9d6d81c8e8bf0434f | {"Semi-finished products of iron or non-alloy steel","Flat-rolled products of iron or non-alloy steel","Sunflower-seed, safflower or cotton-seed oil"} + 5381ccf9d6d81c8e8bf04350 | {"Parts and accessories of the motor vehicles of headings 87.01 to 87.0","Motor cars and other motor vehicles principally designed for the transport","Reception apparatus for television"} + 5381ccf9d6d81c8e8bf04351 | {"Wine of fresh grapes, including fortified wines","Insulated (including enameled or anodized) wire, cable","Sunflower seeds, whether or not broken"} +(3 rows) + +-- Retrieve complex data containing Json objects (__doc tests) +SELECT json_data.key AS key1, json_data.value AS value1 + FROM test_json, json_each_text(test_json.__doc) AS json_data + WHERE key NOT IN ('_id') ORDER BY json_data.key COLLATE "C"; + key1 | value1 +-------------------+----------------------------- + warehouse_created | { "$date" : 1418368330000 } + warehouse_created | { "$date" : 1447229590000 } + warehouse_id | 1 + warehouse_id | 2 + warehouse_name | UPS + warehouse_name | Laptop +(6 rows) + +SELECT json_data.key AS key1, json_data.value AS value1 + FROM test_jsonb, jsonb_each_text(test_jsonb.__doc) AS json_data + WHERE key NOT IN ('_id') ORDER BY json_data.key COLLATE "C"; + key1 | value1 +-------------------+-------------------------- + warehouse_created | {"$date": 1418368330000} + warehouse_created | {"$date": 1447229590000} + warehouse_id | 1 + warehouse_id | 2 + warehouse_name | UPS + warehouse_name | Laptop +(6 rows) + +SELECT json_data.key AS key1, json_data.value AS value1 + FROM test_text, json_each_text(test_text.__doc::json) AS json_data + WHERE key NOT IN ('_id') ORDER BY json_data.key COLLATE "C"; + key1 | value1 +-------------------+----------------------------- + warehouse_created | { "$date" : 1418368330000 
} + warehouse_created | { "$date" : 1447229590000 } + warehouse_id | 1 + warehouse_id | 2 + warehouse_name | UPS + warehouse_name | Laptop +(6 rows) + +SELECT json_data.key AS key1, json_data.value AS value1 + FROM test_varchar, json_each_text(test_varchar.__doc::json) AS json_data + WHERE key NOT IN ('_id') ORDER BY json_data.key COLLATE "C"; + key1 | value1 +-------------------+----------------------------- + warehouse_created | { "$date" : 1418368330000 } + warehouse_created | { "$date" : 1447229590000 } + warehouse_id | 1 + warehouse_id | 2 + warehouse_name | UPS + warehouse_name | Laptop +(6 rows) + +-- Inserts some values in mongo_test collection. +INSERT INTO f_mongo_test VALUES ('0', 1, 'One'); +INSERT INTO f_mongo_test VALUES ('0', 2, 'Two'); +INSERT INTO f_mongo_test VALUES ('0', 3, 'Three'); +INSERT INTO f_mongo_test VALUES ('0', 4, 'Four'); +INSERT INTO f_mongo_test VALUES ('0', 5, 'Five'); +INSERT INTO f_mongo_test VALUES ('0', 6, 'Six'); +INSERT INTO f_mongo_test VALUES ('0', 7, 'Seven'); +INSERT INTO f_mongo_test VALUES ('0', 8, 'Eight'); +INSERT INTO f_mongo_test VALUES ('0', 9, 'Nine'); +INSERT INTO f_mongo_test VALUES ('0', 10, 'Ten'); +-- Retrieve Data From foreign tables in functions. +CREATE OR REPLACE FUNCTION test_param_where() RETURNS void AS $$ +DECLARE + n varchar; +BEGIN + FOR x IN 1..9 LOOP + SELECT b INTO n FROM f_mongo_test WHERE a = x; + RAISE NOTICE 'Found number %', n; + END LOOP; + return; +END +$$ LANGUAGE plpgsql; +SELECT test_param_where(); +NOTICE: Found number One +NOTICE: Found number Two +NOTICE: Found number Three +NOTICE: Found number Four +NOTICE: Found number Five +NOTICE: Found number Six +NOTICE: Found number Seven +NOTICE: Found number Eight +NOTICE: Found number Nine + test_param_where +------------------ + +(1 row) + +-- FDW-103: Parameter expression should work correctly with WHERE clause. +SELECT a, b FROM f_mongo_test WHERE a = (SELECT 2) ORDER BY a; + a | b +---+----- + 2 | Two +(1 row) + +SELECT a, b FROM f_mongo_test WHERE b = (SELECT 'Seven'::text) ORDER BY a; + a | b +---+------- + 7 | Seven +(1 row) + +-- Create local table and load data into it. +CREATE TABLE l_mongo_test AS SELECT a, b FROM f_mongo_test; +-- Check correlated query. +SELECT a, b FROM l_mongo_test lt + WHERE lt.b = (SELECT b FROM f_mongo_test ft WHERE lt.b = ft.b) + ORDER BY a; + a | b +----+----------------------- + 0 | mongo_test collection + 1 | One + 2 | Two + 3 | Three + 4 | Four + 5 | Five + 6 | Six + 7 | Seven + 8 | Eight + 9 | Nine + 10 | Ten +(11 rows) + +SELECT a, b FROM l_mongo_test lt + WHERE lt.a = (SELECT a FROM f_mongo_test ft WHERE lt.a = ft.a) + ORDER BY a; + a | b +----+----------------------- + 0 | mongo_test collection + 1 | One + 2 | Two + 3 | Three + 4 | Four + 5 | Five + 6 | Six + 7 | Seven + 8 | Eight + 9 | Nine + 10 | Ten +(11 rows) + +SELECT c1, c8 FROM f_test_tbl1 ft1 + WHERE ft1.c8 = (SELECT c1 FROM f_test_tbl2 ft2 WHERE ft1.c8 = ft2.c1) + ORDER BY c1 LIMIT 2; + c1 | c8 +-----+---- + 100 | 20 + 200 | 30 +(2 rows) + +-- FDW-197: Casting target list should give correct result. 
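Casts in the target list (FDW-197, below) are also what make the __doc columns defined earlier convenient: a text or varchar __doc can be cast to json or jsonb and then navigated with the ordinary operators instead of json_each_text. A small sketch against the test_text table and the warehouse documents shown above:

```sql
-- Cast the whole-document text column to jsonb and extract one field;
-- ->> returns the field value as text.
SELECT __doc::jsonb ->> 'warehouse_name' AS warehouse_name
  FROM test_text
 ORDER BY 1;
```

Against the documents above this should return the two warehouse names, Laptop and UPS; whether such an expression is pushed down to MongoDB or evaluated locally is not asserted here.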
+SELECT a::float FROM f_mongo_test ORDER BY a LIMIT 2; + a +--- + 0 + 1 +(2 rows) + +SELECT a::boolean FROM f_mongo_test ORDER BY a LIMIT 2; + a +--- + f + t +(2 rows) + +SELECT a, b::varchar FROM f_mongo_test ORDER BY a LIMIT 3; + a | b +---+----------------------- + 0 | mongo_test collection + 1 | One + 2 | Two +(3 rows) + +SELECT a::float, b::varchar FROM f_mongo_test ORDER BY a LIMIT 2; + a | b +---+----------------------- + 0 | mongo_test collection + 1 | One +(2 rows) + +SELECT a::real, b::char(20) FROM f_mongo_test ORDER BY a LIMIT 2; + a | b +---+---------------------- + 0 | mongo_test collectio + 1 | One +(2 rows) + +SELECT c1, c2::text FROM f_test_tbl1 ORDER BY c1 LIMIT 2; + c1 | c2 +-----+------ + 100 | EMP1 + 200 | EMP2 +(2 rows) + +SELECT a, LENGTH(b) FROM f_mongo_test ORDER BY 1 LIMIT 2; + a | length +---+-------- + 0 | 21 + 1 | 3 +(2 rows) + +SELECT t1.c6::float, t1.c6::int, t1.c5::timestamptz, t1.c3::text, t2.c1::numeric, t2.c3 + FROM f_test_tbl1 t1, f_test_tbl2 t2 WHERE t1.c8 = t2.c1 + ORDER BY t2.c1, t1.c6 LIMIT 5; + c6 | c6 | c5 | c3 | c1 | c3 +---------+------+------------------------+---------+----+---------- + 1300 | 1300 | 1982-01-23 00:00:00-08 | ADMIN | 10 | PUNE + 2450.34 | 2450 | 1981-06-09 00:00:00-07 | MANAGER | 10 | PUNE + 5000 | 5000 | 1981-11-17 00:00:00-08 | HEAD | 10 | PUNE + 800.3 | 800 | 1980-12-17 00:00:00-08 | ADMIN | 20 | BANGLORE + 1100 | 1100 | 1987-05-23 00:00:00-07 | ADMIN | 20 | BANGLORE +(5 rows) + +SELECT SUM(a::float), SUM(a % 2), a % 2 AS "a % 2"FROM f_mongo_test + GROUP BY a % 2 ORDER BY 2; + sum | sum | a % 2 +-----+-----+------- + 30 | 0 | 0 + 25 | 5 | 1 +(2 rows) + +SELECT (c6::float + (c1 * length(c3::text))) AS "c1 + c6", c1, c6 + FROM f_test_tbl1 ORDER BY c1 LIMIT 5; + c1 + c6 | c1 | c6 +---------+-----+--------- + 1300.3 | 100 | 800.3 + 3200 | 200 | 1600 + 3650 | 300 | 1250 + 5775 | 400 | 2975 + 5250.23 | 500 | 1250.23 +(5 rows) + +-- FDW-249; LEFT JOIN LATERAL should not crash +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.a, t1.b, t3.a, t1_a FROM f_mongo_test t1 LEFT JOIN LATERAL ( + SELECT t2.a, t1.a AS t1_a FROM f_mongo_test t2) t3 ON t1.a = t3.a ORDER BY 1 ASC NULLS FIRST; + QUERY PLAN +--------------------------------------------------------- + Nested Loop Left Join + Output: t1.a, t1.b, t2.a, (t1.a) + -> Foreign Scan on public.f_mongo_test t1 + Output: t1._id, t1.a, t1.b + Foreign Namespace: mongo_fdw_regress.mongo_test + -> Foreign Scan on public.f_mongo_test t2 + Output: t2.a, t1.a + Filter: (t1.a = t2.a) + Foreign Namespace: mongo_fdw_regress.mongo_test +(9 rows) + +SELECT t1.a, t1.b, t3.a, t1_a FROM f_mongo_test t1 LEFT JOIN LATERAL ( + SELECT t2.a, t1.a AS t1_a FROM f_mongo_test t2) t3 ON t1.a = t3.a ORDER BY 1 ASC NULLS FIRST; + a | b | a | t1_a +----+-----------------------+----+------ + 0 | mongo_test collection | 0 | 0 + 1 | One | 1 | 1 + 2 | Two | 2 | 2 + 3 | Three | 3 | 3 + 4 | Four | 4 | 4 + 5 | Five | 5 | 5 + 6 | Six | 6 | 6 + 7 | Seven | 7 | 7 + 8 | Eight | 8 | 8 + 9 | Nine | 9 | 9 + 10 | Ten | 10 | 10 +(11 rows) + +SELECT t1.c1, t3.c1, t3.t1_c8 FROM f_test_tbl1 t1 INNER JOIN LATERAL ( + SELECT t2.c1, t1.c8 AS t1_c8 FROM f_test_tbl2 t2) t3 ON t3.c1 = t3.t1_c8 + ORDER BY 1, 2, 3; + c1 | c1 | t1_c8 +------+----+------- + 100 | 20 | 20 + 200 | 30 | 30 + 300 | 30 | 30 + 400 | 20 | 20 + 500 | 30 | 30 + 600 | 30 | 30 + 700 | 10 | 10 + 800 | 20 | 20 + 900 | 10 | 10 + 1000 | 30 | 30 + 1100 | 20 | 20 + 1200 | 30 | 30 + 1300 | 20 | 20 + 1400 | 10 | 10 +(14 rows) + +SELECT t1.c1, t3.c1, t3.t1_c8 FROM l_test_tbl1 t1 LEFT JOIN 
LATERAL ( + SELECT t2.c1, t1.c8 AS t1_c8 FROM f_test_tbl2 t2) t3 ON t3.c1 = t3.t1_c8 + ORDER BY 1, 2, 3; + c1 | c1 | t1_c8 +------+----+------- + 100 | 20 | 20 + 200 | 30 | 30 + 300 | 30 | 30 + 400 | 20 | 20 + 500 | 30 | 30 + 600 | 30 | 30 + 700 | 10 | 10 + 800 | 20 | 20 + 900 | 10 | 10 + 1000 | 30 | 30 + 1100 | 20 | 20 + 1200 | 30 | 30 + 1300 | 20 | 20 + 1400 | 10 | 10 +(14 rows) + +SELECT c1, c2, (SELECT r FROM (SELECT c1 AS c1) x, LATERAL (SELECT c1 AS r) y) + FROM f_test_tbl1 ORDER BY 1, 2, 3; + c1 | c2 | r +------+-------+------ + 100 | EMP1 | 100 + 200 | EMP2 | 200 + 300 | EMP3 | 300 + 400 | EMP4 | 400 + 500 | EMP5 | 500 + 600 | EMP6 | 600 + 700 | EMP7 | 700 + 800 | EMP8 | 800 + 900 | EMP9 | 900 + 1000 | EMP10 | 1000 + 1100 | EMP11 | 1100 + 1200 | EMP12 | 1200 + 1300 | EMP13 | 1300 + 1400 | EMP14 | 1400 +(14 rows) + +-- LATERAL JOIN with RIGHT should throw error +SELECT t1.c1, t3.c1, t3.t1_c8 FROM f_test_tbl1 t1 RIGHT JOIN LATERAL ( + SELECT t2.c1, t1.c8 AS t1_c8 FROM f_test_tbl2 t2) t3 ON t3.c1 = t3.t1_c8 + ORDER BY 1, 2, 3; +ERROR: invalid reference to FROM-clause entry for table "t1" +LINE 2: SELECT t2.c1, t1.c8 AS t1_c8 FROM f_test_tbl2 t2) t3 ON t3... + ^ +DETAIL: The combining JOIN type must be INNER or LEFT for a LATERAL reference. +-- FDW-262: Should throw an error when we select system attribute. +SELECT xmin FROM f_test_tbl1; +ERROR: system attribute "xmin" can't be fetched from remote relation +SELECT ctid, xmax, tableoid FROM f_test_tbl1; +ERROR: system attribute "ctid" can't be fetched from remote relation +SELECT xmax, c1 FROM f_test_tbl1; +ERROR: system attribute "xmax" can't be fetched from remote relation +SELECT count(tableoid) FROM f_test_tbl1; +ERROR: system attribute "tableoid" can't be fetched from remote relation +-- FDW-391: Support whole-row reference. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.c2, t1 FROM f_test_tbl1 t1 + WHERE c1 = 100 ORDER BY 1; + QUERY PLAN +-------------------------------------------------------- + Sort + Output: c2, t1.* + Sort Key: t1.c2 + -> Foreign Scan on public.f_test_tbl1 t1 + Output: c2, t1.* + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(6 rows) + +-- Force hash-join for consistent result. +SET enable_mergejoin TO off; +SET enable_nestloop TO off; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT d, d.c2, e.c1, e + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + QUERY PLAN +-------------------------------------------------------------------- + Sort + Output: d.*, d.c2, e.c1, e.* + Sort Key: d.*, e.c1 + -> Hash Left Join + Output: d.*, d.c2, e.c1, e.* + Hash Cond: (d.c1 = e.c8) + -> Foreign Scan on public.f_test_tbl2 d + Output: d.*, d.c2, d.c1 + Foreign Namespace: mongo_fdw_regress.test_tbl2 + -> Hash + Output: e.c1, e.*, e.c8 + -> Foreign Scan on public.f_test_tbl1 e + Output: e.c1, e.*, e.c8 + Foreign Namespace: mongo_fdw_regress.test_tbl1 +(14 rows) + +RESET enable_mergejoin; +RESET enable_nestloop; +-- FDW-427: The numeric value should display correctly as per precision and +-- scale defined. +SELECT c1 FROM f_test5 ORDER BY 1; + c1 +----------- + -1.23 + 12.345678 +(2 rows) + +-- Number with the required precision. +DROP FOREIGN TABLE f_test5; +CREATE FOREIGN TABLE f_test5 (_id NAME, c1 NUMERIC(8, 6)) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test5'); +SELECT c1 FROM f_test5 ORDER BY 1; + c1 +----------- + -1.230000 + 12.345678 +(2 rows) + +-- Number with less scale. Should round-off the scale. 
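The rounding exercised below matches ordinary numeric-typmod behaviour in PostgreSQL: the value stored in MongoDB is not changed, it is simply coerced to the declared precision and scale when converted to the column type. The same effect can be seen with plain casts, independent of the FDW:

```sql
-- Plain casts showing how the declared scale rounds the stored value; the
-- foreign table definitions below behave the same way for values read
-- from the test5 collection.
SELECT 12.345678::numeric(8,6) AS full_scale,   -- 12.345678
       12.345678::numeric(6,2) AS rounded,      -- 12.35
       12.345678::numeric(2)   AS integer_only; -- 12
```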
+DROP FOREIGN TABLE f_test5; +CREATE FOREIGN TABLE f_test5 (_id NAME, c1 NUMERIC(6, 2)) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test5'); +SELECT c1 FROM f_test5 ORDER BY 1; + c1 +------- + -1.23 + 12.35 +(2 rows) + +-- Number only with precision. +DROP FOREIGN TABLE f_test5; +CREATE FOREIGN TABLE f_test5 (_id NAME, c1 NUMERIC(2)) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test5'); +SELECT c1 FROM f_test5 ORDER BY 1; + c1 +---- + -1 + 12 +(2 rows) + +-- Number with improper precision and scale, +-- resulting in error "numeric field overflow". +DROP FOREIGN TABLE f_test5; +CREATE FOREIGN TABLE f_test5 (_id NAME, c1 NUMERIC(3, 2)) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test5'); +SELECT c1 FROM f_test5 ORDER BY 1; +ERROR: numeric field overflow +DETAIL: A field with precision 3, scale 2 must round to an absolute value less than 10^1. +-- FDW-418: Resolve data compatibility. +SELECT a FROM f_test_tbl4 ORDER BY 1; + a +--------------- + 0.00 + 25.00 + 25.00 + 25.09 + 9999999999.00 +(5 rows) + +SELECT a FROM f_test_tbl5 ORDER BY 1; + a +--- + f + t + t + t + t +(5 rows) + +SELECT a FROM f_test_tbl6 ORDER BY 1; + a +---- + 1 + 25 + 25 + 25 +(4 rows) + +SELECT a FROM f_test_tbl7 ORDER BY 1; +ERROR: value "9999999999" is out of range for type integer +-- FDW-529: Fix server crash caused due to missed handling of Param node for +-- comparison expressions while preparing query filter. +CREATE OR REPLACE FUNCTION fdw529_test_param_where() RETURNS int AS $$ +DECLARE + val1 INT := 5; + val2 INT := 10; + cnt INT; +BEGIN + SELECT count(*) INTO cnt FROM f_mongo_test WHERE a > val1 AND a < val2; + RETURN cnt; +END +$$ LANGUAGE plpgsql; +SELECT fdw529_test_param_where(); + fdw529_test_param_where +------------------------- + 4 +(1 row) + +SELECT fdw529_test_param_where(); + fdw529_test_param_where +------------------------- + 4 +(1 row) + +SELECT fdw529_test_param_where(); + fdw529_test_param_where +------------------------- + 4 +(1 row) + +SELECT fdw529_test_param_where(); + fdw529_test_param_where +------------------------- + 4 +(1 row) + +SELECT fdw529_test_param_where(); + fdw529_test_param_where +------------------------- + 4 +(1 row) + +-- This should not crash +SELECT fdw529_test_param_where(); + fdw529_test_param_where +------------------------- + 4 +(1 row) + +-- FDW-669: Fix issue join pushdown doesn't return a result for join condition +-- on sub-column. This has been fixed by omitting a dot (".") from variables +-- used (declared by $let field) to form the MongoDB query pipeline. 
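Since the fix concerns how the pushed-down join pipeline names its $let variables, the useful check besides the single joined row below is that the join is actually pushed down rather than executed as two separate foreign scans. EXPLAIN shows this; the output is not reproduced here because pushdown also depends on costs and session settings:

```sql
-- Check whether the sub-column join is pushed down to MongoDB as a single
-- Foreign Scan or planned as a local join of two foreign scans.
EXPLAIN (VERBOSE, COSTS OFF)
SELECT * FROM testlog t INNER JOIN testdevice d
    ON d.level = t."logMeta.nestMore.level";
```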
+SELECT * FROM testlog t INNER JOIN testdevice d + ON d.level = t."logMeta.nestMore.level"; + _id | log | logMeta.logMac | logMeta.nestMore.level | _id | name | mac | level +--------------------------+-----------+----------------+------------------------+--------------------------+-------------+--------------+------- + 658040214898199d6e0173d0 | hello log | 001122334455 | 3 | 6580400c4898199d6e0173cd | test device | 001122334455 | 3 +(1 row) + +-- Cleanup +DELETE FROM f_mongo_test WHERE a != 0; +DROP TABLE l_test_tbl1; +DROP TABLE l_test_tbl2; +DROP TABLE l_mongo_test; +DROP VIEW smpl_vw; +DROP VIEW comp_vw; +DROP VIEW temp_vw; +DROP VIEW mul_tbl_view; +DROP FUNCTION test_param_where(); +DROP FUNCTION fdw529_test_param_where(); +DROP FOREIGN TABLE f_mongo_test; +DROP FOREIGN TABLE f_test_tbl1; +DROP FOREIGN TABLE f_test_tbl2; +DROP FOREIGN TABLE countries; +DROP FOREIGN TABLE country_elections; +DROP FOREIGN TABLE main_exports; +DROP FOREIGN TABLE test_json; +DROP FOREIGN TABLE test_jsonb; +DROP FOREIGN TABLE test_text; +DROP FOREIGN TABLE test_varchar; +DROP FOREIGN TABLE f_test5; +DROP FOREIGN TABLE f_test_tbl4; +DROP FOREIGN TABLE f_test_tbl5; +DROP FOREIGN TABLE f_test_tbl6; +DROP FOREIGN TABLE f_test_tbl7; +DROP FOREIGN TABLE testlog; +DROP FOREIGN TABLE testdevice; +DROP USER MAPPING FOR public SERVER mongo_server; +DROP SERVER mongo_server; +DROP EXTENSION mongo_fdw; diff --git a/expected/server_options.out b/expected/server_options.out new file mode 100644 index 0000000..53c8771 --- /dev/null +++ b/expected/server_options.out @@ -0,0 +1,183 @@ +\set MONGO_HOST `echo \'"$MONGO_HOST"\'` +\set MONGO_PORT `echo \'"$MONGO_PORT"\'` +\set MONGO_USER_NAME `echo \'"$MONGO_USER_NAME"\'` +\set MONGO_PASS `echo \'"$MONGO_PWD"\'` +-- Before running this file user must create database mongo_fdw_regress on +-- MongoDB with all permission for MONGO_USER_NAME user with MONGO_PASS +-- password and ran mongodb_init.sh file to load collections. +\c contrib_regression +CREATE EXTENSION IF NOT EXISTS mongo_fdw; +NOTICE: extension "mongo_fdw" already exists, skipping +CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw + OPTIONS (address :MONGO_HOST, port :MONGO_PORT); +CREATE USER MAPPING FOR public SERVER mongo_server; +-- Port outside ushort range. Error. 
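The psql variables above are filled in from the environment (MONGO_HOST, MONGO_PORT, MONGO_USER_NAME, MONGO_PWD). Written out with literal values, the equivalent setup looks like the sketch below; the host, port and credentials are placeholders rather than values this suite assumes, and the commands are not meant to be re-run in a session where mongo_server already exists:

```sql
-- Server and user-mapping definitions spelled out with placeholder literals
-- instead of the psql variables used by the regression script.
CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw
    OPTIONS (address '127.0.0.1', port '27017');
CREATE USER MAPPING FOR public SERVER mongo_server
    OPTIONS (username 'edb', password 'edb');
```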
+CREATE SERVER mongo_server1 FOREIGN DATA WRAPPER mongo_fdw + OPTIONS (address :MONGO_HOST, port '65537'); +ERROR: port value "65537" is out of range for type unsigned short +ALTER SERVER mongo_server OPTIONS (SET port '65537'); +ERROR: port value "65537" is out of range for type unsigned short +-- Validate extension, server and mapping details +CREATE OR REPLACE FUNCTION show_details(host TEXT, port TEXT, uid TEXT, pwd TEXT) RETURNS int AS $$ +DECLARE + ext TEXT; + srv TEXT; + sopts TEXT; + uopts TEXT; +BEGIN + SELECT e.fdwname, srvname, array_to_string(s.srvoptions, ','), array_to_string(u.umoptions, ',') + INTO ext, srv, sopts, uopts + FROM pg_foreign_data_wrapper e LEFT JOIN pg_foreign_server s ON e.oid = s.srvfdw LEFT JOIN pg_user_mapping u ON s.oid = u.umserver + WHERE e.fdwname = 'mongo_fdw' + ORDER BY 1, 2, 3, 4; + + raise notice 'Extension : %', ext; + raise notice 'Server : %', srv; + + IF strpos(sopts, host) <> 0 AND strpos(sopts, port) <> 0 THEN + raise notice 'Server_Options : matched'; + END IF; + + IF strpos(uopts, uid) <> 0 AND strpos(uopts, pwd) <> 0 THEN + raise notice 'User_Mapping_Options : matched'; + END IF; + + return 1; +END; +$$ language plpgsql; +SELECT show_details(:MONGO_HOST, :MONGO_PORT, :MONGO_USER_NAME, :MONGO_PASS); +NOTICE: Extension : mongo_fdw +NOTICE: Server : mongo_server +NOTICE: Server_Options : matched + show_details +-------------- + 1 +(1 row) + +-- Create foreign tables and perform basic SQL operations +CREATE FOREIGN TABLE f_mongo_test (_id name, a int, b varchar) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'mongo_test'); +SELECT a, b FROM f_mongo_test ORDER BY 1, 2; + a | b +---+----------------------- + 0 | mongo_test collection +(1 row) + +INSERT INTO f_mongo_test VALUES ('0', 2, 'mongo_test insert'); +SELECT a, b FROM f_mongo_test ORDER BY 1, 2; + a | b +---+----------------------- + 0 | mongo_test collection + 2 | mongo_test insert +(2 rows) + +UPDATE f_mongo_test SET b = 'mongo_test update' WHERE a = 2; +SELECT a, b FROM f_mongo_test ORDER BY 1, 2; + a | b +---+----------------------- + 0 | mongo_test collection + 2 | mongo_test update +(2 rows) + +DELETE FROM f_mongo_test WHERE a = 2; +SELECT a, b FROM f_mongo_test ORDER BY 1, 2; + a | b +---+----------------------- + 0 | mongo_test collection +(1 row) + +-- Test SSL option when MongoDB server running in non-SSL mode. +-- Set non-boolean value, should throw an error. +ALTER SERVER mongo_server OPTIONS (ssl '1'); +ERROR: ssl requires a Boolean value +ALTER SERVER mongo_server OPTIONS (ssl 'x'); +ERROR: ssl requires a Boolean value +-- Check for default value i.e. false +SELECT a, b FROM f_mongo_test ORDER BY 1, 2; + a | b +---+----------------------- + 0 | mongo_test collection +(1 row) + +-- Set 'true'. +ALTER SERVER mongo_server OPTIONS (ssl 'true'); +-- Results into an error as MongoDB server is running in non-SSL mode. +\set VERBOSITY terse +SELECT a, b FROM f_mongo_test ORDER BY 1, 2; +ERROR: could not connect to server mongo_server +\set VERBOSITY default +-- Switch back to 'false'. +ALTER SERVER mongo_server OPTIONS (SET ssl 'false'); +-- Should now be successful. 
+SELECT a, b FROM f_mongo_test ORDER BY 1, 2; + a | b +---+----------------------- + 0 | mongo_test collection +(1 row) + +-- Alter server to add authentication_database option +ALTER SERVER mongo_server OPTIONS (ADD authentication_database 'NOT_EXIST_DB'); +ALTER USER MAPPING FOR public SERVER mongo_server + OPTIONS (ADD username :MONGO_USER_NAME, password :MONGO_PASS); +-- Below query will fail with authentication error as user cannot be +-- authenticated against given authentication_database. +SELECT a, b FROM f_mongo_test ORDER BY 1, 2; +ERROR: could not connect to server mongo_server +HINT: Mongo error: "Authentication failed." +-- Now changed to valid authentication_database so select query should work. +ALTER SERVER mongo_server + OPTIONS (SET authentication_database 'mongo_fdw_regress'); +SELECT a, b FROM f_mongo_test ORDER BY 1, 2; + a | b +---+----------------------- + 0 | mongo_test collection +(1 row) + +ALTER SERVER mongo_server + OPTIONS (DROP authentication_database); +ALTER USER MAPPING FOR public SERVER mongo_server + OPTIONS (DROP username, DROP password); +-- FDW-464: Support use_remote_estimate option at server level. +-- Check only boolean values are accepted. +ALTER SERVER mongo_server OPTIONS (ADD use_remote_estimate 'abc11'); +ERROR: use_remote_estimate requires a Boolean value +-- Check default behaviour. Should be 'false'. +EXPLAIN(COSTS OFF) +SELECT a, b FROM f_mongo_test ORDER BY 1, 2; + QUERY PLAN +--------------------------------------------------------- + Sort + Sort Key: a, b + -> Foreign Scan on f_mongo_test + Foreign Namespace: mongo_fdw_regress.mongo_test +(4 rows) + +-- Enable remote estimation. +ALTER SERVER mongo_server OPTIONS (ADD use_remote_estimate 'true'); +EXPLAIN(COSTS OFF) +SELECT a, b FROM f_mongo_test ORDER BY 1, 2; + QUERY PLAN +--------------------------------------------------------- + Sort + Sort Key: a, b + -> Foreign Scan on f_mongo_test + Foreign Namespace: mongo_fdw_regress.mongo_test +(4 rows) + +-- Disable remote estimation. 
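With COSTS OFF the plans with and without use_remote_estimate are printed identically, so any effect of the option would only show up in the cost and row estimates. A sketch of how the two could be compared (no figures are asserted here, since they depend on the data and the MongoDB server; in this simple single-table query the plan shape itself is not expected to change):

```sql
-- Compare estimates with remote estimation enabled and disabled; only the
-- cost/row figures in the EXPLAIN output are expected to differ.
ALTER SERVER mongo_server OPTIONS (SET use_remote_estimate 'true');
EXPLAIN SELECT a, b FROM f_mongo_test ORDER BY 1, 2;
ALTER SERVER mongo_server OPTIONS (SET use_remote_estimate 'false');
EXPLAIN SELECT a, b FROM f_mongo_test ORDER BY 1, 2;
```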
+ALTER SERVER mongo_server OPTIONS (SET use_remote_estimate 'false'); +EXPLAIN(COSTS OFF) +SELECT a, b FROM f_mongo_test ORDER BY 1, 2; + QUERY PLAN +--------------------------------------------------------- + Sort + Sort Key: a, b + -> Foreign Scan on f_mongo_test + Foreign Namespace: mongo_fdw_regress.mongo_test +(4 rows) + +-- Cleanup +DROP FOREIGN TABLE f_mongo_test; +DROP USER MAPPING FOR public SERVER mongo_server; +DROP SERVER mongo_server; +DROP EXTENSION mongo_fdw; diff --git a/mongo-c-driver b/mongo-c-driver new file mode 160000 index 0000000..2929c2d --- /dev/null +++ b/mongo-c-driver @@ -0,0 +1 @@ +Subproject commit 2929c2d2c856a57ecdfef4d61f8e479b6ef96463 diff --git a/mongo-c-driver-v0.6/.astylerc b/mongo-c-driver-v0.6/.astylerc deleted file mode 100644 index 9f3851e..0000000 --- a/mongo-c-driver-v0.6/.astylerc +++ /dev/null @@ -1,4 +0,0 @@ ---style=java ---pad-paren-in ---pad-paren-in ---align-pointer=name diff --git a/mongo-c-driver-v0.6/.gitignore b/mongo-c-driver-v0.6/.gitignore deleted file mode 100644 index ea672fa..0000000 --- a/mongo-c-driver-v0.6/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ - -*~ -*.o -*.os -*.obj -*.a -*.lib -*.so -*.dylib -.scon* -*.pyc -*.swp - -.DS_Store - -test_* -benchmark -benchmark.exe - -tags - -config.log - -docs/html -docs/source/sphinx/build -docs/source/doxygen diff --git a/mongo-c-driver-v0.6/APACHE-2.0.txt b/mongo-c-driver-v0.6/APACHE-2.0.txt deleted file mode 100644 index d645695..0000000 --- a/mongo-c-driver-v0.6/APACHE-2.0.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/mongo-c-driver-v0.6/HISTORY.md b/mongo-c-driver-v0.6/HISTORY.md deleted file mode 100644 index 9f1e9af..0000000 --- a/mongo-c-driver-v0.6/HISTORY.md +++ /dev/null @@ -1,201 +0,0 @@ -# MongoDB C Driver History - -## 0.6 -2012-6-3 -** API CHANGE ** - -Version 0.6 supports write concern. This involves a backward-breaking -API change, as the write functions now take an optional write_concern -object. - -The driver now also supports the MONGO_CONTINUE_ON_ERROR flag for -batch inserts. 
- -The new function prototypes are as follows: - -* int mongo_insert( mongo *conn, const char *ns, const bson *data, - mongo_write_concern *custom_write_concern ); - -* int mongo_insert_batch( mongo *conn, const char *ns, - const bson **data, int num, mongo_write_concern *custom_write_concern ); - -* int mongo_update( mongo *conn, const char *ns, const bson *cond, - const bson *op, int flags, mongo_write_concern *custom_write_concern, - int flags ); - -* int mongo_remove( mongo *conn, const char *ns, const bson *cond, - mongo_write_concern *custom_write_concern ); - -* Allow DBRefs (i.e., allows keys $ref, $id, and $db) -* Added mongo_create_capped_collection(). -* Fixed some bugs in the SCons and Makefile build scripts. -* Fixes for SCons and Makefile shared library install targets. -* Other minor bug fixes. - -## 0.5.2 -2012-5-4 - -* Validate collection and database names on insert. -* Validate insert limits using max BSON size. -* Support getaddrinfo and SO_RCVTIMEO and SO_SNDTIMEO on Windows. -* Store errno/WSAGetLastError() on errors. -* Various bug fixes and refactorings. -* Update error reporting docs. - -## 0.5.1 - -* Env for POSIX, WIN32, and standard C. -* Various bug fixes. - -## 0.5 -2012-3-31 - -* Separate cursor-specific errors into their own enum: mongo_cursor_error_t. -* Catch $err return on bad queries and store the result in conn->getlasterrorcode - and conn->getlasterrstr. -* On queries that return $err, set cursor->err to MONGO_CURSOR_QUERY_FAIL. -* When passing bad BSON to a cursor object, set cursor->err to MONGO_CURSOR_BSON_ERROR, - and store the specific BSON error on the conn->err field. -* Remove bson_copy_basic(). -* bson_copy() will copy finished bson objects only. -* bson_copy() returns BSON_OK on success and BSON_ERROR on failure. -* Added a Makefile for easy compile and install on Linux and OS X. -* Replica set connect fixes. - -## 0.4 - -THIS RELEASE INCLUDES NUMEROUS BACKWARD-BREAKING CHANGES. -These changes have been made for extensibility, consistency, -and ease of use. Please read the following release notes -carefully, and study the updated tutorial. - -API Principles: - -1. Present a consistent interface for all objects: connections, - cursors, bson objects, and bson iterators. -2. Require no knowledge of an object's implementation to use the API. -3. Allow users to allocate objects on the stack or on the heap. -4. Integrate API with new error reporting strategy. -5. Be concise, except where it impairs clarity. - -Changes: - -* mongo_replset_init_conn has been renamed to mongo_replset_init. -* bson_buffer has been removed. All functions for building bson - objects now take objects of type bson. The new pattern looks like this: - - Example: - - bson b[1]; - bson_init( b ); - bson_append_int( b, "foo", 1 ); - bson_finish( b ); - /* The object is ready to use. - When finished, destroy it. */ - bson_destroy( b ); - -* mongo_connection has been renamed to mongo. - - Example: - - mongo conn[1]; - mongo_connect( conn, '127.0.0.1', 27017 ); - /* Connection is ready. Destroy when down. 
*/ - mongo_destroy( conn ); - -* New cursor builder API for clearer code: - - Example: - - mongo_cursor cursor[1]; - mongo_cursor_init( cursor, conn, "test.foo" ); - - bson query[1]; - - bson_init( query ); - bson_append_int( query, "bar", 1 ); - bson_finish( query ); - - bson fields[1]; - - bson_init( fields ); - bson_append_int( fields, "baz", 1 ); - bson_finish( fields ); - - mongo_cursor_set_query( cursor, query ); - mongo_cursor_set_fields( cursor, fields ); - mongo_cursor_set_limit( cursor, 10 ); - mongo_cursor_set_skip( cursor, 10 ); - - while( mongo_cursor_next( cursor ) == MONGO_OK ) - bson_print( mongo_cursor_bson( cursor ) ); - -* bson_iterator_init now takes a (bson*) instead of a (const char*). This is consistent - with bson_find, which also takes a (bson*). If you want to initiate a bson iterator - with a buffer, use the new function bson_iterator_from_buffer. -* With the addition of the mongo_cursor_bson function, it's now no - longer necessary to know how bson and mongo_cursor objects are implemented. - - Example: - - bson b[1]; - bson_iterator i[1]; - - bson_iterator_init( i, b ); - - /* With a cursor */ - bson_iterator_init( i, mongo_cursor_bson( cursor ) ); - -* Added mongo_cursor_data and bson_data functions, which return the - raw bson buffer as a (const char *). -* All constants that were once lower case are now - upper case. These include: MONGO_OP_MSG, MONGO_OP_UPDATE, MONGO_OP_INSERT, - MONGO_OP_QUERY, MONGO_OP_GET_MORE, MONGO_OP_DELETE, MONGO_OP_KILL_CURSORS - BSON_EOO, BSON_DOUBLE, BSON_STRING, BSON_OBJECT, BSON_ARRAY, BSON_BINDATA, - BSON_UNDEFINED, BSON_OID, BSON_BOOL, BSON_DATE, BSON_NULL, BSON_REGEX, BSON_DBREF, - BSON_CODE, BSON_SYMBOL, BSON_CODEWSCOPE, BSON_INT, BSON_TIMESTAMP, BSON_LONG, - MONGO_CONN_SUCCESS, MONGO_CONN_BAD_ARG, MONGO_CONN_NO_SOCKET, MONGO_CONN_FAIL, - MONGO_CONN_NOT_MASTER, MONGO_CONN_BAD_SET_NAME, MONGO_CONN_CANNOT_FIND_PRIMARY - If your programs use any of these constants, you must convert them to their - upper case forms, or you will see compile errors. -* The error handling strategy has been changed. Exceptions are not longer being used. -* Functions taking a mongo_connection object now return either MONGO_OK or MONGO_ERROR. - In case of an error, an error code of type mongo_error_t will be indicated on the - mongo_connection->err field. -* Functions taking a bson object now return either BSON_OK or BSON_ERROR. - In case of an error, an error code of type bson_validity_t will be indicated on the - bson->err or bson_buffer->err field. -* Calls to mongo_cmd_get_last_error store the error status on the - mongo->lasterrcode and mongo->lasterrstr fields. -* bson_print now prints all types. -* Users may now set custom malloc, realloc, free, printf, sprintf, and fprintf fields. -* Groundwork for modules for supporting platform-specific features (e.g., socket timeouts). -* Added mongo_set_op_timeout for setting socket timeout. To take advantage of this, you must - compile with --use-platform=LINUX. The compiles with platform/linux/net.h instead of the - top-level net.h. -* Fixed tailable cursors. -* GridFS API is now in-line with the new driver API. In particular, all of the - following functions now return MONGO_OK or MONGO_ERROR: gridfs_init, - gridfile_init, gridfile_writer_done, gridfs_store_buffer, gridfs_store_file, - and gridfs_find_query. -* Fixed a few memory leaks. - -## 0.3 -2011-4-14 - -* Support replica sets. -* Better standard connection API. -* GridFS write buffers iteratively. 
-* Fixes for working with large GridFS files (> 3GB) -* bson_append_string_n and family (Gergely Nagy) - -## 0.2 -2011-2-11 - -* GridFS support (Chris Triolo). -* BSON Timestamp type support. - -## 0.1 -2009-11-30 - -* Initial release. diff --git a/mongo-c-driver-v0.6/Makefile b/mongo-c-driver-v0.6/Makefile deleted file mode 100644 index ee21bd4..0000000 --- a/mongo-c-driver-v0.6/Makefile +++ /dev/null @@ -1,199 +0,0 @@ -# MongoDB C Driver Makefile -# -# Copyright 2009, 2010 10gen Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Version -MONGO_MAJOR=0 -MONGO_MINOR=6 -MONGO_PATCH=0 -BSON_MAJOR=$(MONGO_MAJOR) -BSON_MINOR=$(MONGO_MINOR) -BSON_PATCH=$(MONGO_PATCH) - -# Library names -MONGO_LIBNAME=libmongoc -BSON_LIBNAME=libbson - -# Standard or posix env. -ENV?=posix - -# TODO: add replica set test, cpp test, platform tests, json_test -TESTS=test/auth_test test/bson_test test/bson_subobject_test test/count_delete_test \ - test/cursors_test test/endian_swap_test test/errors_test test/examples_test \ - test/functions_test test/gridfs_test test/helpers_test \ - test/oid_test test/resize_test test/simple_test test/sizes_test test/update_test \ - test/validate_test test/write_concern_test test/commands_test -MONGO_OBJECTS=src/bson.o src/encoding.o src/gridfs.o src/md5.o src/mongo.o \ - src/numbers.o -BSON_OBJECTS=src/bson.o src/numbers.o src/encoding.o - -ifeq ($(ENV),posix) - TESTS+=test/env_posix_test - MONGO_OBJECTS+=src/env_posix.o -else - MONGO_OBJECTS+=src/env_standard.o -endif - -DYN_MONGO_OBJECTS=$(foreach i,$(MONGO_OBJECTS),$(patsubst %.o,%.os,$(i))) -DYN_BSON_OBJECTS=$(foreach i,$(BSON_OBJECTS),$(patsubst %.o,%.os,$(i))) - -# Compile flags -ALL_DEFINES=$(DEFINES) -ALL_DEFINES+=-D_POSIX_SOURCE -CC:=$(shell sh -c 'type $(CC) >/dev/null 2>/dev/null && echo $(CC) || echo gcc') -DYN_FLAGS:=-fPIC -DMONGO_DLL_BUILD - -# Endianness check -endian := $(shell sh -c 'echo "ab" | od -x | grep "6261" >/dev/null && echo little || echo big') -ifeq ($(endian),big) - ALL_DEFINES+=-DMONGO_BIG_ENDIAN -endif - -# Int64 type check -int64:=$(shell ./check_int64.sh $(CC) stdint.h && echo stdint) -ifeq ($(int64),stdint) - ALL_DEFINES+=-DMONGO_HAVE_STDINT -else - int64:=$(shell ./check_int64.sh $(CC) unistd.h && echo unistd) - ifeq ($(int64),unistd) - ALL_DEFINES+=-DMONGO_HAVE_UNISTD - endif -endif -$(shell rm header_check.tmp tmp.c) - -TEST_DEFINES=$(ALL_DEFINES) -TEST_DEFINES+=-DTEST_SERVER="\"127.0.0.1\"" - -OPTIMIZATION?=-O3 -WARNINGS?=-Wall -DEBUG?=-ggdb -STD?=c99 -PEDANTIC?=-pedantic -ALL_CFLAGS=-std=$(STD) $(PEDANTIC) $(CFLAGS) $(OPTIMIZATION) $(WARNINGS) $(DEBUG) $(ALL_DEFINES) -ALL_LDFLAGS=$(LDFLAGS) - -# Shared libraries -DYLIBSUFFIX=so -STLIBSUFFIX=a - -MONGO_DYLIBNAME=$(MONGO_LIBNAME).$(DYLIBSUFFIX) -MONGO_DYLIB_MAJOR_NAME=$(MONGO_DYLIBNAME).$(MONGO_MAJOR) -MONGO_DYLIB_MINOR_NAME=$(MONGO_DYLIB_MAJOR_NAME).$(MONGO_MINOR) -MONGO_DYLIB_PATCH_NAME=$(MONGO_DYLIB_MINOR_NAME).$(MONGO_PATCH) -MONGO_DYLIB_MAKE_CMD=$(CC) -shared -Wl,-soname,$(MONGO_DYLIB_MINOR_NAME) -o $(MONGO_DYLIBNAME) $(ALL_LDFLAGS) 
$(DYN_MONGO_OBJECTS) - -BSON_DYLIBNAME=$(BSON_LIBNAME).$(DYLIBSUFFIX) -BSON_DYLIB_MAJOR_NAME=$(BSON_DYLIBNAME).$(BSON_MAJOR) -BSON_DYLIB_MINOR_NAME=$(BSON_DYLIB_MAJOR_NAME).$(BSON_MINOR) -BSON_DYLIB_PATCH_NAME=$(BSON_DYLIB_MINOR_NAME).$(BSON_PATCH) -BSON_DYLIB_MAKE_CMD=$(CC) -shared -Wl,-soname,$(BSON_DYLIB_MINOR_NAME) -o $(BSON_DYLIBNAME) $(ALL_LDFLAGS) $(DYN_BSON_OBJECTS) - -# Static libraries -MONGO_STLIBNAME=$(MONGO_LIBNAME).$(STLIBSUFFIX) -BSON_STLIBNAME=$(BSON_LIBNAME).$(STLIBSUFFIX) - -# Overrides -kernel_name := $(shell sh -c 'uname -s 2>/dev/null || echo not') -ifeq ($(kernel_name),SunOS) - ALL_LDFLAGS+=-ldl -lnsl -lsocket - INSTALL_CMD=cp -r - MONGO_DYLIB_MAKE_CMD=$(CC) -G -o $(MONGO_DYLIBNAME) -h $(MONGO_DYLIB_MINOR_NAME) $(ALL_LDFLAGS) - BSON_DYLIB_MAKE_CMD=$(CC) -G -o $(BSON_DYLIBNAME) -h $(BSON_DYLIB_MINOR_NAME) $(ALL_LDFLAGS) -endif -ifeq ($(kernel_name),Darwin) - ALL_CFLAGS+=-std=$(STD) $(CFLAGS) $(OPTIMIZATION) $(WARNINGS) $(DEBUG) $(ALL_DEFINES) - DYLIBSUFFIX=dylib - MONGO_DYLIB_MINOR_NAME=$(MONGO_LIBNAME).$(DYLIBSUFFIX).$(MONGO_MAJOR).$(MONGO_MINOR) - MONGO_DYLIB_MAJOR_NAME=$(MONGO_LIBNAME).$(DYLIBSUFFIX).$(MONGO_MAJOR) - MONGO_DYLIB_MAKE_CMD=$(CC) -shared -Wl,-install_name,$(MONGO_DYLIB_MINOR_NAME) -o $(MONGO_DYLIBNAME) - - BSON_DYLIB_MINOR_NAME=$(BSON_LIBNAME).$(DYLIBSUFFIX).$(BSON_MAJOR).$(BSON_MINOR) - BSON_DYLIB_MAJOR_NAME=$(BSON_LIBNAME).$(DYLIBSUFFIX).$(BSON_MAJOR) - BSON_DYLIB_MAKE_CMD=$(CC) -shared -Wl,-install_name,$(BSON_DYLIB_MINOR_NAME) -o $(BSON_DYLIBNAME) -endif - -# Installation -ifeq ($(kernel_name),SunOS) - INSTALL?=cp -r -endif -INSTALL?= cp -a -INSTALL_INCLUDE_PATH?=/usr/local/include -INSTALL_LIBRARY_PATH?=/usr/local/lib - -# TARGETS -all: $(MONGO_DYLIBNAME) $(BSON_DYLIBNAME) $(MONGO_STLIBNAME) $(BSON_STLIBNAME) - -# Dependency targets. Run 'make deps' to generate these. 
-bson.o: src/bson.c src/bson.h src/encoding.h -encoding.o: src/encoding.c src/bson.h src/encoding.h -env_standard.o: src/env_standard.c src/env.h src/mongo.h src/bson.h -env_posix.o: src/env_posix.c src/env.h src/mongo.h src/bson.h -gridfs.o: src/gridfs.c src/gridfs.h src/mongo.h src/bson.h -md5.o: src/md5.c src/md5.h -mongo.o: src/mongo.c src/mongo.h src/bson.h src/md5.h src/env.h -numbers.o: src/numbers.c - -$(MONGO_DYLIBNAME): $(DYN_MONGO_OBJECTS) - $(MONGO_DYLIB_MAKE_CMD) - -$(MONGO_STLIBNAME): $(MONGO_OBJECTS) - $(AR) -rs $@ $(MONGO_OBJECTS) - -$(BSON_DYLIBNAME): $(DYN_BSON_OBJECTS) - $(BSON_DYLIB_MAKE_CMD) - -$(BSON_STLIBNAME): $(BSON_OBJECTS) - $(AR) -rs $@ $(BSON_OBJECTS) - -install: - mkdir -p $(INSTALL_INCLUDE_PATH) $(INSTALL_LIBRARY_PATH) - $(INSTALL) src/mongo.h src/bson.h $(INSTALL_INCLUDE_PATH) - $(INSTALL) $(MONGO_DYLIBNAME) $(INSTALL_LIBRARY_PATH)/$(MONGO_DYLIB_PATCH_NAME) - $(INSTALL) $(BSON_DYLIBNAME) $(INSTALL_LIBRARY_PATH)/$(BSON_DYLIB_PATCH_NAME) - cd $(INSTALL_LIBRARY_PATH) && ln -sf $(MONGO_DYLIB_PATCH_NAME) $(MONGO_DYLIB_MINOR_NAME) - cd $(INSTALL_LIBRARY_PATH) && ln -sf $(BSON_DYLIB_PATCH_NAME) $(BSON_DYLIB_MINOR_NAME) - cd $(INSTALL_LIBRARY_PATH) && ln -sf $(MONGO_DYLIB_MINOR_NAME) $(MONGO_DYLIBNAME) - cd $(INSTALL_LIBRARY_PATH) && ln -sf $(BSON_DYLIB_MINOR_NAME) $(BSON_DYLIBNAME) - $(INSTALL) $(MONGO_STLIBNAME) $(INSTALL_LIBRARY_PATH) - $(INSTALL) $(BSON_STLIBNAME) $(INSTALL_LIBRARY_PATH) - -test: $(TESTS) - sh runtests.sh - -valgrind: $(TESTS) - sh runtests.sh -v - -docs: - python docs/buildscripts/docs.py - -clean: - rm -rf $(MONGO_DYLIBNAME) $(MONGO_STLIBNAME) $(BSON_DYLIBNAME) $(BSON_STLIBNAME) src/*.o src/*.os test/*_test - -deps: - $(CC) -MM -DMONGO_HAVE_STDINT src/*.c - -32bit: - $(MAKE) CFLAGS="-m32" LDFLAGS="-pg" - -%_test: %_test.c $(MONGO_STLIBNAME) - $(CC) -o $@ -L. -Isrc $(TEST_DEFINES) $(ALL_LDFLAGS) $< $(MONGO_STLIBNAME) - -%.o: %.c - $(CC) -o $@ -c $(ALL_CFLAGS) $< - -%.os: %.c - $(CC) -o $@ -c $(ALL_CFLAGS) $(DYN_FLAGS) $< - -.PHONY: clean docs test diff --git a/mongo-c-driver-v0.6/README.md b/mongo-c-driver-v0.6/README.md deleted file mode 100644 index d5ecb27..0000000 --- a/mongo-c-driver-v0.6/README.md +++ /dev/null @@ -1,66 +0,0 @@ -# MongoDB C Driver - -This is then 10gen-supported MongoDB C driver. There are two goals for this driver. -The first is to provide a strict, default compilation option for ultimate portability, -no dependencies, and generic embeddability. - -The second is to support more advanced, platform-specific features, like socket timeout, -by providing an interface for platform-specific modules. - -Until the 1.0 release, this driver should be considered alpha. Keep in mind that the API will be in flux until then. - -# Documentation - -Documentation exists in the project's `docs` folder. You can read the latest -docs online at (http://api.mongodb.org/c/current/). - -The docs are built using Sphinx and Doxygen. If you have these tools installed, then -you can build the docs with scons: - - scons docs - -The html docs will appear in docs/html. - -# Building - -First check out the version you want to build. *Always build from a particular tag, since HEAD may be -a work in progress.* For example, to build version 0.6, run: - - git checkout v0.6 - -You can then build the driver with scons: - - scons - -For more build options, see the docs. - -## Running the tests -Make sure that you're running mongod on 127.0.0.1 on the default port (27017). 
The replica set -test assumes a replica set with at least three nodes running at 127.0.0.1 and starting at port -30000. Note that the driver does not recognize 'localhost' as a valid host name. - -To compile and run the tests: - - scons test - -# Error Handling -Most functions return MONGO_OK or BSON_OK on success and MONGO_ERROR or BSON_ERROR on failure. -Specific error codes and error strings are then stored in the `err` and `errstr` fields of the -`mongo` and `bson` objects. It is the client's responsibility to check for errors and handle -them appropriately. - -# ISSUES - -You can report bugs, request new features, and view this driver's roadmap -using [JIRA](http://jira.mongodb.org/browse/CDRIVER). - -# CREDITS - -* Gergely Nagy - Non-null-terminated string support. -* Josh Rotenberg - Initial Doxygen setup and a significant chunk of documentation. - -# LICENSE - -Unless otherwise specified in a source file, sources in this -repository are published under the terms of the Apache License version -2.0, a copy of which is in this repository as APACHE-2.0.txt. diff --git a/mongo-c-driver-v0.6/SConstruct b/mongo-c-driver-v0.6/SConstruct deleted file mode 100644 index 90a8f31..0000000 --- a/mongo-c-driver-v0.6/SConstruct +++ /dev/null @@ -1,297 +0,0 @@ -# -*- mode: python; -*- - -MAJOR_VERSION = "0" -MINOR_VERSION = "6" -PATCH_VERSION = "0" -VERSION = MAJOR_VERSION + "." + MINOR_VERSION + "." + PATCH_VERSION - -# --- options ---- -AddOption('--test-server', - dest='test_server', - default='127.0.0.1', - type='string', - nargs=1, - action='store', - help='IP address of server to use for testing') - -AddOption('--seed-start-port', - dest='seed_start_port', - default=30000, - type='int', - nargs=1, - action='store', - help='IP address of server to use for testing') - -AddOption('--c99', - dest='use_c99', - default=False, - action='store_true', - help='Compile with c99 (recommended for gcc)') - -AddOption('--m32', - dest='use_m32', - default=False, - action='store_true', - help='Compile with m32 (required for 32 bit applications on 64 bit machines)') - -AddOption('--addrinfo', - dest='use_addrinfo', - default=False, - action='store_true', - help='Compile with addrinfo to make use of internet address info when connecting') - -AddOption('--d', - dest='optimize', - default=True, - action='store_false', - help='disable optimizations') - -AddOption('--standard-env', - dest='standard_env', - default=False, - action='store_true', - help='Set this option if you want to use basic, platform-agnostic networking.') - -AddOption('--install-library-path', - dest='install_library_path', - default='/usr/local/lib', - action='store', - help='The shared library install path. Defaults to /usr/local/lib.') - -AddOption('--install-include-path', - dest='install_include_path', - default='/usr/local/include', - action='store', - help='The header install path. 
Defaults to /usr/local/include.') - -import os, sys - -if GetOption('use_m32'): - msvs_arch = "x86" -else: - msvs_arch = "amd64" -print "Compiling for " + msvs_arch -env = Environment(ENV=os.environ, MSVS_ARCH=msvs_arch, TARGET_ARCH=msvs_arch) - -# ---- Docs ---- -def build_docs(env, target, source): - buildscript_path = os.path.join(os.path.abspath("docs")) - sys.path.insert(0, buildscript_path) - import buildscripts - from buildscripts import docs - docs.main() - -env.Alias("docs", [], [build_docs]) -env.AlwaysBuild("docs") - -# ---- Platforms ---- -PLATFORM_TESTS = [] -if GetOption('standard_env'): - NET_LIB = "src/env_standard.c" -elif os.sys.platform in ["darwin", "linux2"]: - NET_LIB = "src/env_posix.c" - PLATFORM_TESTS = [ "env_posix" ] -elif 'win32' == os.sys.platform: - NET_LIB = "src/env_win32.c" - PLATFORM_TESTS = [ "env_win32" ] -else: - NET_LIB = "src/env_standard.c" - -# ---- Libraries ---- -if os.sys.platform in ["darwin", "linux2"]: - env.Append( CPPFLAGS="-pedantic -Wall -ggdb -DMONGO_HAVE_STDINT" ) - if not GetOption('standard_env'): - env.Append( CPPFLAGS=" -D_POSIX_SOURCE" ) - env.Append( CPPPATH=["/opt/local/include/"] ) - env.Append( LIBPATH=["/opt/local/lib/"] ) - - if GetOption('use_c99'): - env.Append( CFLAGS=" -std=c99 " ) - env.Append( CXXDEFINES="MONGO_HAVE_STDINT" ) - else: - env.Append( CFLAGS=" -ansi " ) - - if GetOption('optimize'): - env.Append( CPPFLAGS=" -O3 " ) - # -O3 benchmarks *significantly* faster than -O2 when disabling networking -elif 'win32' == os.sys.platform: - env.Append( LIBS='ws2_32' ) - -#we shouldn't need these options in c99 mode -if not GetOption('use_c99'): - conf = Configure(env) - - if not conf.CheckType('int64_t'): - if conf.CheckType('int64_t', '#include \n'): - conf.env.Append( CPPDEFINES="MONGO_HAVE_STDINT" ) - elif conf.CheckType('int64_t', '#include \n'): - conf.env.Append( CPPDEFINES="MONGO_HAVE_UNISTD" ) - elif conf.CheckType('__int64'): - conf.env.Append( CPPDEFINES="MONGO_USE__INT64" ) - elif conf.CheckType('long long int'): - conf.env.Append( CPPDEFINES="MONGO_USE_LONG_LONG_INT" ) - else: - print "*** what is your 64 bit int type? ****" - Exit(1) - - env = conf.Finish() - -have_libjson = False -conf = Configure(env) -if conf.CheckLib('json'): - have_libjson = True -env = conf.Finish() - -if GetOption('use_m32'): - if 'win32' != os.sys.platform: - env.Append( CPPFLAGS=" -m32" ) - env.Append( SHLINKFLAGS=" -m32" ) - -if GetOption('use_addrinfo'): - env.Append( CPPFLAGS=" -D_MONGO_USE_GETADDRINFO" ) - -if sys.byteorder == 'big': - env.Append( CPPDEFINES="MONGO_BIG_ENDIAN" ) - -env.Append( CPPPATH=["src/"] ) - -env.Append( CPPFLAGS=" -DMONGO_DLL_BUILD" ) -coreFiles = ["src/md5.c" ] -mFiles = [ "src/mongo.c", NET_LIB, "src/gridfs.c"] -bFiles = [ "src/bson.c", "src/numbers.c", "src/encoding.c"] - -mHeaders = ["src/mongo.h"] -bHeaders = ["src/bson.h"] -headers = mHeaders + bHeaders - -mLibFiles = coreFiles + mFiles + bFiles -bLibFiles = coreFiles + bFiles - -m = env.Library( "mongoc" , mLibFiles ) -b = env.Library( "bson" , bLibFiles ) -env.Default( env.Alias( "lib" , [ m[0] , b[0] ] ) ) - -# build the objects explicitly so that shared targets use the same -# environment (otherwise scons complains) -mSharedObjs = env.SharedObject(mLibFiles) -bSharedObjs = env.SharedObject(bLibFiles) - -bsonEnv = env.Clone() -if os.sys.platform == "linux2": - env.Append( SHLINKFLAGS = "-shared -Wl,-soname,libmongoc.so." + MAJOR_VERSION + "." + MINOR_VERSION ) - bsonEnv.Append( SHLINKFLAGS = "-shared -Wl,-soname,libbson.so." + MAJOR_VERSION + "." 
+ MINOR_VERSION) - dynm = env.SharedLibrary( "mongoc" , mSharedObjs ) - dynb = bsonEnv.SharedLibrary( "bson" , bSharedObjs ) -else: - dynm = env.SharedLibrary( "mongoc" , mSharedObjs ) - dynb = env.SharedLibrary( "bson" , bSharedObjs ) - -# ---- Install ---- -if os.sys.platform == "darwin": - shared_obj_suffix = "dylib" -else: - shared_obj_suffix = "so" - -install_library_path = env.GetOption("install_library_path") -install_include_path = env.GetOption("install_include_path") -def remove_without_exception(filename): - try: - os.remove(filename) - except: - print "Could not find " + filename + ". Skipping removal." - -def makedirs_without_exception(path): - try: - os.makedirs(path) - except: - print path + ": already exists, skipping" - -mongoc_target = os.path.join(install_library_path, "libmongoc." + shared_obj_suffix) -mongoc_major_target = mongoc_target + "." + MAJOR_VERSION -mongoc_minor_target = mongoc_major_target + "." + MINOR_VERSION -mongoc_patch_target = mongoc_minor_target + "." + PATCH_VERSION - -bson_target = os.path.join(install_library_path, "libbson." + shared_obj_suffix) -bson_major_target = bson_target + "." + MAJOR_VERSION -bson_minor_target = bson_major_target + "." + MINOR_VERSION -bson_patch_target = bson_minor_target + "." + PATCH_VERSION - -def uninstall_shared_libraries(target=None, source=None, env=None): - remove_without_exception(mongoc_major_target) - remove_without_exception(mongoc_minor_target) - remove_without_exception(mongoc_patch_target) - remove_without_exception(mongoc_target) - - remove_without_exception(bson_major_target) - remove_without_exception(bson_minor_target) - remove_without_exception(bson_patch_target) - remove_without_exception(bson_target) - -def install_shared_libraries(target=None, source=None, env=None): - import shutil - uninstall_shared_libraries() - - makedirs_without_exception(install_library_path) - shutil.copy("libmongoc." + shared_obj_suffix, mongoc_patch_target) - os.symlink(mongoc_patch_target, mongoc_minor_target) - os.symlink(mongoc_minor_target, mongoc_target) - - shutil.copy("libbson." + shared_obj_suffix, bson_patch_target) - os.symlink(bson_patch_target, bson_minor_target) - os.symlink(bson_minor_target, bson_target) - -def install_headers(target=None, source=None, env=None): - import shutil - # -- uninstall headers here? 
- - makedirs_without_exception(install_include_path) - for hdr in headers: - shutil.copy(hdr, install_include_path) - -env.Alias("install", [], [install_shared_libraries, install_headers] ) - -env.Command("uninstall", [], uninstall_shared_libraries) - -env.Default( env.Alias( "sharedlib" , [ dynm[0] , dynb[0] ] ) ) -env.AlwaysBuild("install") - -# ---- Benchmarking ---- -benchmarkEnv = env.Clone() -benchmarkEnv.Append( CPPDEFINES=[('TEST_SERVER', r'\"%s\"'%GetOption('test_server')), -('SEED_START_PORT', r'%d'%GetOption('seed_start_port'))] ) -benchmarkEnv.Append( LIBS=[m, b] ) -benchmarkEnv.Prepend( LIBPATH=["."] ) -benchmarkEnv.Program( "benchmark" , [ "test/benchmark.c"] ) - -# ---- Tests ---- -testEnv = benchmarkEnv.Clone() -testCoreFiles = [ ] - -def run_tests( root, tests, env, alias ): - for name in tests: - filename = "%s/%s_test.c" % (root, name) - exe = "test_" + name - test = env.Program( exe , testCoreFiles + [filename] ) - test_alias = env.Alias(alias, [test], test[0].abspath + ' 2> ' + os.path.devnull) - AlwaysBuild(test_alias) - -tests = Split("write_concern commands sizes resize endian_swap bson bson_subobject simple update errors " -"count_delete auth gridfs validate examples helpers oid functions cursors") -tests += PLATFORM_TESTS - -# Run standard tests -run_tests("test", tests, testEnv, "test") - -if have_libjson: - tests.append('json') - testEnv.Append( LIBS=["json"] ) - -# special case for cpptest -test = testEnv.Program( 'test_cpp' , testCoreFiles + ['test/cpptest.cpp'] ) -test_alias = testEnv.Alias('test', [test], test[0].abspath + ' 2> '+ os.path.devnull) -AlwaysBuild(test_alias) - -# Run replica set test only -repl_testEnv = benchmarkEnv.Clone() -repl_tests = ["replica_set"] -run_tests("test", repl_tests, repl_testEnv, "repl_test") diff --git a/mongo-c-driver-v0.6/check_int64.sh b/mongo-c-driver-v0.6/check_int64.sh deleted file mode 100755 index 5e2ba54..0000000 --- a/mongo-c-driver-v0.6/check_int64.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -# -# Copyright 2009, 2010 10gen Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# $1 is C compiler. $2 is header file -cat <tmp.c && $1 -o header_check.tmp tmp.c -#include <$2> -int main() { int64_t i=0; return 0; } -EOF diff --git a/mongo-c-driver-v0.6/docs/buildscripts/__init__.py b/mongo-c-driver-v0.6/docs/buildscripts/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/mongo-c-driver-v0.6/docs/buildscripts/docs.py b/mongo-c-driver-v0.6/docs/buildscripts/docs.py deleted file mode 100644 index 1d15e27..0000000 --- a/mongo-c-driver-v0.6/docs/buildscripts/docs.py +++ /dev/null @@ -1,56 +0,0 @@ -"""Build the C client docs. 
-""" - -from __future__ import with_statement -import os -import shutil -import socket -import subprocess -import time -import urllib2 - -def clean_dir(dir): - try: - shutil.rmtree(dir) - except: - pass - os.makedirs(dir) - -def gen_api(dir): - clean_dir(dir) - clean_dir("docs/source/doxygen") - - with open(os.devnull, 'w') as null: - subprocess.call(["doxygen", "doxygenConfig"], stdout=null, stderr=null) - - os.rename("docs/source/doxygen/html", dir) - -def gen_sphinx(dir): - clean_dir(dir) - os.chdir("docs/source/sphinx") - - with open(os.devnull, 'w') as null: - subprocess.call(["make", "html"], stdout=null, stderr=null) - - os.chdir("../../../") - os.rename("docs/source/sphinx/build/html", dir) - -def version(): - """Get the driver version from doxygenConfig. - """ - with open("doxygenConfig") as f: - for line in f.readlines(): - if line.startswith("PROJECT_NUMBER"): - return line.split("=")[1].strip() - - -def main(): - print("Generating Sphinx docs in docs/html") - gen_sphinx("docs/html") - print("Generating Doxygen docs in docs/html/api") - gen_api("docs/html/api") - - -if __name__ == "__main__": - main() - diff --git a/mongo-c-driver-v0.6/docs/examples/example.c b/mongo-c-driver-v0.6/docs/examples/example.c deleted file mode 100644 index 5ef97af..0000000 --- a/mongo-c-driver-v0.6/docs/examples/example.c +++ /dev/null @@ -1,113 +0,0 @@ -#include "mongo.h" -#include -#include -#include - -int main() { - bson b, sub, out, empty; - bson_iterator it; - mongo conn; - mongo_cursor cursor; - int result; - - /* Create a rich document like this one: - * - * { _id: ObjectId("4d95ea712b752328eb2fc2cc"), - * user_id: ObjectId("4d95ea712b752328eb2fc2cd"), - * - * items: [ - * { sku: "col-123", - * name: "John Coltrane: Impressions", - * price: 1099, - * }, - * - * { sku: "young-456", - * name: "Larry Young: Unity", - * price: 1199 - * } - * ], - * - * address: { - * street: "59 18th St.", - * zip: 10010 - * }, - * - * total: 2298 - * } - */ - bson_init( &b ); - bson_append_new_oid( &b, "_id" ); - bson_append_new_oid( &b, "user_id" ); - - bson_append_start_array( &b, "items" ); - bson_append_start_object( &b, "0" ); - bson_append_string( &b, "name", "John Coltrane: Impressions" ); - bson_append_int( &b, "price", 1099 ); - bson_append_finish_object( &b ); - - bson_append_start_object( &b, "1" ); - bson_append_string( &b, "name", "Larry Young: Unity" ); - bson_append_int( &b, "price", 1199 ); - bson_append_finish_object( &b ); - bson_append_finish_object( &b ); - - bson_append_start_object( &b, "address" ); - bson_append_string( &b, "street", "59 18th St." ); - bson_append_int( &b, "zip", 10010 ); - bson_append_finish_object( &b ); - - bson_append_int( &b, "total", 2298 ); - - /* Finish the BSON obj. */ - bson_finish( &b ); - printf("Here's the whole BSON object:\n"); - bson_print( &b ); - - /* Advance to the 'items' array */ - bson_find( &it, &b, "items" ); - - /* Get the subobject representing items */ - bson_iterator_subobject( &it, &sub ); - - /* Now iterate that object */ - printf("And here's the inner sub-object by itself.\n"); - bson_print( &sub ); - - /* Now make a connection to MongoDB. */ - if( mongo_connect( &conn, "127.0.0.1", 27017 ) != MONGO_OK ) { - switch( conn.err ) { - case MONGO_CONN_NO_SOCKET: - printf( "FAIL: Could not create a socket!\n" ); - break; - case MONGO_CONN_FAIL: - printf( "FAIL: Could not connect to mongod. Make sure it's listening at 127.0.0.1:27017.\n" ); - break; - } - - exit( 1 ); - } - - /* Insert the sample BSON document. 
*/ - if( mongo_insert( &conn, "test.records", &b ) != MONGO_OK ) { - printf( "FAIL: Failed to insert document with error %d\n", conn.err ); - exit( 1 ); - } - - /* Query for the sample document. */ - mongo_cursor_init( &cursor, &conn, "test.records" ); - mongo_cursor_set_query( &cursor, bson_empty( &empty ) ); - if( mongo_cursor_next( &cursor ) != MONGO_OK ) { - printf( "FAIL: Failed to find inserted document." ); - exit( 1 ); - } - - printf( "Found saved BSON object:\n" ); - bson_print( (bson *)mongo_cursor_bson( &cursor ) ); - - mongo_cmd_drop_collection( &conn, "test", "records", NULL ); - mongo_cursor_destroy( &cursor ); - bson_destroy( &b ); - mongo_destroy( &conn ); - - return 0; -} diff --git a/mongo-c-driver-v0.6/docs/source/sphinx/Makefile b/mongo-c-driver-v0.6/docs/source/sphinx/Makefile deleted file mode 100644 index 9eb025a..0000000 --- a/mongo-c-driver-v0.6/docs/source/sphinx/Makefile +++ /dev/null @@ -1,130 +0,0 @@ -# Makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -PAPER = -BUILDDIR = build - -# Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source - -.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest - -help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - -clean: - -rm -rf $(BUILDDIR)/* - -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -singlehtml: - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." 
- -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/MongoDBCDriver.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/MongoDBCDriver.qhc" - -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." - @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/MongoDBCDriver" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/MongoDBCDriver" - @echo "# devhelp" - -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." - -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." - -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - make -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." - -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." - -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." diff --git a/mongo-c-driver-v0.6/docs/source/sphinx/make.bat b/mongo-c-driver-v0.6/docs/source/sphinx/make.bat deleted file mode 100644 index e3cc32a..0000000 --- a/mongo-c-driver-v0.6/docs/source/sphinx/make.bat +++ /dev/null @@ -1,155 +0,0 @@ -@ECHO OFF - -REM Command file for Sphinx documentation - -if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD=sphinx-build -) -set BUILDDIR=build -set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source -if NOT "%PAPER%" == "" ( - set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% -) - -if "%1" == "" goto help - -if "%1" == "help" ( - :help - echo.Please use `make ^` where ^ is one of - echo. html to make standalone HTML files - echo. dirhtml to make HTML files named index.html in directories - echo. singlehtml to make a single large HTML file - echo. pickle to make pickle files - echo. json to make JSON files - echo. htmlhelp to make HTML files and a HTML help project - echo. qthelp to make HTML files and a qthelp project - echo. devhelp to make HTML files and a Devhelp project - echo. epub to make an epub - echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter - echo. text to make text files - echo. man to make manual pages - echo. changes to make an overview over all changed/added/deprecated items - echo. linkcheck to check all external links for integrity - echo. 
doctest to run all doctests embedded in the documentation if enabled - goto end -) - -if "%1" == "clean" ( - for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i - del /q /s %BUILDDIR%\* - goto end -) - -if "%1" == "html" ( - %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/html. - goto end -) - -if "%1" == "dirhtml" ( - %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. - goto end -) - -if "%1" == "singlehtml" ( - %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml - echo. - echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. - goto end -) - -if "%1" == "pickle" ( - %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle - echo. - echo.Build finished; now you can process the pickle files. - goto end -) - -if "%1" == "json" ( - %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json - echo. - echo.Build finished; now you can process the JSON files. - goto end -) - -if "%1" == "htmlhelp" ( - %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp - echo. - echo.Build finished; now you can run HTML Help Workshop with the ^ -.hhp project file in %BUILDDIR%/htmlhelp. - goto end -) - -if "%1" == "qthelp" ( - %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp - echo. - echo.Build finished; now you can run "qcollectiongenerator" with the ^ -.qhcp project file in %BUILDDIR%/qthelp, like this: - echo.^> qcollectiongenerator %BUILDDIR%\qthelp\MongoDBCDriver.qhcp - echo.To view the help file: - echo.^> assistant -collectionFile %BUILDDIR%\qthelp\MongoDBCDriver.ghc - goto end -) - -if "%1" == "devhelp" ( - %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp - echo. - echo.Build finished. - goto end -) - -if "%1" == "epub" ( - %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub - echo. - echo.Build finished. The epub file is in %BUILDDIR%/epub. - goto end -) - -if "%1" == "latex" ( - %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex - echo. - echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. - goto end -) - -if "%1" == "text" ( - %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text - echo. - echo.Build finished. The text files are in %BUILDDIR%/text. - goto end -) - -if "%1" == "man" ( - %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man - echo. - echo.Build finished. The manual pages are in %BUILDDIR%/man. - goto end -) - -if "%1" == "changes" ( - %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes - echo. - echo.The overview file is in %BUILDDIR%/changes. - goto end -) - -if "%1" == "linkcheck" ( - %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck - echo. - echo.Link check complete; look for any errors in the above output ^ -or in %BUILDDIR%/linkcheck/output.txt. - goto end -) - -if "%1" == "doctest" ( - %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest - echo. - echo.Testing of doctests in the sources finished, look at the ^ -results in %BUILDDIR%/doctest/output.txt. - goto end -) - -:end diff --git a/mongo-c-driver-v0.6/docs/source/sphinx/source/bson.rst b/mongo-c-driver-v0.6/docs/source/sphinx/source/bson.rst deleted file mode 100644 index 8c36a60..0000000 --- a/mongo-c-driver-v0.6/docs/source/sphinx/source/bson.rst +++ /dev/null @@ -1,218 +0,0 @@ -BSON -============================= - -BSON (i.e., binary structured object notation) is the binary format used -by MongoDB to store data and express queries and commands. 
To work with -MongoDB is to trade in BSON objects. This document describes how to -create, read, and destroy BSON objects using the MongoDB C Driver. - -Libraries ---------- - -A brief note on libraries. - -When you compile the driver, the BSON library is included in the -driver. This means that when you include ``mongo.h``, you have access -to all the functions declared in ``bson.h``. - -If you want to use BSON independently, you don't need ``libmongoc``: when you compile -the driver, you'll also get shared and static libraries for ``libbson``. You -can link to this library and simple require ``bson.h``. - -Using BSON objects ------------------- - -The pattern of BSON object usage is pretty simple. Here are the steps: - -1. Initiate a new BSON object. -2. Construct the object using the bson_append_* methods. -3. Pass the object to bson_finish() to finalize it. The object is now ready to use. -4. When you're done with it, pass the object to bson_destroy() to free up any allocated - memory. - -To demonstrate, let's create a BSON object corresponding to the simple JSON object -``{count: 1001}``. - -.. code-block:: c - - bson b[1]; - - bson_init( b ); - bson_append_int( b, "count", 1001 ); - bson_finish( b ); - - // BSON object now ready for use - - bson_destroy( b ); - -That's all there is to creating a basic object. - -Creating complex BSON objects -_____________________________ - -BSON objects can contain arrays as well as sub-objects. Here -we'll see how to create these by building the bson object -corresponding to the following JSON object: - -.. code-block:: javascript - - { - name: "Kyle", - - colors: [ "red", "blue", "green" ], - - address: { - city: "New York", - zip: "10011-4567" - } - } - -.. code-block:: c - - bson b[1]; - - bson_init( b ); - bson_append_string( b, "name", "Kyle" ); - - bson_append_start_array( b, "colors" ); - bson_append_string( b, "0", "red" ); - bson_append_string( b, "1", "blue" ); - bson_append_string( b, "2", "green" ); - bson_append_finish_array( b ); - - bson_append_start_object( b, "address" ); - bson_append_string( b, "city", "New York" ); - bson_append_string( b, "zip", "10011-4567" ); - bson_append_finish_object( b ); - - if( bson_finish( b ) != BSON_OK ) - printf(" Error. "); - -Notice that for the array, we have to manually set the index values -from "0" to *n*, where *n* is the number of elements in the array. - -You'll notice that some knowledge of the BSON specification and -of the available types is necessary. For that, take a few minutes to -consult the `official BSON specification `_. - -Error handling --------------- - -The names of BSON object values, as well as all strings, must be -encoded as valid UTF-8. The BSON library will automatically check -the encoding of strings as you create BSON objects, and if the objects -are invalid, you'll be able to check for this condition. All of the -bson_append_* methods will return either BSON_OK for BSON_ERROR. You -can check in your code for the BSON_ERROR condition and then see the -exact nature of the error by examining the bson->err field. This bitfield -can contain any of the following values: - -* BSON_VALID -* BSON_NOT_UTF8 -* BSON_FIELD_HAS_DOT -* BSON_FIELD_INIT_DOLLAR -* BSON_ALREADY_FINISHED - -The most important of these is ``BSON_NOT_UTF8`` because the BSON -objects cannot be used with MongoDB if they're not valid UTF8. - -To keep your code clean, you may want to check for BSON_OK only when -calling ``bson_finish()``. 
If the object is not valid, it will not be -finished, so it's quite important to check the return code here. - -Reading BSON objects --------------------- - -You can read through a BSON object using a ``bson_iterator``. For -a complete example, you may want to read through the implementation -of ``bson_print_raw()`` (in ``bson.h``). But the basic idea is to -initialize a ``bson_iterator`` object and then iterate over each -successive element using ``bson_iterator_next()``. Let's take an -example. Suppose we have a finished object of type ``bson*`` called ``b``: - -.. code-block:: c - - - bson_iterator i[1]; - bson_type type; - const char * key; - - bson_iterator_init( i, b ); - - type = bson_iterator_next( i ); - key = bson_iterator_key( i ); - - printf( "Type: %d, Key: %s\n", type, key ); - -We've advanced to the first element in the object, and we can print -both it's BSON numeric type and its key name. To print the value, -we need to use the type to find the correct method for reading the -value. For instance, if the element is a string, then we use -``bson_iterator_string`` to return the result: - -.. code-block:: c - - printf( "Value: %s\n", bson_iterator_string( i ) ); - -In addition to iterating over each successive BSON element, -we can use the ``bson_find()`` function to jump directly -to an element by name. Again, suppose that ``b`` is a pointer -to a ``bson`` object. If we want to jump to the element -named "address", we use ``bson_find()`` like so: - -.. code-block:: c - - bson_iterator i[1], sub[i]; - bson_type type; - - type = bson_find( i, b, "address" ); - -This will initialize the iterator, ``i``, and position -it at the element named "address". The return value -will be the "address" element's type. - -Reading sub-objects and arrays ------------------------------- - -Since "address" is a sub-object, we need to specially -iterate it. To do that, we get the raw value and initialize -a new BSON iterator like so: - -.. code-block:: c - - type = bson_find( i, b, "address" ); - - bson_iterator_subiterator( i, sub ); - -The function ``bson_iterator_subiterator`` initializes -the iterator ``sub`` and points it to the beginning of the -sub-object. From there, we can iterate over -``sub`` until we reach ``BSON_EOO``, indicating the end of the -sub-object. - -If you want to work with a sub-object by itself, there's -a function, ``bson_iterator_subobject``, for initializing -a new ``bson`` object with the value of the sub-object. Note -that this does not copy the object. If you want a copy of the -object, use ``bsop_copy()``. - -.. code-block:: c - - bson copy[1]; - - bson_copy( copy, sub ); - -Getting a Raw BSON Pointer --------------------------- - -Sometimes you'll want to access the ``char *`` that -points to the buffer storing the raw BSON object. For that, -use the ``bson_data()`` function. You can use this in concert -with the bson_iterator_from_buffer() function to initialize an -iterator: - -.. code-block:: c - - bson_iterator i[1]; - - bson_iterator_from_buffer( i, bson_data( b ) ); diff --git a/mongo-c-driver-v0.6/docs/source/sphinx/source/building.rst b/mongo-c-driver-v0.6/docs/source/sphinx/source/building.rst deleted file mode 100644 index 3ad3734..0000000 --- a/mongo-c-driver-v0.6/docs/source/sphinx/source/building.rst +++ /dev/null @@ -1,233 +0,0 @@ -Building the MongoDB C Driver -============================= - -First checkout the version you want to build. 
*Always build from a particular tag, since HEAD may be -a work in progress.* For example, to build version 0.5.1, run: - -.. code-block:: bash - - git checkout v0.5.1 - -Then follow the build steps below. - -Compile options with custom defines ----------------------------------- - -Before compiling, you should note the following compile options. - -For big-endian support, define: - -- ``MONGO_BIG_ENDIAN`` - -If your compiler has a plain ``bool`` type, define: - -- ``MONGO_HAVE_BOOL`` - -Alternatively, if you must include ``stdbool.h`` to get ``bool``, define: - -- ``MONGO_HAVE_STDBOOL`` - -If you're not using C99, then you must choose your 64-bit integer type by -defining one of these: - -- ``MONGO_HAVE_STDINT`` - Define this if you have ```` for int64_t. -- ``MONGO_HAVE_UNISTD`` - Define this if you have ```` for int64_t. -- ``MONGO_USE__INT64`` - Define this if ``__int64`` is your compiler's 64bit type (MSVC). -- ``MONGO_USE_LONG_LONG_INT`` - Define this if ``long long int`` is your compiler's 64-bit type. - -Building with Make: -------------------- - -If you're building the driver on posix-compliant platforms, including on OS X -and Linux, then you can build with ``make``. - -To compile the driver, run: - -.. code-block:: bash - - make - -This will build the following libraries: - -* libbson.a -* libbson.so (libbson.dylib) -* libmongoc.a -* lobmongoc.so (libmongoc.dylib) - -You can install the libraries with make as well: - -.. code-block:: bash - - make install - -And you can run the tests: - -.. code-block:: bash - - make test - -You can even build the docs: - -.. code-block:: bash - - make docs - -By default, ``make`` will build the project in ``c99`` mode. If you want to change the -language standard, set the value of STD. For example, if you want to build using -the ANSI C standard, set STD to c89: - -.. code-block:: bash - - make STD=c89 - -Once you've built and installed the libraries, you can compile the sample -with ``gcc`` like so: - -.. code-block:: bash - - gcc --std=c99 -I/usr/local/include -L/usr/local/lib -o example docs/examples/example.c -lmongoc - -If you want to statically link the program, add the ``-static`` option: - -.. code-block:: bash - - gcc --std=c99 -static -I/usr/local/include -L/usr/local/lib -o example docs/examples/example.c -lmongoc - -Then run the program: - -.. code-block:: bash - - ./example - -Building with SCons: --------------------- - -You may also build the driver using the Python build utility, SCons_. -This is required if you're building on Windows. Make sure you've -installed SCons, and then from the project root, enter: - -.. _SCons: http://www.scons.org/ - -.. code-block:: bash - - scons - -This will build static and dynamic libraries for both ``BSON`` and for the -the driver as a complete package. It's recommended that you build in C99 mode -with optimizations enabled: - -.. code-block:: bash - - scons --c99 - -Once you've built the libraries, you can compile a program with ``gcc`` like so: - -.. code-block:: bash - - gcc --std=c99 -static -Isrc -o example docs/example/example.c libmongoc.a - -On Posix systems, you may also install the libraries with scons: - -.. code-block:: bash - - scons install - -To build the docs: - -.. code-block:: bash - - scons docs - -Building on Windows -------------------- - -When building the driver on Windows, you must use the Python build -utility, SCons_. For your compiler, we recommend that you use Visual Studio. -If you don't have Visual Studio, a free version is available. 
Search for Visual -Studio C++ Express to find it. - -If you're running on 32-bit Windows, you must compile the driver in 32-bit mode: - -.. code-block:: bash - - scons --m32 - -If getaddrinfo and friends aren't available on your version of Windows, you may -compile without these features like so: - -.. code-block:: bash - - scons --m32 --standard-env - -Platform-specific features --------------------------- - -The original goal of the MongoDB C driver was to provide a very basic library -capable of being embedded anywhere. This goal is now evolving somewhat given -the increased use of the driver. In particular, it now makes sense to provide -platform-specific features, such as socket timeouts and DNS resolution, and to -return platform-specific error codes. - -To that end, we've organized all platform-specific code in the following files: - -* ``env_standard.c``: a standard, platform-agnostic implementation. -* ``env_posix.c``: an implementation geared for Posix-compliant systems (Linux, OS X). -* ``env_win32.c``: a Windows implementation. - -Each of these implements the interface defined in ``env.h``. - -When building with ``make``, we use ``env_posix.c``. When building with SCons_, we -use ``env_posix.c`` or ``env_win32.c``, depending on the platform. - -If you want to compile with the generic, platform implementation, you have to do so -explicity. In SCons_: - -.. code-block:: bash - - scons --standard-env - -Using ``make``: - -.. code-block:: bash - - make ENV=standard - -Dependencies ------------- - -The driver itself has no dependencies, but one of the tests shows how to create a JSON-to-BSON -converter. For that test to run, you'll need JSON-C_. - -.. _JSON-C: http://oss.metaparadigm.com/json-c/ - -Test suite ----------- - -Make sure that you're running mongod on 127.0.0.1 on the default port (27017). The replica set -test assumes a replica set with at least three nodes running at 127.0.0.1 and starting at port -30000. Note that the driver does not recognize 'localhost' as a valid host name. - -With make: - -.. code-block:: bash - - make test - -To compile and run the tests with SCons: - -.. code-block:: bash - - scons test - -You may optionally specify a remote server: - -.. code-block:: bash - - scons test --test-server=123.4.5.67 - -You may also specify an alternate starting port for the replica set members: - -.. code-block:: bash - - scons test --test-server=123.4.5.67 --seed-start-port=40000 - diff --git a/mongo-c-driver-v0.6/docs/source/sphinx/source/conf.py b/mongo-c-driver-v0.6/docs/source/sphinx/source/conf.py deleted file mode 100644 index 124f192..0000000 --- a/mongo-c-driver-v0.6/docs/source/sphinx/source/conf.py +++ /dev/null @@ -1,216 +0,0 @@ -# -*- coding: utf-8 -*- -# -# MongoDB C Driver documentation build configuration file, created by -# sphinx-quickstart on Wed Jun 22 12:23:03 2011. -# -# This file is execfile()d with the current directory set to its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys, os - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. 
-#sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ----------------------------------------------------- - -# If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = [] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'MongoDB C Driver' -copyright = u'2011, 10gen' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = '0.4' -# The full version, including alpha/beta/rc tags. -release = '0.4' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -#language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = [] - -# The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - - -# -- Options for HTML output --------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# html_theme = 'nature' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -#html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". 
-html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -#html_domain_indices = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'MongoDBCDriverdoc' - - -# -- Options for LaTeX output -------------------------------------------------- - -# The paper size ('letter' or 'a4'). -#latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass [howto/manual]). -latex_documents = [ - ('index', 'MongoDBCDriver.tex', u'MongoDB C Driver Documentation', - u'10gen', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# If true, show page references after internal links. -#latex_show_pagerefs = False - -# If true, show URL addresses after external links. -#latex_show_urls = False - -# Additional stuff for the LaTeX preamble. -#latex_preamble = '' - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_domain_indices = True - - -# -- Options for manual page output -------------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -#man_pages = [ -# ('index', 'mongodbcdriver', u'MongoDB C Driver Documentation', -# [u'10gen, Inc.'], 1) -#] diff --git a/mongo-c-driver-v0.6/docs/source/sphinx/source/connections.rst b/mongo-c-driver-v0.6/docs/source/sphinx/source/connections.rst deleted file mode 100644 index 20064b4..0000000 --- a/mongo-c-driver-v0.6/docs/source/sphinx/source/connections.rst +++ /dev/null @@ -1,149 +0,0 @@ -Connections -============================= - -All operations against a MongoDB server or cluster require a connection object. This document -describes how to create and manage these connections. 
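As a quick orientation before the sections that follow, here is a compact sketch of the full connect / use / destroy lifecycle against a single mongod. The host, port, and error handling are illustrative placeholders; each step is covered in detail in the sections below.

.. code-block:: c

    #include "mongo.h"
    #include <stdio.h>

    int main( void ) {
        mongo conn[1];

        /* Connect to a single mongod (dot-decimal host notation). */
        if ( mongo_connect( conn, "127.0.0.1", 27017 ) != MONGO_OK ) {
            printf( "connection failed: conn->err = %d\n", conn->err );
            return 1;
        }

        /* ... reads and writes go here ... */

        /* Close the socket and free any allocated memory. */
        mongo_destroy( conn );
        return 0;
    }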
- -Basic connections ------------------ - -Use a basic connection to connect to a single MongoDB instances (``mongod``) or -to the router for a shard cluster (``mongos``). - -.. code-block:: c - - mongo conn[1]; - int result; - - result = mongo_connect( conn, "127.0.0.1", 27017 ); - -First we create the ``mongo`` object to manage the connection. Then we connect -using ``mongo_connect``. If the function returns ``MONGO_OK``, then we've -successfully connected. - -Notice that when specifying the host, we must use dot-decimal notation. If you'd like -to use a hostname, then you'll have to compile the driver with the ``--use-platform=LINUX`` -option and ensure that ``_MONGO_USE_GETADDRINFO`` is defined. - -In the event of an error, the result will be ``MONGO_ERROR``. You can then check the error -value by examining the connection's ``err`` field. Continuing: - -.. code-block:: c - - if( result != MONGO_OK ) { - switch( conn->err ) { - case MONGO_CONN_NO_SOCKET: break; /**< Could not create a socket. */ - case MONGO_CONN_FAIL: break; /**< An error occured while calling connect(). */ - case MONGO_CONN_ADDR_FAIL: break; /**< An error occured while calling getaddrinfo(). */ - case MONGO_CONN_NOT_MASTER: break; /**< Warning: connected to a non-master node (read-only). */ - } - -These are the most likely error scenarios. For all possible errors, -see the enum ``mongo_error_t``, and reference all constants beginning -with ``MONGO_CONN``. - -Once you've finished with your connection object, be sure to pass it to -``mongo_destroy()``. This will close the socket and clean up any allocated -memory: - -.. code-block:: c - - mongo_destroy( conn ); - -Replica set connections ------------------------ - -Use a replica set connection to connect to a replica set. - -The problem with connecting to a replica set is that you don't necessarily -know which node is the primary node at connection time. This MongoDB C driver -automatically figures out which node is the primary and then connects to it. - -To connection, you must provide: - -* The replica set's name - -And - -* At least one seed node. - -Here's how you go about that: - -.. code-block:: c - - mongo conn[1]; - int result; - - mongo_replset_init( conn, "rs-dc-1" ); - mongo_replset_add_seed( conn, '10.4.3.1', 27017 ); - mongo_replset_add_seed( conn, '10.4.3.2', 27017 ); - - result = mongo_replset_connect( conn ); - -First we initiaize the connection object, providing the name of the replica set, -in this case, "rs-dc-1." Next, we add two seed nodes. Finally, we connect -by pass the connection to ``mongo_replset_connect``. - -As with the basic connection, we'll want to check for any errors on connect. Notice -that there are two more error conditions we check for: - -.. code-block:: c - - if( result != MONGO_OK ) { - switch( conn->err ) - MONGO_CONN_NO_SOCKET: break; /**< Could not create a socket. */ - MONGO_CONN_FAIL: break; /**< An error occured while calling connect(). */ - MONGO_CONN_ADDR_FAIL: break; /**< An error occured while calling getaddrinfo(). */ - MONGO_CONN_NOT_MASTER: break; /**< Warning: connected to a non-master node (read-only). */ - MONGO_CONN_BAD_SET_NAME: break; /**< Given rs name doesn't match this replica set. */ - MONGO_CONN_NO_PRIMARY: break; /**< Can't find primary in replica set. Connection closed. */ - } - -When finished, be sure to destroy the connection object: - -.. code-block:: c - - mongo_destroy( conn ); - -Timeouts --------- - -You can set a timeout for read and write operation on the connection at any time: - -.. 
code-block:: c - - mongo_set_op_timeout( conn, 1000 ); - -This will set a 1000ms read-write timeout on the socket. If an operation fails, -you'll see a generic MONGO_IO_ERROR on the connection's ``err`` field. Future -versions of this driver will provide a more granular error code. - -Note that this will work only if you've compiled the driver with timeout support. - -I/O Errors and Reconnecting ---------------------------- - -As you begin to use the connection object to read and write data from MongoDB, -you may occasionally encounter a ``MONGO_IO_ERROR``. In most cases, -you'll want to reconnect when you see this. Here's a very basic example: - -.. code-block:: c - - bson b[1]; - - bson_init( b ); - bson_append_string( b, "hello", "world" ); - bson_finish( b ); - - if( mongo_insert( conn, "tutorial.people", b ) == MONGO_ERROR && conn->err == MONGO_IO_ERROR ) - mongo_reconnect( conn ); - -When reconnecting, you'll want to check the return value to ensure that the connection -has succeeded. If you ever have any doubts about whether you're really connected, -you can verify the health of the connection like so: - -.. code-block:: c - - mongo_check_connection( conn ); - -This function will return ``MONGO_OK`` if we're in fact connected. diff --git a/mongo-c-driver-v0.6/docs/source/sphinx/source/errors.rst b/mongo-c-driver-v0.6/docs/source/sphinx/source/errors.rst deleted file mode 100644 index dd45009..0000000 --- a/mongo-c-driver-v0.6/docs/source/sphinx/source/errors.rst +++ /dev/null @@ -1,73 +0,0 @@ -Error Reporting -=============== - -The MongoDB C driver reports errors from three sources: - -* The operating system -* The MongoDB server -* The driver itself -- typically user input errors - -The driver's API is structured such that nearly all functions -return either `MONGO_OK` or `MONGO_ERROR`. When a function returns -`MONGO_ERROR`, you may examine the `mongo` object to see why the -error has occurred. - -Operating system errors ------------------------ - -A good example of an operating system error is a connection failure. - -.. code-block:: c - - mongo conn[1]; - - if ( mongo_connect( conn, "foo.example.com", 27017 ) == MONGO_ERROR ) { - printf( "mongo_error_t: %d\n", conn->err ); - printf( "errno (or WSAGetLastError() on Windows): %d\n", conn->errcode ); - printf( "Error string: %s\n", conn->errstr ); - exit( 1 ); - } - -First, we print the `mongo_error_t` to get the general error type. Consult the `mongo_error_t -definition `_ to interpret this. - -Next, we print the OS-level error code. On POSIX-compliant systems, this will be the value of -`errno `_; -on Windows, it's the value of `WSAGetLastError() `_. - -Finally, we print the error string, which gives us a few more details. This string may be -the OS-level translation of the error code (e.g., POSIX's -`strerror() `_), or it may be -a string generated by the driver itself which better describes the failure. - -MongoDB errors --------------- - -MongoDB itself produces errors that may be returned to the client after any query -or call to the `getlasterror` command. The code and strings for these errors -are stored in the `mongo` object's `lasterrcode` and `lasterrstr`, respectively. -We can force this sort of error by trying to run an invalid command: - -..
code-block:: c - - mongo conn[1]; - int res; - - if( mongo_connect( conn, "foo.example.com", 27017 ) == MONGO_ERROR ) { - exit( 1 ); - } - - if( mongo_simple_int_command( conn, "admin", "badCommand", 1, &out ) == MONGO_ERROR ) { - printf("Last error code: %d\n", conn->lasterrcode ); - printf("Last error string: %s\n", conn->lasterrstr ); - } - - -Clearing errors ---------------- - -To reset errors on the `mongo` object, run the `mongo_clear_errors` function: - -.. code-block:: c - - mongo_clear_errors( conn ); diff --git a/mongo-c-driver-v0.6/docs/source/sphinx/source/index.rst b/mongo-c-driver-v0.6/docs/source/sphinx/source/index.rst deleted file mode 100644 index 8005b19..0000000 --- a/mongo-c-driver-v0.6/docs/source/sphinx/source/index.rst +++ /dev/null @@ -1,46 +0,0 @@ -MongoDB C Driver Documentation -============================== - -Overview --------- - -The MongoDB C Driver is a 10gen-supported driver for MongoDB. -It's written in pure C. The goal is to be super strict for ultimate -portability, no dependencies, and generic embeddability. - -The driver is still considered alpha but is undergoing active -development. Support for replica sets was just added in v0.3.1. -The API was completely revamped in v0.4. Another backward-breaking -change (support for `write_concern`) was added in v0.6. - -:doc:`tutorial` - An overview of the driver's API. - -:doc:`building` - How to build the driver from source. - -:doc:`bson` - How to work with BSON objects. - -:doc:`connections` - How to connect to single nodes and to replica sets. - -:doc:`write_concern` - How to detect write errors and ensure various durability levels. - -:doc:`errors` - How errors are reported. - -`API Docs `_ - Doxygen-generated API docs. - -`Source code `_ - The source code is hosted on GitHub. - -.. toctree:: - :maxdepth: 2 - - tutorial - building - bson - connections diff --git a/mongo-c-driver-v0.6/docs/source/sphinx/source/tutorial.rst b/mongo-c-driver-v0.6/docs/source/sphinx/source/tutorial.rst deleted file mode 100644 index 857613a..0000000 --- a/mongo-c-driver-v0.6/docs/source/sphinx/source/tutorial.rst +++ /dev/null @@ -1,401 +0,0 @@ -MongoDB C Driver Tutorial -========================= - -This document shows how to use MongoDB from C. If you're not familiar with MongoDB. -you'll want to get a brief overview of the database and its shell API. The official -tutorial is a great place to start. - -Next, you'll want to install and run MongoDB. - -A working C program complete with examples from this tutorial can be -found in the examples folder of the source distribution. - -C API ------ - -When writing programs with the C driver, you'll be using four different -entities: connections, cursors, bson objects, and bson iterators. The APIs -for each of these follow a similiar pattern. You start by allocating an object, -either on the stack or the heap (the examples that follow all use the stack). You then -call an ``init`` function and use other function to build the object. When you're finished, -you pass the object to a ``destroy`` function. - -So, for instance, to create a new connection, start by allocating a ``mongo`` object: - -.. code-block:: c - - mongo conn; - -Next, initialize it: - -.. code-block:: c - - mongo_init( &conn ); - -Set any optional values, like a timeout, and then call ``mongo_connect``: - -.. code-block:: c - - mongo_set_op_timeout( &conn, 1000 ); - mongo_connect( &conn, "127.0.0.1", 27017 ); - -When you're finished, destroy the mongo object: - -.. 
code-block:: c - - mongo_destroy( &conn ); - -There are more details, but that's the basic pattern. Keep this in mind -as you learn the API and start using the driver. - -Connecting ----------- - -Let's start by that connects to the database: - -.. code-block:: c - - #include - #include "mongo.h" - - int main() { - mongo conn[1]; - int status = mongo_connect( conn, "127.0.0.1", 27017 ); - - if( status != MONGO_OK ) { - switch ( conn->err ) { - case MONGO_CONN_SUCCESS: printf( "connection succeeded\n" ); break; - case MONGO_CONN_NO_SOCKET: printf( "no socket\n" ); return 1; - case MONGO_CONN_FAIL: printf( "connection failed\n" ); return 1; - case MONGO_CONN_NOT_MASTER: printf( "not master\n" ); return 1; - } - } - - mongo_destroy( conn ); - - return 0; - } - -Building the sample program ---------------------------- - -If you are using ``gcc`` on Linux or OS X, you can compile with something like this, -depending on location of your include files: - -.. code-block:: bash - - $ gcc -Isrc --std=c99 /path/to/mongo-c-driver/src/*.c -I /path/to/mongo-c-driver/src/ tutorial.c -o tutorial - $ ./tutorial - connection succeeded - connection closed - - -Connecting to a replica set ---------------------------- - -The API for connecting to a replica set is slightly different. First you initialize -the connection object, specifying the replica set's name (in this case, "shard1"), -then you add seed nodes, and finally you connect. Here's an example: - -.. code-block:: c - - #include "mongo.h" - - int main() { - mongo conn[1]; - - mongo_replset_init( conn, "shard1" ); - mongo_replset_add_seed( conn, "10.4.3.22", 27017 ); - mongo_replset_add_seed( conn, "10.4.3.32", 27017 ); - - status = mongo_replset_connect( conn ); - - if( status != MONGO_OK ) { - // Check conn->err for error code. - } - - mongo_destroy( conn ); - - return 0; - } - -BSON ----- - -MongoDB database stores data in a format called *BSON*. BSON is a JSON-like binary object format. -To create BSON objects - - -.. code-block:: c - - bson b[1]; - - bson_init( b ) - bson_append_string( b, "name", "Joe" ); - bson_append_int( b, "age", 33 ); - bson_finish( b ); - - mongo_insert( conn, b ); - - bson_destroy( b ); - -Use the ``bson_append_new_oid()`` function to add an object id to your object. -The server will add an object id to the ``_id`` field if it is not included explicitly, -but it's best to create it client-side. When you do create the id, be sure to place it -at the beginning of the object, as we do here: - -.. code-block:: c - - bson b[1]; - - bson_init( b ); - bson_append_new_oid( b, "_id" ); - bson_append_string( b, "name", "Joe" ); - bson_append_int( b, "age", 33 ); - bson_finish( b ); - -When you're done using the ``bson`` object, remember pass it to -``bson_destroy()`` to free up the memory allocated by the buffer. - -.. code-block:: c - - bson_destroy( b ); - -Inserting a single document ---------------------------- - -Here's how we save our person object to the database's "people" collection: - -.. code-block:: c - - mongo_insert( conn, "tutorial.people", b ); - -The first parameter to ``mongo_insert`` is the pointer to the ``mongo_connection`` -object. The second parameter is the namespace, which include the database name, followed -by a dot followed by the collection name. Thus, ``tutorial`` is the database and ``people`` -is the collection name. The third parameter is a pointer to the ``bson`` object that -we created before. 
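Putting the pieces above together, the following is a minimal sketch of a complete insert program. It reuses only calls already shown in these docs (``mongo_connect``, the ``bson_append_*`` builders, ``bson_finish``, ``mongo_insert``, and the matching ``destroy`` calls); the host, port, namespace, and field values are the same illustrative placeholders used above, and the trailing ``NULL`` assumes the four-argument ``mongo_insert`` signature shown on the write-concern page (meaning no per-operation write concern).

.. code-block:: c

    #include <stdio.h>
    #include "mongo.h"

    int main( void ) {
        mongo conn[1];
        bson b[1];

        /* Connect to a local mongod. */
        if ( mongo_connect( conn, "127.0.0.1", 27017 ) != MONGO_OK ) {
            printf( "connection failed: %d\n", conn->err );
            return 1;
        }

        /* Build { _id: ObjectId(...), name: "Joe", age: 33 }. */
        bson_init( b );
        bson_append_new_oid( b, "_id" );
        bson_append_string( b, "name", "Joe" );
        bson_append_int( b, "age", 33 );
        bson_finish( b );

        /* Insert into the "people" collection of the "tutorial" database;
           NULL means no per-operation write concern. */
        if ( mongo_insert( conn, "tutorial.people", b, NULL ) != MONGO_OK )
            printf( "insert failed: %d\n", conn->err );

        bson_destroy( b );
        mongo_destroy( conn );
        return 0;
    }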
- -Inserting a batch of documents ------------------------------- - -We can do batch inserts as well: - -.. code-block:: c - - static void tutorial_insert_batch( mongo *conn ) { - bson *p, **ps; - char *names[4]; - int ages[] = { 29, 24, 24, 32 }; - int i, n = 4; - names[0] = "Eliot"; names[1] = "Mike"; names[2] = "Mathias"; names[3] = "Richard"; - - ps = ( bson ** )malloc( sizeof( bson * ) * n ); - - for ( i = 0; i < n; i++ ) { - p = ( bson * )malloc( sizeof( bson ) ); - bson_init( p ); - bson_append_new_oid( p, "_id" ); - bson_append_string( p, "name", names[i] ); - bson_append_int( p, "age", ages[i] ); - bson_finish( p ); - ps[i] = p; - } - - mongo_insert_batch( conn, "tutorial.persons", ps, n ); - - for ( i = 0; i < n; i++ ) { - bson_destroy( ps[i] ); - free( ps[i] ); - } - } - -Simple Queries --------------- - -Let's now fetch all objects from the ``persons`` collection, and display them. - -.. code-block:: c - - static void tutorial_empty_query( mongo *conn ) { - mongo_cursor cursor[1]; - mongo_cursor_init( cursor, conn, "tutorial.persons" ); - - while( mongo_cursor_next( cursor ) == MONGO_OK ) - bson_print( &cursor->current ); - - mongo_cursor_destroy( cursor ); - } - -Here we use the most basic possible cursor, which iterates over all documents. This is the -equivalent of running ``db.persons.find()`` from the shell. - -You initialize a cursor with ``mongo_cursor_init()``. Whenever you finish with a cursor, -you must pass it to ``mongo_cursor_destroy()``. - -We use ``bson_print()`` to print an abbreviated JSON string representation of the object. - -Let's now write a function which prints out the names of all persons -whose age is 24: - -.. code-block:: c - - static void tutorial_simple_query( mongo *conn ) { - bson query[1]; - mongo_cursor cursor[1]; - - bson_init( query ); - bson_append_int( query, "age", 24 ); - bson_finish( query ); - - mongo_cursor_init( cursor, conn, "tutorial.persons" ); - mongo_cursor_set_query( cursor, query ); - - while( mongo_cursor_next( cursor ) == MONGO_OK ) { - bson_iterator iterator[1]; - if ( bson_find( iterator, mongo_cursor_bson( cursor ), "name" )) { - printf( "name: %s\n", bson_iterator_string( iterator ) ); - } - } - - bson_destroy( query ); - mongo_cursor_destroy( cursor ); - } - -Our query above, written as JSON, is equivalent to the following from the JavaScript shell: - -.. code-block:: javascript - - use tutorial - db.persons.find( { age: 24 } ) - -Complex Queries ---------------- - -Sometimes we want to do more than a simple query. We may want the results to -be sorted in a special way, or want the query to use a certain index. - -Let's add a sort clause to our previous query. This requires some knowledge of the -implementation of query specs in MongoDB. A query spec can either consist of: - -1. A query matcher alone, as in our previous example. - -or - -2. A query matcher, sort clause, hint enforcer, or explain directive. Each of these - is wrapped by the keys ``$query``, ``$orderby``, ``$hint``, and ``$explain``, respectively. - Most of the time, you'll only use ``$query`` and ``$orderby``. - -To add a sort clause to our previous query, we change our query spec from this: - -.. code-block:: c - - bson_init( query ); - bson_append_int( query, "age", 24 ); - bson_finish( query ); - -to this: - -..
code-block:: c - - bson_init( query ); - bson_append_start_object( query, "$query" ); - bson_append_int( query, "age", 24 ); - bson_append_finish_object( query ); - - bson_append_start_object( query, "$orderby" ); - bson_append_int( query, "name", 1); - bson_append_finish_object( query ); - bson_finish( query ); - -This is equivalent to the following query from the MongoDB shell: - -.. code-block:: javascript - - db.persons.find( { age: 24 } ).sort( { name: 1 } ); - - -Updating documents ------------------- - -Use the ``mongo_update()`` function to perform updates. -For example the following update in the MongoDB shell: - -.. code-block:: javascript - - use tutorial - db.persons.update( { name : 'Joe', age : 33 }, - { $inc : { visits : 1 } } ) - -is equivalent to the following C function: - -.. code-block:: c - - static void tutorial_update( mongo_connection *conn ) { - bson cond[1], op[1]; - - bson_init( cond ); - bson_append_string( cond, "name", "Joe"); - bson_append_int( cond, "age", 33); - bson_finish( cond ); - - bson_init( op ); - bson_append_start_object( op, "$inc" ); - bson_append_int( op, "visits", 1 ); - bson_append_finish_object( op ); - bson_finish( op ); - - mongo_update( conn, "tutorial.persons", cond, op, MONGO_UPDATE_BASIC ); - - bson_destroy( cond ); - bson_destroy( op ); - } - -The final argument to ``mongo_update()`` is a bitfield storing update options. If -you want to update all documents matching the ``cond``, you must use ``MONGO_UPDATE_MULTI``. -For upserts, use ``MONGO_UPDATE_UPSERT``. Here's an example: - -.. code-block:: c - - mongo_update( conn, "tutorial.persons", cond, op, MONGO_UPDATE_MULTI ); - -Indexing --------- - -Now we'll create a couple of indexes. The first is a simple index on ``name``, and -the second is a compound index on ``name`` and ``age``. - -.. code-block:: c - - static void tutorial_index( mongo_connection *conn ) { - bson key[1]; - - bson_init( key ); - bson_append_int( key, "name", 1 ); - bson_finish( key ); - - mongo_create_index( conn, "tutorial.persons", key, 0, NULL ); - - bson_destroy( key ); - - printf( "simple index created on \"name\"\n" ); - - bson_init( key ); - bson_append_int( key, "age", 1 ); - bson_append_int( key, "name", 1 ); - bson_finish( key ); - - mongo_create_index( conn, "tutorial.persons", key, 0, NULL ); - - bson_destroy( key ); - - printf( "compound index created on \"age\", \"name\"\n" ); - } - - - -Further Reading ---------------- - -This overview just touches on the basics of using Mongo from C. For more examples, -check out the other documentation pages, and have a look at the driver's test cases. diff --git a/mongo-c-driver-v0.6/docs/source/sphinx/source/write_concern.rst b/mongo-c-driver-v0.6/docs/source/sphinx/source/write_concern.rst deleted file mode 100644 index 03c766b..0000000 --- a/mongo-c-driver-v0.6/docs/source/sphinx/source/write_concern.rst +++ /dev/null @@ -1,106 +0,0 @@ -Write Concern (a.k.a. "Safe Mode") -================================== - -All writes issued from the drivers for MongoDB are "fire-and-forget" by default. -In practice, this means that by default, failed writes aren't reported. -For this reason, "fire-and-forget" writes are recommended -only for cases where losing a few writes is acceptable (logging, anayltics, etc.). - -In all other scenarios, you should ensure that your writes run as a round trip -to the server. This requires that you enable write concern or "safe mode", as it's -called elsewhere. 
- -In addition to reporting write errors, write concern also allows you to ensure -that your write are replicated to a particular number of servers to a set -of servers tagged with a given value. See the -`write concern docs `_ for details. - -Implementation and API ----------------------- - -Write concern is implemented by appending a call to the ``getlasterror`` -command after each write. You can certainly do this manually, but nearly all of the drivers -provide a write concern API for simplicty. To read about the options for ``getlasterror``, -and hence the options for write concern, -`see the MongoDB getlasterror docs `_. - -The MongoDB C driver supports write concern on two levels. You can set the write -concern on a ``mongo`` connection object, in which case that write concern level will -be used for every write. You can also specify a write concern for any individual -write operation (``mongo_insert()``, ``mongo_insert_batch()``, ``mongo_update()``, -or ``mongo_remove``). This will override any default write concern set on the -connection level. - -Example -------- - -.. code-block:: c - - #include - #include - - #define ASSERT(x) \ - do{ \ - if(!(x)){ \ - printf("\nFailed ASSERT [%s] (%d):\n %s\n\n", __FILE__, __LINE__, #x); \ - exit(1); \ - }\ - }while(0) - - int main() { - mongo conn[1]; - mongo_write_concern write_concern[1]; - bson b[1]; - - if( mongo_connect( conn, "127.0.0.1", 27017 ) == MONGO_ERROR ) { - printf( "Failed to connect!\n" ); - exit(1); - } - - mongo_cmd_drop_collection( conn, "test", "foo", NULL ); - - /* Initialize the write concern object.*/ - mongo_write_concern_init( write_concern ); - write_concern->w = 1; - mongo_write_concern_finish( write_concern ); - - bson_init( b ); - bson_append_new_oid( b ); - bson_finish( b ); - - ASSERT( mongo_insert( conn, "test.foo", b, wc ) == MONGO_ERROR ); - - /* If we try to insert the same document again, - we'll get an error due to the unique index on _id.*/ - ASSERT( mongo_insert( conn, "test.foo", b, wc ) == MONGO_ERROR ); - ASSERT( conn->err == MONGO_WRITE_ERROR ); - printf( "Error message: %s\n", conn->lasterrstr ); - - /* Clear any stored errors.*/ - mongo_clear_errors( conn ); - - /* We'll get the same error if we set a default write concern - on the connection object but don't set it on insert.*/ - mongo_set_write_concern( conn, write_concern ); - ASSERT( mongo_insert( conn, "test.foo", b, wc ) == MONGO_ERROR ); - ASSERT( conn->err == MONGO_WRITE_ERROR ); - printf( "Error message: %s\n", conn->lasterrstr ); - - mongo_write_concern_destroy( write_concern ); - bson_destroy( b ); - mongo_destroy( conn ); - - return 0; - } - -Notes ------ - -As you'll see in the code sample, the process for creating a write concern object -is to initialize it, manually set any write concern values (e.g., ``w``, ``wtimeout`` -for values of ``w`` greater than 1, ``j``, etc.), and then call ``mongo_write_concern_finish()`` -on it. This will effectively create the equivalent ``getlasterror`` command. Note you must call -``mongo_write_concern_destroy()`` when you're finished with the write concern object. - -And for a longer example, see the -`C driver's write concern tests `_. 
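Condensing the Notes above into a minimal sketch of the write-concern lifecycle: initialize the object, set ``w``, call ``mongo_write_concern_finish()``, attach it either as the connection default via ``mongo_set_write_concern()`` or per operation as the final argument to a write call, and destroy it when finished. Only calls shown on this page are used; the host, namespace, and document contents are illustrative placeholders.

.. code-block:: c

    #include <stdio.h>
    #include "mongo.h"

    int main( void ) {
        mongo conn[1];
        mongo_write_concern wc[1];
        bson b[1];

        if ( mongo_connect( conn, "127.0.0.1", 27017 ) != MONGO_OK )
            return 1;

        /* Require acknowledgement from the server for writes (w = 1). */
        mongo_write_concern_init( wc );
        wc->w = 1;
        mongo_write_concern_finish( wc );

        /* Use it as the default for every write on this connection... */
        mongo_set_write_concern( conn, wc );

        bson_init( b );
        bson_append_new_oid( b, "_id" );
        bson_finish( b );

        /* ...and/or pass it explicitly to a single write, which overrides
           the connection-level default. */
        if ( mongo_insert( conn, "test.foo", b, wc ) != MONGO_OK )
            printf( "write failed: %d\n", conn->err );

        mongo_write_concern_destroy( wc );
        bson_destroy( b );
        mongo_destroy( conn );
        return 0;
    }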
diff --git a/mongo-c-driver-v0.6/doxygenConfig b/mongo-c-driver-v0.6/doxygenConfig deleted file mode 100644 index 310ffd2..0000000 --- a/mongo-c-driver-v0.6/doxygenConfig +++ /dev/null @@ -1,316 +0,0 @@ -#--------------------------------------------------------------------------- -# Project related configuration options -#--------------------------------------------------------------------------- -DOXYFILE_ENCODING = UTF-8 -PROJECT_NAME = MongoDB C Driver -PROJECT_NUMBER = 0.6 -OUTPUT_DIRECTORY = docs/source/doxygen -CREATE_SUBDIRS = NO -OUTPUT_LANGUAGE = English -BRIEF_MEMBER_DESC = YES -REPEAT_BRIEF = YES -ABBREVIATE_BRIEF = "The $name class" \ - "The $name widget" \ - "The $name file" \ - is \ - provides \ - specifies \ - contains \ - represents \ - a \ - an \ - the -ALWAYS_DETAILED_SEC = NO -INLINE_INHERITED_MEMB = NO -FULL_PATH_NAMES = NO -STRIP_FROM_PATH = -STRIP_FROM_INC_PATH = -SHORT_NAMES = NO -JAVADOC_AUTOBRIEF = YES -QT_AUTOBRIEF = NO -MULTILINE_CPP_IS_BRIEF = NO -INHERIT_DOCS = YES -SEPARATE_MEMBER_PAGES = NO -TAB_SIZE = 8 -ALIASES = -OPTIMIZE_OUTPUT_FOR_C = NO -OPTIMIZE_OUTPUT_JAVA = NO -OPTIMIZE_FOR_FORTRAN = NO -OPTIMIZE_OUTPUT_VHDL = NO -EXTENSION_MAPPING = -BUILTIN_STL_SUPPORT = NO -CPP_CLI_SUPPORT = NO -SIP_SUPPORT = NO -IDL_PROPERTY_SUPPORT = YES -DISTRIBUTE_GROUP_DOC = NO -SUBGROUPING = YES -TYPEDEF_HIDES_STRUCT = NO -SYMBOL_CACHE_SIZE = 0 - -#--------------------------------------------------------------------------- -# Build related configuration options -#--------------------------------------------------------------------------- - -# *** -# ERH - this controls whether all classes in files are documented or just the ones with tags -# *** -EXTRACT_ALL = NO - -EXTRACT_PRIVATE = NO -EXTRACT_STATIC = NO -EXTRACT_LOCAL_CLASSES = NO -EXTRACT_LOCAL_METHODS = NO -EXTRACT_ANON_NSPACES = NO -HIDE_UNDOC_MEMBERS = NO -HIDE_UNDOC_CLASSES = NO -HIDE_FRIEND_COMPOUNDS = NO -HIDE_IN_BODY_DOCS = NO -INTERNAL_DOCS = NO -CASE_SENSE_NAMES = NO -HIDE_SCOPE_NAMES = NO -SHOW_INCLUDE_FILES = YES -INLINE_INFO = YES -SORT_MEMBER_DOCS = YES -SORT_BRIEF_DOCS = NO -SORT_GROUP_NAMES = NO -SORT_BY_SCOPE_NAME = NO -GENERATE_TODOLIST = YES -GENERATE_TESTLIST = YES -GENERATE_BUGLIST = YES -GENERATE_DEPRECATEDLIST= YES -ENABLED_SECTIONS = -MAX_INITIALIZER_LINES = 30 -SHOW_USED_FILES = YES -SHOW_DIRECTORIES = NO -SHOW_FILES = YES -SHOW_NAMESPACES = YES -FILE_VERSION_FILTER = -LAYOUT_FILE = - -#--------------------------------------------------------------------------- -# configuration options related to warning and progress messages -#--------------------------------------------------------------------------- -QUIET = NO -WARNINGS = YES -WARN_IF_UNDOCUMENTED = YES -WARN_IF_DOC_ERROR = YES -WARN_NO_PARAMDOC = NO -WARN_FORMAT = "$file:$line: $text" -WARN_LOGFILE = - -#--------------------------------------------------------------------------- -# configuration options related to the input files -#--------------------------------------------------------------------------- -INPUT = src -INPUT_ENCODING = UTF-8 -FILE_PATTERNS = *.c \ - *.cc \ - *.cxx \ - *.cpp \ - *.c++ \ - *.d \ - *.java \ - *.ii \ - *.ixx \ - *.ipp \ - *.i++ \ - *.inl \ - *.h \ - *.hh \ - *.hxx \ - *.hpp \ - *.h++ \ - *.idl \ - *.odl \ - *.cs \ - *.php \ - *.php3 \ - *.inc \ - *.m \ - *.mm \ - *.dox \ - *.py \ - *.f90 \ - *.f \ - *.vhd \ - *.vhdl -RECURSIVE = YES -EXCLUDE = -EXCLUDE_SYMLINKS = NO -EXCLUDE_PATTERNS = -EXCLUDE_SYMBOLS = -EXAMPLE_PATH = -EXAMPLE_PATTERNS = * -EXAMPLE_RECURSIVE = NO -IMAGE_PATH = -INPUT_FILTER = -FILTER_PATTERNS 
= -FILTER_SOURCE_FILES = NO - -#--------------------------------------------------------------------------- -# configuration options related to source browsing -#--------------------------------------------------------------------------- -SOURCE_BROWSER = NO -INLINE_SOURCES = NO -STRIP_CODE_COMMENTS = YES -REFERENCED_BY_RELATION = NO -REFERENCES_RELATION = NO -REFERENCES_LINK_SOURCE = YES -USE_HTAGS = NO -VERBATIM_HEADERS = YES - -#--------------------------------------------------------------------------- -# configuration options related to the alphabetical class index -#--------------------------------------------------------------------------- -ALPHABETICAL_INDEX = NO -COLS_IN_ALPHA_INDEX = 5 -IGNORE_PREFIX = - -#--------------------------------------------------------------------------- -# configuration options related to the HTML output -#--------------------------------------------------------------------------- -GENERATE_HTML = YES -HTML_OUTPUT = html -HTML_FILE_EXTENSION = .html -HTML_HEADER = -HTML_FOOTER = -HTML_STYLESHEET = -HTML_ALIGN_MEMBERS = YES -HTML_DYNAMIC_SECTIONS = NO -GENERATE_DOCSET = NO -DOCSET_FEEDNAME = "Doxygen generated docs" -DOCSET_BUNDLE_ID = org.doxygen.Project -GENERATE_HTMLHELP = NO -CHM_FILE = -HHC_LOCATION = -GENERATE_CHI = NO -CHM_INDEX_ENCODING = -BINARY_TOC = NO -TOC_EXPAND = NO -GENERATE_QHP = NO -QCH_FILE = -QHP_NAMESPACE = -QHP_VIRTUAL_FOLDER = doc -QHP_CUST_FILTER_NAME = -QHP_CUST_FILTER_ATTRS = -QHP_SECT_FILTER_ATTRS = -QHG_LOCATION = -DISABLE_INDEX = NO -ENUM_VALUES_PER_LINE = 4 -GENERATE_TREEVIEW = NONE -TREEVIEW_WIDTH = 250 -FORMULA_FONTSIZE = 10 - -#--------------------------------------------------------------------------- -# configuration options related to the LaTeX output -#--------------------------------------------------------------------------- -GENERATE_LATEX = YES -LATEX_OUTPUT = latex -LATEX_CMD_NAME = latex -MAKEINDEX_CMD_NAME = makeindex -COMPACT_LATEX = NO -PAPER_TYPE = a4wide -EXTRA_PACKAGES = -LATEX_HEADER = -PDF_HYPERLINKS = YES -USE_PDFLATEX = YES -LATEX_BATCHMODE = NO -LATEX_HIDE_INDICES = NO - -#--------------------------------------------------------------------------- -# configuration options related to the RTF output -#--------------------------------------------------------------------------- -GENERATE_RTF = NO -RTF_OUTPUT = rtf -COMPACT_RTF = NO -RTF_HYPERLINKS = NO -RTF_STYLESHEET_FILE = -RTF_EXTENSIONS_FILE = - -#--------------------------------------------------------------------------- -# configuration options related to the man page output -#--------------------------------------------------------------------------- -GENERATE_MAN = NO -MAN_OUTPUT = man -MAN_EXTENSION = .3 -MAN_LINKS = NO - -#--------------------------------------------------------------------------- -# configuration options related to the XML output -#--------------------------------------------------------------------------- -GENERATE_XML = NO -XML_OUTPUT = xml -XML_SCHEMA = -XML_DTD = -XML_PROGRAMLISTING = YES - -#--------------------------------------------------------------------------- -# configuration options for the AutoGen Definitions output -#--------------------------------------------------------------------------- -GENERATE_AUTOGEN_DEF = NO - -#--------------------------------------------------------------------------- -# configuration options related to the Perl module output -#--------------------------------------------------------------------------- -GENERATE_PERLMOD = NO -PERLMOD_LATEX = NO -PERLMOD_PRETTY = YES 
-PERLMOD_MAKEVAR_PREFIX = - -#--------------------------------------------------------------------------- -# Configuration options related to the preprocessor -#--------------------------------------------------------------------------- -ENABLE_PREPROCESSING = YES -MACRO_EXPANSION = NO -EXPAND_ONLY_PREDEF = NO -SEARCH_INCLUDES = YES -INCLUDE_PATH = -INCLUDE_FILE_PATTERNS = -PREDEFINED = -EXPAND_AS_DEFINED = -SKIP_FUNCTION_MACROS = YES - -#--------------------------------------------------------------------------- -# Configuration::additions related to external references -#--------------------------------------------------------------------------- -TAGFILES = -GENERATE_TAGFILE = -ALLEXTERNALS = NO -EXTERNAL_GROUPS = YES -PERL_PATH = /usr/bin/perl - -#--------------------------------------------------------------------------- -# Configuration options related to the dot tool -#--------------------------------------------------------------------------- -CLASS_DIAGRAMS = YES -MSCGEN_PATH = -HIDE_UNDOC_RELATIONS = YES -HAVE_DOT = NO -DOT_FONTNAME = FreeSans -DOT_FONTSIZE = 10 -DOT_FONTPATH = -CLASS_GRAPH = YES -COLLABORATION_GRAPH = YES -GROUP_GRAPHS = YES -UML_LOOK = NO -TEMPLATE_RELATIONS = NO -INCLUDE_GRAPH = YES -INCLUDED_BY_GRAPH = YES -CALL_GRAPH = NO -CALLER_GRAPH = NO -GRAPHICAL_HIERARCHY = YES -DIRECTORY_GRAPH = YES -DOT_IMAGE_FORMAT = png -DOT_PATH = -DOTFILE_DIRS = -DOT_GRAPH_MAX_NODES = 50 -MAX_DOT_GRAPH_DEPTH = 0 -DOT_TRANSPARENT = NO -DOT_MULTI_TARGETS = NO -GENERATE_LEGEND = YES -DOT_CLEANUP = YES - -#--------------------------------------------------------------------------- -# Options related to the search engine -#--------------------------------------------------------------------------- -SEARCHENGINE = NO diff --git a/mongo-c-driver-v0.6/runtests.sh b/mongo-c-driver-v0.6/runtests.sh deleted file mode 100644 index 3723753..0000000 --- a/mongo-c-driver-v0.6/runtests.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash -# Arguments -v for valgrind - -usage() -{ -cat < -#include -#include -#include -#include - -#include "bson.h" -#include "encoding.h" - -const int initialBufferSize = 128; - -/* only need one of these */ -static const int zero = 0; - -/* Custom standard function pointers. */ -void *( *bson_malloc_func )( size_t ) = malloc; -void *( *bson_realloc_func )( void *, size_t ) = realloc; -void ( *bson_free_func )( void * ) = free; -#ifdef R_SAFETY_NET -bson_printf_func bson_printf; -#else -bson_printf_func bson_printf = printf; -#endif -bson_fprintf_func bson_fprintf = fprintf; -bson_sprintf_func bson_sprintf = sprintf; - -static int _bson_errprintf( const char *, ... ); -bson_printf_func bson_errprintf = _bson_errprintf; - -/* ObjectId fuzz functions. 
*/ -static int ( *oid_fuzz_func )( void ) = NULL; -static int ( *oid_inc_func )( void ) = NULL; - -/* ---------------------------- - READING - ------------------------------ */ - -MONGO_EXPORT bson* bson_create() { - return (bson*)bson_malloc(sizeof(bson)); -} - -MONGO_EXPORT void bson_dispose(bson* b) { - bson_free(b); -} - -MONGO_EXPORT bson *bson_empty( bson *obj ) { - static char *data = "\005\0\0\0\0"; - bson_init_data( obj, data ); - obj->finished = 1; - obj->err = 0; - obj->errstr = NULL; - obj->stackPos = 0; - return obj; -} - -MONGO_EXPORT int bson_copy( bson *out, const bson *in ) { - if ( !out ) return BSON_ERROR; - if ( !in->finished ) return BSON_ERROR; - bson_init_size( out, bson_size( in ) ); - memcpy( out->data, in->data, bson_size( in ) ); - out->finished = 1; - - return BSON_OK; -} - -int bson_init_data( bson *b, char *data ) { - b->data = data; - return BSON_OK; -} - -int bson_init_finished_data( bson *b, char *data ) { - bson_init_data( b, data ); - b->finished = 1; - return BSON_OK; -} - -static void _bson_reset( bson *b ) { - b->finished = 0; - b->stackPos = 0; - b->err = 0; - b->errstr = NULL; -} - -MONGO_EXPORT int bson_size( const bson *b ) { - int i; - if ( ! b || ! b->data ) - return 0; - bson_little_endian32( &i, b->data ); - return i; -} - -MONGO_EXPORT int bson_buffer_size( const bson *b ) { - return (b->cur - b->data + 1); -} - - -MONGO_EXPORT const char *bson_data( const bson *b ) { - return (const char *)b->data; -} - -static char hexbyte( char hex ) { - switch ( hex ) { - case '0': - return 0x0; - case '1': - return 0x1; - case '2': - return 0x2; - case '3': - return 0x3; - case '4': - return 0x4; - case '5': - return 0x5; - case '6': - return 0x6; - case '7': - return 0x7; - case '8': - return 0x8; - case '9': - return 0x9; - case 'a': - case 'A': - return 0xa; - case 'b': - case 'B': - return 0xb; - case 'c': - case 'C': - return 0xc; - case 'd': - case 'D': - return 0xd; - case 'e': - case 'E': - return 0xe; - case 'f': - case 'F': - return 0xf; - default: - return 0x0; /* something smarter? 
*/ - } -} - -MONGO_EXPORT void bson_oid_from_string( bson_oid_t *oid, const char *str ) { - int i; - for ( i=0; i<12; i++ ) { - oid->bytes[i] = ( hexbyte( str[2*i] ) << 4 ) | hexbyte( str[2*i + 1] ); - } -} - -MONGO_EXPORT void bson_oid_to_string( const bson_oid_t *oid, char *str ) { - static const char hex[16] = {'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'}; - int i; - for ( i=0; i<12; i++ ) { - str[2*i] = hex[( oid->bytes[i] & 0xf0 ) >> 4]; - str[2*i + 1] = hex[ oid->bytes[i] & 0x0f ]; - } - str[24] = '\0'; -} - -MONGO_EXPORT void bson_set_oid_fuzz( int ( *func )( void ) ) { - oid_fuzz_func = func; -} - -MONGO_EXPORT void bson_set_oid_inc( int ( *func )( void ) ) { - oid_inc_func = func; -} - -MONGO_EXPORT void bson_oid_gen( bson_oid_t *oid ) { - static int incr = 0; - static int fuzz = 0; - int i; - int t = time( NULL ); - - if( oid_inc_func ) - i = oid_inc_func(); - else - i = incr++; - - if ( !fuzz ) { - if ( oid_fuzz_func ) - fuzz = oid_fuzz_func(); - else { - srand( t ); - fuzz = rand(); - } - } - - bson_big_endian32( &oid->ints[0], &t ); - oid->ints[1] = fuzz; - bson_big_endian32( &oid->ints[2], &i ); -} - -MONGO_EXPORT time_t bson_oid_generated_time( bson_oid_t *oid ) { - time_t out; - bson_big_endian32( &out, &oid->ints[0] ); - - return out; -} - -MONGO_EXPORT void bson_print( const bson *b ) { - bson_print_raw( b->data , 0 ); -} - -MONGO_EXPORT void bson_print_raw( const char *data , int depth ) { - bson_iterator i; - const char *key; - int temp; - bson_timestamp_t ts; - char oidhex[25]; - bson scope; - bson_iterator_from_buffer( &i, data ); - - while ( bson_iterator_next( &i ) ) { - bson_type t = bson_iterator_type( &i ); - if ( t == 0 ) - break; - key = bson_iterator_key( &i ); - - for ( temp=0; temp<=depth; temp++ ) - bson_printf( "\t" ); - bson_printf( "%s : %d \t " , key , t ); - switch ( t ) { - case BSON_DOUBLE: - bson_printf( "%f" , bson_iterator_double( &i ) ); - break; - case BSON_STRING: - bson_printf( "%s" , bson_iterator_string( &i ) ); - break; - case BSON_SYMBOL: - bson_printf( "SYMBOL: %s" , bson_iterator_string( &i ) ); - break; - case BSON_OID: - bson_oid_to_string( bson_iterator_oid( &i ), oidhex ); - bson_printf( "%s" , oidhex ); - break; - case BSON_BOOL: - bson_printf( "%s" , bson_iterator_bool( &i ) ? 
"true" : "false" ); - break; - case BSON_DATE: - bson_printf( "%ld" , ( long int )bson_iterator_date( &i ) ); - break; - case BSON_BINDATA: - bson_printf( "BSON_BINDATA" ); - break; - case BSON_UNDEFINED: - bson_printf( "BSON_UNDEFINED" ); - break; - case BSON_NULL: - bson_printf( "BSON_NULL" ); - break; - case BSON_REGEX: - bson_printf( "BSON_REGEX: %s", bson_iterator_regex( &i ) ); - break; - case BSON_CODE: - bson_printf( "BSON_CODE: %s", bson_iterator_code( &i ) ); - break; - case BSON_CODEWSCOPE: - bson_printf( "BSON_CODE_W_SCOPE: %s", bson_iterator_code( &i ) ); - bson_init( &scope ); - bson_iterator_code_scope( &i, &scope ); - bson_printf( "\n\t SCOPE: " ); - bson_print( &scope ); - break; - case BSON_INT: - bson_printf( "%d" , bson_iterator_int( &i ) ); - break; - case BSON_LONG: - bson_printf( "%lld" , ( uint64_t )bson_iterator_long( &i ) ); - break; - case BSON_TIMESTAMP: - ts = bson_iterator_timestamp( &i ); - bson_printf( "i: %d, t: %d", ts.i, ts.t ); - break; - case BSON_OBJECT: - case BSON_ARRAY: - bson_printf( "\n" ); - bson_print_raw( bson_iterator_value( &i ) , depth + 1 ); - break; - default: - bson_errprintf( "can't print type : %d\n" , t ); - } - bson_printf( "\n" ); - } -} - -/* ---------------------------- - ITERATOR - ------------------------------ */ - -MONGO_EXPORT bson_iterator* bson_iterator_create() { - return ( bson_iterator* )malloc( sizeof( bson_iterator ) ); -} - -MONGO_EXPORT void bson_iterator_dispose(bson_iterator* i) { - free(i); -} - -MONGO_EXPORT void bson_iterator_init( bson_iterator *i, const bson *b ) { - i->cur = b->data + 4; - i->first = 1; -} - -MONGO_EXPORT void bson_iterator_from_buffer( bson_iterator *i, const char *buffer ) { - i->cur = buffer + 4; - i->first = 1; -} - -MONGO_EXPORT bson_type bson_find( bson_iterator *it, const bson *obj, const char *name ) { - bson_iterator_init( it, (bson *)obj ); - while( bson_iterator_next( it ) ) { - if ( strcmp( name, bson_iterator_key( it ) ) == 0 ) - break; - } - return bson_iterator_type( it ); -} - -MONGO_EXPORT bson_bool_t bson_iterator_more( const bson_iterator *i ) { - return *( i->cur ); -} - -MONGO_EXPORT bson_type bson_iterator_next( bson_iterator *i ) { - int ds; - - if ( i->first ) { - i->first = 0; - return ( bson_type )( *i->cur ); - } - - switch ( bson_iterator_type( i ) ) { - case BSON_EOO: - return BSON_EOO; /* don't advance */ - case BSON_UNDEFINED: - case BSON_NULL: - ds = 0; - break; - case BSON_BOOL: - ds = 1; - break; - case BSON_INT: - ds = 4; - break; - case BSON_LONG: - case BSON_DOUBLE: - case BSON_TIMESTAMP: - case BSON_DATE: - ds = 8; - break; - case BSON_OID: - ds = 12; - break; - case BSON_STRING: - case BSON_SYMBOL: - case BSON_CODE: - ds = 4 + bson_iterator_int_raw( i ); - break; - case BSON_BINDATA: - ds = 5 + bson_iterator_int_raw( i ); - break; - case BSON_OBJECT: - case BSON_ARRAY: - case BSON_CODEWSCOPE: - ds = bson_iterator_int_raw( i ); - break; - case BSON_DBREF: - ds = 4+12 + bson_iterator_int_raw( i ); - break; - case BSON_REGEX: { - const char *s = bson_iterator_value( i ); - const char *p = s; - p += strlen( p )+1; - p += strlen( p )+1; - ds = p-s; - break; - } - - default: { - char msg[] = "unknown type: 000000000000"; - bson_numstr( msg+14, ( unsigned )( i->cur[0] ) ); - bson_fatal_msg( 0, msg ); - return 0; - } - } - - i->cur += 1 + strlen( i->cur + 1 ) + 1 + ds; - - return ( bson_type )( *i->cur ); -} - -MONGO_EXPORT bson_type bson_iterator_type( const bson_iterator *i ) { - return ( bson_type )i->cur[0]; -} - -MONGO_EXPORT const char *bson_iterator_key( 
const bson_iterator *i ) { - return i->cur + 1; -} - -MONGO_EXPORT const char *bson_iterator_value( const bson_iterator *i ) { - const char *t = i->cur + 1; - t += strlen( t ) + 1; - return t; -} - -/* types */ - -int bson_iterator_int_raw( const bson_iterator *i ) { - int out; - bson_little_endian32( &out, bson_iterator_value( i ) ); - return out; -} - -double bson_iterator_double_raw( const bson_iterator *i ) { - double out; - bson_little_endian64( &out, bson_iterator_value( i ) ); - return out; -} - -int64_t bson_iterator_long_raw( const bson_iterator *i ) { - int64_t out; - bson_little_endian64( &out, bson_iterator_value( i ) ); - return out; -} - -bson_bool_t bson_iterator_bool_raw( const bson_iterator *i ) { - return bson_iterator_value( i )[0]; -} - -MONGO_EXPORT bson_oid_t *bson_iterator_oid( const bson_iterator *i ) { - return ( bson_oid_t * )bson_iterator_value( i ); -} - -MONGO_EXPORT int bson_iterator_int( const bson_iterator *i ) { - switch ( bson_iterator_type( i ) ) { - case BSON_INT: - return bson_iterator_int_raw( i ); - case BSON_LONG: - return bson_iterator_long_raw( i ); - case BSON_DOUBLE: - return bson_iterator_double_raw( i ); - default: - return 0; - } -} - -MONGO_EXPORT double bson_iterator_double( const bson_iterator *i ) { - switch ( bson_iterator_type( i ) ) { - case BSON_INT: - return bson_iterator_int_raw( i ); - case BSON_LONG: - return bson_iterator_long_raw( i ); - case BSON_DOUBLE: - return bson_iterator_double_raw( i ); - default: - return 0; - } -} - -MONGO_EXPORT int64_t bson_iterator_long( const bson_iterator *i ) { - switch ( bson_iterator_type( i ) ) { - case BSON_INT: - return bson_iterator_int_raw( i ); - case BSON_LONG: - return bson_iterator_long_raw( i ); - case BSON_DOUBLE: - return bson_iterator_double_raw( i ); - default: - return 0; - } -} - -MONGO_EXPORT bson_timestamp_t bson_iterator_timestamp( const bson_iterator *i ) { - bson_timestamp_t ts; - bson_little_endian32( &( ts.i ), bson_iterator_value( i ) ); - bson_little_endian32( &( ts.t ), bson_iterator_value( i ) + 4 ); - return ts; -} - - -MONGO_EXPORT int bson_iterator_timestamp_time( const bson_iterator *i ) { - int time; - bson_little_endian32( &time, bson_iterator_value( i ) + 4 ); - return time; -} - - -MONGO_EXPORT int bson_iterator_timestamp_increment( const bson_iterator *i ) { - int increment; - bson_little_endian32( &increment, bson_iterator_value( i ) ); - return increment; -} - - -MONGO_EXPORT bson_bool_t bson_iterator_bool( const bson_iterator *i ) { - switch ( bson_iterator_type( i ) ) { - case BSON_BOOL: - return bson_iterator_bool_raw( i ); - case BSON_INT: - return bson_iterator_int_raw( i ) != 0; - case BSON_LONG: - return bson_iterator_long_raw( i ) != 0; - case BSON_DOUBLE: - return bson_iterator_double_raw( i ) != 0; - case BSON_EOO: - case BSON_NULL: - return 0; - default: - return 1; - } -} - -MONGO_EXPORT const char *bson_iterator_string( const bson_iterator *i ) { - switch ( bson_iterator_type( i ) ) { - case BSON_STRING: - case BSON_SYMBOL: - return bson_iterator_value( i ) + 4; - default: - return ""; - } -} - -int bson_iterator_string_len( const bson_iterator *i ) { - return bson_iterator_int_raw( i ); -} - -MONGO_EXPORT const char *bson_iterator_code( const bson_iterator *i ) { - switch ( bson_iterator_type( i ) ) { - case BSON_STRING: - case BSON_CODE: - return bson_iterator_value( i ) + 4; - case BSON_CODEWSCOPE: - return bson_iterator_value( i ) + 8; - default: - return NULL; - } -} - -MONGO_EXPORT void bson_iterator_code_scope( const bson_iterator *i, 
bson *scope ) { - if ( bson_iterator_type( i ) == BSON_CODEWSCOPE ) { - int code_len; - bson_little_endian32( &code_len, bson_iterator_value( i )+4 ); - bson_init_data( scope, ( void * )( bson_iterator_value( i )+8+code_len ) ); - _bson_reset( scope ); - scope->finished = 1; - } else { - bson_empty( scope ); - } -} - -MONGO_EXPORT bson_date_t bson_iterator_date( const bson_iterator *i ) { - return bson_iterator_long_raw( i ); -} - -MONGO_EXPORT time_t bson_iterator_time_t( const bson_iterator *i ) { - return bson_iterator_date( i ) / 1000; -} - -MONGO_EXPORT int bson_iterator_bin_len( const bson_iterator *i ) { - return ( bson_iterator_bin_type( i ) == BSON_BIN_BINARY_OLD ) - ? bson_iterator_int_raw( i ) - 4 - : bson_iterator_int_raw( i ); -} - -MONGO_EXPORT char bson_iterator_bin_type( const bson_iterator *i ) { - return bson_iterator_value( i )[4]; -} - -MONGO_EXPORT const char *bson_iterator_bin_data( const bson_iterator *i ) { - return ( bson_iterator_bin_type( i ) == BSON_BIN_BINARY_OLD ) - ? bson_iterator_value( i ) + 9 - : bson_iterator_value( i ) + 5; -} - -MONGO_EXPORT const char *bson_iterator_regex( const bson_iterator *i ) { - return bson_iterator_value( i ); -} - -MONGO_EXPORT const char *bson_iterator_regex_opts( const bson_iterator *i ) { - const char *p = bson_iterator_value( i ); - return p + strlen( p ) + 1; - -} - -MONGO_EXPORT void bson_iterator_subobject( const bson_iterator *i, bson *sub ) { - bson_init_data( sub, ( char * )bson_iterator_value( i ) ); - _bson_reset( sub ); - sub->finished = 1; -} - -MONGO_EXPORT void bson_iterator_subiterator( const bson_iterator *i, bson_iterator *sub ) { - bson_iterator_from_buffer( sub, bson_iterator_value( i ) ); -} - -/* ---------------------------- - BUILDING - ------------------------------ */ - -static void _bson_init_size( bson *b, int size ) { - if( size == 0 ) - b->data = NULL; - else - b->data = ( char * )bson_malloc( size ); - b->dataSize = size; - b->cur = b->data + 4; - _bson_reset( b ); -} - -MONGO_EXPORT void bson_init( bson *b ) { - _bson_init_size( b, initialBufferSize ); -} - -void bson_init_size( bson *b, int size ) { - _bson_init_size( b, size ); -} - -void bson_append_byte( bson *b, char c ) { - b->cur[0] = c; - b->cur++; -} - -void bson_append( bson *b, const void *data, int len ) { - memcpy( b->cur , data , len ); - b->cur += len; -} - -void bson_append32( bson *b, const void *data ) { - bson_little_endian32( b->cur, data ); - b->cur += 4; -} - -void bson_append64( bson *b, const void *data ) { - bson_little_endian64( b->cur, data ); - b->cur += 8; -} - -int bson_ensure_space( bson *b, const int bytesNeeded ) { - int pos = b->cur - b->data; - char *orig = b->data; - int new_size; - - if ( pos + bytesNeeded <= b->dataSize ) - return BSON_OK; - - new_size = 1.5 * ( b->dataSize + bytesNeeded ); - - if( new_size < b->dataSize ) { - if( ( b->dataSize + bytesNeeded ) < INT_MAX ) - new_size = INT_MAX; - else { - b->err = BSON_SIZE_OVERFLOW; - return BSON_ERROR; - } - } - - b->data = bson_realloc( b->data, new_size ); - if ( !b->data ) - bson_fatal_msg( !!b->data, "realloc() failed" ); - - b->dataSize = new_size; - b->cur += b->data - orig; - - return BSON_OK; -} - -MONGO_EXPORT int bson_finish( bson *b ) { - int i; - - if( b->err & BSON_NOT_UTF8 ) - return BSON_ERROR; - - if ( ! 
b->finished ) { - if ( bson_ensure_space( b, 1 ) == BSON_ERROR ) return BSON_ERROR; - bson_append_byte( b, 0 ); - i = b->cur - b->data; - bson_little_endian32( b->data, &i ); - b->finished = 1; - } - - return BSON_OK; -} - -MONGO_EXPORT void bson_destroy( bson *b ) { - if (b) { - bson_free( b->data ); - b->err = 0; - b->data = 0; - b->cur = 0; - b->finished = 1; - } -} - -static int bson_append_estart( bson *b, int type, const char *name, const int dataSize ) { - const int len = strlen( name ) + 1; - - if ( b->finished ) { - b->err |= BSON_ALREADY_FINISHED; - return BSON_ERROR; - } - - if ( bson_ensure_space( b, 1 + len + dataSize ) == BSON_ERROR ) { - return BSON_ERROR; - } - - if( bson_check_field_name( b, ( const char * )name, len - 1 ) == BSON_ERROR ) { - bson_builder_error( b ); - return BSON_ERROR; - } - - bson_append_byte( b, ( char )type ); - bson_append( b, name, len ); - return BSON_OK; -} - -/* ---------------------------- - BUILDING TYPES - ------------------------------ */ - -MONGO_EXPORT int bson_append_int( bson *b, const char *name, const int i ) { - if ( bson_append_estart( b, BSON_INT, name, 4 ) == BSON_ERROR ) - return BSON_ERROR; - bson_append32( b , &i ); - return BSON_OK; -} - -MONGO_EXPORT int bson_append_long( bson *b, const char *name, const int64_t i ) { - if ( bson_append_estart( b , BSON_LONG, name, 8 ) == BSON_ERROR ) - return BSON_ERROR; - bson_append64( b , &i ); - return BSON_OK; -} - -MONGO_EXPORT int bson_append_double( bson *b, const char *name, const double d ) { - if ( bson_append_estart( b, BSON_DOUBLE, name, 8 ) == BSON_ERROR ) - return BSON_ERROR; - bson_append64( b , &d ); - return BSON_OK; -} - -MONGO_EXPORT int bson_append_bool( bson *b, const char *name, const bson_bool_t i ) { - if ( bson_append_estart( b, BSON_BOOL, name, 1 ) == BSON_ERROR ) - return BSON_ERROR; - bson_append_byte( b , i != 0 ); - return BSON_OK; -} - -MONGO_EXPORT int bson_append_null( bson *b, const char *name ) { - if ( bson_append_estart( b , BSON_NULL, name, 0 ) == BSON_ERROR ) - return BSON_ERROR; - return BSON_OK; -} - -MONGO_EXPORT int bson_append_undefined( bson *b, const char *name ) { - if ( bson_append_estart( b, BSON_UNDEFINED, name, 0 ) == BSON_ERROR ) - return BSON_ERROR; - return BSON_OK; -} - -int bson_append_string_base( bson *b, const char *name, - const char *value, int len, bson_type type ) { - - int sl = len + 1; - if ( bson_check_string( b, ( const char * )value, sl - 1 ) == BSON_ERROR ) - return BSON_ERROR; - if ( bson_append_estart( b, type, name, 4 + sl ) == BSON_ERROR ) { - return BSON_ERROR; - } - bson_append32( b , &sl ); - bson_append( b , value , sl - 1 ); - bson_append( b , "\0" , 1 ); - return BSON_OK; -} - -MONGO_EXPORT int bson_append_string( bson *b, const char *name, const char *value ) { - return bson_append_string_base( b, name, value, strlen ( value ), BSON_STRING ); -} - -MONGO_EXPORT int bson_append_symbol( bson *b, const char *name, const char *value ) { - return bson_append_string_base( b, name, value, strlen ( value ), BSON_SYMBOL ); -} - -MONGO_EXPORT int bson_append_code( bson *b, const char *name, const char *value ) { - return bson_append_string_base( b, name, value, strlen ( value ), BSON_CODE ); -} - -MONGO_EXPORT int bson_append_string_n( bson *b, const char *name, const char *value, int len ) { - return bson_append_string_base( b, name, value, len, BSON_STRING ); -} - -MONGO_EXPORT int bson_append_symbol_n( bson *b, const char *name, const char *value, int len ) { - return bson_append_string_base( b, name, value, len, 
BSON_SYMBOL ); -} - -MONGO_EXPORT int bson_append_code_n( bson *b, const char *name, const char *value, int len ) { - return bson_append_string_base( b, name, value, len, BSON_CODE ); -} - -MONGO_EXPORT int bson_append_code_w_scope_n( bson *b, const char *name, - const char *code, int len, const bson *scope ) { - - int sl = len + 1; - int size = 4 + 4 + sl + bson_size( scope ); - if ( bson_append_estart( b, BSON_CODEWSCOPE, name, size ) == BSON_ERROR ) - return BSON_ERROR; - bson_append32( b, &size ); - bson_append32( b, &sl ); - bson_append( b, code, sl ); - bson_append( b, scope->data, bson_size( scope ) ); - return BSON_OK; -} - -MONGO_EXPORT int bson_append_code_w_scope( bson *b, const char *name, const char *code, const bson *scope ) { - return bson_append_code_w_scope_n( b, name, code, strlen ( code ), scope ); -} - -MONGO_EXPORT int bson_append_binary( bson *b, const char *name, char type, const char *str, int len ) { - if ( type == BSON_BIN_BINARY_OLD ) { - int subtwolen = len + 4; - if ( bson_append_estart( b, BSON_BINDATA, name, 4+1+4+len ) == BSON_ERROR ) - return BSON_ERROR; - bson_append32( b, &subtwolen ); - bson_append_byte( b, type ); - bson_append32( b, &len ); - bson_append( b, str, len ); - } else { - if ( bson_append_estart( b, BSON_BINDATA, name, 4+1+len ) == BSON_ERROR ) - return BSON_ERROR; - bson_append32( b, &len ); - bson_append_byte( b, type ); - bson_append( b, str, len ); - } - return BSON_OK; -} - -MONGO_EXPORT int bson_append_oid( bson *b, const char *name, const bson_oid_t *oid ) { - if ( bson_append_estart( b, BSON_OID, name, 12 ) == BSON_ERROR ) - return BSON_ERROR; - bson_append( b , oid , 12 ); - return BSON_OK; -} - -MONGO_EXPORT int bson_append_new_oid( bson *b, const char *name ) { - bson_oid_t oid; - bson_oid_gen( &oid ); - return bson_append_oid( b, name, &oid ); -} - -MONGO_EXPORT int bson_append_regex( bson *b, const char *name, const char *pattern, const char *opts ) { - const int plen = strlen( pattern )+1; - const int olen = strlen( opts )+1; - if ( bson_append_estart( b, BSON_REGEX, name, plen + olen ) == BSON_ERROR ) - return BSON_ERROR; - if ( bson_check_string( b, pattern, plen - 1 ) == BSON_ERROR ) - return BSON_ERROR; - bson_append( b , pattern , plen ); - bson_append( b , opts , olen ); - return BSON_OK; -} - -MONGO_EXPORT int bson_append_bson( bson *b, const char *name, const bson *bson ) { - if ( bson_append_estart( b, BSON_OBJECT, name, bson_size( bson ) ) == BSON_ERROR ) - return BSON_ERROR; - bson_append( b , bson->data , bson_size( bson ) ); - return BSON_OK; -} - -MONGO_EXPORT int bson_append_element( bson *b, const char *name_or_null, const bson_iterator *elem ) { - bson_iterator next = *elem; - int size; - - bson_iterator_next( &next ); - size = next.cur - elem->cur; - - if ( name_or_null == NULL ) { - if( bson_ensure_space( b, size ) == BSON_ERROR ) - return BSON_ERROR; - bson_append( b, elem->cur, size ); - } else { - int data_size = size - 2 - strlen( bson_iterator_key( elem ) ); - bson_append_estart( b, elem->cur[0], name_or_null, data_size ); - bson_append( b, bson_iterator_value( elem ), data_size ); - } - - return BSON_OK; -} - -MONGO_EXPORT int bson_append_timestamp( bson *b, const char *name, bson_timestamp_t *ts ) { - if ( bson_append_estart( b, BSON_TIMESTAMP, name, 8 ) == BSON_ERROR ) return BSON_ERROR; - - bson_append32( b , &( ts->i ) ); - bson_append32( b , &( ts->t ) ); - - return BSON_OK; -} - -MONGO_EXPORT int bson_append_timestamp2( bson *b, const char *name, int time, int increment ) { - if ( 
bson_append_estart( b, BSON_TIMESTAMP, name, 8 ) == BSON_ERROR ) return BSON_ERROR; - - bson_append32( b , &increment ); - bson_append32( b , &time ); - return BSON_OK; -} - -MONGO_EXPORT int bson_append_date( bson *b, const char *name, bson_date_t millis ) { - if ( bson_append_estart( b, BSON_DATE, name, 8 ) == BSON_ERROR ) return BSON_ERROR; - bson_append64( b , &millis ); - return BSON_OK; -} - -MONGO_EXPORT int bson_append_time_t( bson *b, const char *name, time_t secs ) { - return bson_append_date( b, name, ( bson_date_t )secs * 1000 ); -} - -MONGO_EXPORT int bson_append_start_object( bson *b, const char *name ) { - if ( bson_append_estart( b, BSON_OBJECT, name, 5 ) == BSON_ERROR ) return BSON_ERROR; - b->stack[ b->stackPos++ ] = b->cur - b->data; - bson_append32( b , &zero ); - return BSON_OK; -} - -MONGO_EXPORT int bson_append_start_array( bson *b, const char *name ) { - if ( bson_append_estart( b, BSON_ARRAY, name, 5 ) == BSON_ERROR ) return BSON_ERROR; - b->stack[ b->stackPos++ ] = b->cur - b->data; - bson_append32( b , &zero ); - return BSON_OK; -} - -MONGO_EXPORT int bson_append_finish_object( bson *b ) { - char *start; - int i; - if ( bson_ensure_space( b, 1 ) == BSON_ERROR ) return BSON_ERROR; - bson_append_byte( b , 0 ); - - start = b->data + b->stack[ --b->stackPos ]; - i = b->cur - start; - bson_little_endian32( start, &i ); - - return BSON_OK; -} - -MONGO_EXPORT double bson_int64_to_double( int64_t i64 ) { - return (double)i64; -} - -MONGO_EXPORT int bson_append_finish_array( bson *b ) { - return bson_append_finish_object( b ); -} - -/* Error handling and allocators. */ - -static bson_err_handler err_handler = NULL; - -MONGO_EXPORT bson_err_handler set_bson_err_handler( bson_err_handler func ) { - bson_err_handler old = err_handler; - err_handler = func; - return old; -} - -MONGO_EXPORT void bson_free( void *ptr ) { - bson_free_func( ptr ); -} - -MONGO_EXPORT void *bson_malloc( int size ) { - void *p; - p = bson_malloc_func( size ); - bson_fatal_msg( !!p, "malloc() failed" ); - return p; -} - -void *bson_realloc( void *ptr, int size ) { - void *p; - p = bson_realloc_func( ptr, size ); - bson_fatal_msg( !!p, "realloc() failed" ); - return p; -} - -int _bson_errprintf( const char *format, ... ) { - va_list ap; - int ret; - va_start( ap, format ); -#ifndef R_SAFETY_NET - ret = vfprintf( stderr, format, ap ); -#endif - va_end( ap ); - - return ret; -} - -/** - * This method is invoked when a non-fatal bson error is encountered. - * Calls the error handler if available. - * - * @param - */ -void bson_builder_error( bson *b ) { - if( err_handler ) - err_handler( "BSON error." ); -} - -void bson_fatal( int ok ) { - bson_fatal_msg( ok, "" ); -} - -void bson_fatal_msg( int ok , const char *msg ) { - if ( ok ) - return; - - if ( err_handler ) { - err_handler( msg ); - } -#ifndef R_SAFETY_NET - bson_errprintf( "error: %s\n" , msg ); - exit( -5 ); -#endif -} - - -/* Efficiently copy an integer to a string. 
*/ -extern const char bson_numstrs[1000][4]; - -void bson_numstr( char *str, int i ) { - if( i < 1000 ) - memcpy( str, bson_numstrs[i], 4 ); - else - bson_sprintf( str,"%d", i ); -} - -MONGO_EXPORT void bson_swap_endian64( void *outp, const void *inp ) { - const char *in = ( const char * )inp; - char *out = ( char * )outp; - - out[0] = in[7]; - out[1] = in[6]; - out[2] = in[5]; - out[3] = in[4]; - out[4] = in[3]; - out[5] = in[2]; - out[6] = in[1]; - out[7] = in[0]; - -} - -MONGO_EXPORT void bson_swap_endian32( void *outp, const void *inp ) { - const char *in = ( const char * )inp; - char *out = ( char * )outp; - - out[0] = in[3]; - out[1] = in[2]; - out[2] = in[1]; - out[3] = in[0]; -} diff --git a/mongo-c-driver-v0.6/src/bson.h b/mongo-c-driver-v0.6/src/bson.h deleted file mode 100644 index b866948..0000000 --- a/mongo-c-driver-v0.6/src/bson.h +++ /dev/null @@ -1,1038 +0,0 @@ -/** - * @file bson.h - * @brief BSON Declarations - */ - -/* Copyright 2009-2012 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef BSON_H_ -#define BSON_H_ - -#include -#include -#include -#include -#include - -#ifdef __GNUC__ - #define MONGO_INLINE static __inline__ - #define MONGO_EXPORT -#else - #define MONGO_INLINE static - #ifdef MONGO_STATIC_BUILD - #define MONGO_EXPORT - #elif defined(MONGO_DLL_BUILD) - #define MONGO_EXPORT __declspec(dllexport) - #else - #define MONGO_EXPORT __declspec(dllimport) - #endif -#endif - -#ifdef __cplusplus -#define MONGO_EXTERN_C_START extern "C" { -#define MONGO_EXTERN_C_END } -#else -#define MONGO_EXTERN_C_START -#define MONGO_EXTERN_C_END -#endif - -#if defined(MONGO_HAVE_STDINT) || __STDC_VERSION__ >= 199901L -#include -#elif defined(MONGO_HAVE_UNISTD) -#include -#elif defined(MONGO_USE__INT64) -typedef __int64 int64_t; -typedef unsigned __int64 uint64_t; -#elif defined(MONGO_USE_LONG_LONG_INT) -typedef long long int int64_t; -typedef unsigned long long int uint64_t; -#else -#error Must compile with c99 or define MONGO_HAVE_STDINT, MONGO_HAVE_UNISTD, MONGO_USE__INT64, or MONGO_USE_LONG_INT. -#endif - -#ifdef MONGO_BIG_ENDIAN -#define bson_little_endian64(out, in) ( bson_swap_endian64(out, in) ) -#define bson_little_endian32(out, in) ( bson_swap_endian32(out, in) ) -#define bson_big_endian64(out, in) ( memcpy(out, in, 8) ) -#define bson_big_endian32(out, in) ( memcpy(out, in, 4) ) -#else -#define bson_little_endian64(out, in) ( memcpy(out, in, 8) ) -#define bson_little_endian32(out, in) ( memcpy(out, in, 4) ) -#define bson_big_endian64(out, in) ( bson_swap_endian64(out, in) ) -#define bson_big_endian32(out, in) ( bson_swap_endian32(out, in) ) -#endif - -MONGO_EXTERN_C_START - -#define BSON_OK 0 -#define BSON_ERROR -1 - -enum bson_error_t { - BSON_SIZE_OVERFLOW = 1 /**< Trying to create a BSON object larger than INT_MAX. */ -}; - -enum bson_validity_t { - BSON_VALID = 0, /**< BSON is valid and UTF-8 compliant. */ - BSON_NOT_UTF8 = ( 1<<1 ), /**< A key or a string is not valid UTF-8. 
*/ - BSON_FIELD_HAS_DOT = ( 1<<2 ), /**< Warning: key contains '.' character. */ - BSON_FIELD_INIT_DOLLAR = ( 1<<3 ), /**< Warning: key starts with '$' character. */ - BSON_ALREADY_FINISHED = ( 1<<4 ) /**< Trying to modify a finished BSON object. */ -}; - -enum bson_binary_subtype_t { - BSON_BIN_BINARY = 0, - BSON_BIN_FUNC = 1, - BSON_BIN_BINARY_OLD = 2, - BSON_BIN_UUID = 3, - BSON_BIN_MD5 = 5, - BSON_BIN_USER = 128 -}; - -typedef enum { - BSON_EOO = 0, - BSON_DOUBLE = 1, - BSON_STRING = 2, - BSON_OBJECT = 3, - BSON_ARRAY = 4, - BSON_BINDATA = 5, - BSON_UNDEFINED = 6, - BSON_OID = 7, - BSON_BOOL = 8, - BSON_DATE = 9, - BSON_NULL = 10, - BSON_REGEX = 11, - BSON_DBREF = 12, /**< Deprecated. */ - BSON_CODE = 13, - BSON_SYMBOL = 14, - BSON_CODEWSCOPE = 15, - BSON_INT = 16, - BSON_TIMESTAMP = 17, - BSON_LONG = 18 -} bson_type; - -typedef int bson_bool_t; - -typedef struct { - const char *cur; - bson_bool_t first; -} bson_iterator; - -typedef struct { - char *data; /**< Pointer to a block of data in this BSON object. */ - char *cur; /**< Pointer to the current position. */ - int dataSize; /**< The number of bytes allocated to char *data. */ - bson_bool_t finished; /**< When finished, the BSON object can no longer be modified. */ - int stack[32]; /**< A stack used to keep track of nested BSON elements. */ - int stackPos; /**< Index of current stack position. */ - int err; /**< Bitfield representing errors or warnings on this buffer */ - char *errstr; /**< A string representation of the most recent error or warning. */ -} bson; - -#pragma pack(1) -typedef union { - char bytes[12]; - int ints[3]; -} bson_oid_t; -#pragma pack() - -typedef int64_t bson_date_t; /* milliseconds since epoch UTC */ - -typedef struct { - int i; /* increment */ - int t; /* time in seconds */ -} bson_timestamp_t; - -/* ---------------------------- - READING - ------------------------------ */ - -MONGO_EXPORT bson* bson_create(); -MONGO_EXPORT void bson_dispose(bson* b); - -/** - * Size of a BSON object. - * - * @param b the BSON object. - * - * @return the size. - */ -MONGO_EXPORT int bson_size( const bson *b ); -MONGO_EXPORT int bson_buffer_size( const bson *b ); - -/** - * Print a string representation of a BSON object. - * - * @param b the BSON object to print. - */ -MONGO_EXPORT void bson_print( const bson *b ); - -/** - * Return a pointer to the raw buffer stored by this bson object. - * - * @param b a BSON object - */ -MONGO_EXPORT const char *bson_data( const bson *b ); - -/** - * Print a string representation of a BSON object. - * - * @param bson the raw data to print. - * @param depth the depth to recurse the object.x - */ -MONGO_EXPORT void bson_print_raw( const char *bson , int depth ); - -/** - * Advance a bson_iterator to the named field. - * - * @param it the bson_iterator to use. - * @param obj the BSON object to use. - * @param name the name of the field to find. - * - * @return the type of the found object or BSON_EOO if it is not found. - */ -MONGO_EXPORT bson_type bson_find( bson_iterator *it, const bson *obj, const char *name ); - - -MONGO_EXPORT bson_iterator* bson_iterator_create(); -MONGO_EXPORT void bson_iterator_dispose(bson_iterator*); -/** - * Initialize a bson_iterator. - * - * @param i the bson_iterator to initialize. - * @param bson the BSON object to associate with the iterator. - */ -MONGO_EXPORT void bson_iterator_init( bson_iterator *i , const bson *b ); - -/** - * Initialize a bson iterator from a const char* buffer. Note - * that this is mostly used internally. 
- * - * @param i the bson_iterator to initialize. - * @param buffer the buffer to point to. - */ -MONGO_EXPORT void bson_iterator_from_buffer( bson_iterator *i, const char *buffer ); - -/* more returns true for eoo. best to loop with bson_iterator_next(&it) */ -/** - * Check to see if the bson_iterator has more data. - * - * @param i the iterator. - * - * @return returns true if there is more data. - */ -MONGO_EXPORT bson_bool_t bson_iterator_more( const bson_iterator *i ); - -/** - * Point the iterator at the next BSON object. - * - * @param i the bson_iterator. - * - * @return the type of the next BSON object. - */ -MONGO_EXPORT bson_type bson_iterator_next( bson_iterator *i ); - -/** - * Get the type of the BSON object currently pointed to by the iterator. - * - * @param i the bson_iterator - * - * @return the type of the current BSON object. - */ -MONGO_EXPORT bson_type bson_iterator_type( const bson_iterator *i ); - -/** - * Get the key of the BSON object currently pointed to by the iterator. - * - * @param i the bson_iterator - * - * @return the key of the current BSON object. - */ -MONGO_EXPORT const char *bson_iterator_key( const bson_iterator *i ); - -/** - * Get the value of the BSON object currently pointed to by the iterator. - * - * @param i the bson_iterator - * - * @return the value of the current BSON object. - */ -MONGO_EXPORT const char *bson_iterator_value( const bson_iterator *i ); - -/* these convert to the right type (return 0 if non-numeric) */ -/** - * Get the double value of the BSON object currently pointed to by the - * iterator. - * - * @param i the bson_iterator - * - * @return the value of the current BSON object. - */ -MONGO_EXPORT double bson_iterator_double( const bson_iterator *i ); - -/** - * Get the int value of the BSON object currently pointed to by the iterator. - * - * @param i the bson_iterator - * - * @return the value of the current BSON object. - */ -MONGO_EXPORT int bson_iterator_int( const bson_iterator *i ); - -/** - * Get the long value of the BSON object currently pointed to by the iterator. - * - * @param i the bson_iterator - * - * @return the value of the current BSON object. - */ -MONGO_EXPORT int64_t bson_iterator_long( const bson_iterator *i ); - -/* return the bson timestamp as a whole or in parts */ -/** - * Get the timestamp value of the BSON object currently pointed to by - * the iterator. - * - * @param i the bson_iterator - * - * @return the value of the current BSON object. - */ -MONGO_EXPORT bson_timestamp_t bson_iterator_timestamp( const bson_iterator *i ); -MONGO_EXPORT int bson_iterator_timestamp_time( const bson_iterator *i ); -MONGO_EXPORT int bson_iterator_timestamp_increment( const bson_iterator *i ); - -/** - * Get the boolean value of the BSON object currently pointed to by - * the iterator. - * - * @param i the bson_iterator - * - * @return the value of the current BSON object. - */ -/* false: boolean false, 0 in any type, or null */ -/* true: anything else (even empty strings and objects) */ -MONGO_EXPORT bson_bool_t bson_iterator_bool( const bson_iterator *i ); - -/** - * Get the double value of the BSON object currently pointed to by the - * iterator. Assumes the correct type is used. - * - * @param i the bson_iterator - * - * @return the value of the current BSON object. - */ -/* these assume you are using the right type */ -double bson_iterator_double_raw( const bson_iterator *i ); - -/** - * Get the int value of the BSON object currently pointed to by the - * iterator. Assumes the correct type is used. 
- * - * @param i the bson_iterator - * - * @return the value of the current BSON object. - */ -int bson_iterator_int_raw( const bson_iterator *i ); - -/** - * Get the long value of the BSON object currently pointed to by the - * iterator. Assumes the correct type is used. - * - * @param i the bson_iterator - * - * @return the value of the current BSON object. - */ -int64_t bson_iterator_long_raw( const bson_iterator *i ); - -/** - * Get the bson_bool_t value of the BSON object currently pointed to by the - * iterator. Assumes the correct type is used. - * - * @param i the bson_iterator - * - * @return the value of the current BSON object. - */ -bson_bool_t bson_iterator_bool_raw( const bson_iterator *i ); - -/** - * Get the bson_oid_t value of the BSON object currently pointed to by the - * iterator. - * - * @param i the bson_iterator - * - * @return the value of the current BSON object. - */ -MONGO_EXPORT bson_oid_t *bson_iterator_oid( const bson_iterator *i ); - -/** - * Get the string value of the BSON object currently pointed to by the - * iterator. - * - * @param i the bson_iterator - * - * @return the value of the current BSON object. - */ -/* these can also be used with bson_code and bson_symbol*/ -MONGO_EXPORT const char *bson_iterator_string( const bson_iterator *i ); - -/** - * Get the string length of the BSON object currently pointed to by the - * iterator. - * - * @param i the bson_iterator - * - * @return the length of the current BSON object. - */ -int bson_iterator_string_len( const bson_iterator *i ); - -/** - * Get the code value of the BSON object currently pointed to by the - * iterator. Works with bson_code, bson_codewscope, and BSON_STRING - * returns NULL for everything else. - * - * @param i the bson_iterator - * - * @return the code value of the current BSON object. - */ -/* works with bson_code, bson_codewscope, and BSON_STRING */ -/* returns NULL for everything else */ -MONGO_EXPORT const char *bson_iterator_code( const bson_iterator *i ); - -/** - * Calls bson_empty on scope if not a bson_codewscope - * - * @param i the bson_iterator. - * @param scope the bson scope. - */ -/* calls bson_empty on scope if not a bson_codewscope */ -MONGO_EXPORT void bson_iterator_code_scope( const bson_iterator *i, bson *scope ); - -/** - * Get the date value of the BSON object currently pointed to by the - * iterator. - * - * @param i the bson_iterator - * - * @return the date value of the current BSON object. - */ -/* both of these only work with bson_date */ -MONGO_EXPORT bson_date_t bson_iterator_date( const bson_iterator *i ); - -/** - * Get the time value of the BSON object currently pointed to by the - * iterator. - * - * @param i the bson_iterator - * - * @return the time value of the current BSON object. - */ -MONGO_EXPORT time_t bson_iterator_time_t( const bson_iterator *i ); - -/** - * Get the length of the BSON binary object currently pointed to by the - * iterator. - * - * @param i the bson_iterator - * - * @return the length of the current BSON binary object. - */ -MONGO_EXPORT int bson_iterator_bin_len( const bson_iterator *i ); - -/** - * Get the type of the BSON binary object currently pointed to by the - * iterator. - * - * @param i the bson_iterator - * - * @return the type of the current BSON binary object. - */ -MONGO_EXPORT char bson_iterator_bin_type( const bson_iterator *i ); - -/** - * Get the value of the BSON binary object currently pointed to by the - * iterator. 
- * - * @param i the bson_iterator - * - * @return the value of the current BSON binary object. - */ -MONGO_EXPORT const char *bson_iterator_bin_data( const bson_iterator *i ); - -/** - * Get the value of the BSON regex object currently pointed to by the - * iterator. - * - * @param i the bson_iterator - * - * @return the value of the current BSON regex object. - */ -MONGO_EXPORT const char *bson_iterator_regex( const bson_iterator *i ); - -/** - * Get the options of the BSON regex object currently pointed to by the - * iterator. - * - * @param i the bson_iterator. - * - * @return the options of the current BSON regex object. - */ -MONGO_EXPORT const char *bson_iterator_regex_opts( const bson_iterator *i ); - -/* these work with BSON_OBJECT and BSON_ARRAY */ -/** - * Get the BSON subobject currently pointed to by the - * iterator. - * - * @param i the bson_iterator. - * @param sub the BSON subobject destination. - */ -MONGO_EXPORT void bson_iterator_subobject( const bson_iterator *i, bson *sub ); - -/** - * Get a bson_iterator that on the BSON subobject. - * - * @param i the bson_iterator. - * @param sub the iterator to point at the BSON subobject. - */ -MONGO_EXPORT void bson_iterator_subiterator( const bson_iterator *i, bson_iterator *sub ); - -/* str must be at least 24 hex chars + null byte */ -/** - * Create a bson_oid_t from a string. - * - * @param oid the bson_oid_t destination. - * @param str a null terminated string comprised of at least 24 hex chars. - */ -MONGO_EXPORT void bson_oid_from_string( bson_oid_t *oid, const char *str ); - -/** - * Create a string representation of the bson_oid_t. - * - * @param oid the bson_oid_t source. - * @param str the string representation destination. - */ -MONGO_EXPORT void bson_oid_to_string( const bson_oid_t *oid, char *str ); - -/** - * Create a bson_oid object. - * - * @param oid the destination for the newly created bson_oid_t. - */ -MONGO_EXPORT void bson_oid_gen( bson_oid_t *oid ); - -/** - * Set a function to be used to generate the second four bytes - * of an object id. - * - * @param func a pointer to a function that returns an int. - */ -MONGO_EXPORT void bson_set_oid_fuzz( int ( *func )( void ) ); - -/** - * Set a function to be used to generate the incrementing part - * of an object id (last four bytes). If you need thread-safety - * in generating object ids, you should set this function. - * - * @param func a pointer to a function that returns an int. - */ -MONGO_EXPORT void bson_set_oid_inc( int ( *func )( void ) ); - -/** - * Get the time a bson_oid_t was created. - * - * @param oid the bson_oid_t. - */ -MONGO_EXPORT time_t bson_oid_generated_time( bson_oid_t *oid ); /* Gives the time the OID was created */ - -/* ---------------------------- - BUILDING - ------------------------------ */ - -/** - * Initialize a new bson object. If not created - * with bson_new, you must initialize each new bson - * object using this function. - * - * @note When finished, you must pass the bson object to - * bson_destroy( ). - */ -MONGO_EXPORT void bson_init( bson *b ); - -/** - * Initialize a BSON object, and point its data - * pointer to the provided char*. - * - * @param b the BSON object to initialize. - * @param data the raw BSON data. - * - * @return BSON_OK or BSON_ERROR. - */ -int bson_init_data( bson *b , char *data ); -int bson_init_finished_data( bson *b, char *data ) ; - -/** - * Initialize a BSON object, and set its - * buffer to the given size. - * - * @param b the BSON object to initialize. 
- * @param size the initial size of the buffer. - * - * @return BSON_OK or BSON_ERROR. - */ -void bson_init_size( bson *b, int size ); - -/** - * Grow a bson object. - * - * @param b the bson to grow. - * @param bytesNeeded the additional number of bytes needed. - * - * @return BSON_OK or BSON_ERROR with the bson error object set. - * Exits if allocation fails. - */ -int bson_ensure_space( bson *b, const int bytesNeeded ); - -/** - * Finalize a bson object. - * - * @param b the bson object to finalize. - * - * @return the standard error code. To deallocate memory, - * call bson_destroy on the bson object. - */ -MONGO_EXPORT int bson_finish( bson *b ); - -/** - * Destroy a bson object. - * - * @param b the bson object to destroy. - * - */ -MONGO_EXPORT void bson_destroy( bson *b ); - -/** - * Returns a pointer to a static empty BSON object. - * - * @param obj the BSON object to initialize. - * - * @return the empty initialized BSON object. - */ -/* returns pointer to static empty bson object */ -MONGO_EXPORT bson *bson_empty( bson *obj ); - -/** - * Make a complete copy of the a BSON object. - * The source bson object must be in a finished - * state; otherwise, the copy will fail. - * - * @param out the copy destination BSON object. - * @param in the copy source BSON object. - */ -MONGO_EXPORT int bson_copy( bson *out, const bson *in ); /* puts data in new buffer. NOOP if out==NULL */ - -/** - * Append a previously created bson_oid_t to a bson object. - * - * @param b the bson to append to. - * @param name the key for the bson_oid_t. - * @param oid the bson_oid_t to append. - * - * @return BSON_OK or BSON_ERROR. - */ -MONGO_EXPORT int bson_append_oid( bson *b, const char *name, const bson_oid_t *oid ); - -/** - * Append a bson_oid_t to a bson. - * - * @param b the bson to append to. - * @param name the key for the bson_oid_t. - * - * @return BSON_OK or BSON_ERROR. - */ -MONGO_EXPORT int bson_append_new_oid( bson *b, const char *name ); - -/** - * Append an int to a bson. - * - * @param b the bson to append to. - * @param name the key for the int. - * @param i the int to append. - * - * @return BSON_OK or BSON_ERROR. - */ -MONGO_EXPORT int bson_append_int( bson *b, const char *name, const int i ); - -/** - * Append an long to a bson. - * - * @param b the bson to append to. - * @param name the key for the long. - * @param i the long to append. - * - * @return BSON_OK or BSON_ERROR. - */ -MONGO_EXPORT int bson_append_long( bson *b, const char *name, const int64_t i ); - -/** - * Append an double to a bson. - * - * @param b the bson to append to. - * @param name the key for the double. - * @param d the double to append. - * - * @return BSON_OK or BSON_ERROR. - */ -MONGO_EXPORT int bson_append_double( bson *b, const char *name, const double d ); - -/** - * Append a string to a bson. - * - * @param b the bson to append to. - * @param name the key for the string. - * @param str the string to append. - * - * @return BSON_OK or BSON_ERROR. -*/ -MONGO_EXPORT int bson_append_string( bson *b, const char *name, const char *str ); - -/** - * Append len bytes of a string to a bson. - * - * @param b the bson to append to. - * @param name the key for the string. - * @param str the string to append. - * @param len the number of bytes from str to append. - * - * @return BSON_OK or BSON_ERROR. - */ -MONGO_EXPORT int bson_append_string_n( bson *b, const char *name, const char *str, int len ); - -/** - * Append a symbol to a bson. - * - * @param b the bson to append to. - * @param name the key for the symbol. 
- * @param str the symbol to append. - * - * @return BSON_OK or BSON_ERROR. - */ -MONGO_EXPORT int bson_append_symbol( bson *b, const char *name, const char *str ); - -/** - * Append len bytes of a symbol to a bson. - * - * @param b the bson to append to. - * @param name the key for the symbol. - * @param str the symbol to append. - * @param len the number of bytes from str to append. - * - * @return BSON_OK or BSON_ERROR. - */ -MONGO_EXPORT int bson_append_symbol_n( bson *b, const char *name, const char *str, int len ); - -/** - * Append code to a bson. - * - * @param b the bson to append to. - * @param name the key for the code. - * @param str the code to append. - * @param len the number of bytes from str to append. - * - * @return BSON_OK or BSON_ERROR. - */ -MONGO_EXPORT int bson_append_code( bson *b, const char *name, const char *str ); - -/** - * Append len bytes of code to a bson. - * - * @param b the bson to append to. - * @param name the key for the code. - * @param str the code to append. - * @param len the number of bytes from str to append. - * - * @return BSON_OK or BSON_ERROR. - */ -MONGO_EXPORT int bson_append_code_n( bson *b, const char *name, const char *str, int len ); - -/** - * Append code to a bson with scope. - * - * @param b the bson to append to. - * @param name the key for the code. - * @param str the string to append. - * @param scope a BSON object containing the scope. - * - * @return BSON_OK or BSON_ERROR. - */ -MONGO_EXPORT int bson_append_code_w_scope( bson *b, const char *name, const char *code, const bson *scope ); - -/** - * Append len bytes of code to a bson with scope. - * - * @param b the bson to append to. - * @param name the key for the code. - * @param str the string to append. - * @param len the number of bytes from str to append. - * @param scope a BSON object containing the scope. - * - * @return BSON_OK or BSON_ERROR. - */ -MONGO_EXPORT int bson_append_code_w_scope_n( bson *b, const char *name, const char *code, int size, const bson *scope ); - -/** - * Append binary data to a bson. - * - * @param b the bson to append to. - * @param name the key for the data. - * @param type the binary data type. - * @param str the binary data. - * @param len the length of the data. - * - * @return BSON_OK or BSON_ERROR. - */ -MONGO_EXPORT int bson_append_binary( bson *b, const char *name, char type, const char *str, int len ); - -/** - * Append a bson_bool_t to a bson. - * - * @param b the bson to append to. - * @param name the key for the boolean value. - * @param v the bson_bool_t to append. - * - * @return BSON_OK or BSON_ERROR. - */ -MONGO_EXPORT int bson_append_bool( bson *b, const char *name, const bson_bool_t v ); - -/** - * Append a null value to a bson. - * - * @param b the bson to append to. - * @param name the key for the null value. - * - * @return BSON_OK or BSON_ERROR. - */ -MONGO_EXPORT int bson_append_null( bson *b, const char *name ); - -/** - * Append an undefined value to a bson. - * - * @param b the bson to append to. - * @param name the key for the undefined value. - * - * @return BSON_OK or BSON_ERROR. - */ -MONGO_EXPORT int bson_append_undefined( bson *b, const char *name ); - -/** - * Append a regex value to a bson. - * - * @param b the bson to append to. - * @param name the key for the regex value. - * @param pattern the regex pattern to append. - * @param the regex options. - * - * @return BSON_OK or BSON_ERROR. 
- */ -MONGO_EXPORT int bson_append_regex( bson *b, const char *name, const char *pattern, const char *opts ); - -/** - * Append bson data to a bson. - * - * @param b the bson to append to. - * @param name the key for the bson data. - * @param bson the bson object to append. - * - * @return BSON_OK or BSON_ERROR. - */ -MONGO_EXPORT int bson_append_bson( bson *b, const char *name, const bson *bson ); - -/** - * Append a BSON element to a bson from the current point of an iterator. - * - * @param b the bson to append to. - * @param name_or_null the key for the BSON element, or NULL. - * @param elem the bson_iterator. - * - * @return BSON_OK or BSON_ERROR. - */ -MONGO_EXPORT int bson_append_element( bson *b, const char *name_or_null, const bson_iterator *elem ); - -/** - * Append a bson_timestamp_t value to a bson. - * - * @param b the bson to append to. - * @param name the key for the timestampe value. - * @param ts the bson_timestamp_t value to append. - * - * @return BSON_OK or BSON_ERROR. - */ -MONGO_EXPORT int bson_append_timestamp( bson *b, const char *name, bson_timestamp_t *ts ); -MONGO_EXPORT int bson_append_timestamp2( bson *b, const char *name, int time, int increment ); - -/* these both append a bson_date */ -/** - * Append a bson_date_t value to a bson. - * - * @param b the bson to append to. - * @param name the key for the date value. - * @param millis the bson_date_t to append. - * - * @return BSON_OK or BSON_ERROR. - */ -MONGO_EXPORT int bson_append_date( bson *b, const char *name, bson_date_t millis ); - -/** - * Append a time_t value to a bson. - * - * @param b the bson to append to. - * @param name the key for the date value. - * @param secs the time_t to append. - * - * @return BSON_OK or BSON_ERROR. - */ -MONGO_EXPORT int bson_append_time_t( bson *b, const char *name, time_t secs ); - -/** - * Start appending a new object to a bson. - * - * @param b the bson to append to. - * @param name the name of the new object. - * - * @return BSON_OK or BSON_ERROR. - */ -MONGO_EXPORT int bson_append_start_object( bson *b, const char *name ); - -/** - * Start appending a new array to a bson. - * - * @param b the bson to append to. - * @param name the name of the new array. - * - * @return BSON_OK or BSON_ERROR. - */ -MONGO_EXPORT int bson_append_start_array( bson *b, const char *name ); - -/** - * Finish appending a new object or array to a bson. - * - * @param b the bson to append to. - * - * @return BSON_OK or BSON_ERROR. - */ -MONGO_EXPORT int bson_append_finish_object( bson *b ); - -/** - * Finish appending a new object or array to a bson. This - * is simply an alias for bson_append_finish_object. - * - * @param b the bson to append to. - * - * @return BSON_OK or BSON_ERROR. - */ -MONGO_EXPORT int bson_append_finish_array( bson *b ); - -void bson_numstr( char *str, int i ); - -void bson_incnumstr( char *str ); - -/* Error handling and standard library function over-riding. */ -/* -------------------------------------------------------- */ - -/* bson_err_handlers shouldn't return!!! */ -typedef void( *bson_err_handler )( const char *errmsg ); - -typedef int (*bson_printf_func)( const char *, ... ); -typedef int (*bson_fprintf_func)( FILE *, const char *, ... ); -typedef int (*bson_sprintf_func)( char *, const char *, ... 
); - -extern void *( *bson_malloc_func )( size_t ); -extern void *( *bson_realloc_func )( void *, size_t ); -extern void ( *bson_free_func )( void * ); - -extern bson_printf_func bson_printf; -extern bson_fprintf_func bson_fprintf; -extern bson_sprintf_func bson_sprintf; -extern bson_printf_func bson_errprintf; - -MONGO_EXPORT void bson_free( void *ptr ); - -/** - * Allocates memory and checks return value, exiting fatally if malloc() fails. - * - * @param size bytes to allocate. - * - * @return a pointer to the allocated memory. - * - * @sa malloc(3) - */ -MONGO_EXPORT void *bson_malloc( int size ); - -/** - * Changes the size of allocated memory and checks return value, - * exiting fatally if realloc() fails. - * - * @param ptr pointer to the space to reallocate. - * @param size bytes to allocate. - * - * @return a pointer to the allocated memory. - * - * @sa realloc() - */ -void *bson_realloc( void *ptr, int size ); - -/** - * Set a function for error handling. - * - * @param func a bson_err_handler function. - * - * @return the old error handling function, or NULL. - */ -MONGO_EXPORT bson_err_handler set_bson_err_handler( bson_err_handler func ); - -/* does nothing if ok != 0 */ -/** - * Exit fatally. - * - * @param ok exits if ok is equal to 0. - */ -void bson_fatal( int ok ); - -/** - * Exit fatally with an error message. - * - * @param ok exits if ok is equal to 0. - * @param msg prints to stderr before exiting. - */ -void bson_fatal_msg( int ok, const char *msg ); - -/** - * Invoke the error handler, but do not exit. - * - * @param b the buffer object. - */ -void bson_builder_error( bson *b ); - -/** - * Cast an int64_t to double. This is necessary for embedding in - * certain environments. - * - */ -MONGO_EXPORT double bson_int64_to_double( int64_t i64 ); - -MONGO_EXPORT void bson_swap_endian32( void *outp, const void *inp ); -MONGO_EXPORT void bson_swap_endian64( void *outp, const void *inp ); - -MONGO_EXTERN_C_END -#endif diff --git a/mongo-c-driver-v0.6/src/encoding.c b/mongo-c-driver-v0.6/src/encoding.c deleted file mode 100644 index 45d0d27..0000000 --- a/mongo-c-driver-v0.6/src/encoding.c +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Copyright 2009-2012 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Portions Copyright 2001 Unicode, Inc. - * - * Disclaimer - * - * This source code is provided as is by Unicode, Inc. No claims are - * made as to fitness for any particular purpose. No warranties of any - * kind are expressed or implied. The recipient agrees to determine - * applicability of information provided. If this file has been - * purchased on magnetic or optical media from Unicode, Inc., the - * sole remedy for any claim will be exchange of defective media - * within 90 days of receipt. - * - * Limitations on Rights to Redistribute This Code - * - * Unicode, Inc. 
hereby grants the right to freely use the information - * supplied in this file in the creation of products supporting the - * Unicode Standard, and to make copies of this file in any form - * for internal or external distribution as long as this notice - * remains attached. - */ - - -#include "bson.h" -#include "encoding.h" - -/* - * Index into the table below with the first byte of a UTF-8 sequence to - * get the number of trailing bytes that are supposed to follow it. - */ -static const char trailingBytesForUTF8[256] = { - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5 -}; - -/* --------------------------------------------------------------------- */ - -/* - * Utility routine to tell whether a sequence of bytes is legal UTF-8. - * This must be called with the length pre-determined by the first byte. - * The length can be set by: - * length = trailingBytesForUTF8[*source]+1; - * and the sequence is illegal right away if there aren't that many bytes - * available. - * If presented with a length > 4, this returns 0. The Unicode - * definition of UTF-8 goes up to 4-byte sequences. - */ -static int isLegalUTF8( const unsigned char *source, int length ) { - unsigned char a; - const unsigned char *srcptr = source + length; - switch ( length ) { - default: - return 0; - /* Everything else falls through when "true"... */ - case 4: - if ( ( a = ( *--srcptr ) ) < 0x80 || a > 0xBF ) return 0; - case 3: - if ( ( a = ( *--srcptr ) ) < 0x80 || a > 0xBF ) return 0; - case 2: - if ( ( a = ( *--srcptr ) ) > 0xBF ) return 0; - switch ( *source ) { - /* no fall-through in this inner switch */ - case 0xE0: - if ( a < 0xA0 ) return 0; - break; - case 0xF0: - if ( a < 0x90 ) return 0; - break; - case 0xF4: - if ( a > 0x8F ) return 0; - break; - default: - if ( a < 0x80 ) return 0; - } - case 1: - if ( *source >= 0x80 && *source < 0xC2 ) return 0; - if ( *source > 0xF4 ) return 0; - } - return 1; -} - -/* If the name is part of a db ref ($ref, $db, or $id), then return true. */ -static int bson_string_is_db_ref( const unsigned char *string, const int length ) { - int result = 0; - - if( length >= 4 ) { - if( string[1] == 'r' && string[2] == 'e' && string[3] == 'f' ) - result = 1; - } - else if( length >= 3 ) { - if( string[1] == 'i' && string[2] == 'd' ) - result = 1; - else if( string[1] == 'd' && string[2] == 'b' ) - result = 1; - } - - return result; -} - -static int bson_validate_string( bson *b, const unsigned char *string, - const int length, const char check_utf8, const char check_dot, - const char check_dollar ) { - - int position = 0; - int sequence_length = 1; - - if( check_dollar && string[0] == '$' ) { - if( !bson_string_is_db_ref( string, length ) ) - b->err |= BSON_FIELD_INIT_DOLLAR; - } - - while ( position < length ) { - if ( check_dot && *( string + position ) == '.' 
) { - b->err |= BSON_FIELD_HAS_DOT; - } - - if ( check_utf8 ) { - sequence_length = trailingBytesForUTF8[*( string + position )] + 1; - if ( ( position + sequence_length ) > length ) { - b->err |= BSON_NOT_UTF8; - return BSON_ERROR; - } - if ( !isLegalUTF8( string + position, sequence_length ) ) { - b->err |= BSON_NOT_UTF8; - return BSON_ERROR; - } - } - position += sequence_length; - } - - return BSON_OK; -} - - -int bson_check_string( bson *b, const char *string, - const int length ) { - - return bson_validate_string( b, ( const unsigned char * )string, length, 1, 0, 0 ); -} - -int bson_check_field_name( bson *b, const char *string, - const int length ) { - - return bson_validate_string( b, ( const unsigned char * )string, length, 1, 1, 1 ); -} diff --git a/mongo-c-driver-v0.6/src/encoding.h b/mongo-c-driver-v0.6/src/encoding.h deleted file mode 100644 index f13c31e..0000000 --- a/mongo-c-driver-v0.6/src/encoding.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2009-2012 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef BSON_ENCODING_H_ -#define BSON_ENCODING_H_ - -MONGO_EXTERN_C_START - -/** - * Check that a field name is valid UTF8, does not start with a '$', - * and contains no '.' characters. Set bson bit field appropriately. - * Note that we don't need to check for '\0' because we're using - * strlen(3), which stops at '\0'. - * - * @param b The bson object to which field name will be appended. - * @param string The field name as char*. - * @param length The length of the field name. - * - * @return BSON_OK if valid UTF8 and BSON_ERROR if not. All BSON strings must be - * valid UTF8. This function will also check whether the string - * contains '.' or starts with '$', since the validity of this depends on context. - * Set the value of b->err appropriately. - */ -int bson_check_field_name( bson *b, const char *string, - const int length ); - -/** - * Check that a string is valid UTF8. Sets the buffer bit field appropriately. - * - * @param b The bson object to which string will be appended. - * @param string The string to check. - * @param length The length of the string. - * - * @return BSON_OK if valid UTF-8; otherwise, BSON_ERROR. - * Sets b->err on error. - */ -bson_bool_t bson_check_string( bson *b, const char *string, - const int length ); - -MONGO_EXTERN_C_END -#endif diff --git a/mongo-c-driver-v0.6/src/env.h b/mongo-c-driver-v0.6/src/env.h deleted file mode 100644 index 463ee32..0000000 --- a/mongo-c-driver-v0.6/src/env.h +++ /dev/null @@ -1,39 +0,0 @@ -/** @file env.h */ - -/* Copyright 2009-2012 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* Header for generic net.h */ -#ifndef MONGO_ENV_H_ -#define MONGO_ENV_H_ - -#include "mongo.h" - -MONGO_EXTERN_C_START - -/* This is a no-op in the generic implementation. */ -int mongo_env_set_socket_op_timeout( mongo *conn, int millis ); -int mongo_env_read_socket( mongo *conn, void *buf, int len ); -int mongo_env_write_socket( mongo *conn, const void *buf, int len ); -int mongo_env_socket_connect( mongo *conn, const char *host, int port ); - -/* Initialize socket services */ -MONGO_EXPORT int mongo_env_sock_init( void ); - -/* Close a socket */ -MONGO_EXPORT int mongo_env_close_socket( int socket ); - -MONGO_EXTERN_C_END -#endif diff --git a/mongo-c-driver-v0.6/src/env_posix.c b/mongo-c-driver-v0.6/src/env_posix.c deleted file mode 100644 index f1020ca..0000000 --- a/mongo-c-driver-v0.6/src/env_posix.c +++ /dev/null @@ -1,165 +0,0 @@ -/* env_posix.c */ - -/* Copyright 2009-2012 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* Networking and other niceties for POSIX systems. 
*/ -#include "env.h" -#include "mongo.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifndef NI_MAXSERV -# define NI_MAXSERV 32 -#endif - -int mongo_env_close_socket( int socket ) { - return close( socket ); -} - -int mongo_env_sock_init( void ) { - return 0; -} - -int mongo_env_write_socket( mongo *conn, const void *buf, int len ) { - const char *cbuf = buf; -#ifdef __APPLE__ - int flags = 0; -#else - int flags = MSG_NOSIGNAL; -#endif - - while ( len ) { - int sent = send( conn->sock, cbuf, len, flags ); - if ( sent == -1 ) { - if (errno == EPIPE) - conn->connected = 0; - __mongo_set_error( conn, MONGO_IO_ERROR, strerror( errno ), errno ); - return MONGO_ERROR; - } - cbuf += sent; - len -= sent; - } - - return MONGO_OK; -} - -int mongo_env_read_socket( mongo *conn, void *buf, int len ) { - char *cbuf = buf; - while ( len ) { - int sent = recv( conn->sock, cbuf, len, 0 ); - if ( sent == 0 || sent == -1 ) { - __mongo_set_error( conn, MONGO_IO_ERROR, strerror( errno ), errno ); - return MONGO_ERROR; - } - cbuf += sent; - len -= sent; - } - - return MONGO_OK; -} - -int mongo_env_set_socket_op_timeout( mongo *conn, int millis ) { - struct timeval tv; - tv.tv_sec = millis / 1000; - tv.tv_usec = ( millis % 1000 ) * 1000; - - if ( setsockopt( conn->sock, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof( tv ) ) == -1 ) { - conn->err = MONGO_IO_ERROR; - __mongo_set_error( conn, MONGO_IO_ERROR, "setsockopt SO_RCVTIMEO failed.", errno ); - return MONGO_ERROR; - } - - if ( setsockopt( conn->sock, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof( tv ) ) == -1 ) { - __mongo_set_error( conn, MONGO_IO_ERROR, "setsockopt SO_SNDTIMEO failed.", errno ); - return MONGO_ERROR; - } - - return MONGO_OK; -} - -int mongo_env_socket_connect( mongo *conn, const char *host, int port ) { - char port_str[NI_MAXSERV]; - int status; - - struct addrinfo ai_hints; - struct addrinfo *ai_list = NULL; - struct addrinfo *ai_ptr = NULL; - - conn->sock = 0; - conn->connected = 0; - sprintf(port_str,"%d",port); - - bson_sprintf( port_str, "%d", port ); - - memset( &ai_hints, 0, sizeof( ai_hints ) ); -#ifdef AI_ADDRCONFIG - ai_hints.ai_flags = AI_ADDRCONFIG; -#endif - ai_hints.ai_family = AF_UNSPEC; - ai_hints.ai_socktype = SOCK_STREAM; - - status = getaddrinfo( host, port_str, &ai_hints, &ai_list ); - if ( status != 0 ) { - bson_errprintf( "getaddrinfo failed: %s", gai_strerror( status ) ); - conn->err = MONGO_CONN_ADDR_FAIL; - return MONGO_ERROR; - } - - for ( ai_ptr = ai_list; ai_ptr != NULL; ai_ptr = ai_ptr->ai_next ) { - conn->sock = socket( ai_ptr->ai_family, ai_ptr->ai_socktype, ai_ptr->ai_protocol ); - if ( conn->sock < 0 ) { - conn->sock = 0; - continue; - } - - status = connect( conn->sock, ai_ptr->ai_addr, ai_ptr->ai_addrlen ); - if ( status != 0 ) { - mongo_env_close_socket( conn->sock ); - conn->sock = 0; - continue; - } - - if ( ai_ptr->ai_protocol == IPPROTO_TCP ) { - int flag = 1; - - setsockopt( conn->sock, IPPROTO_TCP, TCP_NODELAY, - ( void * ) &flag, sizeof( flag ) ); - if ( conn->op_timeout_ms > 0 ) - mongo_env_set_socket_op_timeout( conn, conn->op_timeout_ms ); - } - - conn->connected = 1; - break; - } - - freeaddrinfo( ai_list ); - - if ( ! 
conn->connected ) { - conn->err = MONGO_CONN_FAIL; - return MONGO_ERROR; - } - - return MONGO_OK; -} diff --git a/mongo-c-driver-v0.6/src/env_standard.c b/mongo-c-driver-v0.6/src/env_standard.c deleted file mode 100644 index 36fa9f6..0000000 --- a/mongo-c-driver-v0.6/src/env_standard.c +++ /dev/null @@ -1,168 +0,0 @@ -/* env_standard.c */ - -/* Copyright 2009-2012 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* Vanilla networking designed to work on all systems. */ -#include "env.h" -#include -#include - -#ifdef _WIN32 - #ifdef _MSC_VER - #include // send,recv,socklen_t etc - #include // addrinfo - #else - #include - #include - typedef int socklen_t; - #endif -#else -#include -#include -#include -#include -#include -#include -#include -#include -#endif - -#ifndef NI_MAXSERV -# define NI_MAXSERV 32 -#endif - -int mongo_env_close_socket( int socket ) { -#ifdef _WIN32 - return closesocket( socket ); -#else - return close( socket ); -#endif -} - -int mongo_env_write_socket( mongo *conn, const void *buf, int len ) { - const char *cbuf = buf; -#ifdef _WIN32 - int flags = 0; -#else -#ifdef __APPLE__ - int flags = 0; -#else - int flags = MSG_NOSIGNAL; -#endif -#endif - - while ( len ) { - int sent = send( conn->sock, cbuf, len, flags ); - if ( sent == -1 ) { - if (errno == EPIPE) - conn->connected = 0; - conn->err = MONGO_IO_ERROR; - return MONGO_ERROR; - } - cbuf += sent; - len -= sent; - } - - return MONGO_OK; -} - -int mongo_env_read_socket( mongo *conn, void *buf, int len ) { - char *cbuf = buf; - while ( len ) { - int sent = recv( conn->sock, cbuf, len, 0 ); - if ( sent == 0 || sent == -1 ) { - conn->err = MONGO_IO_ERROR; - return MONGO_ERROR; - } - cbuf += sent; - len -= sent; - } - - return MONGO_OK; -} - -/* This is a no-op in the generic implementation. 
*/ -int mongo_env_set_socket_op_timeout( mongo *conn, int millis ) { - return MONGO_OK; -} - -int mongo_env_socket_connect( mongo *conn, const char *host, int port ) { - struct sockaddr_in sa; - socklen_t addressSize; - int flag = 1; - - if ( ( conn->sock = socket( AF_INET, SOCK_STREAM, 0 ) ) < 0 ) { - conn->sock = 0; - conn->err = MONGO_CONN_NO_SOCKET; - return MONGO_ERROR; - } - - memset( sa.sin_zero , 0 , sizeof( sa.sin_zero ) ); - sa.sin_family = AF_INET; - sa.sin_port = htons( port ); - sa.sin_addr.s_addr = inet_addr( host ); - addressSize = sizeof( sa ); - - if ( connect( conn->sock, ( struct sockaddr * )&sa, addressSize ) == -1 ) { - mongo_env_close_socket( conn->sock ); - conn->connected = 0; - conn->sock = 0; - conn->err = MONGO_CONN_FAIL; - return MONGO_ERROR; - } - - setsockopt( conn->sock, IPPROTO_TCP, TCP_NODELAY, ( char * ) &flag, sizeof( flag ) ); - - if( conn->op_timeout_ms > 0 ) - mongo_env_set_socket_op_timeout( conn, conn->op_timeout_ms ); - - conn->connected = 1; - - return MONGO_OK; -} - -MONGO_EXPORT int mongo_env_sock_init( void ) { - -#if defined(_WIN32) - WSADATA wsaData; - WORD wVers; -#elif defined(SIGPIPE) - struct sigaction act; -#endif - - static int called_once; - static int retval; - if (called_once) return retval; - called_once = 1; - -#if defined(_WIN32) - wVers = MAKEWORD(1, 1); - retval = (WSAStartup(wVers, &wsaData) == 0); -#elif defined(MACINTOSH) - GUSISetup(GUSIwithInternetSockets); - retval = 1; -#elif defined(SIGPIPE) - retval = 1; - if (sigaction(SIGPIPE, (struct sigaction *)NULL, &act) < 0) - retval = 0; - else if (act.sa_handler == SIG_DFL) { - act.sa_handler = SIG_IGN; - if (sigaction(SIGPIPE, &act, (struct sigaction *)NULL) < 0) - retval = 0; - } -#endif - return retval; -} diff --git a/mongo-c-driver-v0.6/src/env_win32.c b/mongo-c-driver-v0.6/src/env_win32.c deleted file mode 100644 index 4b38928..0000000 --- a/mongo-c-driver-v0.6/src/env_win32.c +++ /dev/null @@ -1,178 +0,0 @@ -/* env_win32.c */ - -/* Copyright 2009-2012 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* Networking and other niceties for WIN32. 
*/ -#include "env.h" -#include "mongo.h" -#include - -#ifdef _MSC_VER -#include // send,recv,socklen_t etc -#include // addrinfo -#else -#include // send,recv,socklen_t etc -#include -typedef int socklen_t; -#endif - -#ifndef NI_MAXSERV -# define NI_MAXSERV 32 -#endif - -int mongo_env_close_socket( int socket ) { - return closesocket( socket ); -} - -int mongo_env_write_socket( mongo *conn, const void *buf, int len ) { - const char *cbuf = buf; - int flags = 0; - - while ( len ) { - int sent = send( conn->sock, cbuf, len, flags ); - if ( sent == -1 ) { - __mongo_set_error( conn, MONGO_IO_ERROR, NULL, WSAGetLastError() ); - conn->connected = 0; - return MONGO_ERROR; - } - cbuf += sent; - len -= sent; - } - - return MONGO_OK; -} - -int mongo_env_read_socket( mongo *conn, void *buf, int len ) { - char *cbuf = buf; - - while ( len ) { - int sent = recv( conn->sock, cbuf, len, 0 ); - if ( sent == 0 || sent == -1 ) { - __mongo_set_error( conn, MONGO_IO_ERROR, NULL, WSAGetLastError() ); - return MONGO_ERROR; - } - cbuf += sent; - len -= sent; - } - - return MONGO_OK; -} - -int mongo_env_set_socket_op_timeout( mongo *conn, int millis ) { - if ( setsockopt( conn->sock, SOL_SOCKET, SO_RCVTIMEO, (const char *)&millis, - sizeof( millis ) ) == -1 ) { - __mongo_set_error( conn, MONGO_IO_ERROR, "setsockopt SO_RCVTIMEO failed.", - WSAGetLastError() ); - return MONGO_ERROR; - } - - if ( setsockopt( conn->sock, SOL_SOCKET, SO_SNDTIMEO, (const char *)&millis, - sizeof( millis ) ) == -1 ) { - __mongo_set_error( conn, MONGO_IO_ERROR, "setsockopt SO_SNDTIMEO failed.", - WSAGetLastError() ); - return MONGO_ERROR; - } - - return MONGO_OK; -} - -int mongo_env_socket_connect( mongo *conn, const char *host, int port ) { - char port_str[NI_MAXSERV]; - char errstr[MONGO_ERR_LEN]; - int status; - - struct addrinfo ai_hints; - struct addrinfo *ai_list = NULL; - struct addrinfo *ai_ptr = NULL; - - conn->sock = 0; - conn->connected = 0; - - bson_sprintf( port_str, "%d", port ); - - memset( &ai_hints, 0, sizeof( ai_hints ) ); - ai_hints.ai_family = AF_UNSPEC; - ai_hints.ai_socktype = SOCK_STREAM; - ai_hints.ai_protocol = IPPROTO_TCP; - - status = getaddrinfo( host, port_str, &ai_hints, &ai_list ); - if ( status != 0 ) { - bson_sprintf( errstr, "getaddrinfo failed with error %d", status ); - __mongo_set_error( conn, MONGO_CONN_ADDR_FAIL, errstr, WSAGetLastError() ); - return MONGO_ERROR; - } - - for ( ai_ptr = ai_list; ai_ptr != NULL; ai_ptr = ai_ptr->ai_next ) { - conn->sock = socket( ai_ptr->ai_family, ai_ptr->ai_socktype, - ai_ptr->ai_protocol ); - - if ( conn->sock < 0 ) { - __mongo_set_error( conn, MONGO_SOCKET_ERROR, "socket() failed", - WSAGetLastError() ); - conn->sock = 0; - continue; - } - - status = connect( conn->sock, ai_ptr->ai_addr, ai_ptr->ai_addrlen ); - if ( status != 0 ) { - __mongo_set_error( conn, MONGO_SOCKET_ERROR, "connect() failed", - WSAGetLastError() ); - mongo_env_close_socket( conn->sock ); - conn->sock = 0; - continue; - } - - if ( ai_ptr->ai_protocol == IPPROTO_TCP ) { - int flag = 1; - - setsockopt( conn->sock, IPPROTO_TCP, TCP_NODELAY, - ( void * ) &flag, sizeof( flag ) ); - - if ( conn->op_timeout_ms > 0 ) - mongo_env_set_socket_op_timeout( conn, conn->op_timeout_ms ); - } - - conn->connected = 1; - break; - } - - freeaddrinfo( ai_list ); - - if ( ! 
conn->connected ) { - conn->err = MONGO_CONN_FAIL; - return MONGO_ERROR; - } - else { - mongo_clear_errors( conn ); - return MONGO_OK; - } -} - -MONGO_EXPORT int mongo_env_sock_init( void ) { - - WSADATA wsaData; - WORD wVers; - static int called_once; - static int retval; - - if (called_once) return retval; - - called_once = 1; - wVers = MAKEWORD(1, 1); - retval = (WSAStartup(wVers, &wsaData) == 0); - - return retval; -} diff --git a/mongo-c-driver-v0.6/src/gridfs.c b/mongo-c-driver-v0.6/src/gridfs.c deleted file mode 100644 index db80dea..0000000 --- a/mongo-c-driver-v0.6/src/gridfs.c +++ /dev/null @@ -1,712 +0,0 @@ -/* gridfs.c */ - -/* Copyright 2009-2012 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "gridfs.h" -#include -#include -#include -#include - -MONGO_EXPORT gridfs* gridfs_create() { - return (gridfs*)bson_malloc(sizeof(gridfs)); -} - -MONGO_EXPORT void gridfs_dispose(gridfs* gfs) { - free(gfs); -} - -MONGO_EXPORT gridfile* gridfile_create() { - return (gridfile*)bson_malloc(sizeof(gridfile)); -} - -MONGO_EXPORT void gridfile_dispose(gridfile* gf) { - free(gf); -} - -MONGO_EXPORT void gridfile_get_descriptor(gridfile* gf, bson* out) { - *out = *gf->meta; -} - - -static bson *chunk_new( bson_oid_t id, int chunkNumber, - const char *data, int len ) { - bson *b = bson_malloc( sizeof( bson ) ); - - bson_init( b ); - bson_append_oid( b, "files_id", &id ); - bson_append_int( b, "n", chunkNumber ); - bson_append_binary( b, "data", BSON_BIN_BINARY, data, len ); - bson_finish( b ); - return b; -} - -static void chunk_free( bson *oChunk ) { - bson_destroy( oChunk ); - bson_free( oChunk ); -} - -int gridfs_init( mongo *client, const char *dbname, const char *prefix, - gridfs *gfs ) { - - int options; - bson b; - bson_bool_t success; - - gfs->client = client; - - /* Allocate space to own the dbname */ - gfs->dbname = ( const char * )bson_malloc( strlen( dbname )+1 ); - strcpy( ( char * )gfs->dbname, dbname ); - - /* Allocate space to own the prefix */ - if ( prefix == NULL ) prefix = "fs"; - gfs->prefix = ( const char * )bson_malloc( strlen( prefix )+1 ); - strcpy( ( char * )gfs->prefix, prefix ); - - /* Allocate space to own files_ns */ - gfs->files_ns = - ( const char * ) bson_malloc ( strlen( prefix )+strlen( dbname )+strlen( ".files" )+2 ); - strcpy( ( char * )gfs->files_ns, dbname ); - strcat( ( char * )gfs->files_ns, "." ); - strcat( ( char * )gfs->files_ns, prefix ); - strcat( ( char * )gfs->files_ns, ".files" ); - - /* Allocate space to own chunks_ns */ - gfs->chunks_ns = ( const char * ) bson_malloc( strlen( prefix ) + strlen( dbname ) - + strlen( ".chunks" ) + 2 ); - strcpy( ( char * )gfs->chunks_ns, dbname ); - strcat( ( char * )gfs->chunks_ns, "." 
); - strcat( ( char * )gfs->chunks_ns, prefix ); - strcat( ( char * )gfs->chunks_ns, ".chunks" ); - - bson_init( &b ); - bson_append_int( &b, "filename", 1 ); - bson_finish( &b ); - options = 0; - success = ( mongo_create_index( gfs->client, gfs->files_ns, &b, options, NULL ) == MONGO_OK ); - bson_destroy( &b ); - if ( !success ) { - bson_free( ( char * )gfs->dbname ); - bson_free( ( char * )gfs->prefix ); - bson_free( ( char * )gfs->files_ns ); - bson_free( ( char * )gfs->chunks_ns ); - return MONGO_ERROR; - } - - bson_init( &b ); - bson_append_int( &b, "files_id", 1 ); - bson_append_int( &b, "n", 1 ); - bson_finish( &b ); - options = MONGO_INDEX_UNIQUE; - success = ( mongo_create_index( gfs->client, gfs->chunks_ns, &b, options, NULL ) == MONGO_OK ); - bson_destroy( &b ); - if ( !success ) { - bson_free( ( char * )gfs->dbname ); - bson_free( ( char * )gfs->prefix ); - bson_free( ( char * )gfs->files_ns ); - bson_free( ( char * )gfs->chunks_ns ); - return MONGO_ERROR; - } - - return MONGO_OK; -} - -MONGO_EXPORT void gridfs_destroy( gridfs *gfs ) { - if ( gfs == NULL ) return; - if ( gfs->dbname ) bson_free( ( char * )gfs->dbname ); - if ( gfs->prefix ) bson_free( ( char * )gfs->prefix ); - if ( gfs->files_ns ) bson_free( ( char * )gfs->files_ns ); - if ( gfs->chunks_ns ) bson_free( ( char * )gfs->chunks_ns ); -} - -static int gridfs_insert_file( gridfs *gfs, const char *name, - const bson_oid_t id, gridfs_offset length, - const char *contenttype ) { - bson command; - bson ret; - bson res; - bson_iterator it; - int result; - int64_t d; - - /* Check run md5 */ - bson_init( &command ); - bson_append_oid( &command, "filemd5", &id ); - bson_append_string( &command, "root", gfs->prefix ); - bson_finish( &command ); - result = mongo_run_command( gfs->client, gfs->dbname, &command, &res ); - bson_destroy( &command ); - if (result != MONGO_OK) - return result; - - /* Create and insert BSON for file metadata */ - bson_init( &ret ); - bson_append_oid( &ret, "_id", &id ); - if ( name != NULL && *name != '\0' ) { - bson_append_string( &ret, "filename", name ); - } - bson_append_long( &ret, "length", length ); - bson_append_int( &ret, "chunkSize", DEFAULT_CHUNK_SIZE ); - d = ( bson_date_t )1000*time( NULL ); - bson_append_date( &ret, "uploadDate", d); - bson_find( &it, &res, "md5" ); - bson_append_string( &ret, "md5", bson_iterator_string( &it ) ); - bson_destroy( &res ); - if ( contenttype != NULL && *contenttype != '\0' ) { - bson_append_string( &ret, "contentType", contenttype ); - } - bson_finish( &ret ); - result = mongo_insert( gfs->client, gfs->files_ns, &ret, NULL ); - bson_destroy( &ret ); - - return result; -} - -MONGO_EXPORT int gridfs_store_buffer( gridfs *gfs, const char *data, - gridfs_offset length, const char *remotename, - const char *contenttype ) { - - char const *end = data + length; - const char *data_ptr = data; - bson_oid_t id; - int chunkNumber = 0; - int chunkLen; - bson *oChunk; - - /* Large files Assertion */ - /* assert( length <= 0xffffffff ); */ - - /* Generate and append an oid*/ - bson_oid_gen( &id ); - - /* Insert the file's data chunk by chunk */ - while ( data_ptr < end ) { - chunkLen = DEFAULT_CHUNK_SIZE < ( unsigned int )( end - data_ptr ) ? 
- DEFAULT_CHUNK_SIZE : ( unsigned int )( end - data_ptr ); - oChunk = chunk_new( id, chunkNumber, data_ptr, chunkLen ); - mongo_insert( gfs->client, gfs->chunks_ns, oChunk, NULL ); - chunk_free( oChunk ); - chunkNumber++; - data_ptr += chunkLen; - } - - /* Inserts file's metadata */ - return gridfs_insert_file( gfs, remotename, id, length, contenttype ); -} - -MONGO_EXPORT void gridfile_writer_init( gridfile *gfile, gridfs *gfs, - const char *remote_name, const char *content_type ) { - gfile->gfs = gfs; - - bson_oid_gen( &( gfile->id ) ); - gfile->chunk_num = 0; - gfile->length = 0; - gfile->pending_len = 0; - gfile->pending_data = NULL; - - gfile->remote_name = ( char * )bson_malloc( strlen( remote_name ) + 1 ); - strcpy( ( char * )gfile->remote_name, remote_name ); - - gfile->content_type = ( char * )bson_malloc( strlen( content_type ) + 1 ); - strcpy( ( char * )gfile->content_type, content_type ); -} - -MONGO_EXPORT void gridfile_write_buffer( gridfile *gfile, const char *data, - gridfs_offset length ) { - - int bytes_left = 0; - int data_partial_len = 0; - int chunks_to_write = 0; - char *buffer; - bson *oChunk; - gridfs_offset to_write = length + gfile->pending_len; - - if ( to_write < DEFAULT_CHUNK_SIZE ) { /* Less than one chunk to write */ - if( gfile->pending_data ) { - gfile->pending_data = ( char * )bson_realloc( ( void * )gfile->pending_data, gfile->pending_len + to_write ); - memcpy( gfile->pending_data + gfile->pending_len, data, length ); - } else if ( to_write > 0 ) { - gfile->pending_data = ( char * )bson_malloc( to_write ); - memcpy( gfile->pending_data, data, length ); - } - gfile->pending_len += length; - - } else { /* At least one chunk of data to write */ - chunks_to_write = to_write / DEFAULT_CHUNK_SIZE; - bytes_left = to_write % DEFAULT_CHUNK_SIZE; - - /* If there's a pending chunk to be written, we need to combine - * the buffer provided up to DEFAULT_CHUNK_SIZE. - */ - if ( gfile->pending_len > 0 ) { - data_partial_len = DEFAULT_CHUNK_SIZE - gfile->pending_len; - buffer = ( char * )bson_malloc( DEFAULT_CHUNK_SIZE ); - memcpy( buffer, gfile->pending_data, gfile->pending_len ); - memcpy( buffer + gfile->pending_len, data, data_partial_len ); - - oChunk = chunk_new( gfile->id, gfile->chunk_num, buffer, DEFAULT_CHUNK_SIZE ); - mongo_insert( gfile->gfs->client, gfile->gfs->chunks_ns, oChunk, NULL ); - chunk_free( oChunk ); - gfile->chunk_num++; - gfile->length += DEFAULT_CHUNK_SIZE; - data += data_partial_len; - - chunks_to_write--; - - bson_free( buffer ); - } - - while( chunks_to_write > 0 ) { - oChunk = chunk_new( gfile->id, gfile->chunk_num, data, DEFAULT_CHUNK_SIZE ); - mongo_insert( gfile->gfs->client, gfile->gfs->chunks_ns, oChunk, NULL ); - chunk_free( oChunk ); - gfile->chunk_num++; - chunks_to_write--; - gfile->length += DEFAULT_CHUNK_SIZE; - data += DEFAULT_CHUNK_SIZE; - } - - bson_free( gfile->pending_data ); - - /* If there are any leftover bytes, store them as pending data. */ - if( bytes_left == 0 ) - gfile->pending_data = NULL; - else { - gfile->pending_data = ( char * )bson_malloc( bytes_left ); - memcpy( gfile->pending_data, data, bytes_left ); - } - - gfile->pending_len = bytes_left; - } -} - -MONGO_EXPORT int gridfile_writer_done( gridfile *gfile ) { - - /* write any remaining pending chunk data. 
- * pending data will always take up less than one chunk */ - bson *oChunk; - int response; - if( gfile->pending_data ) { - oChunk = chunk_new( gfile->id, gfile->chunk_num, gfile->pending_data, gfile->pending_len ); - mongo_insert( gfile->gfs->client, gfile->gfs->chunks_ns, oChunk, NULL ); - chunk_free( oChunk ); - bson_free( gfile->pending_data ); - gfile->length += gfile->pending_len; - } - - /* insert into files collection */ - response = gridfs_insert_file( gfile->gfs, gfile->remote_name, gfile->id, - gfile->length, gfile->content_type ); - - bson_free( gfile->remote_name ); - bson_free( gfile->content_type ); - - return response; -} - -int gridfs_store_file( gridfs *gfs, const char *filename, - const char *remotename, const char *contenttype ) { - - char buffer[DEFAULT_CHUNK_SIZE]; - FILE *fd; - bson_oid_t id; - int chunkNumber = 0; - gridfs_offset length = 0; - gridfs_offset chunkLen = 0; - bson *oChunk; - - /* Open the file and the correct stream */ - if ( strcmp( filename, "-" ) == 0 ) fd = stdin; - else { - fd = fopen( filename, "rb" ); - if (fd == NULL) - return MONGO_ERROR; - } - - /* Generate and append an oid*/ - bson_oid_gen( &id ); - - /* Insert the file chunk by chunk */ - chunkLen = fread( buffer, 1, DEFAULT_CHUNK_SIZE, fd ); - do { - oChunk = chunk_new( id, chunkNumber, buffer, chunkLen ); - mongo_insert( gfs->client, gfs->chunks_ns, oChunk, NULL ); - chunk_free( oChunk ); - length += chunkLen; - chunkNumber++; - chunkLen = fread( buffer, 1, DEFAULT_CHUNK_SIZE, fd ); - } while ( chunkLen != 0 ); - - /* Close the file stream */ - if ( fd != stdin ) fclose( fd ); - - /* Large files Assertion */ - /* assert(length <= 0xffffffff); */ - - /* Optional Remote Name */ - if ( remotename == NULL || *remotename == '\0' ) { - remotename = filename; - } - - /* Inserts file's metadata */ - return gridfs_insert_file( gfs, remotename, id, length, contenttype ); -} - -MONGO_EXPORT void gridfs_remove_filename( gridfs *gfs, const char *filename ) { - bson query; - mongo_cursor *files; - bson file; - bson_iterator it; - bson_oid_t id; - bson b; - - bson_init( &query ); - bson_append_string( &query, "filename", filename ); - bson_finish( &query ); - files = mongo_find( gfs->client, gfs->files_ns, &query, NULL, 0, 0, 0 ); - bson_destroy( &query ); - - /* Remove each file and it's chunks from files named filename */ - while ( mongo_cursor_next( files ) == MONGO_OK ) { - file = files->current; - bson_find( &it, &file, "_id" ); - id = *bson_iterator_oid( &it ); - - /* Remove the file with the specified id */ - bson_init( &b ); - bson_append_oid( &b, "_id", &id ); - bson_finish( &b ); - mongo_remove( gfs->client, gfs->files_ns, &b, NULL ); - bson_destroy( &b ); - - /* Remove all chunks from the file with the specified id */ - bson_init( &b ); - bson_append_oid( &b, "files_id", &id ); - bson_finish( &b ); - mongo_remove( gfs->client, gfs->chunks_ns, &b, NULL ); - bson_destroy( &b ); - } - - mongo_cursor_destroy( files ); -} - -int gridfs_find_query( gridfs *gfs, bson *query, - gridfile *gfile ) { - - bson uploadDate; - bson finalQuery; - bson out; - int i; - - bson_init( &uploadDate ); - bson_append_int( &uploadDate, "uploadDate", -1 ); - bson_finish( &uploadDate ); - - bson_init( &finalQuery ); - bson_append_bson( &finalQuery, "query", query ); - bson_append_bson( &finalQuery, "orderby", &uploadDate ); - bson_finish( &finalQuery ); - - i = ( mongo_find_one( gfs->client, gfs->files_ns, - &finalQuery, NULL, &out ) == MONGO_OK ); - bson_destroy( &uploadDate ); - bson_destroy( &finalQuery ); - if ( !i 
) - return MONGO_ERROR; - else { - gridfile_init( gfs, &out, gfile ); - bson_destroy( &out ); - return MONGO_OK; - } -} - -int gridfs_find_filename( gridfs *gfs, const char *filename, - gridfile *gfile ) - -{ - bson query; - int i; - - bson_init( &query ); - bson_append_string( &query, "filename", filename ); - bson_finish( &query ); - i = gridfs_find_query( gfs, &query, gfile ); - bson_destroy( &query ); - return i; -} - -int gridfile_init( gridfs *gfs, bson *meta, gridfile *gfile ) - -{ - gfile->gfs = gfs; - gfile->pos = 0; - gfile->meta = ( bson * )bson_malloc( sizeof( bson ) ); - if ( gfile->meta == NULL ) return MONGO_ERROR; - bson_copy( gfile->meta, meta ); - return MONGO_OK; -} - -MONGO_EXPORT void gridfile_destroy( gridfile *gfile ) - -{ - bson_destroy( gfile->meta ); - bson_free( gfile->meta ); -} - -bson_bool_t gridfile_exists( gridfile *gfile ) { - return ( bson_bool_t )( gfile != NULL || gfile->meta == NULL ); -} - -MONGO_EXPORT const char *gridfile_get_filename( gridfile *gfile ) { - bson_iterator it; - - bson_find( &it, gfile->meta, "filename" ); - return bson_iterator_string( &it ); -} - -MONGO_EXPORT int gridfile_get_chunksize( gridfile *gfile ) { - bson_iterator it; - - bson_find( &it, gfile->meta, "chunkSize" ); - return bson_iterator_int( &it ); -} - -MONGO_EXPORT gridfs_offset gridfile_get_contentlength( gridfile *gfile ) { - bson_iterator it; - - bson_find( &it, gfile->meta, "length" ); - - if( bson_iterator_type( &it ) == BSON_INT ) - return ( gridfs_offset )bson_iterator_int( &it ); - else - return ( gridfs_offset )bson_iterator_long( &it ); -} - -MONGO_EXPORT const char *gridfile_get_contenttype( gridfile *gfile ) { - bson_iterator it; - - if ( bson_find( &it, gfile->meta, "contentType" ) ) - return bson_iterator_string( &it ); - else return NULL; -} - -MONGO_EXPORT bson_date_t gridfile_get_uploaddate( gridfile *gfile ) { - bson_iterator it; - - bson_find( &it, gfile->meta, "uploadDate" ); - return bson_iterator_date( &it ); -} - -MONGO_EXPORT const char *gridfile_get_md5( gridfile *gfile ) { - bson_iterator it; - - bson_find( &it, gfile->meta, "md5" ); - return bson_iterator_string( &it ); -} - -const char *gridfile_get_field( gridfile *gfile, const char *name ) { - bson_iterator it; - - bson_find( &it, gfile->meta, name ); - return bson_iterator_value( &it ); -} - -bson_bool_t gridfile_get_boolean( gridfile *gfile, const char *name ) { - bson_iterator it; - - bson_find( &it, gfile->meta, name ); - return bson_iterator_bool( &it ); -} - -MONGO_EXPORT void gridfile_get_metadata( gridfile *gfile, bson* out ) { - bson_iterator it; - - if ( bson_find( &it, gfile->meta, "metadata" ) ) - bson_iterator_subobject( &it, out ); - else - bson_empty( out ); -} - -MONGO_EXPORT int gridfile_get_numchunks( gridfile *gfile ) { - bson_iterator it; - gridfs_offset length; - gridfs_offset chunkSize; - double numchunks; - - bson_find( &it, gfile->meta, "length" ); - - if( bson_iterator_type( &it ) == BSON_INT ) - length = ( gridfs_offset )bson_iterator_int( &it ); - else - length = ( gridfs_offset )bson_iterator_long( &it ); - - bson_find( &it, gfile->meta, "chunkSize" ); - chunkSize = bson_iterator_int( &it ); - numchunks = ( ( double )length/( double )chunkSize ); - return ( numchunks - ( int )numchunks > 0 ) - ? 
( int )( numchunks+1 ) - : ( int )( numchunks ); -} - -MONGO_EXPORT void gridfile_get_chunk( gridfile *gfile, int n, bson* out ) { - bson query; - - bson_iterator it; - bson_oid_t id; - int result; - - bson_init( &query ); - bson_find( &it, gfile->meta, "_id" ); - id = *bson_iterator_oid( &it ); - bson_append_oid( &query, "files_id", &id ); - bson_append_int( &query, "n", n ); - bson_finish( &query ); - - result = (mongo_find_one(gfile->gfs->client, - gfile->gfs->chunks_ns, - &query, NULL, out ) == MONGO_OK ); - bson_destroy( &query ); - if (!result) { - bson empty; - bson_empty(&empty); - bson_copy(out, &empty); - } -} - -MONGO_EXPORT mongo_cursor *gridfile_get_chunks( gridfile *gfile, int start, int size ) { - bson_iterator it; - bson_oid_t id; - bson gte; - bson query; - bson orderby; - bson command; - mongo_cursor *cursor; - - bson_find( &it, gfile->meta, "_id" ); - id = *bson_iterator_oid( &it ); - - bson_init( &query ); - bson_append_oid( &query, "files_id", &id ); - if ( size == 1 ) { - bson_append_int( &query, "n", start ); - } else { - bson_init( >e ); - bson_append_int( >e, "$gte", start ); - bson_finish( >e ); - bson_append_bson( &query, "n", >e ); - bson_destroy( >e ); - } - bson_finish( &query ); - - bson_init( &orderby ); - bson_append_int( &orderby, "n", 1 ); - bson_finish( &orderby ); - - bson_init( &command ); - bson_append_bson( &command, "query", &query ); - bson_append_bson( &command, "orderby", &orderby ); - bson_finish( &command ); - - cursor = mongo_find( gfile->gfs->client, gfile->gfs->chunks_ns, - &command, NULL, size, 0, 0 ); - - bson_destroy( &command ); - bson_destroy( &query ); - bson_destroy( &orderby ); - - return cursor; -} - -gridfs_offset gridfile_write_file( gridfile *gfile, FILE *stream ) { - int i; - size_t len; - bson chunk; - bson_iterator it; - const char *data; - const int num = gridfile_get_numchunks( gfile ); - - for ( i=0; ipos < size ) - ? contentlength - gfile->pos - : size; - bytes_left = size; - - first_chunk = ( gfile->pos )/chunksize; - last_chunk = ( gfile->pos+size-1 )/chunksize; - total_chunks = last_chunk - first_chunk + 1; - chunks = gridfile_get_chunks( gfile, first_chunk, total_chunks ); - - for ( i = 0; i < total_chunks; i++ ) { - mongo_cursor_next( chunks ); - chunk = chunks->current; - bson_find( &it, &chunk, "data" ); - chunk_len = bson_iterator_bin_len( &it ); - chunk_data = bson_iterator_bin_data( &it ); - if ( i == 0 ) { - chunk_data += ( gfile->pos )%chunksize; - chunk_len -= ( gfile->pos )%chunksize; - } - if ( bytes_left > chunk_len ) { - memcpy( buf, chunk_data, chunk_len ); - bytes_left -= chunk_len; - buf += chunk_len; - } else { - memcpy( buf, chunk_data, bytes_left ); - } - } - - mongo_cursor_destroy( chunks ); - gfile->pos = gfile->pos + size; - - return size; -} - -MONGO_EXPORT gridfs_offset gridfile_seek( gridfile *gfile, gridfs_offset offset ) { - gridfs_offset length; - - length = gridfile_get_contentlength( gfile ); - gfile->pos = length < offset ? length : offset; - return gfile->pos; -} diff --git a/mongo-c-driver-v0.6/src/gridfs.h b/mongo-c-driver-v0.6/src/gridfs.h deleted file mode 100644 index d313e1a..0000000 --- a/mongo-c-driver-v0.6/src/gridfs.h +++ /dev/null @@ -1,332 +0,0 @@ -/** @file gridfs.h - * - * @brief GridFS declarations - * - * */ - -/* Copyright 2009-2012 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "mongo.h" - -#ifndef MONGO_GRIDFS_H_ -#define MONGO_GRIDFS_H_ - -enum {DEFAULT_CHUNK_SIZE = 256 * 1024}; - -typedef uint64_t gridfs_offset; - -/* A GridFS represents a single collection of GridFS files in the database. */ -typedef struct { - mongo *client; /**> The client to db-connection. */ - const char *dbname; /**> The root database name */ - const char *prefix; /**> The prefix of the GridFS's collections, default is NULL */ - const char *files_ns; /**> The namespace where the file's metadata is stored */ - const char *chunks_ns; /**. The namespace where the files's data is stored in chunks */ -} gridfs; - -/* A GridFile is a single GridFS file. */ -typedef struct { - gridfs *gfs; /**> The GridFS where the GridFile is located */ - bson *meta; /**> The GridFile's bson object where all its metadata is located */ - gridfs_offset pos; /**> The position is the offset in the file */ - bson_oid_t id; /**> The files_id of the gridfile */ - char *remote_name; /**> The name of the gridfile as a string */ - char *content_type; /**> The gridfile's content type */ - gridfs_offset length; /**> The length of this gridfile */ - int chunk_num; /**> The number of the current chunk being written to */ - char *pending_data; /**> A buffer storing data still to be written to chunks */ - int pending_len; /**> Length of pending_data buffer */ -} gridfile; - -MONGO_EXPORT gridfs* gridfs_create(); -MONGO_EXPORT void gridfs_dispose(gridfs* gfs); -MONGO_EXPORT gridfile* gridfile_create(); -MONGO_EXPORT void gridfile_dispose(gridfile* gf); -MONGO_EXPORT void gridfile_get_descriptor(gridfile* gf, bson* out); - -/** - * Initializes a GridFS object - * @param client - db connection - * @param dbname - database name - * @param prefix - collection prefix, default is fs if NULL or empty - * @param gfs - the GridFS object to initialize - * - * @return - MONGO_OK or MONGO_ERROR. - */ -MONGO_EXPORT int gridfs_init( mongo *client, const char *dbname, - const char *prefix, gridfs *gfs ); - -/** - * Destroys a GridFS object. Call this when finished with - * the object.. - * - * @param gfs a grid - */ -MONGO_EXPORT void gridfs_destroy( gridfs *gfs ); - -/** - * Initializes a gridfile for writing incrementally with gridfs_write_buffer. - * Once initialized, you can write any number of buffers with gridfs_write_buffer. - * When done, you must call gridfs_writer_done to save the file metadata. - * - */ -MONGO_EXPORT void gridfile_writer_init( gridfile *gfile, gridfs *gfs, const char *remote_name, - const char *content_type ); - -/** - * Write to a GridFS file incrementally. You can call this function any number - * of times with a new buffer each time. This allows you to effectively - * stream to a GridFS file. When finished, be sure to call gridfs_writer_done. - * - */ -MONGO_EXPORT void gridfile_write_buffer( gridfile *gfile, const char *data, - gridfs_offset length ); - -/** - * Signal that writing of this gridfile is complete by - * writing any buffered chunks along with the entry in the - * files collection. - * - * @return - MONGO_OK or MONGO_ERROR. 
- */ -MONGO_EXPORT int gridfile_writer_done( gridfile *gfile ); - -/** - * Store a buffer as a GridFS file. - * @param gfs - the working GridFS - * @param data - pointer to buffer to store in GridFS - * @param length - length of the buffer - * @param remotename - filename for use in the database - * @param contenttype - optional MIME type for this object - * - * @return - MONGO_OK or MONGO_ERROR. - */ -MONGO_EXPORT int gridfs_store_buffer( gridfs *gfs, const char *data, gridfs_offset length, - const char *remotename, - const char *contenttype ); - -/** - * Open the file referenced by filename and store it as a GridFS file. - * @param gfs - the working GridFS - * @param filename - local filename relative to the process - * @param remotename - optional filename for use in the database - * @param contenttype - optional MIME type for this object - * - * @return - MONGO_OK or MONGO_ERROR. - */ -MONGO_EXPORT int gridfs_store_file( gridfs *gfs, const char *filename, - const char *remotename, const char *contenttype ); - -/** - * Removes the files referenced by filename from the db - * @param gfs - the working GridFS - * @param filename - the filename of the file/s to be removed - */ -MONGO_EXPORT void gridfs_remove_filename( gridfs *gfs, const char *filename ); - -/** - * Find the first file matching the provided query within the - * GridFS files collection, and return the file as a GridFile. - * - * @param gfs - the working GridFS - * @param query - a pointer to the bson with the query data - * @param gfile - the output GridFile to be initialized - * - * @return MONGO_OK if successful, MONGO_ERROR otherwise - */ -MONGO_EXPORT int gridfs_find_query( gridfs *gfs, bson *query, gridfile *gfile ); - -/** - * Find the first file referenced by filename within the GridFS - * and return it as a GridFile - * @param gfs - the working GridFS - * @param filename - filename of the file to find - * @param gfile - the output GridFile to be intialized - * - * @return MONGO_OK or MONGO_ERROR. - */ -MONGO_EXPORT int gridfs_find_filename( gridfs *gfs, const char *filename, gridfile *gfile ); - -/** - * Initializes a GridFile containing the GridFS and file bson - * @param gfs - the GridFS where the GridFile is located - * @param meta - the file object - * @param gfile - the output GridFile that is being initialized - * - * @return - MONGO_OK or MONGO_ERROR. 
- */ -MONGO_EXPORT int gridfile_init( gridfs *gfs, bson *meta, gridfile *gfile ); - -/** - * Destroys the GridFile - * - * @param oGridFIle - the GridFile being destroyed - */ -MONGO_EXPORT void gridfile_destroy( gridfile *gfile ); - -/** - * Returns whether or not the GridFile exists - * @param gfile - the GridFile being examined - */ -MONGO_EXPORT bson_bool_t gridfile_exists( gridfile *gfile ); - -/** - * Returns the filename of GridFile - * @param gfile - the working GridFile - * - * @return - the filename of the Gridfile - */ -MONGO_EXPORT const char *gridfile_get_filename( gridfile *gfile ); - -/** - * Returns the size of the chunks of the GridFile - * @param gfile - the working GridFile - * - * @return - the size of the chunks of the Gridfile - */ -MONGO_EXPORT int gridfile_get_chunksize( gridfile *gfile ); - -/** - * Returns the length of GridFile's data - * - * @param gfile - the working GridFile - * - * @return - the length of the Gridfile's data - */ -MONGO_EXPORT gridfs_offset gridfile_get_contentlength( gridfile *gfile ); - -/** - * Returns the MIME type of the GridFile - * - * @param gfile - the working GridFile - * - * @return - the MIME type of the Gridfile - * (NULL if no type specified) - */ -MONGO_EXPORT const char *gridfile_get_contenttype( gridfile *gfile ); - -/** - * Returns the upload date of GridFile - * - * @param gfile - the working GridFile - * - * @return - the upload date of the Gridfile - */ -MONGO_EXPORT bson_date_t gridfile_get_uploaddate( gridfile *gfile ); - -/** - * Returns the MD5 of GridFile - * - * @param gfile - the working GridFile - * - * @return - the MD5 of the Gridfile - */ -MONGO_EXPORT const char *gridfile_get_md5( gridfile *gfile ); - -/** - * Returns the field in GridFile specified by name - * - * @param gfile - the working GridFile - * @param name - the name of the field to be returned - * - * @return - the data of the field specified - * (NULL if none exists) - */ -const char *gridfile_get_field( gridfile *gfile, - const char *name ); - -/** - * Returns a boolean field in GridFile specified by name - * @param gfile - the working GridFile - * @param name - the name of the field to be returned - * - * @return - the boolean of the field specified - * (NULL if none exists) - */ -bson_bool_t gridfile_get_boolean( gridfile *gfile, - const char *name ); - -/** - * Returns the metadata of GridFile - * @param gfile - the working GridFile - * - * @return - the metadata of the Gridfile in a bson object - * (an empty bson is returned if none exists) - */ -MONGO_EXPORT void gridfile_get_metadata( gridfile *gfile, bson* out ); - -/** - * Returns the number of chunks in the GridFile - * @param gfile - the working GridFile - * - * @return - the number of chunks in the Gridfile - */ -MONGO_EXPORT int gridfile_get_numchunks( gridfile *gfile ); - -/** - * Returns chunk n of GridFile - * @param gfile - the working GridFile - * - * @return - the nth chunk of the Gridfile - */ -MONGO_EXPORT void gridfile_get_chunk( gridfile *gfile, int n, bson* out ); - -/** - * Returns a mongo_cursor of *size* chunks starting with chunk *start* - * - * @param gfile - the working GridFile - * @param start - the first chunk in the cursor - * @param size - the number of chunks to be returned - * - * @return - mongo_cursor of the chunks (must be destroyed after use) - */ -MONGO_EXPORT mongo_cursor *gridfile_get_chunks( gridfile *gfile, int start, int size ); - -/** - * Writes the GridFile to a stream - * - * @param gfile - the working GridFile - * @param stream - the file stream to 
write to - */ -MONGO_EXPORT gridfs_offset gridfile_write_file( gridfile *gfile, FILE *stream ); - -/** - * Reads length bytes from the GridFile to a buffer - * and updates the position in the file. - * (assumes the buffer is large enough) - * (if size is greater than EOF gridfile_read reads until EOF) - * - * @param gfile - the working GridFile - * @param size - the amount of bytes to be read - * @param buf - the buffer to read to - * - * @return - the number of bytes read - */ -MONGO_EXPORT gridfs_offset gridfile_read( gridfile *gfile, gridfs_offset size, char *buf ); - -/** - * Updates the position in the file - * (If the offset goes beyond the contentlength, - * the position is updated to the end of the file.) - * - * @param gfile - the working GridFile - * @param offset - the position to update to - * - * @return - resulting offset location - */ -MONGO_EXPORT gridfs_offset gridfile_seek( gridfile *gfile, gridfs_offset offset ); - -#endif diff --git a/mongo-c-driver-v0.6/src/md5.c b/mongo-c-driver-v0.6/src/md5.c deleted file mode 100644 index 68edd29..0000000 --- a/mongo-c-driver-v0.6/src/md5.c +++ /dev/null @@ -1,381 +0,0 @@ -/* - Copyright (C) 1999, 2000, 2002 Aladdin Enterprises. All rights reserved. - - This software is provided 'as-is', without any express or implied - warranty. In no event will the authors be held liable for any damages - arising from the use of this software. - - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. - - L. Peter Deutsch - ghost@aladdin.com - - */ -/* $Id: md5.c,v 1.6 2002/04/13 19:20:28 lpd Exp $ */ -/* - Independent implementation of MD5 (RFC 1321). - - This code implements the MD5 Algorithm defined in RFC 1321, whose - text is available at - http://www.ietf.org/rfc/rfc1321.txt - The code is derived from the text of the RFC, including the test suite - (section A.5) but excluding the rest of Appendix A. It does not include - any code or documentation that is identified in the RFC as being - copyrighted. - - The original and principal author of md5.c is L. Peter Deutsch - . Other authors are noted in the change history - that follows (in reverse chronological order): - - 2002-04-13 lpd Clarified derivation from RFC 1321; now handles byte order - either statically or dynamically; added missing #include - in library. - 2002-03-11 lpd Corrected argument list for main(), and added int return - type, in test program and T value program. - 2002-02-21 lpd Added missing #include in test program. - 2000-07-03 lpd Patched to eliminate warnings about "constant is - unsigned in ANSI C, signed in traditional"; made test program - self-checking. - 1999-11-04 lpd Edited comments slightly for automatic TOC extraction. - 1999-10-18 lpd Fixed typo in header comment (ansi2knr rather than md5). - 1999-05-03 lpd Original version. 
- */ - -#include "md5.h" -#include - -#undef BYTE_ORDER /* 1 = big-endian, -1 = little-endian, 0 = unknown */ -#ifdef MONGO_BIG_ENDIAN -# define BYTE_ORDER 1 -#else -# define BYTE_ORDER -1 -#endif - -#define T_MASK ((mongo_md5_word_t)~0) -#define T1 /* 0xd76aa478 */ (T_MASK ^ 0x28955b87) -#define T2 /* 0xe8c7b756 */ (T_MASK ^ 0x173848a9) -#define T3 0x242070db -#define T4 /* 0xc1bdceee */ (T_MASK ^ 0x3e423111) -#define T5 /* 0xf57c0faf */ (T_MASK ^ 0x0a83f050) -#define T6 0x4787c62a -#define T7 /* 0xa8304613 */ (T_MASK ^ 0x57cfb9ec) -#define T8 /* 0xfd469501 */ (T_MASK ^ 0x02b96afe) -#define T9 0x698098d8 -#define T10 /* 0x8b44f7af */ (T_MASK ^ 0x74bb0850) -#define T11 /* 0xffff5bb1 */ (T_MASK ^ 0x0000a44e) -#define T12 /* 0x895cd7be */ (T_MASK ^ 0x76a32841) -#define T13 0x6b901122 -#define T14 /* 0xfd987193 */ (T_MASK ^ 0x02678e6c) -#define T15 /* 0xa679438e */ (T_MASK ^ 0x5986bc71) -#define T16 0x49b40821 -#define T17 /* 0xf61e2562 */ (T_MASK ^ 0x09e1da9d) -#define T18 /* 0xc040b340 */ (T_MASK ^ 0x3fbf4cbf) -#define T19 0x265e5a51 -#define T20 /* 0xe9b6c7aa */ (T_MASK ^ 0x16493855) -#define T21 /* 0xd62f105d */ (T_MASK ^ 0x29d0efa2) -#define T22 0x02441453 -#define T23 /* 0xd8a1e681 */ (T_MASK ^ 0x275e197e) -#define T24 /* 0xe7d3fbc8 */ (T_MASK ^ 0x182c0437) -#define T25 0x21e1cde6 -#define T26 /* 0xc33707d6 */ (T_MASK ^ 0x3cc8f829) -#define T27 /* 0xf4d50d87 */ (T_MASK ^ 0x0b2af278) -#define T28 0x455a14ed -#define T29 /* 0xa9e3e905 */ (T_MASK ^ 0x561c16fa) -#define T30 /* 0xfcefa3f8 */ (T_MASK ^ 0x03105c07) -#define T31 0x676f02d9 -#define T32 /* 0x8d2a4c8a */ (T_MASK ^ 0x72d5b375) -#define T33 /* 0xfffa3942 */ (T_MASK ^ 0x0005c6bd) -#define T34 /* 0x8771f681 */ (T_MASK ^ 0x788e097e) -#define T35 0x6d9d6122 -#define T36 /* 0xfde5380c */ (T_MASK ^ 0x021ac7f3) -#define T37 /* 0xa4beea44 */ (T_MASK ^ 0x5b4115bb) -#define T38 0x4bdecfa9 -#define T39 /* 0xf6bb4b60 */ (T_MASK ^ 0x0944b49f) -#define T40 /* 0xbebfbc70 */ (T_MASK ^ 0x4140438f) -#define T41 0x289b7ec6 -#define T42 /* 0xeaa127fa */ (T_MASK ^ 0x155ed805) -#define T43 /* 0xd4ef3085 */ (T_MASK ^ 0x2b10cf7a) -#define T44 0x04881d05 -#define T45 /* 0xd9d4d039 */ (T_MASK ^ 0x262b2fc6) -#define T46 /* 0xe6db99e5 */ (T_MASK ^ 0x1924661a) -#define T47 0x1fa27cf8 -#define T48 /* 0xc4ac5665 */ (T_MASK ^ 0x3b53a99a) -#define T49 /* 0xf4292244 */ (T_MASK ^ 0x0bd6ddbb) -#define T50 0x432aff97 -#define T51 /* 0xab9423a7 */ (T_MASK ^ 0x546bdc58) -#define T52 /* 0xfc93a039 */ (T_MASK ^ 0x036c5fc6) -#define T53 0x655b59c3 -#define T54 /* 0x8f0ccc92 */ (T_MASK ^ 0x70f3336d) -#define T55 /* 0xffeff47d */ (T_MASK ^ 0x00100b82) -#define T56 /* 0x85845dd1 */ (T_MASK ^ 0x7a7ba22e) -#define T57 0x6fa87e4f -#define T58 /* 0xfe2ce6e0 */ (T_MASK ^ 0x01d3191f) -#define T59 /* 0xa3014314 */ (T_MASK ^ 0x5cfebceb) -#define T60 0x4e0811a1 -#define T61 /* 0xf7537e82 */ (T_MASK ^ 0x08ac817d) -#define T62 /* 0xbd3af235 */ (T_MASK ^ 0x42c50dca) -#define T63 0x2ad7d2bb -#define T64 /* 0xeb86d391 */ (T_MASK ^ 0x14792c6e) - - -static void -mongo_md5_process(mongo_md5_state_t *pms, const mongo_md5_byte_t *data /*[64]*/) -{ - mongo_md5_word_t - a = pms->abcd[0], b = pms->abcd[1], - c = pms->abcd[2], d = pms->abcd[3]; - mongo_md5_word_t t; -#if BYTE_ORDER > 0 - /* Define storage only for big-endian CPUs. */ - mongo_md5_word_t X[16]; -#else - /* Define storage for little-endian or both types of CPUs. 
*/ - mongo_md5_word_t xbuf[16]; - const mongo_md5_word_t *X; -#endif - - { -#if BYTE_ORDER == 0 - /* - * Determine dynamically whether this is a big-endian or - * little-endian machine, since we can use a more efficient - * algorithm on the latter. - */ - static const int w = 1; - - if (*((const mongo_md5_byte_t *)&w)) /* dynamic little-endian */ -#endif -#if BYTE_ORDER <= 0 /* little-endian */ - { - /* - * On little-endian machines, we can process properly aligned - * data without copying it. - */ - if (!((data - (const mongo_md5_byte_t *)0) & 3)) { - /* data are properly aligned */ - X = (const mongo_md5_word_t *)data; - } else { - /* not aligned */ - memcpy(xbuf, data, 64); - X = xbuf; - } - } -#endif -#if BYTE_ORDER == 0 - else /* dynamic big-endian */ -#endif -#if BYTE_ORDER >= 0 /* big-endian */ - { - /* - * On big-endian machines, we must arrange the bytes in the - * right order. - */ - const mongo_md5_byte_t *xp = data; - int i; - -# if BYTE_ORDER == 0 - X = xbuf; /* (dynamic only) */ -# else -# define xbuf X /* (static only) */ -# endif - for (i = 0; i < 16; ++i, xp += 4) - xbuf[i] = xp[0] + (xp[1] << 8) + (xp[2] << 16) + (xp[3] << 24); - } -#endif - } - -#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32 - (n)))) - - /* Round 1. */ - /* Let [abcd k s i] denote the operation - a = b + ((a + F(b,c,d) + X[k] + T[i]) <<< s). */ -#define F(x, y, z) (((x) & (y)) | (~(x) & (z))) -#define SET(a, b, c, d, k, s, Ti)\ - t = a + F(b,c,d) + X[k] + Ti;\ - a = ROTATE_LEFT(t, s) + b - /* Do the following 16 operations. */ - SET(a, b, c, d, 0, 7, T1); - SET(d, a, b, c, 1, 12, T2); - SET(c, d, a, b, 2, 17, T3); - SET(b, c, d, a, 3, 22, T4); - SET(a, b, c, d, 4, 7, T5); - SET(d, a, b, c, 5, 12, T6); - SET(c, d, a, b, 6, 17, T7); - SET(b, c, d, a, 7, 22, T8); - SET(a, b, c, d, 8, 7, T9); - SET(d, a, b, c, 9, 12, T10); - SET(c, d, a, b, 10, 17, T11); - SET(b, c, d, a, 11, 22, T12); - SET(a, b, c, d, 12, 7, T13); - SET(d, a, b, c, 13, 12, T14); - SET(c, d, a, b, 14, 17, T15); - SET(b, c, d, a, 15, 22, T16); -#undef SET - - /* Round 2. */ - /* Let [abcd k s i] denote the operation - a = b + ((a + G(b,c,d) + X[k] + T[i]) <<< s). */ -#define G(x, y, z) (((x) & (z)) | ((y) & ~(z))) -#define SET(a, b, c, d, k, s, Ti)\ - t = a + G(b,c,d) + X[k] + Ti;\ - a = ROTATE_LEFT(t, s) + b - /* Do the following 16 operations. */ - SET(a, b, c, d, 1, 5, T17); - SET(d, a, b, c, 6, 9, T18); - SET(c, d, a, b, 11, 14, T19); - SET(b, c, d, a, 0, 20, T20); - SET(a, b, c, d, 5, 5, T21); - SET(d, a, b, c, 10, 9, T22); - SET(c, d, a, b, 15, 14, T23); - SET(b, c, d, a, 4, 20, T24); - SET(a, b, c, d, 9, 5, T25); - SET(d, a, b, c, 14, 9, T26); - SET(c, d, a, b, 3, 14, T27); - SET(b, c, d, a, 8, 20, T28); - SET(a, b, c, d, 13, 5, T29); - SET(d, a, b, c, 2, 9, T30); - SET(c, d, a, b, 7, 14, T31); - SET(b, c, d, a, 12, 20, T32); -#undef SET - - /* Round 3. */ - /* Let [abcd k s t] denote the operation - a = b + ((a + H(b,c,d) + X[k] + T[i]) <<< s). */ -#define H(x, y, z) ((x) ^ (y) ^ (z)) -#define SET(a, b, c, d, k, s, Ti)\ - t = a + H(b,c,d) + X[k] + Ti;\ - a = ROTATE_LEFT(t, s) + b - /* Do the following 16 operations. 
*/ - SET(a, b, c, d, 5, 4, T33); - SET(d, a, b, c, 8, 11, T34); - SET(c, d, a, b, 11, 16, T35); - SET(b, c, d, a, 14, 23, T36); - SET(a, b, c, d, 1, 4, T37); - SET(d, a, b, c, 4, 11, T38); - SET(c, d, a, b, 7, 16, T39); - SET(b, c, d, a, 10, 23, T40); - SET(a, b, c, d, 13, 4, T41); - SET(d, a, b, c, 0, 11, T42); - SET(c, d, a, b, 3, 16, T43); - SET(b, c, d, a, 6, 23, T44); - SET(a, b, c, d, 9, 4, T45); - SET(d, a, b, c, 12, 11, T46); - SET(c, d, a, b, 15, 16, T47); - SET(b, c, d, a, 2, 23, T48); -#undef SET - - /* Round 4. */ - /* Let [abcd k s t] denote the operation - a = b + ((a + I(b,c,d) + X[k] + T[i]) <<< s). */ -#define I(x, y, z) ((y) ^ ((x) | ~(z))) -#define SET(a, b, c, d, k, s, Ti)\ - t = a + I(b,c,d) + X[k] + Ti;\ - a = ROTATE_LEFT(t, s) + b - /* Do the following 16 operations. */ - SET(a, b, c, d, 0, 6, T49); - SET(d, a, b, c, 7, 10, T50); - SET(c, d, a, b, 14, 15, T51); - SET(b, c, d, a, 5, 21, T52); - SET(a, b, c, d, 12, 6, T53); - SET(d, a, b, c, 3, 10, T54); - SET(c, d, a, b, 10, 15, T55); - SET(b, c, d, a, 1, 21, T56); - SET(a, b, c, d, 8, 6, T57); - SET(d, a, b, c, 15, 10, T58); - SET(c, d, a, b, 6, 15, T59); - SET(b, c, d, a, 13, 21, T60); - SET(a, b, c, d, 4, 6, T61); - SET(d, a, b, c, 11, 10, T62); - SET(c, d, a, b, 2, 15, T63); - SET(b, c, d, a, 9, 21, T64); -#undef SET - - /* Then perform the following additions. (That is increment each - of the four registers by the value it had before this block - was started.) */ - pms->abcd[0] += a; - pms->abcd[1] += b; - pms->abcd[2] += c; - pms->abcd[3] += d; -} - -MONGO_EXPORT void -mongo_md5_init(mongo_md5_state_t *pms) -{ - pms->count[0] = pms->count[1] = 0; - pms->abcd[0] = 0x67452301; - pms->abcd[1] = /*0xefcdab89*/ T_MASK ^ 0x10325476; - pms->abcd[2] = /*0x98badcfe*/ T_MASK ^ 0x67452301; - pms->abcd[3] = 0x10325476; -} - -MONGO_EXPORT void -mongo_md5_append(mongo_md5_state_t *pms, const mongo_md5_byte_t *data, int nbytes) -{ - const mongo_md5_byte_t *p = data; - int left = nbytes; - int offset = (pms->count[0] >> 3) & 63; - mongo_md5_word_t nbits = (mongo_md5_word_t)(nbytes << 3); - - if (nbytes <= 0) - return; - - /* Update the message length. */ - pms->count[1] += nbytes >> 29; - pms->count[0] += nbits; - if (pms->count[0] < nbits) - pms->count[1]++; - - /* Process an initial partial block. */ - if (offset) { - int copy = (offset + nbytes > 64 ? 64 - offset : nbytes); - - memcpy(pms->buf + offset, p, copy); - if (offset + copy < 64) - return; - p += copy; - left -= copy; - mongo_md5_process(pms, pms->buf); - } - - /* Process full blocks. */ - for (; left >= 64; p += 64, left -= 64) - mongo_md5_process(pms, p); - - /* Process a final partial block. */ - if (left) - memcpy(pms->buf, p, left); -} - -MONGO_EXPORT void -mongo_md5_finish(mongo_md5_state_t *pms, mongo_md5_byte_t digest[16]) -{ - static const mongo_md5_byte_t pad[64] = { - 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 - }; - mongo_md5_byte_t data[8]; - int i; - - /* Save the length before padding. */ - for (i = 0; i < 8; ++i) - data[i] = (mongo_md5_byte_t)(pms->count[i >> 2] >> ((i & 3) << 3)); - /* Pad to 56 bytes mod 64. */ - mongo_md5_append(pms, pad, ((55 - (pms->count[0] >> 3)) & 63) + 1); - /* Append the length. 
*/ - mongo_md5_append(pms, data, 8); - for (i = 0; i < 16; ++i) - digest[i] = (mongo_md5_byte_t)(pms->abcd[i >> 2] >> ((i & 3) << 3)); -} diff --git a/mongo-c-driver-v0.6/src/md5.h b/mongo-c-driver-v0.6/src/md5.h deleted file mode 100644 index 342b6a7..0000000 --- a/mongo-c-driver-v0.6/src/md5.h +++ /dev/null @@ -1,92 +0,0 @@ -/* - Copyright (C) 1999, 2002 Aladdin Enterprises. All rights reserved. - - This software is provided 'as-is', without any express or implied - warranty. In no event will the authors be held liable for any damages - arising from the use of this software. - - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. - - L. Peter Deutsch - ghost@aladdin.com - - */ -/* $Id: md5.h,v 1.4 2002/04/13 19:20:28 lpd Exp $ */ -/* - Independent implementation of MD5 (RFC 1321). - - This code implements the MD5 Algorithm defined in RFC 1321, whose - text is available at - http://www.ietf.org/rfc/rfc1321.txt - The code is derived from the text of the RFC, including the test suite - (section A.5) but excluding the rest of Appendix A. It does not include - any code or documentation that is identified in the RFC as being - copyrighted. - - The original and principal author of md5.h is L. Peter Deutsch - . Other authors are noted in the change history - that follows (in reverse chronological order): - - 2002-04-13 lpd Removed support for non-ANSI compilers; removed - references to Ghostscript; clarified derivation from RFC 1321; - now handles byte order either statically or dynamically. - 1999-11-04 lpd Edited comments slightly for automatic TOC extraction. - 1999-10-18 lpd Fixed typo in header comment (ansi2knr rather than md5); - added conditionalization for C++ compilation from Martin - Purschke . - 1999-05-03 lpd Original version. - */ - -#ifndef MONGO_MD5_H_ -#define MONGO_MD5_H_ - -/* - * This package supports both compile-time and run-time determination of CPU - * byte order. If ARCH_IS_BIG_ENDIAN is defined as 0, the code will be - * compiled to run only on little-endian CPUs; if ARCH_IS_BIG_ENDIAN is - * defined as non-zero, the code will be compiled to run only on big-endian - * CPUs; if ARCH_IS_BIG_ENDIAN is not defined, the code will be compiled to - * run on either big- or little-endian CPUs, but will run slightly less - * efficiently on either one than if ARCH_IS_BIG_ENDIAN is defined. - */ -#include "bson.h" - -typedef unsigned char mongo_md5_byte_t; /* 8-bit byte */ -typedef unsigned int mongo_md5_word_t; /* 32-bit word */ - -/* Define the state of the MD5 Algorithm. */ -typedef struct mongo_md5_state_s { - mongo_md5_word_t count[2]; /* message length in bits, lsw first */ - mongo_md5_word_t abcd[4]; /* digest buffer */ - mongo_md5_byte_t buf[64]; /* accumulate block */ -} mongo_md5_state_t; - -#ifdef __cplusplus -extern "C" -{ -#endif - -/* Initialize the algorithm. */ -MONGO_EXPORT void mongo_md5_init(mongo_md5_state_t *pms); - -/* Append a string to the message. 
*/ -MONGO_EXPORT void mongo_md5_append(mongo_md5_state_t *pms, const mongo_md5_byte_t *data, int nbytes); - -/* Finish the message and return the digest. */ -MONGO_EXPORT void mongo_md5_finish(mongo_md5_state_t *pms, mongo_md5_byte_t digest[16]); - -#ifdef __cplusplus -} /* end extern "C" */ -#endif - -#endif /* MONGO_MD5_H_ */ diff --git a/mongo-c-driver-v0.6/src/mongo.c b/mongo-c-driver-v0.6/src/mongo.c deleted file mode 100644 index daf92b4..0000000 --- a/mongo-c-driver-v0.6/src/mongo.c +++ /dev/null @@ -1,1705 +0,0 @@ -/* mongo.c */ - -/* Copyright 2009-2012 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "mongo.h" -#include "md5.h" -#include "env.h" - -#include -#include -#include -#include - -MONGO_EXPORT mongo* mongo_create() { - return (mongo*)bson_malloc(sizeof(mongo)); -} - - -MONGO_EXPORT void mongo_dispose(mongo* conn) { - free(conn); -} - -MONGO_EXPORT int mongo_get_err(mongo* conn) { - return conn->err; -} - - -MONGO_EXPORT int mongo_is_connected(mongo* conn) { - return conn->connected != 0; -} - - -MONGO_EXPORT int mongo_get_op_timeout(mongo* conn) { - return conn->op_timeout_ms; -} - - -const char* _get_host_port(mongo_host_port* hp) { - static char _hp[sizeof(hp->host)+12]; - bson_sprintf(_hp, "%s:%d", hp->host, hp->port); - return _hp; -} - - -MONGO_EXPORT const char* mongo_get_primary(mongo* conn) { - mongo* conn_ = (mongo*)conn; - if( !(conn_->connected) || (conn_->primary->host == '\0') ) - return NULL; - return _get_host_port(conn_->primary); -} - - -MONGO_EXPORT int mongo_get_socket(mongo* conn) { - mongo* conn_ = (mongo*)conn; - return conn_->sock; -} - - -MONGO_EXPORT int mongo_get_host_count(mongo* conn) { - mongo_replset* r = conn->replset; - mongo_host_port* hp; - int count = 0; - if (!r) return 0; - for (hp = r->hosts; hp; hp = hp->next) - ++count; - return count; -} - - -MONGO_EXPORT const char* mongo_get_host(mongo* conn, int i) { - mongo_replset* r = conn->replset; - mongo_host_port* hp; - int count = 0; - if (!r) return 0; - for (hp = r->hosts; hp; hp = hp->next) { - if (count == i) - return _get_host_port(hp); - ++count; - } - return 0; -} - - -MONGO_EXPORT mongo_cursor* mongo_cursor_create() { - return (mongo_cursor*)bson_malloc(sizeof(mongo_cursor)); -} - - -MONGO_EXPORT void mongo_cursor_dispose(mongo_cursor* cursor) { - free(cursor); -} - - -MONGO_EXPORT int mongo_get_server_err(mongo* conn) { - return conn->lasterrcode; -} - - -MONGO_EXPORT const char* mongo_get_server_err_string(mongo* conn) { - return conn->lasterrstr; -} - -MONGO_EXPORT void __mongo_set_error( mongo *conn, mongo_error_t err, const char *str, - int errcode ) { - int errstr_size, str_size; - - conn->err = err; - conn->errcode = errcode; - - if( str ) { - str_size = strlen( str ) + 1; - errstr_size = str_size > MONGO_ERR_LEN ? 
MONGO_ERR_LEN : str_size; - memcpy( conn->errstr, str, errstr_size ); - conn->errstr[errstr_size-1] = '\0'; - } -} - -MONGO_EXPORT void mongo_clear_errors( mongo *conn ) { - conn->err = 0; - conn->errcode = 0; - conn->lasterrcode = 0; - memset( conn->errstr, 0, MONGO_ERR_LEN ); - memset( conn->lasterrstr, 0, MONGO_ERR_LEN ); -} - -/* Note: this function returns a char* which must be freed. */ -static char *mongo_ns_to_cmd_db( const char *ns ) { - char *current = NULL; - char *cmd_db_name = NULL; - int len = 0; - - for( current = (char *)ns; *current != '.'; current++ ) { - len++; - } - - cmd_db_name = (char *)bson_malloc( len + 6 ); - strncpy( cmd_db_name, ns, len ); - strncpy( cmd_db_name + len, ".$cmd", 6 ); - - return cmd_db_name; -} - -MONGO_EXPORT int mongo_validate_ns( mongo *conn, const char *ns ) { - char *last = NULL; - char *current = NULL; - const char *db_name = ns; - char *collection_name = NULL; - char errmsg[64]; - int ns_len = 0; - - /* If the first character is a '.', fail. */ - if( *ns == '.' ) { - __mongo_set_error( conn, MONGO_NS_INVALID, "ns cannot start with a '.'.", 0 ); - return MONGO_ERROR; - } - - /* Find the division between database and collection names. */ - for( current = (char *)ns; *current != '\0'; current++ ) { - if( *current == '.' ) { - current++; - break; - } - } - - /* Fail because the ns doesn't contain a '.' - * or the collection part starts with a dot. */ - if( *current == '\0' || *current == '.' ) { - __mongo_set_error( conn, MONGO_NS_INVALID, "ns cannot start with a '.'.", 0 ); - return MONGO_ERROR; - } - - /* Fail if collection length is 0. */ - if( *(current + 1) == '\0' ) { - __mongo_set_error( conn, MONGO_NS_INVALID, "Collection name missing.", 0 ); - return MONGO_ERROR; - } - - - /* Point to the beginning of the collection name. */ - collection_name = current; - - /* Ensure that the database name is greater than one char.*/ - if( collection_name - 1 == db_name ) { - __mongo_set_error( conn, MONGO_NS_INVALID, "Database name missing.", 0 ); - return MONGO_ERROR; - } - - /* Go back and validate the database name. */ - for( current = (char *)db_name; *current != '.'; current++ ) { - switch( *current ) { - case ' ': - case '$': - case '/': - case '\\': - __mongo_set_error( conn, MONGO_NS_INVALID, - "Database name may not contain ' ', '$', '/', or '\\'", 0 ); - return MONGO_ERROR; - default: - break; - } - - ns_len++; - } - - /* Add one to the length for the '.' character. */ - ns_len++; - - /* Now validate the collection name. */ - for( current = collection_name; *current != '\0'; current++ ) { - - /* Cannot have two consecutive dots. */ - if( last && *last == '.' && *current == '.' ) { - __mongo_set_error( conn, MONGO_NS_INVALID, - "Collection may not contain two consecutive '.'", 0 ); - return MONGO_ERROR; - } - - /* Cannot contain a '$' */ - if( *current == '$' ) { - __mongo_set_error( conn, MONGO_NS_INVALID, - "Collection may not contain '$'", 0 ); - return MONGO_ERROR; - } - - last = current; - ns_len++; - } - - if( ns_len > 128 ) { - bson_sprintf( errmsg, "Namespace too long; has %d but must <= 128.", - ns_len ); - __mongo_set_error( conn, MONGO_NS_INVALID, errmsg, 0 ); - return MONGO_ERROR; - } - - /* Cannot end with a '.' */ - if( *(current - 1) == '.' 
) { - __mongo_set_error( conn, MONGO_NS_INVALID, - "Collection may not end with '.'", 0 ); - return MONGO_ERROR; - } - - return MONGO_OK; -} - -static void mongo_set_last_error( mongo *conn, bson_iterator *it, bson *obj ) { - int result_len = bson_iterator_string_len( it ); - const char *result_string = bson_iterator_string( it ); - int len = result_len < MONGO_ERR_LEN ? result_len : MONGO_ERR_LEN; - memcpy( conn->lasterrstr, result_string, len ); - - if( bson_find( it, obj, "code" ) != BSON_NULL ) - conn->lasterrcode = bson_iterator_int( it ); -} - -static const int ZERO = 0; -static const int ONE = 1; -mongo_message *mongo_message_create( int len , int id , int responseTo , int op ) { - mongo_message *mm = ( mongo_message * )bson_malloc( len ); - - if ( !id ) - id = rand(); - - /* native endian (converted on send) */ - mm->head.len = len; - mm->head.id = id; - mm->head.responseTo = responseTo; - mm->head.op = op; - - return mm; -} - -/* Always calls bson_free(mm) */ -int mongo_message_send( mongo *conn, mongo_message *mm ) { - mongo_header head; /* little endian */ - int res; - bson_little_endian32( &head.len, &mm->head.len ); - bson_little_endian32( &head.id, &mm->head.id ); - bson_little_endian32( &head.responseTo, &mm->head.responseTo ); - bson_little_endian32( &head.op, &mm->head.op ); - - res = mongo_env_write_socket( conn, &head, sizeof( head ) ); - if( res != MONGO_OK ) { - bson_free( mm ); - return res; - } - - res = mongo_env_write_socket( conn, &mm->data, mm->head.len - sizeof( head ) ); - if( res != MONGO_OK ) { - bson_free( mm ); - return res; - } - - bson_free( mm ); - return MONGO_OK; -} - -int mongo_read_response( mongo *conn, mongo_reply **reply ) { - mongo_header head; /* header from network */ - mongo_reply_fields fields; /* header from network */ - mongo_reply *out; /* native endian */ - unsigned int len; - int res; - - mongo_env_read_socket( conn, &head, sizeof( head ) ); - mongo_env_read_socket( conn, &fields, sizeof( fields ) ); - - bson_little_endian32( &len, &head.len ); - - if ( len < sizeof( head )+sizeof( fields ) || len > 64*1024*1024 ) - return MONGO_READ_SIZE_ERROR; /* most likely corruption */ - - out = ( mongo_reply * )bson_malloc( len ); - - out->head.len = len; - bson_little_endian32( &out->head.id, &head.id ); - bson_little_endian32( &out->head.responseTo, &head.responseTo ); - bson_little_endian32( &out->head.op, &head.op ); - - bson_little_endian32( &out->fields.flag, &fields.flag ); - bson_little_endian64( &out->fields.cursorID, &fields.cursorID ); - bson_little_endian32( &out->fields.start, &fields.start ); - bson_little_endian32( &out->fields.num, &fields.num ); - - res = mongo_env_read_socket( conn, &out->objs, len-sizeof( head )-sizeof( fields ) ); - if( res != MONGO_OK ) { - bson_free( out ); - return res; - } - - *reply = out; - - return MONGO_OK; -} - - -char *mongo_data_append( char *start , const void *data , int len ) { - memcpy( start , data , len ); - return start + len; -} - -char *mongo_data_append32( char *start , const void *data ) { - bson_little_endian32( start , data ); - return start + 4; -} - -char *mongo_data_append64( char *start , const void *data ) { - bson_little_endian64( start , data ); - return start + 8; -} - -/* Connection API */ - -static int mongo_check_is_master( mongo *conn ) { - bson out; - bson_iterator it; - bson_bool_t ismaster = 0; - int max_bson_size = MONGO_DEFAULT_MAX_BSON_SIZE; - - out.data = NULL; - - if ( mongo_simple_int_command( conn, "admin", "ismaster", 1, &out ) == MONGO_OK ) { - if( bson_find( &it, 
&out, "ismaster" ) ) - ismaster = bson_iterator_bool( &it ); - if( bson_find( &it, &out, "maxBsonObjectSize" ) ) { - max_bson_size = bson_iterator_int( &it ); - } - conn->max_bson_size = max_bson_size; - } else { - return MONGO_ERROR; - } - - bson_destroy( &out ); - - if( ismaster ) - return MONGO_OK; - else { - conn->err = MONGO_CONN_NOT_MASTER; - return MONGO_ERROR; - } -} - -MONGO_EXPORT void mongo_init_sockets( void ) { - mongo_env_sock_init(); -} - -MONGO_EXPORT void mongo_init( mongo *conn ) { - memset( conn, 0, sizeof( mongo ) ); - conn->max_bson_size = MONGO_DEFAULT_MAX_BSON_SIZE; -} - -MONGO_EXPORT int mongo_connect( mongo *conn , const char *host, int port ) { - mongo_init( conn ); - - conn->primary = bson_malloc( sizeof( mongo_host_port ) ); - strncpy( conn->primary->host, host, strlen( host ) + 1 ); - conn->primary->port = port; - conn->primary->next = NULL; - - if( mongo_env_socket_connect( conn, host, port ) != MONGO_OK ) - return MONGO_ERROR; - - if( mongo_check_is_master( conn ) != MONGO_OK ) - return MONGO_ERROR; - else - return MONGO_OK; -} - -MONGO_EXPORT void mongo_replset_init( mongo *conn, const char *name ) { - mongo_init( conn ); - - conn->replset = bson_malloc( sizeof( mongo_replset ) ); - conn->replset->primary_connected = 0; - conn->replset->seeds = NULL; - conn->replset->hosts = NULL; - conn->replset->name = ( char * )bson_malloc( strlen( name ) + 1 ); - memcpy( conn->replset->name, name, strlen( name ) + 1 ); - - conn->primary = bson_malloc( sizeof( mongo_host_port ) ); - conn->primary->host[0] = '\0'; - conn->primary->next = NULL; -} - -static void mongo_replset_add_node( mongo_host_port **list, const char *host, int port ) { - mongo_host_port *host_port = bson_malloc( sizeof( mongo_host_port ) ); - host_port->port = port; - host_port->next = NULL; - strncpy( host_port->host, host, strlen( host ) + 1 ); - - if( *list == NULL ) - *list = host_port; - else { - mongo_host_port *p = *list; - while( p->next != NULL ) - p = p->next; - p->next = host_port; - } -} - -static void mongo_replset_free_list( mongo_host_port **list ) { - mongo_host_port *node = *list; - mongo_host_port *prev; - - while( node != NULL ) { - prev = node; - node = node->next; - bson_free( prev ); - } - - *list = NULL; -} - -MONGO_EXPORT void mongo_replset_add_seed( mongo *conn, const char *host, int port ) { - mongo_replset_add_node( &conn->replset->seeds, host, port ); -} - -void mongo_parse_host( const char *host_string, mongo_host_port *host_port ) { - int len, idx, split; - len = split = idx = 0; - - /* Split the host_port string at the ':' */ - while( 1 ) { - if( *( host_string + len ) == '\0' ) - break; - if( *( host_string + len ) == ':' ) - split = len; - - len++; - } - - /* If 'split' is set, we know the that port exists; - * Otherwise, we set the default port. */ - idx = split ? 
split : len; - memcpy( host_port->host, host_string, idx ); - memcpy( host_port->host + idx, "\0", 1 ); - if( split ) - host_port->port = atoi( host_string + idx + 1 ); - else - host_port->port = MONGO_DEFAULT_PORT; -} - -static void mongo_replset_check_seed( mongo *conn ) { - bson out; - bson hosts; - const char *data; - bson_iterator it; - bson_iterator it_sub; - const char *host_string; - mongo_host_port *host_port = NULL; - - out.data = NULL; - - hosts.data = NULL; - - if( mongo_simple_int_command( conn, "admin", "ismaster", 1, &out ) == MONGO_OK ) { - - if( bson_find( &it, &out, "hosts" ) ) { - data = bson_iterator_value( &it ); - bson_iterator_from_buffer( &it_sub, data ); - - /* Iterate over host list, adding each host to the - * connection's host list. */ - while( bson_iterator_next( &it_sub ) ) { - host_string = bson_iterator_string( &it_sub ); - - host_port = bson_malloc( sizeof( mongo_host_port ) ); - mongo_parse_host( host_string, host_port ); - - if( host_port ) { - mongo_replset_add_node( &conn->replset->hosts, - host_port->host, host_port->port ); - - bson_free( host_port ); - host_port = NULL; - } - } - } - } - - bson_destroy( &out ); - bson_destroy( &hosts ); - mongo_env_close_socket( conn->sock ); - conn->sock = 0; - conn->connected = 0; - -} - -/* Find out whether the current connected node is master, and - * verify that the node's replica set name matched the provided name - */ -static int mongo_replset_check_host( mongo *conn ) { - - bson out; - bson_iterator it; - bson_bool_t ismaster = 0; - const char *set_name; - int max_bson_size = MONGO_DEFAULT_MAX_BSON_SIZE; - - out.data = NULL; - - if ( mongo_simple_int_command( conn, "admin", "ismaster", 1, &out ) == MONGO_OK ) { - if( bson_find( &it, &out, "ismaster" ) ) - ismaster = bson_iterator_bool( &it ); - - if( bson_find( &it, &out, "maxBsonObjectSize" ) ) - max_bson_size = bson_iterator_int( &it ); - conn->max_bson_size = max_bson_size; - - if( bson_find( &it, &out, "setName" ) ) { - set_name = bson_iterator_string( &it ); - if( strcmp( set_name, conn->replset->name ) != 0 ) { - bson_destroy( &out ); - conn->err = MONGO_CONN_BAD_SET_NAME; - return MONGO_ERROR; - } - } - } - - bson_destroy( &out ); - - if( ismaster ) { - conn->replset->primary_connected = 1; - } else { - mongo_env_close_socket( conn->sock ); - } - - return MONGO_OK; -} - -MONGO_EXPORT int mongo_replset_connect( mongo *conn ) { - - int res = 0; - mongo_host_port *node; - - conn->sock = 0; - conn->connected = 0; - - /* First iterate over the seed nodes to get the canonical list of hosts - * from the replica set. Break out once we have a host list. - */ - node = conn->replset->seeds; - while( node != NULL ) { - res = mongo_env_socket_connect( conn, ( const char * )&node->host, node->port ); - if( res == MONGO_OK ) { - mongo_replset_check_seed( conn ); - if( conn->replset->hosts ) - break; - } - node = node->next; - } - - /* Iterate over the host list, checking for the primary node. */ - if( !conn->replset->hosts ) { - conn->err = MONGO_CONN_NO_PRIMARY; - return MONGO_ERROR; - } else { - node = conn->replset->hosts; - - while( node != NULL ) { - res = mongo_env_socket_connect( conn, ( const char * )&node->host, node->port ); - - if( res == MONGO_OK ) { - if( mongo_replset_check_host( conn ) != MONGO_OK ) - return MONGO_ERROR; - - /* Primary found, so return. 
*/ - else if( conn->replset->primary_connected ) { - strncpy( conn->primary->host, node->host, strlen( node->host ) + 1 ); - conn->primary->port = node->port; - return MONGO_OK; - } - - /* No primary, so close the connection. */ - else { - mongo_env_close_socket( conn->sock ); - conn->sock = 0; - conn->connected = 0; - } - } - - node = node->next; - } - } - - - conn->err = MONGO_CONN_NO_PRIMARY; - return MONGO_ERROR; -} - -MONGO_EXPORT int mongo_set_op_timeout( mongo *conn, int millis ) { - conn->op_timeout_ms = millis; - if( conn->sock && conn->connected ) - mongo_env_set_socket_op_timeout( conn, millis ); - - return MONGO_OK; -} - -MONGO_EXPORT int mongo_reconnect( mongo *conn ) { - int res; - mongo_disconnect( conn ); - - if( conn->replset ) { - conn->replset->primary_connected = 0; - mongo_replset_free_list( &conn->replset->hosts ); - conn->replset->hosts = NULL; - res = mongo_replset_connect( conn ); - return res; - } else - return mongo_env_socket_connect( conn, conn->primary->host, conn->primary->port ); -} - -MONGO_EXPORT int mongo_check_connection( mongo *conn ) { - if( ! conn->connected ) - return MONGO_ERROR; - - if( mongo_simple_int_command( conn, "admin", "ping", 1, NULL ) == MONGO_OK ) - return MONGO_OK; - else - return MONGO_ERROR; -} - -MONGO_EXPORT void mongo_disconnect( mongo *conn ) { - if( ! conn->connected ) - return; - - if( conn->replset ) { - conn->replset->primary_connected = 0; - mongo_replset_free_list( &conn->replset->hosts ); - conn->replset->hosts = NULL; - } - - mongo_env_close_socket( conn->sock ); - - conn->sock = 0; - conn->connected = 0; -} - -MONGO_EXPORT void mongo_destroy( mongo *conn ) { - mongo_disconnect( conn ); - - if( conn->replset ) { - mongo_replset_free_list( &conn->replset->seeds ); - mongo_replset_free_list( &conn->replset->hosts ); - bson_free( conn->replset->name ); - bson_free( conn->replset ); - conn->replset = NULL; - } - - bson_free( conn->primary ); - - mongo_clear_errors( conn ); -} - -/* Determine whether this BSON object is valid for the given operation. */ -static int mongo_bson_valid( mongo *conn, const bson *bson, int write ) { - int size; - - size = bson_size( bson ); - if( size > conn->max_bson_size ) { - conn->err = MONGO_BSON_TOO_LARGE; - return MONGO_ERROR; - } - - if( ! bson->finished ) { - conn->err = MONGO_BSON_NOT_FINISHED; - return MONGO_ERROR; - } - - if( bson->err & BSON_NOT_UTF8 ) { - conn->err = MONGO_BSON_INVALID; - return MONGO_ERROR; - } - - if( write ) { - if( ( bson->err & BSON_FIELD_HAS_DOT ) || - ( bson->err & BSON_FIELD_INIT_DOLLAR ) ) { - - conn->err = MONGO_BSON_INVALID; - return MONGO_ERROR; - - } - } - - conn->err = 0; - - return MONGO_OK; -} - -/* Determine whether this BSON object is valid for the given operation. */ -static int mongo_cursor_bson_valid( mongo_cursor *cursor, const bson *bson ) { - if( ! 
bson->finished ) { - cursor->err = MONGO_CURSOR_BSON_ERROR; - cursor->conn->err = MONGO_BSON_NOT_FINISHED; - return MONGO_ERROR; - } - - if( bson->err & BSON_NOT_UTF8 ) { - cursor->err = MONGO_CURSOR_BSON_ERROR; - cursor->conn->err = MONGO_BSON_INVALID; - return MONGO_ERROR; - } - - return MONGO_OK; -} - -static int mongo_check_last_error( mongo *conn, const char *ns, - mongo_write_concern *write_concern ) { - - bson response = {NULL, 0}; - bson fields; - bson_iterator it; - int res = 0; - char *cmd_ns = mongo_ns_to_cmd_db( ns ); - - res = mongo_find_one( conn, cmd_ns, write_concern->cmd, bson_empty( &fields ), &response ); - bson_free( cmd_ns ); - - if( res != MONGO_OK ) - return MONGO_ERROR; - else { - if( ( bson_find( &it, &response, "$err" ) == BSON_STRING ) || - ( bson_find( &it, &response, "err" ) == BSON_STRING ) ) { - - __mongo_set_error( conn, MONGO_WRITE_ERROR, - "See conn->lasterrstr for details.", 0 ); - mongo_set_last_error( conn, &it, &response ); - return MONGO_ERROR; - } else - return MONGO_OK; - } -} - -static int mongo_choose_write_concern( mongo *conn, - mongo_write_concern *custom_write_concern, - mongo_write_concern **write_concern ) { - - if( custom_write_concern ) { - *write_concern = custom_write_concern; - } - else if( conn->write_concern ) { - *write_concern = conn->write_concern; - } - - if( *write_concern && !((*write_concern)->cmd) ) { - __mongo_set_error( conn, MONGO_WRITE_CONCERN_INVALID, - "Must call mongo_write_concern_finish() before using *write_concern.", 0 ); - return MONGO_ERROR; - } - else - return MONGO_OK; -} - - -/********************************************************************* -CRUD API -**********************************************************************/ - -MONGO_EXPORT int mongo_insert( mongo *conn, const char *ns, - const bson *bson, mongo_write_concern *custom_write_concern ) { - - char *data; - mongo_message *mm; - mongo_write_concern *write_concern = NULL; - - if( mongo_validate_ns( conn, ns ) != MONGO_OK ) - return MONGO_ERROR; - - if( mongo_bson_valid( conn, bson, 1 ) != MONGO_OK ) { - return MONGO_ERROR; - } - - if( mongo_choose_write_concern( conn, custom_write_concern, - &write_concern ) == MONGO_ERROR ) { - return MONGO_ERROR; - } - - mm = mongo_message_create( 16 /* header */ - + 4 /* ZERO */ - + strlen( ns ) - + 1 + bson_size( bson ) - , 0, 0, MONGO_OP_INSERT ); - - data = &mm->data; - data = mongo_data_append32( data, &ZERO ); - data = mongo_data_append( data, ns, strlen( ns ) + 1 ); - data = mongo_data_append( data, bson->data, bson_size( bson ) ); - - - /* TODO: refactor so that we can send the insert message - and the getlasterror messages together. 
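mongo_check_last_error() above is what turns a fire-and-forget OP_INSERT into an acknowledged write; the same round-trip can also be issued by hand. A sketch, assuming a connected mongo *conn and <stdio.h>; namespace and field names are illustrative:

    bson doc;

    bson_init( &doc );
    bson_append_new_oid( &doc, "_id" );
    bson_append_string( &doc, "name", "example" );
    bson_finish( &doc );

    /* NULL write concern: the insert is sent without waiting for a reply */
    mongo_insert( conn, "test.people", &doc, NULL );
    bson_destroy( &doc );

    /* explicit getlasterror against the same database */
    if( mongo_cmd_get_last_error( conn, "test", NULL ) != MONGO_OK )
        fprintf( stderr, "insert failed: %s\n", conn->lasterrstr );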
*/ - if( write_concern ) { - if( mongo_message_send( conn, mm ) == MONGO_ERROR ) { - return MONGO_ERROR; - } - - return mongo_check_last_error( conn, ns, write_concern ); - } - else { - return mongo_message_send( conn, mm ); - } -} - -MONGO_EXPORT int mongo_insert_batch( mongo *conn, const char *ns, - const bson **bsons, int count, mongo_write_concern *custom_write_concern, - int flags ) { - - mongo_message *mm; - mongo_write_concern *write_concern = NULL; - int i; - char *data; - int overhead = 16 + 4 + strlen( ns ) + 1; - int size = overhead; - - if( mongo_validate_ns( conn, ns ) != MONGO_OK ) - return MONGO_ERROR; - - for( i=0; i conn->max_bson_size ) { - conn->err = MONGO_BSON_TOO_LARGE; - return MONGO_ERROR; - } - - if( mongo_choose_write_concern( conn, custom_write_concern, - &write_concern ) == MONGO_ERROR ) { - return MONGO_ERROR; - } - - mm = mongo_message_create( size , 0 , 0 , MONGO_OP_INSERT ); - - data = &mm->data; - if( flags & MONGO_CONTINUE_ON_ERROR ) - data = mongo_data_append32( data, &ONE ); - else - data = mongo_data_append32( data, &ZERO ); - data = mongo_data_append( data, ns, strlen( ns ) + 1 ); - - for( i=0; idata, bson_size( bsons[i] ) ); - } - - /* TODO: refactor so that we can send the insert message - * and the getlasterror messages together. */ - if( write_concern ) { - if( mongo_message_send( conn, mm ) == MONGO_ERROR ) { - return MONGO_ERROR; - } - - return mongo_check_last_error( conn, ns, write_concern ); - } - else { - return mongo_message_send( conn, mm ); - } -} - -MONGO_EXPORT int mongo_update( mongo *conn, const char *ns, const bson *cond, - const bson *op, int flags, mongo_write_concern *custom_write_concern ) { - - char *data; - mongo_message *mm; - mongo_write_concern *write_concern = NULL; - - /* Make sure that the op BSON is valid UTF-8. - * TODO: decide whether to check cond as well. - * */ - if( mongo_bson_valid( conn, ( bson * )op, 0 ) != MONGO_OK ) { - return MONGO_ERROR; - } - - if( mongo_choose_write_concern( conn, custom_write_concern, - &write_concern ) == MONGO_ERROR ) { - return MONGO_ERROR; - } - - mm = mongo_message_create( 16 /* header */ - + 4 /* ZERO */ - + strlen( ns ) + 1 - + 4 /* flags */ - + bson_size( cond ) - + bson_size( op ) - , 0 , 0 , MONGO_OP_UPDATE ); - - data = &mm->data; - data = mongo_data_append32( data, &ZERO ); - data = mongo_data_append( data, ns, strlen( ns ) + 1 ); - data = mongo_data_append32( data, &flags ); - data = mongo_data_append( data, cond->data, bson_size( cond ) ); - data = mongo_data_append( data, op->data, bson_size( op ) ); - - /* TODO: refactor so that we can send the insert message - * and the getlasterror messages together. */ - if( write_concern ) { - if( mongo_message_send( conn, mm ) == MONGO_ERROR ) { - return MONGO_ERROR; - } - - return mongo_check_last_error( conn, ns, write_concern ); - } - else { - return mongo_message_send( conn, mm ); - } -} - -MONGO_EXPORT int mongo_remove( mongo *conn, const char *ns, const bson *cond, - mongo_write_concern *custom_write_concern ) { - - char *data; - mongo_message *mm; - mongo_write_concern *write_concern = NULL; - - /* Make sure that the BSON is valid UTF-8. - * TODO: decide whether to check cond as well. 
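A sketch of driving the batch path above; MONGO_CONTINUE_ON_ERROR asks the server to keep processing the remaining documents if one insert fails. Assumes a connected mongo *conn; the collection name is illustrative:

    bson storage[3];
    const bson *docs[3];
    int i;

    for( i = 0; i < 3; i++ ) {
        bson_init( &storage[i] );
        bson_append_new_oid( &storage[i], "_id" );
        bson_append_int( &storage[i], "x", i );
        bson_finish( &storage[i] );
        docs[i] = &storage[i];
    }

    mongo_insert_batch( conn, "test.batch", docs, 3, NULL, MONGO_CONTINUE_ON_ERROR );

    for( i = 0; i < 3; i++ )
        bson_destroy( &storage[i] );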
- * */ - if( mongo_bson_valid( conn, ( bson * )cond, 0 ) != MONGO_OK ) { - return MONGO_ERROR; - } - - if( mongo_choose_write_concern( conn, custom_write_concern, - &write_concern ) == MONGO_ERROR ) { - return MONGO_ERROR; - } - - mm = mongo_message_create( 16 /* header */ - + 4 /* ZERO */ - + strlen( ns ) + 1 - + 4 /* ZERO */ - + bson_size( cond ) - , 0 , 0 , MONGO_OP_DELETE ); - - data = &mm->data; - data = mongo_data_append32( data, &ZERO ); - data = mongo_data_append( data, ns, strlen( ns ) + 1 ); - data = mongo_data_append32( data, &ZERO ); - data = mongo_data_append( data, cond->data, bson_size( cond ) ); - - /* TODO: refactor so that we can send the insert message - * and the getlasterror messages together. */ - if( write_concern ) { - if( mongo_message_send( conn, mm ) == MONGO_ERROR ) { - return MONGO_ERROR; - } - - return mongo_check_last_error( conn, ns, write_concern ); - } - else { - return mongo_message_send( conn, mm ); - } -} - - -/********************************************************************* -Write Concern API -**********************************************************************/ - -MONGO_EXPORT void mongo_write_concern_init( mongo_write_concern *write_concern ) { - memset( write_concern, 0, sizeof( mongo_write_concern ) ); -} - -MONGO_EXPORT int mongo_write_concern_finish( mongo_write_concern *write_concern ) { - bson *command; - - /* Destory any existing serialized write concern object and reuse it. */ - if( write_concern->cmd ) { - bson_destroy( write_concern->cmd ); - command = write_concern->cmd; - } - else - command = (bson *)bson_malloc( sizeof( bson ) ); - - if( !command ) { - return MONGO_ERROR; - } - - bson_init( command ); - - bson_append_int( command, "getlasterror", 1 ); - - if( write_concern->mode ) { - bson_append_string( command, "w", write_concern->mode ); - } - - else if( write_concern->w ) { - bson_append_int( command, "w", write_concern->w ); - } - - if( write_concern->wtimeout ) { - bson_append_int( command, "wtimeout", write_concern->wtimeout ); - } - - if( write_concern->j ) { - bson_append_int( command, "j", write_concern->j ); - } - - if( write_concern->fsync ) { - bson_append_int( command, "fsync", write_concern->fsync ); - } - - bson_finish( command ); - - /* write_concern now owns the BSON command object. - * This is freed in mongo_write_concern_destroy(). */ - write_concern->cmd = command; - - return MONGO_OK; -} - -MONGO_EXPORT void mongo_write_concern_destroy( mongo_write_concern *write_concern ) { - if( !write_concern ) - return; - - if( write_concern->cmd ) - bson_destroy( write_concern->cmd ); - - bson_free( write_concern->cmd ); -} - -MONGO_EXPORT void mongo_set_write_concern( mongo *conn, - mongo_write_concern *write_concern ) { - - conn->write_concern = write_concern; -} - -/** - * Free the write_concern object (specifically, the BSON object that it holds). - */ -MONGO_EXPORT void mongo_write_concern_destroy( mongo_write_concern *write_concern ); - - -static int mongo_cursor_op_query( mongo_cursor *cursor ) { - int res; - bson empty; - char *data; - mongo_message *mm; - bson temp; - bson_iterator it; - - /* Clear any errors. */ - mongo_clear_errors( cursor->conn ); - - /* Set up default values for query and fields, if necessary. */ - if( ! cursor->query ) - cursor->query = bson_empty( &empty ); - else if( mongo_cursor_bson_valid( cursor, cursor->query ) != MONGO_OK ) - return MONGO_ERROR; - - if( ! 
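Because mongo_choose_write_concern() above falls back to conn->write_concern, a connection-wide default can be installed once and reused for every write. A sketch; w = 1 waits for acknowledgement from a single node:

    static mongo_write_concern wc;       /* must outlive the connection's use of it */

    mongo_write_concern_init( &wc );
    wc.w = 1;                            /* acknowledge from one node */
    wc.wtimeout = 2000;                  /* stop waiting after two seconds */
    mongo_write_concern_finish( &wc );   /* serializes the getlasterror command; required before use */
    mongo_set_write_concern( conn, &wc );

    /* ... inserts, updates and removes on conn now perform the getlasterror check ... */

    mongo_write_concern_destroy( &wc );  /* frees the serialized command once it is no longer set */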
cursor->fields ) - cursor->fields = bson_empty( &empty ); - else if( mongo_cursor_bson_valid( cursor, cursor->fields ) != MONGO_OK ) - return MONGO_ERROR; - - mm = mongo_message_create( 16 + /* header */ - 4 + /* options */ - strlen( cursor->ns ) + 1 + /* ns */ - 4 + 4 + /* skip,return */ - bson_size( cursor->query ) + - bson_size( cursor->fields ) , - 0 , 0 , MONGO_OP_QUERY ); - - data = &mm->data; - data = mongo_data_append32( data , &cursor->options ); - data = mongo_data_append( data , cursor->ns , strlen( cursor->ns ) + 1 ); - data = mongo_data_append32( data , &cursor->skip ); - data = mongo_data_append32( data , &cursor->limit ); - data = mongo_data_append( data , cursor->query->data , bson_size( cursor->query ) ); - if ( cursor->fields ) - data = mongo_data_append( data , cursor->fields->data , bson_size( cursor->fields ) ); - - bson_fatal_msg( ( data == ( ( char * )mm ) + mm->head.len ), "query building fail!" ); - - res = mongo_message_send( cursor->conn , mm ); - if( res != MONGO_OK ) { - return MONGO_ERROR; - } - - res = mongo_read_response( cursor->conn, ( mongo_reply ** )&( cursor->reply ) ); - if( res != MONGO_OK ) { - return MONGO_ERROR; - } - - if( cursor->reply->fields.num == 1 ) { - bson_init_data( &temp, &cursor->reply->objs ); - if( bson_find( &it, &temp, "$err" ) ) { - mongo_set_last_error( cursor->conn, &it, &temp ); - cursor->err = MONGO_CURSOR_QUERY_FAIL; - return MONGO_ERROR; - } - } - - cursor->seen += cursor->reply->fields.num; - cursor->flags |= MONGO_CURSOR_QUERY_SENT; - return MONGO_OK; -} - -static int mongo_cursor_get_more( mongo_cursor *cursor ) { - int res; - - if( cursor->limit > 0 && cursor->seen >= cursor->limit ) { - cursor->err = MONGO_CURSOR_EXHAUSTED; - return MONGO_ERROR; - } else if( ! cursor->reply ) { - cursor->err = MONGO_CURSOR_INVALID; - return MONGO_ERROR; - } else if( ! 
cursor->reply->fields.cursorID ) { - cursor->err = MONGO_CURSOR_EXHAUSTED; - return MONGO_ERROR; - } else { - char *data; - int sl = strlen( cursor->ns )+1; - int limit = 0; - mongo_message *mm; - - if( cursor->limit > 0 ) - limit = cursor->limit - cursor->seen; - - mm = mongo_message_create( 16 /*header*/ - +4 /*ZERO*/ - +sl - +4 /*numToReturn*/ - +8 /*cursorID*/ - , 0, 0, MONGO_OP_GET_MORE ); - data = &mm->data; - data = mongo_data_append32( data, &ZERO ); - data = mongo_data_append( data, cursor->ns, sl ); - data = mongo_data_append32( data, &limit ); - data = mongo_data_append64( data, &cursor->reply->fields.cursorID ); - - bson_free( cursor->reply ); - res = mongo_message_send( cursor->conn, mm ); - if( res != MONGO_OK ) { - mongo_cursor_destroy( cursor ); - return MONGO_ERROR; - } - - res = mongo_read_response( cursor->conn, &( cursor->reply ) ); - if( res != MONGO_OK ) { - mongo_cursor_destroy( cursor ); - return MONGO_ERROR; - } - cursor->current.data = NULL; - cursor->seen += cursor->reply->fields.num; - - return MONGO_OK; - } -} - -MONGO_EXPORT mongo_cursor *mongo_find( mongo *conn, const char *ns, const bson *query, - const bson *fields, int limit, int skip, int options ) { - - mongo_cursor *cursor = ( mongo_cursor * )bson_malloc( sizeof( mongo_cursor ) ); - mongo_cursor_init( cursor, conn, ns ); - cursor->flags |= MONGO_CURSOR_MUST_FREE; - - mongo_cursor_set_query( cursor, query ); - mongo_cursor_set_fields( cursor, fields ); - mongo_cursor_set_limit( cursor, limit ); - mongo_cursor_set_skip( cursor, skip ); - mongo_cursor_set_options( cursor, options ); - - if( mongo_cursor_op_query( cursor ) == MONGO_OK ) - return cursor; - else { - mongo_cursor_destroy( cursor ); - return NULL; - } -} - -MONGO_EXPORT int mongo_find_one( mongo *conn, const char *ns, const bson *query, - const bson *fields, bson *out ) { - - mongo_cursor cursor[1]; - mongo_cursor_init( cursor, conn, ns ); - mongo_cursor_set_query( cursor, query ); - mongo_cursor_set_fields( cursor, fields ); - mongo_cursor_set_limit( cursor, 1 ); - - if ( mongo_cursor_next( cursor ) == MONGO_OK ) { - if( out ) { - bson_init_size( out, bson_size( (bson *)&cursor->current ) ); - memcpy( out->data, cursor->current.data, - bson_size( (bson *)&cursor->current ) ); - out->finished = 1; - } - mongo_cursor_destroy( cursor ); - return MONGO_OK; - } else { - mongo_cursor_destroy( cursor ); - return MONGO_ERROR; - } -} - -MONGO_EXPORT void mongo_cursor_init( mongo_cursor *cursor, mongo *conn, const char *ns ) { - memset( cursor, 0, sizeof( mongo_cursor ) ); - cursor->conn = conn; - cursor->ns = ( const char * )bson_malloc( strlen( ns ) + 1 ); - strncpy( ( char * )cursor->ns, ns, strlen( ns ) + 1 ); - cursor->current.data = NULL; -} - -MONGO_EXPORT void mongo_cursor_set_query( mongo_cursor *cursor, const bson *query ) { - cursor->query = query; -} - -MONGO_EXPORT void mongo_cursor_set_fields( mongo_cursor *cursor, const bson *fields ) { - cursor->fields = fields; -} - -MONGO_EXPORT void mongo_cursor_set_skip( mongo_cursor *cursor, int skip ) { - cursor->skip = skip; -} - -MONGO_EXPORT void mongo_cursor_set_limit( mongo_cursor *cursor, int limit ) { - cursor->limit = limit; -} - -MONGO_EXPORT void mongo_cursor_set_options( mongo_cursor *cursor, int options ) { - cursor->options = options; -} - -MONGO_EXPORT const char *mongo_cursor_data( mongo_cursor *cursor ) { - return cursor->current.data; -} - -MONGO_EXPORT const bson *mongo_cursor_bson( mongo_cursor *cursor ) { - return (const bson *)&(cursor->current); -} - -MONGO_EXPORT int 
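A usage sketch of the query entry point above, assuming a connected mongo *conn, <stdio.h>, and documents carrying a string "name" field (all illustrative):

    bson query;
    bson_iterator it;
    mongo_cursor *cursor;

    bson_init( &query );
    bson_append_string( &query, "status", "active" );
    bson_finish( &query );

    /* limit 10, skip 0, no cursor options; NULL fields returns whole documents */
    cursor = mongo_find( conn, "test.people", &query, NULL, 10, 0, 0 );

    if( cursor ) {
        while( mongo_cursor_next( cursor ) == MONGO_OK ) {
            if( bson_find( &it, (bson *)mongo_cursor_bson( cursor ), "name" ) )  /* cast mirrors the driver's own style */
                printf( "%s\n", bson_iterator_string( &it ) );
        }
        mongo_cursor_destroy( cursor );
    }

    bson_destroy( &query );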
mongo_cursor_next( mongo_cursor *cursor ) { - char *next_object; - char *message_end; - - if( ! ( cursor->flags & MONGO_CURSOR_QUERY_SENT ) ) - if( mongo_cursor_op_query( cursor ) != MONGO_OK ) - return MONGO_ERROR; - - if( !cursor->reply ) - return MONGO_ERROR; - - /* no data */ - if ( cursor->reply->fields.num == 0 ) { - - /* Special case for tailable cursors. */ - if( cursor->reply->fields.cursorID ) { - if( ( mongo_cursor_get_more( cursor ) != MONGO_OK ) || - cursor->reply->fields.num == 0 ) { - return MONGO_ERROR; - } - } - - else - return MONGO_ERROR; - } - - /* first */ - if ( cursor->current.data == NULL ) { - bson_init_finished_data( &cursor->current, &cursor->reply->objs ); - return MONGO_OK; - } - - next_object = cursor->current.data + bson_size( &cursor->current ); - message_end = ( char * )cursor->reply + cursor->reply->head.len; - - if ( next_object >= message_end ) { - if( mongo_cursor_get_more( cursor ) != MONGO_OK ) - return MONGO_ERROR; - - /* If there's still a cursor id, then the message should be pending. */ - if( cursor->reply->fields.num == 0 && cursor->reply->fields.cursorID ) { - cursor->err = MONGO_CURSOR_PENDING; - return MONGO_ERROR; - } - - bson_init_finished_data( &cursor->current, &cursor->reply->objs ); - } else { - bson_init_finished_data( &cursor->current, next_object ); - } - - return MONGO_OK; -} - -MONGO_EXPORT int mongo_cursor_destroy( mongo_cursor *cursor ) { - int result = MONGO_OK; - - if ( !cursor ) return result; - - /* Kill cursor if live. */ - if ( cursor->reply && cursor->reply->fields.cursorID ) { - mongo *conn = cursor->conn; - mongo_message *mm = mongo_message_create( 16 /*header*/ - +4 /*ZERO*/ - +4 /*numCursors*/ - +8 /*cursorID*/ - , 0, 0, MONGO_OP_KILL_CURSORS ); - char *data = &mm->data; - data = mongo_data_append32( data, &ZERO ); - data = mongo_data_append32( data, &ONE ); - data = mongo_data_append64( data, &cursor->reply->fields.cursorID ); - - result = mongo_message_send( conn, mm ); - } - - bson_free( cursor->reply ); - bson_free( ( void * )cursor->ns ); - - if( cursor->flags & MONGO_CURSOR_MUST_FREE ) - bson_free( cursor ); - - return result; -} - -/* MongoDB Helper Functions */ - -MONGO_EXPORT int mongo_create_index( mongo *conn, const char *ns, const bson *key, int options, bson *out ) { - bson b; - bson_iterator it; - char name[255] = {'_'}; - int i = 1; - char idxns[1024]; - - bson_iterator_init( &it, key ); - while( i < 255 && bson_iterator_next( &it ) ) { - strncpy( name + i, bson_iterator_key( &it ), 255 - i ); - i += strlen( bson_iterator_key( &it ) ); - } - name[254] = '\0'; - - bson_init( &b ); - bson_append_bson( &b, "key", key ); - bson_append_string( &b, "ns", ns ); - bson_append_string( &b, "name", name ); - if ( options & MONGO_INDEX_UNIQUE ) - bson_append_bool( &b, "unique", 1 ); - if ( options & MONGO_INDEX_DROP_DUPS ) - bson_append_bool( &b, "dropDups", 1 ); - if ( options & MONGO_INDEX_BACKGROUND ) - bson_append_bool( &b, "background", 1 ); - if ( options & MONGO_INDEX_SPARSE ) - bson_append_bool( &b, "sparse", 1 ); - bson_finish( &b ); - - strncpy( idxns, ns, 1024-16 ); - strcpy( strchr( idxns, '.' ), ".system.indexes" ); - mongo_insert( conn, idxns, &b, NULL ); - bson_destroy( &b ); - - *strchr( idxns, '.' 
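The MONGO_CURSOR_PENDING case handled above matters mostly for tailable cursors on capped collections. A polling sketch under that assumption (a real consumer would sleep between polls; the collection name is illustrative):

    mongo_cursor cursor[1];

    mongo_cursor_init( cursor, conn, "test.log" );
    mongo_cursor_set_options( cursor, MONGO_TAILABLE | MONGO_AWAIT_DATA );

    for( ;; ) {
        if( mongo_cursor_next( cursor ) == MONGO_OK ) {
            /* a new document is available via mongo_cursor_bson( cursor ) */
        } else if( cursor->err == MONGO_CURSOR_PENDING ) {
            continue;   /* cursor is still alive but has no new data yet */
        } else {
            break;      /* exhausted, invalidated, or a query failure */
        }
    }

    mongo_cursor_destroy( cursor );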
) = '\0'; /* just db not ns */ - return mongo_cmd_get_last_error( conn, idxns, out ); -} - -MONGO_EXPORT bson_bool_t mongo_create_simple_index( mongo *conn, const char *ns, const char *field, int options, bson *out ) { - bson b; - bson_bool_t success; - - bson_init( &b ); - bson_append_int( &b, field, 1 ); - bson_finish( &b ); - - success = mongo_create_index( conn, ns, &b, options, out ); - bson_destroy( &b ); - return success; -} - -MONGO_EXPORT int mongo_create_capped_collection( mongo *conn, const char *db, - const char *collection, int size, int max, bson *out ) { - - bson b; - int result; - - bson_init( &b ); - bson_append_string( &b, "create", collection ); - bson_append_bool( &b, "capped", 1 ); - bson_append_int( &b, "size", size ); - if( max > 0 ) - bson_append_int( &b, "max", size ); - bson_finish( &b ); - - result = mongo_run_command( conn, db, &b, out ); - - bson_destroy( &b ); - - return result; -} - -MONGO_EXPORT double mongo_count( mongo *conn, const char *db, const char *ns, const bson *query ) { - bson cmd; - bson out = {NULL, 0}; - double count = -1; - - bson_init( &cmd ); - bson_append_string( &cmd, "count", ns ); - if ( query && bson_size( query ) > 5 ) /* not empty */ - bson_append_bson( &cmd, "query", query ); - bson_finish( &cmd ); - - if( mongo_run_command( conn, db, &cmd, &out ) == MONGO_OK ) { - bson_iterator it; - if( bson_find( &it, &out, "n" ) ) - count = bson_iterator_double( &it ); - bson_destroy( &cmd ); - bson_destroy( &out ); - return count; - } else { - bson_destroy( &out ); - bson_destroy( &cmd ); - return MONGO_ERROR; - } -} - -MONGO_EXPORT int mongo_run_command( mongo *conn, const char *db, const bson *command, - bson *out ) { - - bson response = {NULL, 0}; - bson fields; - int sl = strlen( db ); - char *ns = bson_malloc( sl + 5 + 1 ); /* ".$cmd" + nul */ - int res, success = 0; - - strcpy( ns, db ); - strcpy( ns+sl, ".$cmd" ); - - res = mongo_find_one( conn, ns, command, bson_empty( &fields ), &response ); - bson_free( ns ); - - if( res != MONGO_OK ) - return MONGO_ERROR; - else { - bson_iterator it; - if( bson_find( &it, &response, "ok" ) ) - success = bson_iterator_bool( &it ); - - if( !success ) { - conn->err = MONGO_COMMAND_FAILED; - return MONGO_ERROR; - } else { - if( out ) - *out = response; - return MONGO_OK; - } - } -} - -MONGO_EXPORT int mongo_simple_int_command( mongo *conn, const char *db, - const char *cmdstr, int arg, bson *realout ) { - - bson out = {NULL, 0}; - bson cmd; - int result; - - bson_init( &cmd ); - bson_append_int( &cmd, cmdstr, arg ); - bson_finish( &cmd ); - - result = mongo_run_command( conn, db, &cmd, &out ); - - bson_destroy( &cmd ); - - if ( realout ) - *realout = out; - else - bson_destroy( &out ); - - return result; -} - -MONGO_EXPORT int mongo_simple_str_command( mongo *conn, const char *db, - const char *cmdstr, const char *arg, bson *realout ) { - - bson out = {NULL, 0}; - int result; - - bson cmd; - bson_init( &cmd ); - bson_append_string( &cmd, cmdstr, arg ); - bson_finish( &cmd ); - - result = mongo_run_command( conn, db, &cmd, &out ); - - bson_destroy( &cmd ); - - if ( realout ) - *realout = out; - else - bson_destroy( &out ); - - return result; -} - -MONGO_EXPORT int mongo_cmd_drop_db( mongo *conn, const char *db ) { - return mongo_simple_int_command( conn, db, "dropDatabase", 1, NULL ); -} - -MONGO_EXPORT int mongo_cmd_drop_collection( mongo *conn, const char *db, const char *collection, bson *out ) { - return mongo_simple_str_command( conn, db, "drop", collection, out ); -} - -MONGO_EXPORT void 
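A sketch of the index and count helpers above working together. Note that, as implemented here, mongo_create_simple_index() propagates MONGO_OK / MONGO_ERROR from mongo_cmd_get_last_error() despite its bson_bool_t return type, so comparing against MONGO_OK is the safer check:

    double n;

    /* unique index on an (illustrative) "email" field */
    if( mongo_create_simple_index( conn, "test.people", "email",
                                   MONGO_INDEX_UNIQUE, NULL ) != MONGO_OK ) {
        /* conn->lasterrstr describes why index creation failed */
    }

    n = mongo_count( conn, "test", "people", NULL );   /* NULL query counts every document */
    if( n < 0 ) {
        /* the count command itself failed */
    }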
mongo_cmd_reset_error( mongo *conn, const char *db ) { - mongo_simple_int_command( conn, db, "reseterror", 1, NULL ); -} - -static int mongo_cmd_get_error_helper( mongo *conn, const char *db, - bson *realout, const char *cmdtype ) { - - bson out = {NULL,0}; - bson_bool_t haserror = 0; - - /* Reset last error codes. */ - mongo_clear_errors( conn ); - - /* If there's an error, store its code and string in the connection object. */ - if( mongo_simple_int_command( conn, db, cmdtype, 1, &out ) == MONGO_OK ) { - bson_iterator it; - haserror = ( bson_find( &it, &out, "err" ) != BSON_NULL ); - if( haserror ) mongo_set_last_error( conn, &it, &out ); - } - - if( realout ) - *realout = out; /* transfer of ownership */ - else - bson_destroy( &out ); - - if( haserror ) - return MONGO_ERROR; - else - return MONGO_OK; -} - -MONGO_EXPORT int mongo_cmd_get_prev_error( mongo *conn, const char *db, bson *out ) { - return mongo_cmd_get_error_helper( conn, db, out, "getpreverror" ); -} - -MONGO_EXPORT int mongo_cmd_get_last_error( mongo *conn, const char *db, bson *out ) { - return mongo_cmd_get_error_helper( conn, db, out, "getlasterror" ); -} - -MONGO_EXPORT bson_bool_t mongo_cmd_ismaster( mongo *conn, bson *realout ) { - bson out = {NULL,0}; - bson_bool_t ismaster = 0; - - if ( mongo_simple_int_command( conn, "admin", "ismaster", 1, &out ) == MONGO_OK ) { - bson_iterator it; - bson_find( &it, &out, "ismaster" ); - ismaster = bson_iterator_bool( &it ); - } - - if( realout ) - *realout = out; /* transfer of ownership */ - else - bson_destroy( &out ); - - return ismaster; -} - -static void digest2hex( mongo_md5_byte_t digest[16], char hex_digest[33] ) { - static const char hex[16] = {'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'}; - int i; - for ( i=0; i<16; i++ ) { - hex_digest[2*i] = hex[( digest[i] & 0xf0 ) >> 4]; - hex_digest[2*i + 1] = hex[ digest[i] & 0x0f ]; - } - hex_digest[32] = '\0'; -} - -static void mongo_pass_digest( const char *user, const char *pass, char hex_digest[33] ) { - mongo_md5_state_t st; - mongo_md5_byte_t digest[16]; - - mongo_md5_init( &st ); - mongo_md5_append( &st, ( const mongo_md5_byte_t * )user, strlen( user ) ); - mongo_md5_append( &st, ( const mongo_md5_byte_t * )":mongo:", 7 ); - mongo_md5_append( &st, ( const mongo_md5_byte_t * )pass, strlen( pass ) ); - mongo_md5_finish( &st, digest ); - digest2hex( digest, hex_digest ); -} - -MONGO_EXPORT int mongo_cmd_add_user( mongo *conn, const char *db, const char *user, const char *pass ) { - bson user_obj; - bson pass_obj; - char hex_digest[33]; - char *ns = bson_malloc( strlen( db ) + strlen( ".system.users" ) + 1 ); - int res; - - strcpy( ns, db ); - strcpy( ns+strlen( db ), ".system.users" ); - - mongo_pass_digest( user, pass, hex_digest ); - - bson_init( &user_obj ); - bson_append_string( &user_obj, "user", user ); - bson_finish( &user_obj ); - - bson_init( &pass_obj ); - bson_append_start_object( &pass_obj, "$set" ); - bson_append_string( &pass_obj, "pwd", hex_digest ); - bson_append_finish_object( &pass_obj ); - bson_finish( &pass_obj ); - - res = mongo_update( conn, ns, &user_obj, &pass_obj, MONGO_UPDATE_UPSERT, NULL ); - - bson_free( ns ); - bson_destroy( &user_obj ); - bson_destroy( &pass_obj ); - - return res; -} - -MONGO_EXPORT bson_bool_t mongo_cmd_authenticate( mongo *conn, const char *db, const char *user, const char *pass ) { - bson from_db; - bson cmd; - bson out; - const char *nonce; - int result; - - mongo_md5_state_t st; - mongo_md5_byte_t digest[16]; - char hex_digest[33]; - - if( 
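From the caller's side, the user-management pair above is just two calls; this mirrors the pattern in the driver's own auth_test.c (credentials are placeholders):

    if( mongo_cmd_add_user( conn, "test", "alice", "s3cret" ) == MONGO_OK ) {
        if( mongo_cmd_authenticate( conn, "test", "alice", "s3cret" ) != MONGO_OK ) {
            /* wrong credentials, or the authenticate command failed */
        }
    }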
mongo_simple_int_command( conn, db, "getnonce", 1, &from_db ) == MONGO_OK ) { - bson_iterator it; - bson_find( &it, &from_db, "nonce" ); - nonce = bson_iterator_string( &it ); - } else { - return MONGO_ERROR; - } - - mongo_pass_digest( user, pass, hex_digest ); - - mongo_md5_init( &st ); - mongo_md5_append( &st, ( const mongo_md5_byte_t * )nonce, strlen( nonce ) ); - mongo_md5_append( &st, ( const mongo_md5_byte_t * )user, strlen( user ) ); - mongo_md5_append( &st, ( const mongo_md5_byte_t * )hex_digest, 32 ); - mongo_md5_finish( &st, digest ); - digest2hex( digest, hex_digest ); - - bson_init( &cmd ); - bson_append_int( &cmd, "authenticate", 1 ); - bson_append_string( &cmd, "user", user ); - bson_append_string( &cmd, "nonce", nonce ); - bson_append_string( &cmd, "key", hex_digest ); - bson_finish( &cmd ); - - bson_destroy( &from_db ); - - result = mongo_run_command( conn, db, &cmd, &out ); - - bson_destroy( &from_db ); - bson_destroy( &cmd ); - - return result; -} diff --git a/mongo-c-driver-v0.6/src/mongo.h b/mongo-c-driver-v0.6/src/mongo.h deleted file mode 100644 index 57ba921..0000000 --- a/mongo-c-driver-v0.6/src/mongo.h +++ /dev/null @@ -1,824 +0,0 @@ -/** - * @file mongo.h - * @brief Main MongoDB Declarations - */ - -/* Copyright 2009-2012 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MONGO_H_ -#define MONGO_H_ - -#include "bson.h" - -MONGO_EXTERN_C_START - -#define MONGO_MAJOR 0 -#define MONGO_MINOR 6 -#define MONGO_PATCH 0 - -#define MONGO_OK 0 -#define MONGO_ERROR -1 - -#define MONGO_DEFAULT_PORT 27017 - -#define MONGO_DEFAULT_MAX_BSON_SIZE 4 * 1024 * 1024 - -#define MONGO_ERR_LEN 128 - -typedef enum mongo_error_t { - MONGO_CONN_SUCCESS = 0, /**< Connection success! */ - MONGO_CONN_NO_SOCKET, /**< Could not create a socket. */ - MONGO_CONN_FAIL, /**< An error occured while calling connect(). */ - MONGO_CONN_ADDR_FAIL, /**< An error occured while calling getaddrinfo(). */ - MONGO_CONN_NOT_MASTER, /**< Warning: connected to a non-master node (read-only). */ - MONGO_CONN_BAD_SET_NAME, /**< Given rs name doesn't match this replica set. */ - MONGO_CONN_NO_PRIMARY, /**< Can't find primary in replica set. Connection closed. */ - - MONGO_IO_ERROR, /**< An error occurred while reading or writing on the socket. */ - MONGO_SOCKET_ERROR, /**< Other socket error. */ - MONGO_READ_SIZE_ERROR, /**< The response is not the expected length. */ - MONGO_COMMAND_FAILED, /**< The command returned with 'ok' value of 0. */ - MONGO_WRITE_ERROR, /**< Write with given write_concern returned an error. */ - MONGO_NS_INVALID, /**< The name for the ns (database or collection) is invalid. */ - MONGO_BSON_INVALID, /**< BSON not valid for the specified op. */ - MONGO_BSON_NOT_FINISHED, /**< BSON object has not been finished. */ - MONGO_BSON_TOO_LARGE, /**< BSON object exceeds max BSON size. */ - MONGO_WRITE_CONCERN_INVALID /**< Supplied write concern object is invalid. 
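Callers are expected to branch on conn->err whenever a call returns MONGO_ERROR; a sketch for the connection-phase codes defined above (host and port are placeholders):

    if( mongo_connect( conn, "127.0.0.1", 27017 ) != MONGO_OK ) {
        switch( conn->err ) {
        case MONGO_CONN_NO_SOCKET:  /* could not create a socket */         break;
        case MONGO_CONN_ADDR_FAIL:  /* getaddrinfo() failed for the host */ break;
        case MONGO_CONN_FAIL:       /* connect() itself failed */           break;
        default:                                                            break;
        }
    }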
*/ -} mongo_error_t; - -typedef enum mongo_cursor_error_t { - MONGO_CURSOR_EXHAUSTED, /**< The cursor has no more results. */ - MONGO_CURSOR_INVALID, /**< The cursor has timed out or is not recognized. */ - MONGO_CURSOR_PENDING, /**< Tailable cursor still alive but no data. */ - MONGO_CURSOR_QUERY_FAIL, /**< The server returned an '$err' object, indicating query failure. - See conn->lasterrcode and conn->lasterrstr for details. */ - MONGO_CURSOR_BSON_ERROR /**< Something is wrong with the BSON provided. See conn->err - for details. */ -} mongo_cursor_error_t; - -enum mongo_cursor_flags { - MONGO_CURSOR_MUST_FREE = 1, /**< mongo_cursor_destroy should free cursor. */ - MONGO_CURSOR_QUERY_SENT = ( 1<<1 ) /**< Initial query has been sent. */ -}; - -enum mongo_index_opts { - MONGO_INDEX_UNIQUE = ( 1<<0 ), - MONGO_INDEX_DROP_DUPS = ( 1<<2 ), - MONGO_INDEX_BACKGROUND = ( 1<<3 ), - MONGO_INDEX_SPARSE = ( 1<<4 ) -}; - -enum mongo_update_opts { - MONGO_UPDATE_UPSERT = 0x1, - MONGO_UPDATE_MULTI = 0x2, - MONGO_UPDATE_BASIC = 0x4 -}; - -enum mongo_insert_opts { - MONGO_CONTINUE_ON_ERROR = 0x1 -}; - -enum mongo_cursor_opts { - MONGO_TAILABLE = ( 1<<1 ), /**< Create a tailable cursor. */ - MONGO_SLAVE_OK = ( 1<<2 ), /**< Allow queries on a non-primary node. */ - MONGO_NO_CURSOR_TIMEOUT = ( 1<<4 ), /**< Disable cursor timeouts. */ - MONGO_AWAIT_DATA = ( 1<<5 ), /**< Momentarily block for more data. */ - MONGO_EXHAUST = ( 1<<6 ), /**< Stream in multiple 'more' packages. */ - MONGO_PARTIAL = ( 1<<7 ) /**< Allow reads even if a shard is down. */ -}; - -enum mongo_operations { - MONGO_OP_MSG = 1000, - MONGO_OP_UPDATE = 2001, - MONGO_OP_INSERT = 2002, - MONGO_OP_QUERY = 2004, - MONGO_OP_GET_MORE = 2005, - MONGO_OP_DELETE = 2006, - MONGO_OP_KILL_CURSORS = 2007 -}; - -#pragma pack(1) -typedef struct { - int len; - int id; - int responseTo; - int op; -} mongo_header; - -typedef struct { - mongo_header head; - char data; -} mongo_message; - -typedef struct { - int flag; /* FIX THIS COMMENT non-zero on failure */ - int64_t cursorID; - int start; - int num; -} mongo_reply_fields; - -typedef struct { - mongo_header head; - mongo_reply_fields fields; - char objs; -} mongo_reply; -#pragma pack() - -typedef struct mongo_host_port { - char host[255]; - int port; - struct mongo_host_port *next; -} mongo_host_port; - -typedef struct mongo_write_concern { - int w; /**< Number of nodes this write should be replicated to. */ - int wtimeout; /**< Number of milliseconds before replication timeout. */ - int j; /**< If non-zero, block until the journal sync. */ - int fsync; /**< Same a j with journaling enabled; otherwise, call fsync. */ - const char *mode; /**< Either "majority" or a getlasterrormode. Overrides w value. */ - - bson *cmd; /**< The BSON object representing the getlasterror command. */ -} mongo_write_concern; - -typedef struct { - mongo_host_port *seeds; /**< List of seeds provided by the user. */ - mongo_host_port *hosts; /**< List of host/ports given by the replica set */ - char *name; /**< Name of the replica set. */ - bson_bool_t primary_connected; /**< Primary node connection status. */ -} mongo_replset; - -typedef struct mongo { - mongo_host_port *primary; /**< Primary connection info. */ - mongo_replset *replset; /**< replset object if connected to a replica set. */ - int sock; /**< Socket file descriptor. */ - int flags; /**< Flags on this connection object. */ - int conn_timeout_ms; /**< Connection timeout in milliseconds. */ - int op_timeout_ms; /**< Read and write timeout in milliseconds. 
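The option enums above are plain bitfields, so flags combine with bitwise OR (the update and insert flags work the same way); for example, a query that tolerates secondaries and long-lived cursors:

    mongo_cursor *cursor = mongo_find( conn, "test.people", NULL, NULL, 0, 0,
                                       MONGO_SLAVE_OK | MONGO_NO_CURSOR_TIMEOUT );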
*/ - int max_bson_size; /**< Largest BSON object allowed on this connection. */ - bson_bool_t connected; /**< Connection status. */ - mongo_write_concern *write_concern; /**< The default write concern. */ - - mongo_error_t err; /**< Most recent driver error code. */ - int errcode; /**< Most recent errno or WSAGetLastError(). */ - char errstr[MONGO_ERR_LEN]; /**< String version of error. */ - int lasterrcode; /**< getlasterror code from the server. */ - char lasterrstr[MONGO_ERR_LEN]; /**< getlasterror string from the server. */ -} mongo; - -typedef struct { - mongo_reply *reply; /**< reply is owned by cursor */ - mongo *conn; /**< connection is *not* owned by cursor */ - const char *ns; /**< owned by cursor */ - int flags; /**< Flags used internally by this drivers. */ - int seen; /**< Number returned so far. */ - bson current; /**< This cursor's current bson object. */ - mongo_cursor_error_t err; /**< Errors on this cursor. */ - const bson *query; /**< Bitfield containing cursor options. */ - const bson *fields;/**< Bitfield containing cursor options. */ - int options; /**< Bitfield containing cursor options. */ - int limit; /**< Bitfield containing cursor options. */ - int skip; /**< Bitfield containing cursor options. */ -} mongo_cursor; - -/********************************************************************* -Connection API -**********************************************************************/ - -/** Initialize sockets for Windows. - */ -MONGO_EXPORT void mongo_init_sockets(); - -/** - * Initialize a new mongo connection object. You must initialize each mongo - * object using this function. - * - * @note When finished, you must pass this object to - * mongo_destroy( ). - * - * @param conn a mongo connection object allocated on the stack - * or heap. - */ -MONGO_EXPORT void mongo_init( mongo *conn ); - -/** - * Connect to a single MongoDB server. - * - * @param conn a mongo object. - * @param host a numerical network address or a network hostname. - * @param port the port to connect to. - * - * @return MONGO_OK or MONGO_ERROR on failure. On failure, a constant of type - * mongo_error_t will be set on the conn->err field. - */ -MONGO_EXPORT int mongo_connect( mongo *conn , const char *host, int port ); - -/** - * Set up this connection object for connecting to a replica set. - * To connect, pass the object to mongo_replset_connect(). - * - * @param conn a mongo object. - * @param name the name of the replica set to connect to. - * */ -MONGO_EXPORT void mongo_replset_init( mongo *conn, const char *name ); - -/** - * Add a seed node to the replica set connection object. - * - * You must specify at least one seed node before connecting to a replica set. - * - * @param conn a mongo object. - * @param host a numerical network address or a network hostname. - * @param port the port to connect to. - */ -MONGO_EXPORT void mongo_replset_add_seed( mongo *conn, const char *host, int port ); - -/** - * Utility function for converting a host-port string to a mongo_host_port. - * - * @param host_string a string containing either a host or a host and port separated - * by a colon. - * @param host_port the mongo_host_port object to write the result to. - */ -void mongo_parse_host( const char *host_string, mongo_host_port *host_port ); - -/** - * Utility function for validation database and collection names. - * - * @param conn a mongo object. - * - * @return MONGO_OK or MONGO_ERROR on failure. On failure, a constant of type - * mongo_conn_return_t will be set on the conn->err field. 
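A small sketch of the namespace check documented above; the exact error code is an assumption based on the mongo_error_t comment for MONGO_NS_INVALID:

    /* namespaces are "dbname.collectionname" */
    if( mongo_validate_ns( conn, "test.people" ) != MONGO_OK ) {
        /* presumably conn->err == MONGO_NS_INVALID; conn->errstr carries the message */
    }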
- * - */ -MONGO_EXPORT int mongo_validate_ns( mongo *conn, const char *ns ); - -/** - * Connect to a replica set. - * - * Before passing a connection object to this function, you must already have called - * mongo_set_replset and mongo_replset_add_seed. - * - * @param conn a mongo object. - * - * @return MONGO_OK or MONGO_ERROR on failure. On failure, a constant of type - * mongo_conn_return_t will be set on the conn->err field. - */ -MONGO_EXPORT int mongo_replset_connect( mongo *conn ); - -/** Set a timeout for operations on this connection. This - * is a platform-specific feature, and only work on *nix - * system. You must also compile for linux to support this. - * - * @param conn a mongo object. - * @param millis timeout time in milliseconds. - * - * @return MONGO_OK. On error, return MONGO_ERROR and - * set the conn->err field. - */ -MONGO_EXPORT int mongo_set_op_timeout( mongo *conn, int millis ); - -/** - * Ensure that this connection is healthy by performing - * a round-trip to the server. - * - * @param conn a mongo connection - * - * @return MONGO_OK if connected; otherwise, MONGO_ERROR. - */ -MONGO_EXPORT int mongo_check_connection( mongo *conn ); - -/** - * Try reconnecting to the server using the existing connection settings. - * - * This function will disconnect the current socket. If you've authenticated, - * you'll need to re-authenticate after calling this function. - * - * @param conn a mongo object. - * - * @return MONGO_OK or MONGO_ERROR and - * set the conn->err field. - */ -MONGO_EXPORT int mongo_reconnect( mongo *conn ); - -/** - * Close the current connection to the server. After calling - * this function, you may call mongo_reconnect with the same - * connection object. - * - * @param conn a mongo object. - */ -MONGO_EXPORT void mongo_disconnect( mongo *conn ); - -/** - * Close any existing connection to the server and free all allocated - * memory associated with the conn object. - * - * You must always call this function when finished with the connection object. - * - * @param conn a mongo object. - */ -MONGO_EXPORT void mongo_destroy( mongo *conn ); - -/** - * Specify the write concern object that this connection should use - * by default for all writes (inserts, updates, and deletes). This value - * can be overridden by passing a write_concern object to any write function. - * - * @param conn a mongo object. - * @param write_concern pointer to a write concern object. - * - */ -MONGO_EXPORT void mongo_set_write_concern( mongo *conn, - mongo_write_concern *write_concern ); - - -/********************************************************************* -CRUD API -**********************************************************************/ - -/** - * Insert a BSON document into a MongoDB server. This function - * will fail if the supplied BSON struct is not UTF-8 or if - * the keys are invalid for insert (contain '.' or start with '$'). - * - * The default write concern set on the conn object will be used. - * - * @param conn a mongo object. - * @param ns the namespace. - * @param data the bson data. - * @param custom_write_concern a write concern object that will - * override any write concern set on the conn object. - * - * @return MONGO_OK or MONGO_ERROR. If the conn->err - * field is MONGO_BSON_INVALID, check the err field - * on the bson struct for the reason. - */ -MONGO_EXPORT int mongo_insert( mongo *conn, const char *ns, const bson *data, - mongo_write_concern *custom_write_concern ); - -/** - * Insert a batch of BSON documents into a MongoDB server. 
This function - * will fail if any of the documents to be inserted is invalid. - * - * The default write concern set on the conn object will be used. - * - * @param conn a mongo object. - * @param ns the namespace. - * @param data the bson data. - * @param num the number of documents in data. - * @param custom_write_concern a write concern object that will - * override any write concern set on the conn object. - * @param flags flags on this batch insert. Currently, this value - * may be 0 or MONGO_CONTINUE_ON_ERROR, which will cause the - * batch insert to continue even if a given insert in the batch fails. - * - * @return MONGO_OK or MONGO_ERROR. - * - */ -MONGO_EXPORT int mongo_insert_batch( mongo *conn, const char *ns, - const bson **data, int num, mongo_write_concern *custom_write_concern, - int flags ); - -/** - * Update a document in a MongoDB server. - * - * The default write concern set on the conn object will be used. - * - * @param conn a mongo object. - * @param ns the namespace. - * @param cond the bson update query. - * @param op the bson update data. - * @param flags flags for the update. - * @param custom_write_concern a write concern object that will - * override any write concern set on the conn object. - * - * @return MONGO_OK or MONGO_ERROR with error stored in conn object. - * - */ -MONGO_EXPORT int mongo_update( mongo *conn, const char *ns, const bson *cond, - const bson *op, int flags, mongo_write_concern *custom_write_concern ); - -/** - * Remove a document from a MongoDB server. - * - * The default write concern set on the conn object will be used. - * - * @param conn a mongo object. - * @param ns the namespace. - * @param cond the bson query. - * @param custom_write_concern a write concern object that will - * override any write concern set on the conn object. - * - * @return MONGO_OK or MONGO_ERROR with error stored in conn object. - */ -MONGO_EXPORT int mongo_remove( mongo *conn, const char *ns, const bson *cond, - mongo_write_concern *custom_write_concern ); - - -/********************************************************************* -Write Concern API -**********************************************************************/ - -/** - * Initialize a mongo_write_concern object. Effectively zeroes out the struct. - * - */ -MONGO_EXPORT void mongo_write_concern_init( mongo_write_concern *write_concern ); - -/** - * Finish this write concern object by serializing the literal getlasterror - * command that will be sent to the server. - * - * You must call mongo_write_concern_destroy() to free the serialized BSON. - * - */ -MONGO_EXPORT int mongo_write_concern_finish( mongo_write_concern *write_concern ); - -/** - * Free the write_concern object (specifically, the BSON that it owns). - * - */ -MONGO_EXPORT void mongo_write_concern_destroy( mongo_write_concern *write_concern ); - -/********************************************************************* -Cursor API -**********************************************************************/ - -/** - * Find documents in a MongoDB server. - * - * @param conn a mongo object. - * @param ns the namespace. - * @param query the bson query. - * @param fields a bson document of fields to be returned. - * @param limit the maximum number of documents to retrun. - * @param skip the number of documents to skip. - * @param options A bitfield containing cursor options. - * - * @return A cursor object allocated on the heap or NULL if - * an error has occurred. For finer-grained error checking, - * use the cursor builder API instead. 
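A short sketch of the update and remove helpers documented above, assuming a connected mongo *conn; namespace, fields and criteria are illustrative:

    bson cond, op;

    bson_init( &cond );
    bson_append_string( &cond, "name", "example" );
    bson_finish( &cond );

    bson_init( &op );
    bson_append_start_object( &op, "$set" );
    bson_append_int( &op, "visits", 1 );
    bson_append_finish_object( &op );
    bson_finish( &op );

    /* create the document if no match exists, otherwise update every match */
    mongo_update( conn, "test.people", &cond, &op,
                  MONGO_UPDATE_UPSERT | MONGO_UPDATE_MULTI, NULL );

    /* later: delete everything matching the same criteria */
    mongo_remove( conn, "test.people", &cond, NULL );

    bson_destroy( &op );
    bson_destroy( &cond );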
- */ -MONGO_EXPORT mongo_cursor *mongo_find( mongo *conn, const char *ns, const bson *query, - const bson *fields, int limit, int skip, int options ); - -/** - * Initalize a new cursor object. - * - * @param cursor - * @param ns the namespace, represented as the the database - * name and collection name separated by a dot. e.g., "test.users" - */ -MONGO_EXPORT void mongo_cursor_init( mongo_cursor *cursor, mongo *conn, const char *ns ); - -/** - * Set the bson object specifying this cursor's query spec. If - * your query is the empty bson object "{}", then you need not - * set this value. - * - * @param cursor - * @param query a bson object representing the query spec. This may - * be either a simple query spec or a complex spec storing values for - * $query, $orderby, $hint, and/or $explain. See - * http://www.mongodb.org/display/DOCS/Mongo+Wire+Protocol for details. - */ -MONGO_EXPORT void mongo_cursor_set_query( mongo_cursor *cursor, const bson *query ); - -/** - * Set the fields to return for this cursor. If you want to return - * all fields, you need not set this value. - * - * @param cursor - * @param fields a bson object representing the fields to return. - * See http://www.mongodb.org/display/DOCS/Retrieving+a+Subset+of+Fields. - */ -MONGO_EXPORT void mongo_cursor_set_fields( mongo_cursor *cursor, const bson *fields ); - -/** - * Set the number of documents to skip. - * - * @param cursor - * @param skip - */ -MONGO_EXPORT void mongo_cursor_set_skip( mongo_cursor *cursor, int skip ); - -/** - * Set the number of documents to return. - * - * @param cursor - * @param limit - */ -MONGO_EXPORT void mongo_cursor_set_limit( mongo_cursor *cursor, int limit ); - -/** - * Set any of the available query options (e.g., MONGO_TAILABLE). - * - * @param cursor - * @param options a bitfield storing query options. See - * mongo_cursor_bitfield_t for available constants. - */ -MONGO_EXPORT void mongo_cursor_set_options( mongo_cursor *cursor, int options ); - -/** - * Return the current BSON object data as a const char*. This is useful - * for creating bson iterators with bson_iterator_init. - * - * @param cursor - */ -MONGO_EXPORT const char *mongo_cursor_data( mongo_cursor *cursor ); - -/** - * Return the current BSON object data as a const char*. This is useful - * for creating bson iterators with bson_iterator_init. - * - * @param cursor - */ -MONGO_EXPORT const bson *mongo_cursor_bson( mongo_cursor *cursor ); - -/** - * Iterate the cursor, returning the next item. When successful, - * the returned object will be stored in cursor->current; - * - * @param cursor - * - * @return MONGO_OK. On error, returns MONGO_ERROR and sets - * cursor->err with a value of mongo_error_t. - */ -MONGO_EXPORT int mongo_cursor_next( mongo_cursor *cursor ); - -/** - * Destroy a cursor object. When finished with a cursor, you - * must pass it to this function. - * - * @param cursor the cursor to destroy. - * - * @return MONGO_OK or an error code. On error, check cursor->conn->err - * for errors. - */ -MONGO_EXPORT int mongo_cursor_destroy( mongo_cursor *cursor ); - -/** - * Find a single document in a MongoDB server. - * - * @param conn a mongo object. - * @param ns the namespace. - * @param query the bson query. - * @param fields a bson document of the fields to be returned. - * @param out a bson document in which to put the query result. - * - */ -/* out can be NULL if you don't care about results. 
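mongo_find_one() hands back an owned copy of the first match, so the caller must destroy both the query and the result. A sketch with illustrative names:

    bson query, result;

    bson_init( &query );
    bson_append_string( &query, "name", "example" );
    bson_finish( &query );

    if( mongo_find_one( conn, "test.people", &query, NULL, &result ) == MONGO_OK ) {
        /* result is a standalone copy of the matching document */
        bson_destroy( &result );
    }

    bson_destroy( &query );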
useful for commands */ -MONGO_EXPORT int mongo_find_one( mongo *conn, const char *ns, const bson *query, - const bson *fields, bson *out ); - - -/********************************************************************* -Command API and Helpers -**********************************************************************/ - -/** - * Count the number of documents in a collection matching a query. - * - * @param conn a mongo object. - * @param db the db name. - * @param coll the collection name. - * @param query the BSON query. - * - * @return the number of matching documents. If the command fails, - * MONGO_ERROR is returned. - */ -MONGO_EXPORT double mongo_count( mongo *conn, const char *db, const char *coll, - const bson *query ); - -/** - * Create a compound index. - * - * @param conn a mongo object. - * @param ns the namespace. - * @param data the bson index data. - * @param options a bitfield for setting index options. Possibilities include - * MONGO_INDEX_UNIQUE, MONGO_INDEX_DROP_DUPS, MONGO_INDEX_BACKGROUND, - * and MONGO_INDEX_SPARSE. - * @param out a bson document containing errors, if any. - * - * @return MONGO_OK if index is created successfully; otherwise, MONGO_ERROR. - */ -MONGO_EXPORT int mongo_create_index( mongo *conn, const char *ns, - const bson *key, int options, bson *out ); - -/** - * Create a capped collection. - * - * @param conn a mongo object. - * @param ns the namespace (e.g., "dbname.collectioname") - * @param size the size of the capped collection in bytes. - * @param max the max number of documents this collection is - * allowed to contain. If zero, this argument will be ignored - * and the server will use the collection's size to age document out. - * If using this option, ensure that the total size can contain this - * number of documents. - */ -MONGO_EXPORT int mongo_create_capped_collection( mongo *conn, const char *db, - const char *collection, int size, int max, bson *out ); - -/** - * Create an index with a single key. - * - * @param conn a mongo object. - * @param ns the namespace. - * @param field the index key. - * @param options index options. - * @param out a BSON document containing errors, if any. - * - * @return true if the index was created. - */ -MONGO_EXPORT bson_bool_t mongo_create_simple_index( mongo *conn, const char *ns, - const char *field, int options, bson *out ); - -/** - * Run a command on a MongoDB server. - * - * @param conn a mongo object. - * @param db the name of the database. - * @param command the BSON command to run. - * @param out the BSON result of the command. - * - * @return MONGO_OK if the command ran without error. - */ -MONGO_EXPORT int mongo_run_command( mongo *conn, const char *db, - const bson *command, bson *out ); - -/** - * Run a command that accepts a simple string key and integer value. - * - * @param conn a mongo object. - * @param db the name of the database. - * @param cmd the command to run. - * @param arg the integer argument to the command. - * @param out the BSON result of the command. - * - * @return MONGO_OK or an error code. - * - */ -MONGO_EXPORT int mongo_simple_int_command( mongo *conn, const char *db, - const char *cmd, int arg, bson *out ); - -/** - * Run a command that accepts a simple string key and value. - * - * @param conn a mongo object. - * @param db the name of the database. - * @param cmd the command to run. - * @param arg the string argument to the command. - * @param out the BSON result of the command. - * - * @return true if the command ran without error. 
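Most of the helpers above reduce to mongo_run_command(); the one-key wrappers cover the common case, e.g. the same ping that mongo_check_connection() issues:

    /* equivalent to running { ping: 1 } against the admin database */
    if( mongo_simple_int_command( conn, "admin", "ping", 1, NULL ) != MONGO_OK ) {
        /* conn->err is MONGO_COMMAND_FAILED, or a transport error */
    }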
- * - */ -MONGO_EXPORT int mongo_simple_str_command( mongo *conn, const char *db, - const char *cmd, const char *arg, bson *out ); - -/** - * Drop a database. - * - * @param conn a mongo object. - * @param db the name of the database to drop. - * - * @return MONGO_OK or an error code. - */ -MONGO_EXPORT int mongo_cmd_drop_db( mongo *conn, const char *db ); - -/** - * Drop a collection. - * - * @param conn a mongo object. - * @param db the name of the database. - * @param collection the name of the collection to drop. - * @param out a BSON document containing the result of the command. - * - * @return true if the collection drop was successful. - */ -MONGO_EXPORT int mongo_cmd_drop_collection( mongo *conn, const char *db, - const char *collection, bson *out ); - -/** - * Add a database user. - * - * @param conn a mongo object. - * @param db the database in which to add the user. - * @param user the user name - * @param pass the user password - * - * @return MONGO_OK or MONGO_ERROR. - */ -MONGO_EXPORT int mongo_cmd_add_user( mongo *conn, const char *db, - const char *user, const char *pass ); - -/** - * Authenticate a user. - * - * @param conn a mongo object. - * @param db the database to authenticate against. - * @param user the user name to authenticate. - * @param pass the user's password. - * - * @return MONGO_OK on sucess and MONGO_ERROR on failure. - */ -MONGO_EXPORT int mongo_cmd_authenticate( mongo *conn, const char *db, - const char *user, const char *pass ); - -/** - * Check if the current server is a master. - * - * @param conn a mongo object. - * @param out a BSON result of the command. - * - * @return true if the server is a master. - */ -/* return value is master status */ -MONGO_EXPORT bson_bool_t mongo_cmd_ismaster( mongo *conn, bson *out ); - -/** - * Get the error for the last command with the current connection. - * - * @param conn a mongo object. - * @param db the name of the database. - * @param out a BSON object containing the error details. - * - * @return MONGO_OK if no error and MONGO_ERROR on error. On error, check the values - * of conn->lasterrcode and conn->lasterrstr for the error status. - */ -MONGO_EXPORT int mongo_cmd_get_last_error( mongo *conn, const char *db, bson *out ); - -/** - * Get the most recent error with the current connection. - * - * @param conn a mongo object. - * @param db the name of the database. - * @param out a BSON object containing the error details. - * - * @return MONGO_OK if no error and MONGO_ERROR on error. On error, check the values - * of conn->lasterrcode and conn->lasterrstr for the error status. - */ -MONGO_EXPORT int mongo_cmd_get_prev_error( mongo *conn, const char *db, bson *out ); - -/** - * Reset the error state for the connection. - * - * @param conn a mongo object. - * @param db the name of the database. 
- */ -MONGO_EXPORT void mongo_cmd_reset_error( mongo *conn, const char *db ); - - -/********************************************************************* -Utility API -**********************************************************************/ - -MONGO_EXPORT mongo* mongo_create(); -MONGO_EXPORT void mongo_dispose(mongo* conn); -MONGO_EXPORT int mongo_get_err(mongo* conn); -MONGO_EXPORT int mongo_is_connected(mongo* conn); -MONGO_EXPORT int mongo_get_op_timeout(mongo* conn); -MONGO_EXPORT const char* mongo_get_primary(mongo* conn); -MONGO_EXPORT int mongo_get_socket(mongo* conn) ; -MONGO_EXPORT int mongo_get_host_count(mongo* conn); -MONGO_EXPORT const char* mongo_get_host(mongo* conn, int i); -MONGO_EXPORT mongo_cursor* mongo_cursor_create(); -MONGO_EXPORT void mongo_cursor_dispose(mongo_cursor* cursor); -MONGO_EXPORT int mongo_get_server_err(mongo* conn); -MONGO_EXPORT const char* mongo_get_server_err_string(mongo* conn); - -/** - * Set an error on a mongo connection object. Mostly for internal use. - * - * @param conn a mongo connection object. - * @param err a driver error code of mongo_error_t. - * @param errstr a string version of the error. - * @param errorcode Currently errno or WSAGetLastError(). - */ -MONGO_EXPORT void __mongo_set_error( mongo *conn, mongo_error_t err, - const char *errstr, int errorcode ); -/** - * Clear all errors stored on a mongo connection object. - * - * @param conn a mongo connection object. - */ -MONGO_EXPORT void mongo_clear_errors( mongo *conn ); - -MONGO_EXTERN_C_END - -#endif diff --git a/mongo-c-driver-v0.6/src/numbers.c b/mongo-c-driver-v0.6/src/numbers.c deleted file mode 100644 index b3032d5..0000000 --- a/mongo-c-driver-v0.6/src/numbers.c +++ /dev/null @@ -1,127 +0,0 @@ -/* Copyright 2009-2012 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/* all the numbers that fit in a 4 byte string */ -const char bson_numstrs[1000][4] = { - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", - "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", - "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", - "30", "31", "32", "33", "34", "35", "36", "37", "38", "39", - "40", "41", "42", "43", "44", "45", "46", "47", "48", "49", - "50", "51", "52", "53", "54", "55", "56", "57", "58", "59", - "60", "61", "62", "63", "64", "65", "66", "67", "68", "69", - "70", "71", "72", "73", "74", "75", "76", "77", "78", "79", - "80", "81", "82", "83", "84", "85", "86", "87", "88", "89", - "90", "91", "92", "93", "94", "95", "96", "97", "98", "99", - - "100", "101", "102", "103", "104", "105", "106", "107", "108", "109", - "110", "111", "112", "113", "114", "115", "116", "117", "118", "119", - "120", "121", "122", "123", "124", "125", "126", "127", "128", "129", - "130", "131", "132", "133", "134", "135", "136", "137", "138", "139", - "140", "141", "142", "143", "144", "145", "146", "147", "148", "149", - "150", "151", "152", "153", "154", "155", "156", "157", "158", "159", - "160", "161", "162", "163", "164", "165", "166", "167", "168", "169", - "170", "171", "172", "173", "174", "175", "176", "177", "178", "179", - "180", "181", "182", "183", "184", "185", "186", "187", "188", "189", - "190", "191", "192", "193", "194", "195", "196", "197", "198", "199", - - "200", "201", "202", "203", "204", "205", "206", "207", "208", "209", - "210", "211", "212", "213", "214", "215", "216", "217", "218", "219", - "220", "221", "222", "223", "224", "225", "226", "227", "228", "229", - "230", "231", "232", "233", "234", "235", "236", "237", "238", "239", - "240", "241", "242", "243", "244", "245", "246", "247", "248", "249", - "250", "251", "252", "253", "254", "255", "256", "257", "258", "259", - "260", "261", "262", "263", "264", "265", "266", "267", "268", "269", - "270", "271", "272", "273", "274", "275", "276", "277", "278", "279", - "280", "281", "282", "283", "284", "285", "286", "287", "288", "289", - "290", "291", "292", "293", "294", "295", "296", "297", "298", "299", - - "300", "301", "302", "303", "304", "305", "306", "307", "308", "309", - "310", "311", "312", "313", "314", "315", "316", "317", "318", "319", - "320", "321", "322", "323", "324", "325", "326", "327", "328", "329", - "330", "331", "332", "333", "334", "335", "336", "337", "338", "339", - "340", "341", "342", "343", "344", "345", "346", "347", "348", "349", - "350", "351", "352", "353", "354", "355", "356", "357", "358", "359", - "360", "361", "362", "363", "364", "365", "366", "367", "368", "369", - "370", "371", "372", "373", "374", "375", "376", "377", "378", "379", - "380", "381", "382", "383", "384", "385", "386", "387", "388", "389", - "390", "391", "392", "393", "394", "395", "396", "397", "398", "399", - - "400", "401", "402", "403", "404", "405", "406", "407", "408", "409", - "410", "411", "412", "413", "414", "415", "416", "417", "418", "419", - "420", "421", "422", "423", "424", "425", "426", "427", "428", "429", - "430", "431", "432", "433", "434", "435", "436", "437", "438", "439", - "440", "441", "442", "443", "444", "445", "446", "447", "448", "449", - "450", "451", "452", "453", "454", "455", "456", "457", "458", "459", - "460", "461", "462", "463", "464", "465", "466", "467", "468", "469", - "470", "471", "472", "473", "474", "475", "476", "477", "478", "479", - "480", "481", "482", "483", "484", "485", "486", "487", "488", "489", - "490", "491", "492", "493", 
"494", "495", "496", "497", "498", "499", - - "500", "501", "502", "503", "504", "505", "506", "507", "508", "509", - "510", "511", "512", "513", "514", "515", "516", "517", "518", "519", - "520", "521", "522", "523", "524", "525", "526", "527", "528", "529", - "530", "531", "532", "533", "534", "535", "536", "537", "538", "539", - "540", "541", "542", "543", "544", "545", "546", "547", "548", "549", - "550", "551", "552", "553", "554", "555", "556", "557", "558", "559", - "560", "561", "562", "563", "564", "565", "566", "567", "568", "569", - "570", "571", "572", "573", "574", "575", "576", "577", "578", "579", - "580", "581", "582", "583", "584", "585", "586", "587", "588", "589", - "590", "591", "592", "593", "594", "595", "596", "597", "598", "599", - - "600", "601", "602", "603", "604", "605", "606", "607", "608", "609", - "610", "611", "612", "613", "614", "615", "616", "617", "618", "619", - "620", "621", "622", "623", "624", "625", "626", "627", "628", "629", - "630", "631", "632", "633", "634", "635", "636", "637", "638", "639", - "640", "641", "642", "643", "644", "645", "646", "647", "648", "649", - "650", "651", "652", "653", "654", "655", "656", "657", "658", "659", - "660", "661", "662", "663", "664", "665", "666", "667", "668", "669", - "670", "671", "672", "673", "674", "675", "676", "677", "678", "679", - "680", "681", "682", "683", "684", "685", "686", "687", "688", "689", - "690", "691", "692", "693", "694", "695", "696", "697", "698", "699", - - "700", "701", "702", "703", "704", "705", "706", "707", "708", "709", - "710", "711", "712", "713", "714", "715", "716", "717", "718", "719", - "720", "721", "722", "723", "724", "725", "726", "727", "728", "729", - "730", "731", "732", "733", "734", "735", "736", "737", "738", "739", - "740", "741", "742", "743", "744", "745", "746", "747", "748", "749", - "750", "751", "752", "753", "754", "755", "756", "757", "758", "759", - "760", "761", "762", "763", "764", "765", "766", "767", "768", "769", - "770", "771", "772", "773", "774", "775", "776", "777", "778", "779", - "780", "781", "782", "783", "784", "785", "786", "787", "788", "789", - "790", "791", "792", "793", "794", "795", "796", "797", "798", "799", - - "800", "801", "802", "803", "804", "805", "806", "807", "808", "809", - "810", "811", "812", "813", "814", "815", "816", "817", "818", "819", - "820", "821", "822", "823", "824", "825", "826", "827", "828", "829", - "830", "831", "832", "833", "834", "835", "836", "837", "838", "839", - "840", "841", "842", "843", "844", "845", "846", "847", "848", "849", - "850", "851", "852", "853", "854", "855", "856", "857", "858", "859", - "860", "861", "862", "863", "864", "865", "866", "867", "868", "869", - "870", "871", "872", "873", "874", "875", "876", "877", "878", "879", - "880", "881", "882", "883", "884", "885", "886", "887", "888", "889", - "890", "891", "892", "893", "894", "895", "896", "897", "898", "899", - - "900", "901", "902", "903", "904", "905", "906", "907", "908", "909", - "910", "911", "912", "913", "914", "915", "916", "917", "918", "919", - "920", "921", "922", "923", "924", "925", "926", "927", "928", "929", - "930", "931", "932", "933", "934", "935", "936", "937", "938", "939", - "940", "941", "942", "943", "944", "945", "946", "947", "948", "949", - "950", "951", "952", "953", "954", "955", "956", "957", "958", "959", - "960", "961", "962", "963", "964", "965", "966", "967", "968", "969", - "970", "971", "972", "973", "974", "975", "976", "977", "978", "979", - "980", "981", "982", "983", "984", "985", 
"986", "987", "988", "989", - "990", "991", "992", "993", "994", "995", "996", "997", "998", "999", -}; diff --git a/mongo-c-driver-v0.6/test/auth_test.c b/mongo-c-driver-v0.6/test/auth_test.c deleted file mode 100644 index e356850..0000000 --- a/mongo-c-driver-v0.6/test/auth_test.c +++ /dev/null @@ -1,29 +0,0 @@ -#include "test.h" -#include "mongo.h" -#include -#include -#include - -static const char *db = "test"; - -int main() { - - mongo conn[1]; - - INIT_SOCKETS_FOR_WINDOWS; - - if ( mongo_connect( conn , TEST_SERVER, 27017 ) ) { - printf( "failed to connect\n" ); - exit( 1 ); - } - - mongo_cmd_drop_db( conn, db ); - - ASSERT( mongo_cmd_authenticate( conn, db, "user", "password" ) == MONGO_ERROR ); - mongo_cmd_add_user( conn, db, "user", "password" ); - ASSERT( mongo_cmd_authenticate( conn, db, "user", "password" ) == MONGO_OK ); - - mongo_cmd_drop_db( conn, db ); - mongo_destroy( conn ); - return 0; -} diff --git a/mongo-c-driver-v0.6/test/benchmark_test.c b/mongo-c-driver-v0.6/test/benchmark_test.c deleted file mode 100644 index e83268a..0000000 --- a/mongo-c-driver-v0.6/test/benchmark_test.c +++ /dev/null @@ -1,454 +0,0 @@ -/* test.c */ - -#include "test.h" -#include "mongo.h" -#include -#include -#include - -#ifndef _WIN32 -#include -#endif - -/* supports preprocessor concatenation */ -#define DB "benchmarks" - -/* finds without indexes */ -#define DO_SLOW_TESTS 1 - -#ifndef TEST_SERVER -#define TEST_SERVER "127.0.0.1" -#endif - -#define PER_TRIAL 5000 -#define BATCH_SIZE 100 - -static mongo conn[1]; - -static void make_small( bson *out, int i ) { - bson_init( out ); - bson_append_new_oid( out, "_id" ); - bson_append_int( out, "x", i ); - bson_finish( out ); -} - -static void make_medium( bson *out, int i ) { - bson_init( out ); - bson_append_new_oid( out, "_id" ); - bson_append_int( out, "x", i ); - bson_append_int( out, "integer", 5 ); - bson_append_double( out, "number", 5.05 ); - bson_append_bool( out, "boolean", 0 ); - - bson_append_start_array( out, "array" ); - bson_append_string( out, "0", "test" ); - bson_append_string( out, "1", "benchmark" ); - bson_append_finish_object( out ); - - bson_finish( out ); -} - -static const char *words[14] = { - "10gen","web","open","source","application","paas", - "platform-as-a-service","technology","helps", - "developers","focus","building","mongodb","mongo" -}; - -static void make_large( bson *out, int i ) { - int num; - char numstr[4]; - bson_init( out ); - - bson_append_new_oid( out, "_id" ); - bson_append_int( out, "x", i ); - bson_append_string( out, "base_url", "http://www.example.com/test-me" ); - bson_append_int( out, "total_word_count", 6743 ); - bson_append_int( out, "access_time", 999 ); /*TODO use date*/ - - bson_append_start_object( out, "meta_tags" ); - bson_append_string( out, "description", "i am a long description string" ); - bson_append_string( out, "author", "Holly Man" ); - bson_append_string( out, "dynamically_created_meta_tag", "who know\n what" ); - bson_append_finish_object( out ); - - bson_append_start_object( out, "page_structure" ); - bson_append_int( out, "counted_tags", 3450 ); - bson_append_int( out, "no_of_js_attached", 10 ); - bson_append_int( out, "no_of_images", 6 ); - bson_append_finish_object( out ); - - - bson_append_start_array( out, "harvested_words" ); - for ( num=0; num < 14*20; num++ ) { - bson_numstr( numstr, num ); - bson_append_string( out, numstr, words[num%14] ); - } - bson_append_finish_object( out ); - - bson_finish( out ); -} - -static void serialize_small_test() { - int i; - bson b; - 
for ( i=0; i -#include -#include - -int main() { - bson_iterator it[1], it2[1]; - bson b[1]; - bson sub[1]; - bson copy[1]; - bson_type type; - - bson_init( b ); - bson_append_string( b, "foo", "hello" ); - - { - bson_append_start_object( b, "o" ); - bson_append_string( b, "bar", "goodbye" ); - bson_append_finish_object( b ); - } - - bson_iterator_init( it, b ); - - bson_iterator_next( it ); - type = bson_iterator_next( it ); - - ASSERT( BSON_OBJECT == type ); - - bson_iterator_subobject( it, sub ); - ASSERT( sub->finished == 1 ); - - bson_iterator_init( it2, sub ); - - type = bson_iterator_next( it2 ); - ASSERT( BSON_STRING == type ); - type = bson_iterator_next( it2 ); - ASSERT( BSON_EOO == type ); - - bson_copy( copy, sub ); - - ASSERT( 1 == copy->finished ); - ASSERT( 0 == copy->stackPos ); - ASSERT( 0 == copy->err ); - - bson_destroy( b ); - - return 0; -} - diff --git a/mongo-c-driver-v0.6/test/bson_test.c b/mongo-c-driver-v0.6/test/bson_test.c deleted file mode 100644 index a75276e..0000000 --- a/mongo-c-driver-v0.6/test/bson_test.c +++ /dev/null @@ -1,274 +0,0 @@ -#include "test.h" -#include "bson.h" -#include -#include -#include - -int test_bson_generic() { - - bson_iterator it, it2, it3; - bson_oid_t oid; - bson_timestamp_t ts; - bson_timestamp_t ts_result; - bson b[1]; - bson copy[1]; - bson scope[1]; - - ts.i = 1; - ts.t = 2; - - bson_init( b ); - bson_append_double( b, "d", 3.14 ); - bson_append_string( b, "s", "hello" ); - bson_append_string_n( b, "s_n", "goodbye cruel world", 7 ); - - { - bson_append_start_object( b, "o" ); - bson_append_start_array( b, "a" ); - bson_append_binary( b, "0", 8, "w\0rld", 5 ); - bson_append_finish_object( b ); - bson_append_finish_object( b ); - } - - bson_append_undefined( b, "u" ); - - bson_oid_from_string( &oid, "010203040506070809101112" ); - ASSERT( !memcmp( oid.bytes, "\x001\x002\x003\x004\x005\x006\x007\x008\x009\x010\x011\x012", 12 ) ); - bson_append_oid( b, "oid", &oid ); - - bson_append_bool( b, "b", 1 ); - bson_append_date( b, "date", 0x0102030405060708 ); - bson_append_null( b, "n" ); - bson_append_regex( b, "r", "^asdf", "imx" ); - /* no dbref test (deprecated) */ - bson_append_code( b, "c", "function(){}" ); - bson_append_code_n( b, "c_n", "function(){}garbage", 12 ); - bson_append_symbol( b, "symbol", "symbol" ); - bson_append_symbol_n( b, "symbol_n", "symbol and garbage", 6 ); - - { - bson_init( scope ); - bson_append_int( scope, "i", 123 ); - bson_finish( scope ); - - bson_append_code_w_scope( b, "cws", "function(){return i}", scope ); - bson_destroy( scope ); - } - - bson_append_timestamp( b, "timestamp", &ts ); - bson_append_long( b, "l", 0x1122334455667788 ); - - /* Ensure that we can't copy a non-finished object. */ - ASSERT( bson_copy( copy, b ) == BSON_ERROR ); - - bson_finish( b ); - - ASSERT( b->err == BSON_VALID ); - - /* Test append after finish. 
*/ - ASSERT( bson_append_string( b, "foo", "bar" ) == BSON_ERROR ); - ASSERT( b->err & BSON_ALREADY_FINISHED ); - - ASSERT( bson_copy( copy, b ) == BSON_OK ); - - ASSERT( 1 == copy->finished ); - ASSERT( 0 == copy->err ); - - bson_print( b ); - - bson_iterator_init( &it, b ); - - ASSERT( bson_iterator_more( &it ) ); - ASSERT( bson_iterator_next( &it ) == BSON_DOUBLE ); - ASSERT( bson_iterator_type( &it ) == BSON_DOUBLE ); - ASSERT( !strcmp( bson_iterator_key( &it ), "d" ) ); - ASSERT( bson_iterator_double( &it ) == 3.14 ); - - ASSERT( bson_iterator_more( &it ) ); - ASSERT( bson_iterator_next( &it ) == BSON_STRING ); - ASSERT( bson_iterator_type( &it ) == BSON_STRING ); - ASSERT( !strcmp( bson_iterator_key( &it ), "s" ) ); - ASSERT( !strcmp( bson_iterator_string( &it ), "hello" ) ); - ASSERT( strcmp( bson_iterator_string( &it ), "" ) ); - - ASSERT( bson_iterator_more( &it ) ); - ASSERT( bson_iterator_next( &it ) == BSON_STRING ); - ASSERT( bson_iterator_type( &it ) == BSON_STRING ); - ASSERT( !strcmp( bson_iterator_key( &it ), "s_n" ) ); - ASSERT( !strcmp( bson_iterator_string( &it ), "goodbye" ) ); - - ASSERT( bson_iterator_more( &it ) ); - ASSERT( bson_iterator_next( &it ) == BSON_OBJECT ); - ASSERT( bson_iterator_type( &it ) == BSON_OBJECT ); - ASSERT( !strcmp( bson_iterator_key( &it ), "o" ) ); - bson_iterator_subiterator( &it, &it2 ); - - ASSERT( bson_iterator_more( &it2 ) ); - ASSERT( bson_iterator_next( &it2 ) == BSON_ARRAY ); - ASSERT( bson_iterator_type( &it2 ) == BSON_ARRAY ); - ASSERT( !strcmp( bson_iterator_key( &it2 ), "a" ) ); - bson_iterator_subiterator( &it2, &it3 ); - - ASSERT( bson_iterator_more( &it3 ) ); - ASSERT( bson_iterator_next( &it3 ) == BSON_BINDATA ); - ASSERT( bson_iterator_type( &it3 ) == BSON_BINDATA ); - ASSERT( !strcmp( bson_iterator_key( &it3 ), "0" ) ); - ASSERT( bson_iterator_bin_type( &it3 ) == 8 ); - ASSERT( bson_iterator_bin_len( &it3 ) == 5 ); - ASSERT( !memcmp( bson_iterator_bin_data( &it3 ), "w\0rld", 5 ) ); - - ASSERT( bson_iterator_more( &it3 ) ); - ASSERT( bson_iterator_next( &it3 ) == BSON_EOO ); - ASSERT( bson_iterator_type( &it3 ) == BSON_EOO ); - ASSERT( !bson_iterator_more( &it3 ) ); - - ASSERT( bson_iterator_more( &it2 ) ); - ASSERT( bson_iterator_next( &it2 ) == BSON_EOO ); - ASSERT( bson_iterator_type( &it2 ) == BSON_EOO ); - ASSERT( !bson_iterator_more( &it2 ) ); - - ASSERT( bson_iterator_more( &it ) ); - ASSERT( bson_iterator_next( &it ) == BSON_UNDEFINED ); - ASSERT( bson_iterator_type( &it ) == BSON_UNDEFINED ); - ASSERT( !strcmp( bson_iterator_key( &it ), "u" ) ); - - ASSERT( bson_iterator_more( &it ) ); - ASSERT( bson_iterator_next( &it ) == BSON_OID ); - ASSERT( bson_iterator_type( &it ) == BSON_OID ); - ASSERT( !strcmp( bson_iterator_key( &it ), "oid" ) ); - ASSERT( !memcmp( bson_iterator_oid( &it )->bytes, "\x001\x002\x003\x004\x005\x006\x007\x008\x009\x010\x011\x012", 12 ) ); - ASSERT( bson_iterator_oid( &it )->ints[0] == oid.ints[0] ); - ASSERT( bson_iterator_oid( &it )->ints[1] == oid.ints[1] ); - ASSERT( bson_iterator_oid( &it )->ints[2] == oid.ints[2] ); - - ASSERT( bson_iterator_more( &it ) ); - ASSERT( bson_iterator_next( &it ) == BSON_BOOL ); - ASSERT( bson_iterator_type( &it ) == BSON_BOOL ); - ASSERT( !strcmp( bson_iterator_key( &it ), "b" ) ); - ASSERT( bson_iterator_bool( &it ) == 1 ); - - ASSERT( bson_iterator_more( &it ) ); - ASSERT( bson_iterator_next( &it ) == BSON_DATE ); - ASSERT( bson_iterator_type( &it ) == BSON_DATE ); - ASSERT( !strcmp( bson_iterator_key( &it ), "date" ) ); - ASSERT( bson_iterator_date( &it 
) == 0x0102030405060708 ); - - ASSERT( bson_iterator_more( &it ) ); - ASSERT( bson_iterator_next( &it ) == BSON_NULL ); - ASSERT( bson_iterator_type( &it ) == BSON_NULL ); - ASSERT( !strcmp( bson_iterator_key( &it ), "n" ) ); - - ASSERT( bson_iterator_more( &it ) ); - ASSERT( bson_iterator_next( &it ) == BSON_REGEX ); - ASSERT( bson_iterator_type( &it ) == BSON_REGEX ); - ASSERT( !strcmp( bson_iterator_key( &it ), "r" ) ); - ASSERT( !strcmp( bson_iterator_regex( &it ), "^asdf" ) ); - ASSERT( !strcmp( bson_iterator_regex_opts( &it ), "imx" ) ); - - ASSERT( bson_iterator_more( &it ) ); - ASSERT( bson_iterator_next( &it ) == BSON_CODE ); - ASSERT( bson_iterator_type( &it ) == BSON_CODE ); - ASSERT( !strcmp( bson_iterator_code(&it), "function(){}") ); - ASSERT( !strcmp( bson_iterator_key( &it ), "c" ) ); - ASSERT( !strcmp( bson_iterator_string( &it ), "" ) ); - - ASSERT( bson_iterator_more( &it ) ); - ASSERT( bson_iterator_next( &it ) == BSON_CODE ); - ASSERT( bson_iterator_type( &it ) == BSON_CODE ); - ASSERT( !strcmp( bson_iterator_key( &it ), "c_n" ) ); - ASSERT( !strcmp( bson_iterator_string( &it ), "" ) ); - ASSERT( !strcmp( bson_iterator_code( &it ), "function(){}" ) ); - - ASSERT( bson_iterator_more( &it ) ); - ASSERT( bson_iterator_next( &it ) == BSON_SYMBOL ); - ASSERT( bson_iterator_type( &it ) == BSON_SYMBOL ); - ASSERT( !strcmp( bson_iterator_key( &it ), "symbol" ) ); - ASSERT( !strcmp( bson_iterator_string( &it ), "symbol" ) ); - - ASSERT( bson_iterator_more( &it ) ); - ASSERT( bson_iterator_next( &it ) == BSON_SYMBOL ); - ASSERT( bson_iterator_type( &it ) == BSON_SYMBOL ); - ASSERT( !strcmp( bson_iterator_key( &it ), "symbol_n" ) ); - ASSERT( !strcmp( bson_iterator_string( &it ), "symbol" ) ); - - ASSERT( bson_iterator_more( &it ) ); - ASSERT( bson_iterator_next( &it ) == BSON_CODEWSCOPE ); - ASSERT( bson_iterator_type( &it ) == BSON_CODEWSCOPE ); - ASSERT( !strcmp( bson_iterator_key( &it ), "cws" ) ); - ASSERT( !strcmp( bson_iterator_code( &it ), "function(){return i}" ) ); - - { - bson scope; - bson_iterator_code_scope( &it, &scope ); - bson_iterator_init( &it2, &scope ); - - ASSERT( bson_iterator_more( &it2 ) ); - ASSERT( bson_iterator_next( &it2 ) == BSON_INT ); - ASSERT( bson_iterator_type( &it2 ) == BSON_INT ); - ASSERT( !strcmp( bson_iterator_key( &it2 ), "i" ) ); - ASSERT( bson_iterator_int( &it2 ) == 123 ); - - ASSERT( bson_iterator_more( &it2 ) ); - ASSERT( bson_iterator_next( &it2 ) == BSON_EOO ); - ASSERT( bson_iterator_type( &it2 ) == BSON_EOO ); - ASSERT( !bson_iterator_more( &it2 ) ); - } - - ASSERT( bson_iterator_more( &it ) ); - ASSERT( bson_iterator_next( &it ) == BSON_TIMESTAMP ); - ASSERT( bson_iterator_type( &it ) == BSON_TIMESTAMP ); - ASSERT( !strcmp( bson_iterator_key( &it ), "timestamp" ) ); - ts_result = bson_iterator_timestamp( &it ); - ASSERT( ts_result.i == 1 ); - ASSERT( ts_result.t == 2 ); - - ASSERT( bson_iterator_more( &it ) ); - ASSERT( bson_iterator_next( &it ) == BSON_LONG ); - ASSERT( bson_iterator_type( &it ) == BSON_LONG ); - ASSERT( !strcmp( bson_iterator_key( &it ), "l" ) ); - ASSERT( bson_iterator_long( &it ) == 0x1122334455667788 ); - - ASSERT( bson_iterator_more( &it ) ); - ASSERT( bson_iterator_next( &it ) == BSON_EOO ); - ASSERT( bson_iterator_type( &it ) == BSON_EOO ); - ASSERT( !bson_iterator_more( &it ) ); - - bson_destroy( b ); - - return 0; -} - -int test_bson_iterator() { - bson b[1]; - bson_iterator i[1]; - - bson_iterator_init( i, bson_empty( b ) ); - bson_iterator_next( i ); - bson_iterator_type( i ); - - bson_find( i, 
bson_empty( b ), "foo" ); - - return 0; -} - -int test_bson_size( void ) { - bson bsmall[1]; - - bson_init( bsmall ); - bson_append_int( bsmall, "a", 1 ); - bson_finish( bsmall ); - - ASSERT( bson_size( bsmall ) == 12 ); - - return 0; -} - -int main() { - - test_bson_generic(); - test_bson_iterator(); - test_bson_size(); - - return 0; -} - diff --git a/mongo-c-driver-v0.6/test/commands_test.c b/mongo-c-driver-v0.6/test/commands_test.c deleted file mode 100644 index 75a894e..0000000 --- a/mongo-c-driver-v0.6/test/commands_test.c +++ /dev/null @@ -1,44 +0,0 @@ -/* commands_test.c */ - -#include "test.h" -#include "mongo.h" -#include -#include -#include - -int main() { - mongo conn[1]; - bson cmd[1]; - bson out[1]; - bson_iterator it[1]; - - const char *db = "test"; - const char *col = "c.capped"; - - INIT_SOCKETS_FOR_WINDOWS; - - if ( mongo_connect( conn , TEST_SERVER , 27017 ) ) { - printf( "failed to connect\n" ); - exit( 1 ); - } - - mongo_cmd_drop_collection( conn, db, col, NULL ); - - ASSERT( mongo_create_capped_collection( conn, db, col, - 1024, 100, NULL ) == MONGO_OK ); - - bson_init( cmd ); - bson_append_string( cmd, "collstats", col ); - bson_finish( cmd ); - - ASSERT( mongo_run_command( conn, db, cmd, out ) == MONGO_OK ); - - ASSERT( bson_find( it, out, "capped" ) == BSON_INT ); - ASSERT( bson_find( it, out, "max" ) == BSON_INT ); - - mongo_cmd_drop_collection( conn, "test", col, NULL ); - mongo_cmd_drop_db( conn, db ); - - mongo_destroy( conn ); - return 0; -} diff --git a/mongo-c-driver-v0.6/test/count_delete_test.c b/mongo-c-driver-v0.6/test/count_delete_test.c deleted file mode 100644 index 3200947..0000000 --- a/mongo-c-driver-v0.6/test/count_delete_test.c +++ /dev/null @@ -1,64 +0,0 @@ -/* count_delete.c */ - -#include "test.h" -#include "mongo.h" -#include -#include -#include - -int main() { - mongo conn[1]; - bson b; - int i; - - const char *db = "test"; - const char *col = "c.simple"; - const char *ns = "test.c.simple"; - - INIT_SOCKETS_FOR_WINDOWS; - - if ( mongo_connect( conn , TEST_SERVER , 27017 ) ) { - printf( "failed to connect\n" ); - exit( 1 ); - } - - /* if the collection doesn't exist dropping it will fail */ - if ( !mongo_cmd_drop_collection( conn, "test", col, NULL ) - && mongo_count( conn, db, col, NULL ) != 0 ) { - printf( "failed to drop collection\n" ); - exit( 1 ); - } - - for( i=0; i< 5; i++ ) { - bson_init( &b ); - - bson_append_new_oid( &b, "_id" ); - bson_append_int( &b , "a" , i+1 ); /* 1 to 5 */ - bson_finish( &b ); - - mongo_insert( conn , ns , &b, NULL ); - bson_destroy( &b ); - } - - /* query: {a: {$gt: 3}} */ - bson_init( &b ); - { - bson_append_start_object( &b, "a" ); - bson_append_int( &b, "$gt", 3 ); - bson_append_finish_object( &b ); - } - bson_finish( &b ); - - ASSERT( mongo_count( conn, db, col, NULL ) == 5 ); - ASSERT( mongo_count( conn, db, col, &b ) == 2 ); - - mongo_remove( conn, ns, &b, NULL ); - - ASSERT( mongo_count( conn, db, col, NULL ) == 3 ); - ASSERT( mongo_count( conn, db, col, &b ) == 0 ); - - bson_destroy( &b ); - mongo_cmd_drop_db( conn, db ); - mongo_destroy( conn ); - return 0; -} diff --git a/mongo-c-driver-v0.6/test/cpptest.cpp b/mongo-c-driver-v0.6/test/cpptest.cpp deleted file mode 100644 index 1b5000f..0000000 --- a/mongo-c-driver-v0.6/test/cpptest.cpp +++ /dev/null @@ -1,50 +0,0 @@ -#include "mongo.h" -#include "test.h" -#include -#include -#include - -// this is just a simple test to make sure everything works when compiled with a c++ compiler - -using namespace std; - -int main(){ - mongo conn[1]; - bson b; - - 
INIT_SOCKETS_FOR_WINDOWS; - - if (mongo_connect( conn, TEST_SERVER, 27017 )){ - cout << "failed to connect" << endl; - return 1; - } - - for(int i=0; i< 5; i++){ - bson_init( &b ); - - bson_append_new_oid( &b, "_id" ); - bson_append_double( &b , "a" , 17 ); - bson_append_int( &b , "b" , 17 ); - bson_append_string( &b , "c" , "17" ); - - { - bson_append_start_object( &b , "d" ); - bson_append_int( &b, "i", 71 ); - bson_append_finish_object( &b ); - } - { - bson_append_start_array( &b , "e" ); - bson_append_int( &b, "0", 71 ); - bson_append_string( &b, "1", "71" ); - bson_append_finish_object( &b ); - } - - bson_finish(&b); - bson_destroy(&b); - } - - mongo_destroy( conn ); - - return 0; -} - diff --git a/mongo-c-driver-v0.6/test/cursors_test.c b/mongo-c-driver-v0.6/test/cursors_test.c deleted file mode 100644 index 048d06a..0000000 --- a/mongo-c-driver-v0.6/test/cursors_test.c +++ /dev/null @@ -1,204 +0,0 @@ -/* cursors.c */ - -#include "test.h" -#include "mongo.h" -#include -#include -#include -#include - -void create_capped_collection( mongo *conn ) { - bson b; - - bson_init( &b ); - bson_append_string( &b, "create", "cursors" ); - bson_append_bool( &b, "capped", 1 ); - bson_append_int( &b, "size", 1000000 ); - bson_finish( &b ); - - ASSERT( mongo_run_command( conn, "test", &b, NULL ) == MONGO_OK ); - - bson_destroy( &b ); -} - -void insert_sample_data( mongo *conn, int n ) { - bson b; - int i; - - for( i=0; ierr == MONGO_CURSOR_EXHAUSTED ); - - mongo_cursor_destroy( cursor ); - remove_sample_data( conn ); - return 0; -} - -int test_tailable( mongo *conn ) { - mongo_cursor *cursor; - bson b, e; - int count; - - remove_sample_data( conn ); - create_capped_collection( conn ); - insert_sample_data( conn, 10000 ); - - bson_init( &b ); - bson_append_start_object( &b, "$query" ); - bson_append_finish_object( &b ); - bson_append_start_object( &b, "$sort" ); - bson_append_int( &b, "$natural", -1 ); - bson_append_finish_object( &b ); - bson_finish( &b ); - - cursor = mongo_find( conn, "test.cursors", &b, bson_empty( &e ), 0, 0, MONGO_TAILABLE ); - bson_destroy( &b ); - - count = 0; - while( mongo_cursor_next( cursor ) == MONGO_OK ) - count++; - - ASSERT( count == 10000 ); - - ASSERT( mongo_cursor_next( cursor ) == MONGO_ERROR ); - ASSERT( cursor->err == MONGO_CURSOR_PENDING ); - - insert_sample_data( conn, 10 ); - - count = 0; - while( mongo_cursor_next( cursor ) == MONGO_OK ) { - count++; - } - - ASSERT( count == 10 ); - - ASSERT( mongo_cursor_next( cursor ) == MONGO_ERROR ); - ASSERT( cursor->err == MONGO_CURSOR_PENDING ); - - mongo_cursor_destroy( cursor ); - remove_sample_data( conn ); - - return 0; -} - -int test_builder_api( mongo *conn ) { - int count = 0; - mongo_cursor cursor[1]; - - remove_sample_data( conn ); - insert_sample_data( conn, 10000 ); - mongo_cursor_init( cursor, conn, "test.cursors" ); - - while( mongo_cursor_next( cursor ) == MONGO_OK ) { - count++; - } - ASSERT( count == 10000 ); - - mongo_cursor_destroy( cursor ); - - mongo_cursor_init( cursor, conn, "test.cursors" ); - mongo_cursor_set_limit( cursor, 10 ); - count = 0; - while( mongo_cursor_next( cursor ) == MONGO_OK ) { - count++; - } - ASSERT( count == 10 ); - mongo_cursor_destroy( cursor ); - - return 0; -} - -int test_bad_query( mongo *conn ) { - mongo_cursor cursor[1]; - bson b[1]; - - bson_init( b ); - bson_append_start_object( b, "foo" ); - bson_append_int( b, "$bad", 1 ); - bson_append_finish_object( b ); - bson_finish( b ); - - mongo_cursor_init( cursor, conn, "test.cursors" ); - mongo_cursor_set_query( cursor, 
b ); - - ASSERT( mongo_cursor_next( cursor ) == MONGO_ERROR ); - ASSERT( cursor->err == MONGO_CURSOR_QUERY_FAIL ); - ASSERT( cursor->conn->lasterrcode == 10068 ); - ASSERT( strlen( cursor->conn->lasterrstr ) > 0 ); - - mongo_cursor_destroy( cursor ); - bson_destroy( b ); - return 0; -} - -int test_copy_cursor_data( mongo *conn ) { - mongo_cursor cursor[1]; - bson b[1]; - - insert_sample_data( conn, 10 ); - mongo_cursor_init( cursor, conn, "test.cursors" ); - - mongo_cursor_next( cursor ); - - ASSERT( bson_copy( b, mongo_cursor_bson( cursor ) ) == MONGO_OK ); - - ASSERT( memcmp( (void *)b->data, (void *)(cursor->current).data, - bson_size( &cursor->current ) ) == 0 ); - - mongo_cursor_destroy( cursor ); - bson_destroy( b ); - - return 0; -} - -int main() { - - mongo conn[1]; - - INIT_SOCKETS_FOR_WINDOWS; - - if( mongo_connect( conn, TEST_SERVER, 27017 ) != MONGO_OK ) { - printf( "Failed to connect" ); - exit( 1 ); - } - - test_multiple_getmore( conn ); - test_tailable( conn ); - test_builder_api( conn ); - test_bad_query( conn ); - test_copy_cursor_data( conn ); - - mongo_destroy( conn ); - return 0; -} diff --git a/mongo-c-driver-v0.6/test/endian_swap_test.c b/mongo-c-driver-v0.6/test/endian_swap_test.c deleted file mode 100644 index a39fe80..0000000 --- a/mongo-c-driver-v0.6/test/endian_swap_test.c +++ /dev/null @@ -1,31 +0,0 @@ -/* endian_swap.c */ - -#include "test.h" -#include "bson.h" -#include - -int main() { - int small = 0x00112233; - int64_t big = 0x0011223344556677; - double d = 1.2345; - - int small_swap; - int64_t big_swap; - int64_t d_swap; - - bson_swap_endian32( &small_swap, &small ); - ASSERT( small_swap == 0x33221100 ); - bson_swap_endian32( &small, &small_swap ); - ASSERT( small == 0x00112233 ); - - bson_swap_endian64( &big_swap, &big ); - ASSERT( big_swap == 0x7766554433221100 ); - bson_swap_endian64( &big, &big_swap ); - ASSERT( big == 0x0011223344556677 ); - - bson_swap_endian64( &d_swap, &d ); - bson_swap_endian64( &d, &d_swap ); - ASSERT( d == 1.2345 ); - - return 0; -} diff --git a/mongo-c-driver-v0.6/test/env_posix_test.c b/mongo-c-driver-v0.6/test/env_posix_test.c deleted file mode 100644 index 93d76a2..0000000 --- a/mongo-c-driver-v0.6/test/env_posix_test.c +++ /dev/null @@ -1,109 +0,0 @@ -/* env_posix_test.c - * Test posix-specific features. - */ - -#include "test.h" -#include "mongo.h" -#include -#include -#include -#include - -/* Test read timeout by causing the - * server to sleep for 10s on a query. - */ -int test_read_timeout( void ) { - mongo conn[1]; - bson b, obj, out, fields; - int res; - - if ( mongo_connect( conn, TEST_SERVER, 27017 ) ) { - printf( "failed to connect\n" ); - exit( 1 ); - } - - bson_init( &b ); - bson_append_code( &b, "$where", "sleep( 10 * 1000 );"); - bson_finish( &b ); - - bson_init( &obj ); - bson_append_string( &obj, "foo", "bar"); - bson_finish( &obj ); - - res = mongo_insert( conn, "test.foo", &obj, NULL ); - - /* Set the connection timeout here. */ - mongo_set_op_timeout( conn, 1000 ); - - res = mongo_find_one( conn, "test.foo", &b, bson_empty(&fields), &out ); - ASSERT( res == MONGO_ERROR ); - - ASSERT( conn->err == MONGO_IO_ERROR ); - ASSERT( strcmp( "Resource temporarily unavailable", conn->errstr ) == 0 ); - - return 0; -} - -/* Test getaddrinfo() by successfully connecting to 'localhost'. 
*/ -int test_getaddrinfo( void ) { - mongo conn[1]; - bson b[1]; - char *ns = "test.foo"; - - if( mongo_connect( conn, "localhost", 27017 ) != MONGO_OK ) { - printf( "failed to connect\n" ); - exit( 1 ); - } - - mongo_cmd_drop_collection( conn, "test", "foo", NULL ); - - bson_init( b ); - bson_append_int( b, "foo", 17 ); - bson_finish( b ); - - mongo_insert( conn , ns , b, NULL ); - - ASSERT( mongo_count( conn, "test", "foo", NULL ) == 1 ); - - bson_destroy( b ); - mongo_destroy( conn ); - - - return 0; -} - -int test_error_messages( void ) { - mongo conn[1]; - bson b[1]; - const char *ns = "test.foo"; - - mongo_init( conn ); - - bson_init( b ); - bson_append_int( b, "foo", 17 ); - bson_finish( b ); - - ASSERT( mongo_insert( conn, ns, b, NULL ) != MONGO_OK ); - ASSERT( conn->err == MONGO_IO_ERROR ); - ASSERT( conn->errcode == ENOTSOCK ); - - mongo_init( conn ); - - ASSERT( mongo_count( conn, "test", "foo", NULL ) == MONGO_ERROR ); - ASSERT( conn->err == MONGO_IO_ERROR ); - ASSERT( conn->errcode == ENOTSOCK ); - - return 0; -} - -int main() { - char version[10]; - - if( mongo_get_server_version( version ) != -1 && version[0] != '1' ) { - test_read_timeout(); - } - test_getaddrinfo(); - test_error_messages(); - - return 0; -} diff --git a/mongo-c-driver-v0.6/test/env_win32_test.c b/mongo-c-driver-v0.6/test/env_win32_test.c deleted file mode 100644 index 035c16f..0000000 --- a/mongo-c-driver-v0.6/test/env_win32_test.c +++ /dev/null @@ -1,131 +0,0 @@ -/* env_win32_test.c - * Test WIN32-dependent features. - */ - -#include "test.h" -#include "mongo.h" -#include -#include -#include - -#ifdef _MSC_VER -#include // send,recv,socklen_t etc -#include // addrinfo -#else -#include -#include -typedef int socklen_t; -#endif - -/* Test read timeout by causing the - * server to sleep for 10s on a query. - */ -int test_read_timeout( void ) { - mongo conn[1]; - bson b, obj, out, fields; - int res; - - if ( mongo_connect( conn, TEST_SERVER, 27017 ) ) { - printf( "failed to connect\n" ); - exit( 1 ); - } - - bson_init( &b ); - bson_append_code( &b, "$where", "sleep( 10 * 1000 );"); - bson_finish( &b ); - - bson_init( &obj ); - bson_append_string( &obj, "foo", "bar"); - bson_finish( &obj ); - - res = mongo_insert( conn, "test.foo", &obj, NULL ); - - /* Set the connection timeout here. */ - - if( mongo_set_op_timeout( conn, 1000 ) != MONGO_OK ) { - printf("Could not set socket timeout!."); - exit(1); - } - - res = mongo_find_one( conn, "test.foo", &b, bson_empty(&fields), &out ); - ASSERT( res == MONGO_ERROR ); - - ASSERT( conn->err == MONGO_IO_ERROR ); - ASSERT( conn->errcode == WSAETIMEDOUT ); - - return 0; -} - -/* Test getaddrinfo() by successfully connecting to 'localhost'. 
*/ -int test_getaddrinfo( void ) { - mongo conn[1]; - bson b[1]; - const char *ns = "test.foo"; - const char *errmsg = "getaddrinfo failed"; - - if( mongo_connect( conn, "badhost", 27017 ) == MONGO_OK ) { - printf( "connected to bad host!\n" ); - exit( 1 ); - } else { - ASSERT( strncmp( errmsg, conn->errstr, strlen( errmsg ) ) == 0 ); - } - - - if( mongo_connect( conn, "localhost", 27017 ) != MONGO_OK ) { - printf( "failed to connect\n" ); - exit( 1 ); - } - - mongo_cmd_drop_collection( conn, "test", "foo", NULL ); - - bson_init( b ); - bson_append_int( b, "foo", 17 ); - bson_finish( b ); - - mongo_insert( conn , ns , b, NULL ); - - ASSERT( mongo_count( conn, "test", "foo", NULL ) == 1 ); - - bson_destroy( b ); - mongo_destroy( conn ); - - - return 0; -} - -int test_error_messages( void ) { - mongo conn[1]; - bson b[1]; - const char *ns = "test.foo"; - - mongo_init( conn ); - - bson_init( b ); - bson_append_int( b, "foo", 17 ); - bson_finish( b ); - - ASSERT( mongo_insert( conn, ns, b, NULL ) != MONGO_OK ); - ASSERT( conn->err == MONGO_IO_ERROR ); - ASSERT( conn->errcode == WSAENOTSOCK ); - - mongo_init( conn ); - - ASSERT( mongo_count( conn, "test", "foo", NULL ) == MONGO_ERROR ); - ASSERT( conn->err == MONGO_IO_ERROR ); - ASSERT( conn->errcode == WSAENOTSOCK ); - - return 0; -} - -int main() { - char version[10]; - INIT_SOCKETS_FOR_WINDOWS; - - if( mongo_get_server_version( version ) != -1 && version[0] != '1' ) { - test_read_timeout(); - } - test_getaddrinfo(); - test_error_messages(); - - return 0; -} diff --git a/mongo-c-driver-v0.6/test/errors_test.c b/mongo-c-driver-v0.6/test/errors_test.c deleted file mode 100644 index ebbac88..0000000 --- a/mongo-c-driver-v0.6/test/errors_test.c +++ /dev/null @@ -1,259 +0,0 @@ -#include "test.h" -#include "mongo.h" -#include -#include -#include - -static const char *db = "test"; -static const char *ns = "test.c.error"; - -int test_namespace_validation() { - mongo conn[1]; - char longns[130] = "test.foo"; - int i; - - mongo_init( conn ); - - /* Test a few legal namespaces. */ - ASSERT( mongo_validate_ns( conn, "test.foo" ) == MONGO_OK ); - ASSERT( conn->err == 0 ); - - ASSERT( mongo_validate_ns( conn, "test.foo.bar" ) == MONGO_OK ); - ASSERT( conn->err == 0 ); - - /* Test illegal namespaces. */ - ASSERT( mongo_validate_ns( conn, ".test.foo" ) == MONGO_ERROR ); - ASSERT( conn->err == MONGO_NS_INVALID ); - ASSERT( strncmp( conn->errstr, "ns cannot start with", 20 ) == 0 ); - mongo_clear_errors( conn ); - - ASSERT( mongo_validate_ns( conn, "test..foo" ) == MONGO_ERROR ); - ASSERT( conn->err == MONGO_NS_INVALID ); - ASSERT( strncmp( conn->errstr, "ns cannot start with", 20 ) == 0 ); - mongo_clear_errors( conn ); - - ASSERT( mongo_validate_ns( conn, "test" ) == MONGO_ERROR ); - ASSERT( conn->err == MONGO_NS_INVALID ); - ASSERT( strncmp( conn->errstr, "ns cannot start with", 20 ) == 0 ); - mongo_clear_errors( conn ); - - ASSERT( mongo_validate_ns( conn, "." 
) == MONGO_ERROR ); - ASSERT( conn->err == MONGO_NS_INVALID ); - ASSERT( strncmp( conn->errstr, "ns cannot start with", 20 ) == 0 ); - mongo_clear_errors( conn ); - - ASSERT( mongo_validate_ns( conn, "tes t.foo" ) == MONGO_ERROR ); - ASSERT( conn->err == MONGO_NS_INVALID ); - ASSERT( strncmp( conn->errstr, "Database name may not contain", 28 ) == 0 ); - mongo_clear_errors( conn ); - - ASSERT( mongo_validate_ns( conn, "te$st.foo" ) == MONGO_ERROR ); - ASSERT( conn->err == MONGO_NS_INVALID ); - ASSERT( strncmp( conn->errstr, "Database name may not contain", 28 ) == 0 ); - mongo_clear_errors( conn ); - - ASSERT( mongo_validate_ns( conn, "te/st.foo" ) == MONGO_ERROR ); - ASSERT( conn->err == MONGO_NS_INVALID ); - ASSERT( strncmp( conn->errstr, "Database name may not contain", 28 ) == 0 ); - mongo_clear_errors( conn ); - - ASSERT( mongo_validate_ns( conn, "te\\st.foo" ) == MONGO_ERROR ); - ASSERT( conn->err == MONGO_NS_INVALID ); - ASSERT( strncmp( conn->errstr, "Database name may not contain", 28 ) == 0 ); - mongo_clear_errors( conn ); - - ASSERT( mongo_validate_ns( conn, "test.fo$o" ) == MONGO_ERROR ); - ASSERT( conn->err == MONGO_NS_INVALID ); - ASSERT( strncmp( conn->errstr, "Collection may not contain '$'", 29 ) == 0 ); - mongo_clear_errors( conn ); - - ASSERT( mongo_validate_ns( conn, "test.fo..o" ) == MONGO_ERROR ); - ASSERT( conn->err == MONGO_NS_INVALID ); - ASSERT( strncmp( conn->errstr, "Collection may not contain two consecutive '.'", 46 ) == 0 ); - mongo_clear_errors( conn ); - - ASSERT( mongo_validate_ns( conn, "test.fo.o." ) == MONGO_ERROR ); - ASSERT( conn->err == MONGO_NS_INVALID ); - ASSERT( strncmp( conn->errstr, "Collection may not end with '.'", 30 ) == 0 ); - mongo_clear_errors( conn ); - - for(i = 8; i < 129; i++ ) - longns[i] = 'a'; - longns[129] = '\0'; - - ASSERT( mongo_validate_ns( conn, longns ) == MONGO_ERROR ); - ASSERT( conn->err == MONGO_NS_INVALID ); - ASSERT( strncmp( conn->errstr, "Namespace too long; has 129 but must <= 128.", 32 ) == 0 ); - mongo_clear_errors( conn ); - - return 0; -} - -int test_namespace_validation_on_insert( void ) { - mongo conn[1]; - bson b[1], b2[1]; - bson *objs[2]; - - INIT_SOCKETS_FOR_WINDOWS; - - if ( mongo_connect( conn , TEST_SERVER, 27017 ) ) { - printf( "failed to connect\n" ); - exit( 1 ); - } - - bson_init( b ); - bson_append_int( b, "foo", 1 ); - bson_finish( b ); - - ASSERT( mongo_insert( conn, "tet.fo$o", b, NULL ) == MONGO_ERROR ); - ASSERT( conn->err == MONGO_NS_INVALID ); - ASSERT( strncmp( conn->errstr, "Collection may not contain '$'", 29 ) == 0 ); - mongo_clear_errors( conn ); - - bson_init( b2 ); - bson_append_int( b2, "foo", 1 ); - bson_finish( b2 ); - - objs[0] = b; - objs[1] = b2; - - ASSERT( mongo_insert_batch( conn, "tet.fo$o", - (const bson **)objs, 2, NULL, 0 ) == MONGO_ERROR ); - ASSERT( conn->err == MONGO_NS_INVALID ); - ASSERT( strncmp( conn->errstr, "Collection may not contain '$'", 29 ) == 0 ); - - return 0; -} - -int test_insert_limits( void ) { - char version[10]; - mongo conn[1]; - int i; - char key[10]; - bson b[1], b2[1]; - bson *objs[2]; - - /* Test the default max BSON size. */ - mongo_init( conn ); - ASSERT( conn->max_bson_size == MONGO_DEFAULT_MAX_BSON_SIZE ); - - /* We'll perform the full test if we're running v2.0 or later. 
*/ - if( mongo_get_server_version( version ) != -1 && version[0] <= '1' ) - return 0; - - if ( mongo_connect( conn , TEST_SERVER, 27017 ) ) { - printf( "failed to connect\n" ); - exit( 1 ); - } - - ASSERT( conn->max_bson_size > MONGO_DEFAULT_MAX_BSON_SIZE ); - - bson_init( b ); - for(i=0; i<1200000; i++) { - sprintf( key, "%d", i + 10000000 ); - bson_append_int( b, key, i ); - } - bson_finish( b ); - - ASSERT( bson_size( b ) > conn->max_bson_size ); - - ASSERT( mongo_insert( conn, "test.foo", b, NULL ) == MONGO_ERROR ); - ASSERT( conn->err == MONGO_BSON_TOO_LARGE ); - - mongo_clear_errors( conn ); - ASSERT( conn->err == 0 ); - - bson_init( b2 ); - bson_append_int( b2, "foo", 1 ); - bson_finish( b2 ); - - objs[0] = b; - objs[1] = b2; - - ASSERT( mongo_insert_batch( conn, "test.foo", (const bson **)objs, 2, - NULL, 0 ) == MONGO_ERROR ); - ASSERT( conn->err == MONGO_BSON_TOO_LARGE ); - - return 0; -} - -int test_get_last_error_commands( void ) { - mongo conn[1]; - bson obj; - - INIT_SOCKETS_FOR_WINDOWS; - - if ( mongo_connect( conn , TEST_SERVER, 27017 ) ) { - printf( "failed to connect\n" ); - exit( 1 ); - } - - /*********************/ - ASSERT( mongo_cmd_get_prev_error( conn, db, NULL ) == MONGO_OK ); - ASSERT( conn->lasterrcode == 0 ); - ASSERT( conn->lasterrstr[0] == 0 ); - - ASSERT( mongo_cmd_get_last_error( conn, db, NULL ) == MONGO_OK ); - ASSERT( conn->lasterrcode == 0 ); - ASSERT( conn->lasterrstr[0] == 0 ); - - ASSERT( mongo_cmd_get_prev_error( conn, db, &obj ) == MONGO_OK ); - bson_destroy( &obj ); - - ASSERT( mongo_cmd_get_last_error( conn, db, &obj ) == MONGO_OK ); - bson_destroy( &obj ); - - /*********************/ - mongo_simple_int_command( conn, db, "forceerror", 1, NULL ); - - ASSERT( mongo_cmd_get_prev_error( conn, db, NULL ) == MONGO_ERROR ); - ASSERT( conn->lasterrcode == 10038 ); - ASSERT( strcmp( ( const char * )conn->lasterrstr, "forced error" ) == 0 ); - - ASSERT( mongo_cmd_get_last_error( conn, db, NULL ) == MONGO_ERROR ); - - ASSERT( mongo_cmd_get_prev_error( conn, db, &obj ) == MONGO_ERROR ); - bson_destroy( &obj ); - - ASSERT( mongo_cmd_get_last_error( conn, db, &obj ) == MONGO_ERROR ); - bson_destroy( &obj ); - - /* should clear lasterror but not preverror */ - mongo_find_one( conn, ns, bson_empty( &obj ), bson_empty( &obj ), NULL ); - - ASSERT( mongo_cmd_get_prev_error( conn, db, NULL ) == MONGO_ERROR ); - ASSERT( mongo_cmd_get_last_error( conn, db, NULL ) == MONGO_OK ); - - ASSERT( mongo_cmd_get_prev_error( conn, db, &obj ) == MONGO_ERROR ); - bson_destroy( &obj ); - - ASSERT( mongo_cmd_get_last_error( conn, db, &obj ) == MONGO_OK ); - bson_destroy( &obj ); - - /*********************/ - mongo_cmd_reset_error( conn, db ); - - ASSERT( mongo_cmd_get_prev_error( conn, db, NULL ) == MONGO_OK ); - ASSERT( mongo_cmd_get_last_error( conn, db, NULL ) == MONGO_OK ); - - ASSERT( mongo_cmd_get_prev_error( conn, db, &obj ) == MONGO_OK ); - bson_destroy( &obj ); - - ASSERT( mongo_cmd_get_last_error( conn, db, &obj ) == MONGO_OK ); - bson_destroy( &obj ); - - - mongo_cmd_drop_db( conn, db ); - mongo_destroy( conn ); - - return 0; -} - -int main() { - test_get_last_error_commands(); - test_insert_limits(); - test_namespace_validation(); - test_namespace_validation_on_insert(); - - return 0; -} diff --git a/mongo-c-driver-v0.6/test/examples_test.c b/mongo-c-driver-v0.6/test/examples_test.c deleted file mode 100644 index f023c50..0000000 --- a/mongo-c-driver-v0.6/test/examples_test.c +++ /dev/null @@ -1,71 +0,0 @@ -#include "test.h" -#include "bson.h" -#include -#include 
-#include - -int main() { - bson b, sub; - bson_iterator it; - - /* Create a rich document like this one: - * - * { _id: ObjectId("4d95ea712b752328eb2fc2cc"), - * user_id: ObjectId("4d95ea712b752328eb2fc2cd"), - * - * items: [ - * { sku: "col-123", - * name: "John Coltrane: Impressions", - * price: 1099, - * }, - * - * { sku: "young-456", - * name: "Larry Young: Unity", - * price: 1199 - * } - * ], - * - * address: { - * street: "59 18th St.", - * zip: 10010 - * }, - * - * total: 2298 - * } - */ - bson_init( &b ); - bson_append_new_oid( &b, "_id" ); - bson_append_new_oid( &b, "user_id" ); - - bson_append_start_array( &b, "items" ); - bson_append_start_object( &b, "0" ); - bson_append_string( &b, "name", "John Coltrane: Impressions" ); - bson_append_int( &b, "price", 1099 ); - bson_append_finish_object( &b ); - - bson_append_start_object( &b, "1" ); - bson_append_string( &b, "name", "Larry Young: Unity" ); - bson_append_int( &b, "price", 1199 ); - bson_append_finish_object( &b ); - bson_append_finish_object( &b ); - - bson_append_start_object( &b, "address" ); - bson_append_string( &b, "street", "59 18th St." ); - bson_append_int( &b, "zip", 10010 ); - bson_append_finish_object( &b ); - - bson_append_int( &b, "total", 2298 ); - - bson_finish( &b ); - - /* Advance to the 'items' array */ - bson_find( &it, &b, "items" ); - - /* Get the subobject representing items */ - bson_iterator_subobject( &it, &sub ); - - /* Now iterate that object */ - bson_print( &sub ); - - return 0; -} diff --git a/mongo-c-driver-v0.6/test/functions_test.c b/mongo-c-driver-v0.6/test/functions_test.c deleted file mode 100644 index dc3a909..0000000 --- a/mongo-c-driver-v0.6/test/functions_test.c +++ /dev/null @@ -1,113 +0,0 @@ -/* functions.c */ - -#ifndef _WIN32 -#include "test.h" -#include "mongo.h" -#include -#include -#include -#include - -int test_value = 0; - -void *my_malloc( size_t size ) { - test_value = 1; - return malloc( size ); -} - -void *my_realloc( void *ptr, size_t size ) { - test_value = 2; - return realloc( ptr, size ); -} - -void my_free( void *ptr ) { - test_value = 3; - free( ptr ); -} - -int my_printf( const char *format, ... ) { - int ret = 0; - test_value = 4; - - return ret; -} - -int my_fprintf( FILE *fp, const char *format, ... ) { - int ret = 0; - test_value = 5; - - return ret; -} - -int my_sprintf( char *s, const char *format, ... ) { - int ret = 0; - test_value = 6; - - return ret; -} - -int my_errprintf( const char *format, ... 
) { - int ret = 0; - test_value = 7; - - return ret; -} - -int main() { - - void *ptr; - char str[32]; - int size = 256; - - ptr = bson_malloc( size ); - ASSERT( test_value == 0 ); - ptr = bson_realloc( ptr, size + 64 ); - ASSERT( test_value == 0 ); - bson_free( ptr ); - ASSERT( test_value == 0 ); - - bson_malloc_func = my_malloc; - bson_realloc_func = my_realloc; - bson_free = my_free; - - ptr = bson_malloc( size ); - ASSERT( test_value == 1 ); - ptr = bson_realloc( ptr, size + 64 ); - ASSERT( test_value == 2 ); - bson_free( ptr ); - ASSERT( test_value == 3 ); - - test_value = 0; - - bson_printf( "Test printf %d\n", test_value ); - ASSERT( test_value == 0 ); - bson_fprintf( stdout, "Test fprintf %d\n", test_value ); - ASSERT( test_value == 0 ); - bson_sprintf( str, "Test sprintf %d\n", test_value ); - printf( "%s", str ); - ASSERT( test_value == 0 ); - bson_errprintf( "Test err %d\n", test_value ); - ASSERT( test_value == 0 ); - - bson_printf = my_printf; - bson_sprintf = my_sprintf; - bson_fprintf = my_fprintf; - bson_errprintf = my_errprintf; - - bson_printf( "Test %d\n", test_value ); - ASSERT( test_value == 4 ); - bson_fprintf( stdout, "Test %d\n", test_value ); - ASSERT( test_value == 5 ); - bson_sprintf( str, "Test %d\n", test_value ); - ASSERT( test_value == 6 ); - bson_printf( "Str: %s\n", str ); - bson_errprintf( "Test %d\n", test_value ); - ASSERT( test_value == 7 ); - - return 0; -} -#else -int main() { - return 0; -} -#endif diff --git a/mongo-c-driver-v0.6/test/gridfs_test.c b/mongo-c-driver-v0.6/test/gridfs_test.c deleted file mode 100644 index 95ed620..0000000 --- a/mongo-c-driver-v0.6/test/gridfs_test.c +++ /dev/null @@ -1,263 +0,0 @@ -#include "test.h" -#include "md5.h" -#include "mongo.h" -#include "gridfs.h" -#include -#include -#include -#include -#ifndef _WIN32 -#include -#endif - -#define LARGE 3*1024*1024 -#define UPPER 2000*1024 -#define MEDIUM 1024*512 -#define LOWER 1024*128 -#define DELTA 1024*128 - -void fill_buffer_randomly( char *data, int64_t length ) { - int64_t i; - int random; - char *letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"; - int nletters = strlen( letters )+1; - - for ( i = 0; i < length; i++ ) { - random = rand() % nletters; - *( data + i ) = letters[random]; - } -} - -static void digest2hex( mongo_md5_byte_t digest[16], char hex_digest[33] ) { - static const char hex[16] = {'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'}; - int i; - for ( i=0; i<16; i++ ) { - hex_digest[2*i] = hex[( digest[i] & 0xf0 ) >> 4]; - hex_digest[2*i + 1] = hex[ digest[i] & 0x0f ]; - } - hex_digest[32] = '\0'; -} - -void test_gridfile( gridfs *gfs, char *data_before, int64_t length, char *filename, char *content_type ) { - gridfile gfile[1]; - FILE *stream; - mongo_md5_state_t pms[1]; - mongo_md5_byte_t digest[16]; - char hex_digest[33]; - int64_t i = length; - int n; - char *data_after = bson_malloc( LARGE ); - - gridfs_find_filename( gfs, filename, gfile ); - ASSERT( gridfile_exists( gfile ) ); - - stream = fopen( "output", "w+" ); - gridfile_write_file( gfile, stream ); - fseek( stream, 0, SEEK_SET ); - ASSERT( fread( data_after, length, sizeof( char ), stream ) ); - fclose( stream ); - ASSERT( strncmp( data_before, data_after, length ) == 0 ); - - gridfile_read( gfile, length, data_after ); - ASSERT( strncmp( data_before, data_after, length ) == 0 ); - - ASSERT( strcmp( gridfile_get_filename( gfile ), filename ) == 0 ); - - ASSERT( gridfile_get_contentlength( gfile ) == length ); - - ASSERT( gridfile_get_chunksize( gfile ) == 
DEFAULT_CHUNK_SIZE ); - - ASSERT( strcmp( gridfile_get_contenttype( gfile ), content_type ) == 0 ) ; - - ASSERT( strncmp( data_before, data_after, length ) == 0 ); - - mongo_md5_init( pms ); - - n = 0; - while( i > INT_MAX ) { - mongo_md5_append( pms, ( const mongo_md5_byte_t * )data_before + ( n * INT_MAX ), INT_MAX ); - i -= INT_MAX; - n += 1; - } - if( i > 0 ) - mongo_md5_append( pms, ( const mongo_md5_byte_t * )data_before + ( n * INT_MAX ), i ); - - mongo_md5_finish( pms, digest ); - digest2hex( digest, hex_digest ); - ASSERT( strcmp( gridfile_get_md5( gfile ), hex_digest ) == 0 ); - - gridfile_destroy( gfile ); - gridfs_remove_filename( gfs, filename ); - free( data_after ); - unlink( "output" ); -} - -void test_basic() { - mongo conn[1]; - gridfs gfs[1]; - char *data_before = bson_malloc( UPPER ); - int64_t i; - FILE *fd; - - srand( time( NULL ) ); - - INIT_SOCKETS_FOR_WINDOWS; - - if ( mongo_connect( conn, TEST_SERVER, 27017 ) ) { - printf( "failed to connect 2\n" ); - exit( 1 ); - } - - gridfs_init( conn, "test", "fs", gfs ); - - fill_buffer_randomly( data_before, UPPER ); - for ( i = LOWER; i <= UPPER; i += DELTA ) { - - /* Input from buffer */ - gridfs_store_buffer( gfs, data_before, i, "input-buffer", "text/html" ); - test_gridfile( gfs, data_before, i, "input-buffer", "text/html" ); - - /* Input from file */ - fd = fopen( "input-file", "w" ); - fwrite( data_before, sizeof( char ), i, fd ); - fclose( fd ); - gridfs_store_file( gfs, "input-file", "input-file", "text/html" ); - test_gridfile( gfs, data_before, i, "input-file", "text/html" ); - } - - gridfs_destroy( gfs ); - mongo_disconnect( conn ); - mongo_destroy( conn ); - free( data_before ); - - /* Clean up files. */ - unlink( "input-file" ); - unlink( "output" ); -} - -void test_streaming() { - mongo conn[1]; - gridfs gfs[1]; - gridfile gfile[1]; - char *medium = bson_malloc( 2*MEDIUM ); - char *small = bson_malloc( LOWER ); - char *buf = bson_malloc( LARGE ); - int n; - - if( buf == NULL || small == NULL ) { - printf( "Failed to allocate" ); - exit( 1 ); - } - - srand( time( NULL ) ); - - INIT_SOCKETS_FOR_WINDOWS; - - if ( mongo_connect( conn , TEST_SERVER, 27017 ) ) { - printf( "failed to connect 3\n" ); - exit( 1 ); - } - - fill_buffer_randomly( medium, ( int64_t )2 * MEDIUM ); - fill_buffer_randomly( small, ( int64_t )LOWER ); - fill_buffer_randomly( buf, ( int64_t )LARGE ); - - gridfs_init( conn, "test", "fs", gfs ); - gridfile_writer_init( gfile, gfs, "medium", "text/html" ); - - gridfile_write_buffer( gfile, medium, MEDIUM ); - gridfile_write_buffer( gfile, medium + MEDIUM, MEDIUM ); - gridfile_writer_done( gfile ); - test_gridfile( gfs, medium, 2 * MEDIUM, "medium", "text/html" ); - gridfs_destroy( gfs ); - - gridfs_init( conn, "test", "fs", gfs ); - - gridfs_store_buffer( gfs, small, LOWER, "small", "text/html" ); - test_gridfile( gfs, small, LOWER, "small", "text/html" ); - gridfs_destroy( gfs ); - - gridfs_init( conn, "test", "fs", gfs ); - gridfile_writer_init( gfile, gfs, "large", "text/html" ); - for( n=0; n < ( LARGE / 1024 ); n++ ) { - gridfile_write_buffer( gfile, buf + ( n * 1024 ), 1024 ); - } - gridfile_writer_done( gfile ); - test_gridfile( gfs, buf, LARGE, "large", "text/html" ); - - gridfs_destroy( gfs ); - mongo_destroy( conn ); - free( buf ); - free( small ); -} - -void test_large() { - mongo conn[1]; - gridfs gfs[1]; - gridfile gfile[1]; - FILE *fd; - int i, n; - char *buffer = bson_malloc( LARGE ); - int64_t filesize = ( int64_t )1024 * ( int64_t )LARGE; - - srand( time( NULL ) ); - - 
INIT_SOCKETS_FOR_WINDOWS; - - if ( mongo_connect( conn, TEST_SERVER, 27017 ) ) { - printf( "failed to connect 1\n" ); - exit( 1 ); - } - - gridfs_init( conn, "test", "fs", gfs ); - - /* Create a very large file */ - fill_buffer_randomly( buffer, ( int64_t )LARGE ); - fd = fopen( "bigfile", "w" ); - for( i=0; i<1024; i++ ) { - fwrite( buffer, 1, LARGE, fd ); - } - fclose( fd ); - - /* Now read the file into GridFS */ - gridfs_store_file( gfs, "bigfile", "bigfile", "text/html" ); - - gridfs_find_filename( gfs, "bigfile", gfile ); - - ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile" ) == 0 ); - ASSERT( gridfile_get_contentlength( gfile ) == filesize ); - - /* Read the file using the streaming interface */ - gridfile_writer_init( gfile, gfs, "bigfile-stream", "text/html" ); - - fd = fopen( "bigfile", "r" ); - - while( ( n = fread( buffer, 1, 1024, fd ) ) != 0 ) { - gridfile_write_buffer( gfile, buffer, n ); - } - gridfile_writer_done( gfile ); - - gridfs_find_filename( gfs, "bigfile-stream", gfile ); - - ASSERT( strcmp( gridfile_get_filename( gfile ), "bigfile-stream" ) == 0 ); - ASSERT( gridfile_get_contentlength( gfile ) == filesize ); - - gridfs_destroy( gfs ); - mongo_disconnect( conn ); - mongo_destroy( conn ); -} - -int main( void ) { -/* See https://jira.mongodb.org/browse/CDRIVER-126 - * on why we exclude this test from running on WIN32 */ -#ifndef _WIN32 - test_basic(); - test_streaming(); -#endif - - /* Normally not necessary to run test_large(), as it - * deals with very large (3GB) files and is therefore slow. - * test_large(); - */ - return 0; -} diff --git a/mongo-c-driver-v0.6/test/helpers_test.c b/mongo-c-driver-v0.6/test/helpers_test.c deleted file mode 100644 index 8c7d929..0000000 --- a/mongo-c-driver-v0.6/test/helpers_test.c +++ /dev/null @@ -1,55 +0,0 @@ -/* helpers.c */ - -#include "test.h" -#include "mongo.h" -#include -#include -#include -#include - -void test_index_helper( mongo *conn ) { - - bson b, out; - bson_iterator it; - - bson_init( &b ); - bson_append_int( &b, "foo", 1 ); - bson_finish( &b ); - - mongo_create_index( conn, "test.bar", &b, MONGO_INDEX_SPARSE | MONGO_INDEX_UNIQUE, &out ); - - bson_destroy( &b ); - - bson_init( &b ); - bson_append_start_object( &b, "key" ); - bson_append_int( &b, "foo", 1 ); - bson_append_finish_object( &b ); - - bson_finish( &b ); - - mongo_find_one( conn, "test.system.indexes", &b, NULL, &out ); - - bson_print( &out ); - - bson_iterator_init( &it, &out ); - - ASSERT( bson_find( &it, &out, "unique" ) ); - ASSERT( bson_find( &it, &out, "sparse" ) ); -} - -int main() { - - mongo conn[1]; - - INIT_SOCKETS_FOR_WINDOWS; - - if( mongo_connect( conn, TEST_SERVER, 27017 ) != MONGO_OK ) { - printf( "Failed to connect" ); - exit( 1 ); - } - - - test_index_helper( conn ); - - return 0; -} diff --git a/mongo-c-driver-v0.6/test/json_test.c b/mongo-c-driver-v0.6/test/json_test.c deleted file mode 100644 index 2ce0bde..0000000 --- a/mongo-c-driver-v0.6/test/json_test.c +++ /dev/null @@ -1,169 +0,0 @@ -/* testjson.c */ - -#include "test.h" -#include -#include - -#include "mongo.h" -#include "json/json.h" -#include "md5.h" - -void json_to_bson_append_element( bson_buffer *bb , const char *k , struct json_object *v ); - -/** - should already have called start_array - this will not call start/finish - */ -void json_to_bson_append_array( bson_buffer *bb , struct json_object *a ) { - int i; - char buf[10]; - for ( i=0; i -#include -#include -#include - -int increment( void ) { - static int i = 1000; - i++; - return i; -} - -int fuzz( void ) { 
- return 50000; -} - -/* Test custom increment and fuzz functions. */ -int main() { - - bson_oid_t o; - int res; - - bson_set_oid_inc( increment ); - bson_set_oid_fuzz( fuzz ); - - bson_oid_gen( &o ); - bson_big_endian32( &res, &( o.ints[2] ) ); - - ASSERT( o.ints[1] == 50000 ); - ASSERT( res == 1001 ); - - return 0; -} diff --git a/mongo-c-driver-v0.6/test/platform/linux/timeouts.c b/mongo-c-driver-v0.6/test/platform/linux/timeouts.c deleted file mode 100644 index a54fb15..0000000 --- a/mongo-c-driver-v0.6/test/platform/linux/timeouts.c +++ /dev/null @@ -1,37 +0,0 @@ -/* timeouts.c */ - -#include "../../test.h" -#include "mongo.h" -#include -#include -#include -#include - -int main() { - - mongo conn[1]; - bson b; - int res; - - if( mongo_connect( conn, TEST_SERVER, 27017 ) != MONGO_OK ) { - printf("Failed to connect"); - exit(1); - } - - res = mongo_simple_str_command( conn, "test", "$eval", - "for(i=0; i<100000; i++) { db.foo.find() }", &b ); - - ASSERT( res == MONGO_OK ); - - /* 50ms timeout */ - mongo_set_op_timeout( conn, 50 ); - - ASSERT( conn->err == 0 ); - res = mongo_simple_str_command( conn, "test", "$eval", - "for(i=0; i<100000; i++) { db.foo.find() }", &b ); - - ASSERT( res == MONGO_ERROR ); - ASSERT( conn->err == MONGO_IO_ERROR ); - - return 0; -} diff --git a/mongo-c-driver-v0.6/test/replica_set_test.c b/mongo-c-driver-v0.6/test/replica_set_test.c deleted file mode 100644 index da64679..0000000 --- a/mongo-c-driver-v0.6/test/replica_set_test.c +++ /dev/null @@ -1,154 +0,0 @@ -/* test.c */ - -#include "test.h" -#include "mongo.h" -#include -#include -#include -#include - -#ifndef SEED_START_PORT -#define SEED_START_PORT 30000 -#endif - -#ifndef REPLICA_SET_NAME -#define REPLICA_SET_NAME "replica-set-foo" -#endif - -int test_connect( const char *set_name ) { - - mongo conn[1]; - int res; - - INIT_SOCKETS_FOR_WINDOWS; - - mongo_replset_init( conn, set_name ); - mongo_replset_add_seed( conn, TEST_SERVER, SEED_START_PORT + 1 ); - mongo_replset_add_seed( conn, TEST_SERVER, SEED_START_PORT ); - - res = mongo_replset_connect( conn ); - - if( res != MONGO_OK ) { - res = conn->err; - return res; - } - - ASSERT( conn->primary->port == SEED_START_PORT || - conn->primary->port == SEED_START_PORT + 1 || - conn->primary->port == SEED_START_PORT + 2 ); - - mongo_destroy( conn ); - return res; -} - -int test_reconnect( const char *set_name ) { - - mongo conn[1]; - int res = 0; - int e = 0; - bson b; - - INIT_SOCKETS_FOR_WINDOWS; - - mongo_replset_init( conn, set_name ); - mongo_replset_add_seed( conn, TEST_SERVER, SEED_START_PORT ); - mongo_replset_add_seed( conn, TEST_SERVER, SEED_START_PORT + 1 ); - - - if( ( mongo_replset_connect( conn ) != MONGO_OK ) ) { - mongo_destroy( conn ); - return MONGO_ERROR; - } else { - fprintf( stderr, "Disconnect now:\n" ); - sleep( 10 ); - e = 1; - do { - res = mongo_find_one( conn, "foo.bar", bson_empty( &b ), bson_empty( &b ), NULL ); - if( res == MONGO_ERROR && conn->err == MONGO_IO_ERROR ) { - sleep( 2 ); - if( e++ < 30 ) { - fprintf( stderr, "Attempting reconnect %d.\n", e ); - mongo_reconnect( conn ); - } else { - fprintf( stderr, "Fail.\n" ); - return -1; - } - } - } while( 1 ); - } - - - return 0; -} - -int test_insert_limits( const char *set_name ) { - char version[10]; - mongo conn[1]; - mongo_write_concern wc[1]; - int i; - char key[10]; - int res = 0; - bson b[1], b2[1]; - bson *objs[2]; - - mongo_write_concern_init( wc ); - wc->w = 1; - mongo_write_concern_finish( wc ); - - /* We'll perform the full test if we're running v2.0 or later. 
*/ - if( mongo_get_server_version( version ) != -1 && version[0] <= '1' ) - return 0; - - mongo_replset_init( conn, set_name ); - mongo_replset_add_seed( conn, TEST_SERVER, SEED_START_PORT + 1 ); - mongo_replset_add_seed( conn, TEST_SERVER, SEED_START_PORT ); - res = mongo_replset_connect( conn ); - - if( res != MONGO_OK ) { - res = conn->err; - return res; - } - - ASSERT( conn->max_bson_size > MONGO_DEFAULT_MAX_BSON_SIZE ); - - bson_init( b ); - for(i=0; i<1200000; i++) { - sprintf( key, "%d", i + 10000000 ); - bson_append_int( b, key, i ); - } - bson_finish( b ); - - ASSERT( bson_size( b ) > conn->max_bson_size ); - - ASSERT( mongo_insert( conn, "test.foo", b, wc ) == MONGO_ERROR ); - ASSERT( conn->err == MONGO_BSON_TOO_LARGE ); - - mongo_clear_errors( conn ); - ASSERT( conn->err == 0 ); - - bson_init( b2 ); - bson_append_int( b2, "foo", 1 ); - bson_finish( b2 ); - - objs[0] = b; - objs[1] = b2; - - ASSERT( mongo_insert_batch( conn, "test.foo", (const bson**)objs, 2, wc, 0 ) == MONGO_ERROR ); - ASSERT( conn->err == MONGO_BSON_TOO_LARGE ); - - mongo_write_concern_destroy( wc ); - - return 0; -} - -int main() { - ASSERT( test_connect( REPLICA_SET_NAME ) == MONGO_OK ); - ASSERT( test_connect( "test-foobar" ) == MONGO_CONN_BAD_SET_NAME ); - ASSERT( test_insert_limits( REPLICA_SET_NAME ) == MONGO_OK ); - - /* - ASSERT( test_reconnect( "test-rs" ) == 0 ); - */ - - return 0; -} diff --git a/mongo-c-driver-v0.6/test/resize_test.c b/mongo-c-driver-v0.6/test/resize_test.c deleted file mode 100644 index 33d433f..0000000 --- a/mongo-c-driver-v0.6/test/resize_test.c +++ /dev/null @@ -1,34 +0,0 @@ -/* resize.c */ - -#include "test.h" -#include "bson.h" -#include - -/* 64 Xs */ -const char *bigstring = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"; - -int main() { - bson b; - - bson_init( &b ); - bson_append_string( &b, "a", bigstring ); - bson_append_start_object( &b, "sub" ); - bson_append_string( &b,"a", bigstring ); - bson_append_start_object( &b, "sub" ); - bson_append_string( &b,"a", bigstring ); - bson_append_start_object( &b, "sub" ); - bson_append_string( &b,"a", bigstring ); - bson_append_string( &b,"b", bigstring ); - bson_append_string( &b,"c", bigstring ); - bson_append_string( &b,"d", bigstring ); - bson_append_string( &b,"e", bigstring ); - bson_append_string( &b,"f", bigstring ); - bson_append_finish_object( &b ); - bson_append_finish_object( &b ); - bson_append_finish_object( &b ); - bson_finish( &b ); - - /* bson_print(&b); */ - bson_destroy( &b ); - return 0; -} diff --git a/mongo-c-driver-v0.6/test/simple_test.c b/mongo-c-driver-v0.6/test/simple_test.c deleted file mode 100644 index e3e0d28..0000000 --- a/mongo-c-driver-v0.6/test/simple_test.c +++ /dev/null @@ -1,159 +0,0 @@ -/* test.c */ - -#include "test.h" -#include "mongo.h" -#include "env.h" -#include -#include -#include - -int main() { - mongo conn[1]; - mongo_cursor cursor[1]; - bson b; - int i; - char hex_oid[25]; - bson_timestamp_t ts = { 1, 2 }; - - const char *col = "c.simple"; - const char *ns = "test.c.simple"; - - /* mongo_connect( conn, TEST_SERVER, 27017 ); */ - - /* Simple connect API - mongo conn[1]; - - mongo_init( conn ); - mongo_connect( conn, TEST_SERVER, 27017 ); - mongo_destroy( conn ); - - * Advanced and replica set API - mongo conn[1]; - - mongo_replset_init( conn, "foobar" ); - mongo_set_connect_timeout( conn, 1000 ); - mongo_replset_connect( conn ); - mongo_destroy( conn ); - - * BSON API - bson obj[1]; - - bson_init( obj ); - bson_append_int( obj, "a", 1 ); - bson_finish( obj ); - 
mongo_insert( conn, obj ); - bson_destroy( obj ); - - * BSON Iterator API - bson_iterator i[1]; - - bson_iterator_init( i, b ); - - * Cursor API - mongo_cursor cursor[1]; - - mongo_cursor_init( cursor, "test.ns" ); - mongo_cursor_limit( cursor, 100 ); - mongo_cursor_skip( cursor, 100 ); - mongo_cursor_query( cursor, &query ); - mongo_cursor_fields( cursor, &fields ); - data = mongo_cursor_next( cursor ); - mongo_cursor_destroy( cursor ); - */ - - INIT_SOCKETS_FOR_WINDOWS; - - if( mongo_connect( conn , TEST_SERVER, 27017 ) != MONGO_OK ) { - printf( "failed to connect\n" ); - exit( 1 ); - } - - mongo_cmd_drop_collection( conn, "test", col, NULL ); - mongo_find_one( conn, ns, bson_empty( &b ), bson_empty( &b ), NULL ); - - for( i=0; i< 5; i++ ) { - bson_init( &b ); - - bson_append_new_oid( &b, "_id" ); - bson_append_timestamp( &b, "ts", &ts ); - bson_append_double( &b , "a" , 17 ); - bson_append_int( &b , "b" , 17 ); - bson_append_string( &b , "c" , "17" ); - - { - bson_append_start_object( &b , "d" ); - bson_append_int( &b, "i", 71 ); - bson_append_finish_object( &b ); - } - { - bson_append_start_array( &b , "e" ); - bson_append_int( &b, "0", 71 ); - bson_append_string( &b, "1", "71" ); - bson_append_finish_object( &b ); - } - - bson_finish( &b ); - ASSERT( mongo_insert( conn , ns , &b, NULL ) == MONGO_OK ); - bson_destroy( &b ); - } - - mongo_cursor_init( cursor, conn, ns ); - - while( mongo_cursor_next( cursor ) == MONGO_OK ) { - bson_iterator it; - bson_iterator_init( &it, mongo_cursor_bson( cursor ) ); - while( bson_iterator_next( &it ) ) { - fprintf( stderr, " %s: ", bson_iterator_key( &it ) ); - - switch( bson_iterator_type( &it ) ) { - case BSON_DOUBLE: - fprintf( stderr, "(double) %e\n", bson_iterator_double( &it ) ); - break; - case BSON_INT: - fprintf( stderr, "(int) %d\n", bson_iterator_int( &it ) ); - break; - case BSON_STRING: - fprintf( stderr, "(string) \"%s\"\n", bson_iterator_string( &it ) ); - break; - case BSON_OID: - bson_oid_to_string( bson_iterator_oid( &it ), hex_oid ); - fprintf( stderr, "(oid) \"%s\"\n", hex_oid ); - break; - case BSON_OBJECT: - fprintf( stderr, "(subobject) {...}\n" ); - break; - case BSON_ARRAY: - fprintf( stderr, "(array) [...]\n" ); - break; - case BSON_TIMESTAMP: - fprintf( stderr, "(timestamp) [...]\n" ); - break; - default: - fprintf( stderr, "(type %d)\n", bson_iterator_type( &it ) ); - break; - } - } - fprintf( stderr, "\n" ); - } - - mongo_cursor_destroy( cursor ); - ASSERT( mongo_cmd_drop_db( conn, "test" ) == MONGO_OK ); - mongo_disconnect( conn ); - - ASSERT( mongo_check_connection( conn ) == MONGO_ERROR ); - - mongo_reconnect( conn ); - - ASSERT( mongo_check_connection( conn ) == MONGO_OK ); - - mongo_env_close_socket( conn->sock ); - - ASSERT( mongo_check_connection( conn ) == MONGO_ERROR ); - - mongo_reconnect( conn ); - - ASSERT( mongo_simple_int_command( conn, "admin", "ping", 1, NULL ) == MONGO_OK ); - - mongo_destroy( conn ); - return 0; -} diff --git a/mongo-c-driver-v0.6/test/sizes_test.c b/mongo-c-driver-v0.6/test/sizes_test.c deleted file mode 100644 index dc7cdcf..0000000 --- a/mongo-c-driver-v0.6/test/sizes_test.c +++ /dev/null @@ -1,22 +0,0 @@ -/* sizes.c */ - -#include "test.h" -#include "mongo.h" -#include - -int main() { - mongo_reply mr; - - ASSERT( sizeof( int ) == 4 ); - ASSERT( sizeof( int64_t ) == 8 ); - ASSERT( sizeof( double ) == 8 ); - ASSERT( sizeof( bson_oid_t ) == 12 ); - - ASSERT( sizeof( mongo_header ) == 4+4+4+4 ); - ASSERT( sizeof( mongo_reply_fields ) == 4+8+4+4 ); - - /* field offset of obj in 
mongo_reply */ - ASSERT( ( &mr.objs - ( char * )&mr ) == ( 4+4+4+4 + 4+8+4+4 ) ); - - return 0; -} diff --git a/mongo-c-driver-v0.6/test/test.h b/mongo-c-driver-v0.6/test/test.h deleted file mode 100644 index eee7f93..0000000 --- a/mongo-c-driver-v0.6/test/test.h +++ /dev/null @@ -1,56 +0,0 @@ -#include "mongo.h" -#include - -#define ASSERT(x) \ - do{ \ - if(!(x)){ \ - printf("\nFailed ASSERT [%s] (%d):\n %s\n\n", __FILE__, __LINE__, #x); \ - exit(1); \ - }\ - }while(0) - -#define ASSERT_EQUAL_STRINGS(x, y) \ - do{ \ - if((strncmp( x, y, strlen( y ) ) != 0 )){ \ - printf("\nFailed ASSERT_EQUAL_STRINGS [%s] (%d):\n \"%s\" does not equal\n %s\n", __FILE__, __LINE__, x, #y); \ - exit(1); \ - }\ - }while(0) - -#ifdef _WIN32 -#define INIT_SOCKETS_FOR_WINDOWS mongo_init_sockets(); -#else -#define INIT_SOCKETS_FOR_WINDOWS do {} while(0) -#endif - -const char *TEST_DB = "test"; -const char *TEST_COL = "foo"; -const char *TEST_NS = "test.foo"; - -MONGO_EXTERN_C_START - -int mongo_get_server_version( char *version ) { - mongo conn[1]; - bson cmd[1], out[1]; - bson_iterator it[1]; - const char *result; - - mongo_connect( conn, TEST_SERVER, 27017 ); - - bson_init( cmd ); - bson_append_int( cmd, "buildinfo", 1 ); - bson_finish( cmd ); - - if( mongo_run_command( conn, "admin", cmd, out ) == MONGO_ERROR ) { - return -1; - } - - bson_iterator_init( it, out ); - result = bson_iterator_string( it ); - - memcpy( version, result, strlen( result ) ); - - return 0; -} - -MONGO_EXTERN_C_END diff --git a/mongo-c-driver-v0.6/test/update_test.c b/mongo-c-driver-v0.6/test/update_test.c deleted file mode 100644 index 0677ac1..0000000 --- a/mongo-c-driver-v0.6/test/update_test.c +++ /dev/null @@ -1,108 +0,0 @@ -#include "test.h" -#include "mongo.h" -#include -#include -#include - -int main() { - mongo conn[1]; - bson obj; - bson cond; - int i; - bson_oid_t oid; - const char *col = "c.update_test"; - const char *ns = "test.c.update_test"; - - INIT_SOCKETS_FOR_WINDOWS; - - if ( mongo_connect( conn , TEST_SERVER, 27017 ) ) { - printf( "failed to connect\n" ); - exit( 1 ); - } - - /* if the collection doesn't exist dropping it will fail */ - if ( mongo_cmd_drop_collection( conn, "test", col, NULL ) == MONGO_OK - && mongo_find_one( conn, ns, bson_empty( &obj ), bson_empty( &obj ), NULL ) != MONGO_OK ) { - printf( "failed to drop collection\n" ); - exit( 1 ); - } - - bson_oid_gen( &oid ); - - { - /* insert */ - bson_init( &obj ); - bson_append_oid( &obj, "_id", &oid ); - bson_append_int( &obj, "a", 3 ); - bson_finish( &obj ); - mongo_insert( conn, ns, &obj, NULL ); - bson_destroy( &obj ); - } - - { - /* insert */ - bson op; - - bson_init( &cond ); - bson_append_oid( &cond, "_id", &oid ); - bson_finish( &cond ); - - bson_init( &op ); - { - bson_append_start_object( &op, "$inc" ); - bson_append_int( &op, "a", 2 ); - bson_append_finish_object( &op ); - } - { - bson_append_start_object( &op, "$set" ); - bson_append_double( &op, "b", -1.5 ); - bson_append_finish_object( &op ); - } - bson_finish( &op ); - - for ( i=0; i<5; i++ ) - mongo_update( conn, ns, &cond, &op, 0, NULL ); - - /* cond is used later */ - bson_destroy( &op ); - } - - if( mongo_find_one( conn, ns, &cond, 0, &obj ) != MONGO_OK ) { - printf( "Failed to find object\n" ); - exit( 1 ); - } else { - int fields = 0; - bson_iterator it; - bson_iterator_init( &it, &obj ); - - bson_destroy( &cond ); - - while( bson_iterator_next( &it ) ) { - switch( bson_iterator_key( &it )[0] ) { - case '_': /* id */ - ASSERT( bson_iterator_type( &it ) == BSON_OID ); - ASSERT( !memcmp( 
bson_iterator_oid( &it )->bytes, oid.bytes, 12 ) ); - fields++; - break; - case 'a': - ASSERT( bson_iterator_type( &it ) == BSON_INT ); - ASSERT( bson_iterator_int( &it ) == 3 + 5*2 ); - fields++; - break; - case 'b': - ASSERT( bson_iterator_type( &it ) == BSON_DOUBLE ); - ASSERT( bson_iterator_double( &it ) == -1.5 ); - fields++; - break; - } - } - - ASSERT( fields == 3 ); - } - - bson_destroy( &obj ); - - mongo_cmd_drop_db( conn, "test" ); - mongo_destroy( conn ); - return 0; -} diff --git a/mongo-c-driver-v0.6/test/validate_test.c b/mongo-c-driver-v0.6/test/validate_test.c deleted file mode 100644 index 6e156ce..0000000 --- a/mongo-c-driver-v0.6/test/validate_test.c +++ /dev/null @@ -1,138 +0,0 @@ -/* validate.c */ - -#include "test.h" -#include "mongo.h" -#include "encoding.h" -#include -#include -#include - -#define BATCH_SIZE 10 - -static void make_small_invalid( bson *out, int i ) { - bson_init( out ); - bson_append_new_oid( out, "$_id" ); - bson_append_int( out, "x.foo", i ); - bson_finish( out ); -} - -int main() { - mongo conn[1]; - bson b, empty; - mongo_cursor cursor[1]; - unsigned char not_utf8[3]; - int result = 0; - const char *ns = "test.c.validate"; - - int i=0, j=0; - bson bs[BATCH_SIZE]; - bson *bp[BATCH_SIZE]; - - not_utf8[0] = 0xC0; - not_utf8[1] = 0xC0; - not_utf8[2] = '\0'; - - INIT_SOCKETS_FOR_WINDOWS; - - if ( mongo_connect( conn, TEST_SERVER, 27017 ) ) { - printf( "failed to connect\n" ); - exit( 1 ); - } - - /* Test checking for finished bson. */ - bson_init( &b ); - bson_append_int( &b, "foo", 1 ); - ASSERT( mongo_insert( conn, "test.foo", &b, NULL ) == MONGO_ERROR ); - ASSERT( conn->err == MONGO_BSON_NOT_FINISHED ); - - /* Test valid keys. */ - bson_init( &b ); - result = bson_append_string( &b , "a.b" , "17" ); - ASSERT( result == BSON_OK ); - - ASSERT( b.err & BSON_FIELD_HAS_DOT ); - - /* Don't set INIT dollar if deb ref fields are being used. */ - result = bson_append_string( &b , "$id" , "17" ); - ASSERT( result == BSON_OK ); - ASSERT( !(b.err & BSON_FIELD_INIT_DOLLAR) ); - - result = bson_append_string( &b , "$ref" , "17" ); - ASSERT( result == BSON_OK ); - ASSERT( !(b.err & BSON_FIELD_INIT_DOLLAR) ); - - result = bson_append_string( &b , "$db" , "17" ); - ASSERT( result == BSON_OK ); - ASSERT( !(b.err & BSON_FIELD_INIT_DOLLAR) ); - - result = bson_append_string( &b , "$ab" , "17" ); - ASSERT( result == BSON_OK ); - ASSERT( b.err & BSON_FIELD_INIT_DOLLAR ); - - result = bson_append_string( &b , "ab" , "this is valid utf8" ); - ASSERT( result == BSON_OK ); - ASSERT( ! ( b.err & BSON_NOT_UTF8 ) ); - - result = bson_append_string( &b , ( const char * )not_utf8, "valid" ); - ASSERT( result == BSON_ERROR ); - ASSERT( b.err & BSON_NOT_UTF8 ); - - ASSERT( bson_finish( &b ) == BSON_ERROR ); - ASSERT( b.err & BSON_FIELD_HAS_DOT ); - ASSERT( b.err & BSON_FIELD_INIT_DOLLAR ); - ASSERT( b.err & BSON_NOT_UTF8 ); - - result = mongo_insert( conn, ns, &b, NULL ); - ASSERT( result == MONGO_ERROR ); - ASSERT( conn->err & MONGO_BSON_NOT_FINISHED ); - - result = mongo_update( conn, ns, bson_empty( &empty ), &b, 0, NULL ); - ASSERT( result == MONGO_ERROR ); - ASSERT( conn->err & MONGO_BSON_NOT_FINISHED ); - - mongo_cursor_init( cursor, conn, "test.cursors" ); - mongo_cursor_set_query( cursor, &b ); - result = mongo_cursor_next( cursor ); - ASSERT( result == MONGO_ERROR ); - ASSERT( cursor->err & MONGO_CURSOR_BSON_ERROR ); - ASSERT( cursor->conn->err & MONGO_BSON_NOT_FINISHED ); - - bson_destroy( &b ); - - /* Test valid strings. 
*/ - bson_init( & b ); - result = bson_append_string( &b , "foo" , "bar" ); - ASSERT( result == BSON_OK ); - ASSERT( b.err == 0 ); - - result = bson_append_string( &b , "foo" , ( const char * )not_utf8 ); - ASSERT( result == BSON_ERROR ); - ASSERT( b.err & BSON_NOT_UTF8 ); - - b.err = 0; - ASSERT( b.err == 0 ); - - result = bson_append_regex( &b , "foo" , ( const char * )not_utf8, "s" ); - ASSERT( result == BSON_ERROR ); - ASSERT( b.err & BSON_NOT_UTF8 ); - - for ( j=0; j < BATCH_SIZE; j++ ) - bp[j] = &bs[j]; - - for ( j=0; j < BATCH_SIZE; j++ ) - make_small_invalid( &bs[j], i ); - - result = mongo_insert_batch( conn, ns, (const bson **)bp, BATCH_SIZE, NULL, 0 ); - ASSERT( result == MONGO_ERROR ); - ASSERT( conn->err == MONGO_BSON_INVALID ); - - for ( j=0; j < BATCH_SIZE; j++ ) - bson_destroy( &bs[j] ); - - mongo_cmd_drop_db( conn, "test" ); - mongo_disconnect( conn ); - - mongo_destroy( conn ); - - return 0; -} diff --git a/mongo-c-driver-v0.6/test/write_concern_test.c b/mongo-c-driver-v0.6/test/write_concern_test.c deleted file mode 100644 index e7b7a2d..0000000 --- a/mongo-c-driver-v0.6/test/write_concern_test.c +++ /dev/null @@ -1,284 +0,0 @@ -/* write_concern_test.c */ - -#include "test.h" -#include "mongo.h" -#include -#include -#include - -/* TODO remove and add mongo_create_collection to the public API. */ -void create_capped_collection( mongo *conn ) { - mongo_cmd_drop_collection( conn, "test", "wc", NULL ); - mongo_create_capped_collection( conn, "test", "wc", 1000000, 0, NULL ); -} - -void test_batch_insert_with_continue( mongo *conn ) { - bson *objs[5]; - bson *objs2[5]; - bson empty; - int i; - - mongo_cmd_drop_collection( conn, TEST_DB, TEST_COL, NULL ); - mongo_create_simple_index( conn, TEST_NS, "n", MONGO_INDEX_UNIQUE, NULL ); - - for( i=0; i<5; i++ ) { - objs[i] = bson_malloc( sizeof( bson ) ); - bson_init( objs[i] ); - bson_append_int( objs[i], "n", i ); - bson_finish( objs[i] ); - } - - ASSERT( mongo_insert_batch( conn, TEST_NS, (const bson **)objs, 5, - NULL, 0 ) == MONGO_OK ); - - ASSERT( mongo_count( conn, TEST_DB, TEST_COL, - bson_empty( &empty ) ) == 5 ); - - /* Add one duplicate value for n. */ - objs2[0] = bson_malloc( sizeof( bson ) ); - bson_init( objs2[0] ); - bson_append_int( objs2[0], "n", 1 ); - bson_finish( objs2[0] ); - - /* Add n for 6 - 9. */ - for( i = 1; i < 5; i++ ) { - objs2[i] = bson_malloc( sizeof( bson ) ); - bson_init( objs2[i] ); - bson_append_int( objs2[i], "n", i + 5 ); - bson_finish( objs2[i] ); - } - - /* Without continue on error, will fail immediately. */ - ASSERT( mongo_insert_batch( conn, TEST_NS, (const bson **)objs2, 5, - NULL, 0 ) == MONGO_OK ); - ASSERT( mongo_count( conn, TEST_DB, TEST_COL, - bson_empty( &empty ) ) == 5 ); - - /* With continue on error, will insert four documents. */ - ASSERT( mongo_insert_batch( conn, TEST_NS, (const bson **)objs2, 5, - NULL, MONGO_CONTINUE_ON_ERROR ) == MONGO_OK ); - ASSERT( mongo_count( conn, TEST_DB, TEST_COL, - bson_empty( &empty ) ) == 9 ); - - for( i=0; i<5; i++ ) { - bson_destroy( objs2[i] ); - bson_free( objs2[i] ); - - bson_destroy( objs[i] ); - bson_free( objs[i] ); - } -} - -/* We can test write concern for update - * and remove by doing operations on a capped collection. 
*/ -void test_update_and_remove( mongo *conn ) { - mongo_write_concern wc[1]; - bson *objs[5]; - bson query[1], update[1]; - bson empty; - int i; - - create_capped_collection( conn ); - - for( i=0; i<5; i++ ) { - objs[i] = bson_malloc( sizeof( bson ) ); - bson_init( objs[i] ); - bson_append_int( objs[i], "n", i ); - bson_finish( objs[i] ); - } - - ASSERT( mongo_insert_batch( conn, "test.wc", (const bson **)objs, 5, - NULL, 0 ) == MONGO_OK ); - - ASSERT( mongo_count( conn, "test", "wc", bson_empty( &empty ) ) == 5 ); - - bson_init( query ); - bson_append_int( query, "n", 2 ); - bson_finish( query ); - - ASSERT( mongo_find_one( conn, "test.wc", query, bson_empty( &empty ), NULL ) == MONGO_OK ); - - bson_init( update ); - bson_append_start_object( update, "$set" ); - bson_append_string( update, "n", "a big long string" ); - bson_append_finish_object( update ); - bson_finish( update ); - - /* Update will appear to succeed with no write concern specified, but doesn't. */ - ASSERT( mongo_find_one( conn, "test.wc", query, bson_empty( &empty ), NULL ) == MONGO_OK ); - ASSERT( mongo_update( conn, "test.wc", query, update, 0, NULL ) == MONGO_OK ); - ASSERT( mongo_find_one( conn, "test.wc", query, bson_empty( &empty ), NULL ) == MONGO_OK ); - - /* Remove will appear to succeed with no write concern specified, but doesn't. */ - ASSERT( mongo_remove( conn, "test.wc", query, NULL ) == MONGO_OK ); - ASSERT( mongo_find_one( conn, "test.wc", query, bson_empty( &empty ), NULL ) == MONGO_OK ); - - mongo_write_concern_init( wc ); - wc->w = 1; - mongo_write_concern_finish( wc ); - - mongo_clear_errors( conn ); - ASSERT( mongo_update( conn, "test.wc", query, update, 0, wc ) == MONGO_ERROR ); - ASSERT( conn->err == MONGO_WRITE_ERROR ); - ASSERT_EQUAL_STRINGS( conn->lasterrstr, "failing update: objects in a capped ns cannot grow" ); - - mongo_clear_errors( conn ); - ASSERT( mongo_remove( conn, "test.wc", query, wc ) == MONGO_ERROR ); - ASSERT( conn->err == MONGO_WRITE_ERROR ); - ASSERT_EQUAL_STRINGS( conn->lasterrstr, "can't remove from a capped collection" ); - - mongo_write_concern_destroy( wc ); - bson_destroy( query ); - bson_destroy( update ); - for( i=0; i<5; i++ ) { - bson_destroy( objs[i] ); - bson_free( objs[i] ); - } -} - -void test_write_concern_input( mongo *conn ) { - mongo_write_concern wc[1], wcbad[1]; - bson b[1]; - - mongo_cmd_drop_collection( conn, TEST_DB, TEST_COL, NULL ); - - bson_init( b ); - bson_append_new_oid( b, "_id" ); - bson_finish( b ); - - mongo_write_concern_init( wc ); - wc->w = 1; - - /* Failure to finish write concern object. */ - ASSERT( mongo_insert( conn, TEST_NS, b, wc ) != MONGO_OK ); - ASSERT( conn->err == MONGO_WRITE_CONCERN_INVALID ); - ASSERT_EQUAL_STRINGS( conn->errstr, - "Must call mongo_write_concern_finish() before using *write_concern." ); - - mongo_write_concern_finish( wc ); - - /* Use a bad write concern. */ - mongo_clear_errors( conn ); - mongo_write_concern_init( wcbad ); - wcbad->w = 2; - mongo_write_concern_finish( wcbad ); - mongo_set_write_concern( conn, wcbad ); - ASSERT( mongo_insert( conn, TEST_NS, b, NULL ) != MONGO_OK ); - ASSERT( conn->err == MONGO_WRITE_ERROR ); - ASSERT_EQUAL_STRINGS( conn->lasterrstr, "norepl" ); - - /* Ensure that supplied write concern overrides default. */ - mongo_clear_errors( conn ); - ASSERT( mongo_insert( conn, TEST_NS, b, wc ) != MONGO_OK ); - ASSERT( conn->err == MONGO_WRITE_ERROR ); - ASSERT_EQUAL_STRINGS( conn->errstr, "See conn->lasterrstr for details." 
); - ASSERT_EQUAL_STRINGS( conn->lasterrstr, "E11000 duplicate key error index" ); - ASSERT( conn->lasterrcode == 11000 ); - - conn->write_concern = NULL; - mongo_write_concern_destroy( wc ); - mongo_write_concern_destroy( wcbad ); -} - -void test_insert( mongo *conn ) { - mongo_write_concern wc[1]; - bson b[1], b2[1], b3[1], b4[1], empty[1]; - bson *objs[2]; - - mongo_cmd_drop_collection( conn, TEST_DB, TEST_COL, NULL ); - - mongo_write_concern_init( wc ); - wc->w = 1; - mongo_write_concern_finish( wc ); - - bson_init( b4 ); - bson_append_string( b4, "foo", "bar" ); - bson_finish( b4 ); - - ASSERT( mongo_insert( conn, TEST_NS, b4, wc ) == MONGO_OK ); - - ASSERT( mongo_remove( conn, TEST_NS, bson_empty( empty ), wc ) == MONGO_OK ); - - bson_init( b ); - bson_append_new_oid( b, "_id" ); - bson_finish( b ); - - ASSERT( mongo_insert( conn, TEST_NS, b, NULL ) == MONGO_OK ); - - /* This fails but returns OK because it doesn't use a write concern. */ - ASSERT( mongo_insert( conn, TEST_NS, b, NULL ) == MONGO_OK ); - - ASSERT( mongo_insert( conn, TEST_NS, b, wc ) == MONGO_ERROR ); - ASSERT( conn->err == MONGO_WRITE_ERROR ); - ASSERT_EQUAL_STRINGS( conn->errstr, "See conn->lasterrstr for details." ); - ASSERT_EQUAL_STRINGS( conn->lasterrstr, "E11000 duplicate key error index" ); - ASSERT( conn->lasterrcode == 11000 ); - mongo_clear_errors( conn ); - - /* Still fails but returns OK because it doesn't use a write concern. */ - ASSERT( mongo_insert( conn, TEST_NS, b, NULL ) == MONGO_OK ); - - /* But not when we set a default write concern on the conn. */ - mongo_set_write_concern( conn, wc ); - ASSERT( mongo_insert( conn, TEST_NS, b, NULL ) != MONGO_OK ); - ASSERT( conn->err == MONGO_WRITE_ERROR ); - ASSERT_EQUAL_STRINGS( conn->errstr, "See conn->lasterrstr for details." ); - ASSERT_EQUAL_STRINGS( conn->lasterrstr, "E11000 duplicate key error index" ); - ASSERT( conn->lasterrcode == 11000 ); - - /* Now test batch insert. */ - bson_init( b2 ); - bson_append_new_oid( b2, "_id" ); - bson_finish( b2 ); - - bson_init( b3 ); - bson_append_new_oid( b3, "_id" ); - bson_finish( b3 ); - - objs[0] = b2; - objs[1] = b3; - - /* Insert two new documents by insert_batch. */ - conn->write_concern = NULL; - ASSERT( mongo_count( conn, TEST_DB, TEST_COL, bson_empty( empty ) ) == 1 ); - ASSERT( mongo_insert_batch( conn, TEST_NS, (const bson **)objs, 2, NULL, 0 ) == MONGO_OK ); - ASSERT( mongo_count( conn, TEST_DB, TEST_COL, bson_empty( empty ) ) == 3 ); - - /* This should definitely fail if we try again with write concern. */ - mongo_clear_errors( conn ); - ASSERT( mongo_insert_batch( conn, TEST_NS, (const bson **)objs, 2, wc, 0 ) == MONGO_ERROR ); - ASSERT( conn->err == MONGO_WRITE_ERROR ); - ASSERT_EQUAL_STRINGS( conn->errstr, "See conn->lasterrstr for details." ); - ASSERT_EQUAL_STRINGS( conn->lasterrstr, "E11000 duplicate key error index" ); - ASSERT( conn->lasterrcode == 11000 ); - - /* But it will succeed without the write concern set. 
*/ - ASSERT( mongo_insert_batch( conn, TEST_NS, (const bson **)objs, 2, NULL, 0 ) == MONGO_OK ); - - bson_destroy( b ); - bson_destroy( b2 ); - bson_destroy( b3 ); - mongo_write_concern_destroy( wc ); -} - -int main() { - mongo conn[1]; - char version[10]; - - INIT_SOCKETS_FOR_WINDOWS; - - if( mongo_connect( conn, TEST_SERVER, 27017 ) != MONGO_OK ) { - printf( "failed to connect\n" ); - exit( 1 ); - } - - test_insert( conn ); - if( mongo_get_server_version( version ) != -1 && version[0] != '1' ) { - test_write_concern_input( conn ); - test_update_and_remove( conn ); - test_batch_insert_with_continue( conn ); - } - - mongo_destroy( conn ); - return 0; -} diff --git a/mongo_fdw--1.0--1.1.sql b/mongo_fdw--1.0--1.1.sql new file mode 100644 index 0000000..781155d --- /dev/null +++ b/mongo_fdw--1.0--1.1.sql @@ -0,0 +1,6 @@ +/* mongo_fdw/mongo_fdw--1.0--1.1.sql */ + +CREATE OR REPLACE FUNCTION mongo_fdw_version() + RETURNS pg_catalog.int4 STRICT + AS 'MODULE_PATHNAME' LANGUAGE C; + diff --git a/mongo_fdw--1.0.sql b/mongo_fdw--1.0.sql index af83adf..12f3753 100644 --- a/mongo_fdw--1.0.sql +++ b/mongo_fdw--1.0.sql @@ -1,6 +1,7 @@ /* mongo_fdw/mongo_fdw--1.0.sql */ --- Copyright (c) 2012-2014 Citus Data, Inc. +-- Portions Copyright (c) 2004-2024, EnterpriseDB Corporation. +-- Portions Copyright © 2012–2014 Citus Data, Inc. -- complain if script is sourced in psql, rather than via CREATE EXTENSION \echo Use "CREATE EXTENSION mongo_fdw" to load this file. \quit diff --git a/mongo_fdw--1.1.sql b/mongo_fdw--1.1.sql new file mode 100644 index 0000000..47ec3c2 --- /dev/null +++ b/mongo_fdw--1.1.sql @@ -0,0 +1,25 @@ +/* mongo_fdw/mongo_fdw--1.1.sql */ + +-- Portions Copyright (c) 2004-2024, EnterpriseDB Corporation. +-- Portions Copyright © 2012–2014 Citus Data, Inc. + +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "CREATE EXTENSION mongo_fdw" to load this file. \quit + +CREATE FUNCTION mongo_fdw_handler() +RETURNS fdw_handler +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT; + +CREATE FUNCTION mongo_fdw_validator(text[], oid) +RETURNS void +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT; + +CREATE FOREIGN DATA WRAPPER mongo_fdw + HANDLER mongo_fdw_handler + VALIDATOR mongo_fdw_validator; + +CREATE OR REPLACE FUNCTION mongo_fdw_version() + RETURNS pg_catalog.int4 STRICT + AS 'MODULE_PATHNAME' LANGUAGE C; diff --git a/mongo_fdw.c b/mongo_fdw.c index daa5438..80f0ae0 100644 --- a/mongo_fdw.c +++ b/mongo_fdw.c @@ -1,1464 +1,4595 @@ /*------------------------------------------------------------------------- * * mongo_fdw.c + * Foreign-data wrapper for remote MongoDB servers * - * Function definitions for MongoDB foreign data wrapper. These functions access - * data stored in MongoDB through the official C driver. + * Portions Copyright (c) 2012-2014, PostgreSQL Global Development Group + * Portions Copyright (c) 2004-2024, EnterpriseDB Corporation. + * Portions Copyright (c) 2012–2014 Citus Data, Inc. * - * Copyright (c) 2012-2014 Citus Data, Inc. 
+ * IDENTIFICATION + * mongo_fdw.c * *------------------------------------------------------------------------- */ - #include "postgres.h" -#include "mongo_fdw.h" +#include "mongo_wrapper.h" -#include "access/reloptions.h" +#include "access/htup_details.h" +#include "access/table.h" +#include "catalog/heap.h" +#include "catalog/pg_operator.h" #include "catalog/pg_type.h" -#include "commands/defrem.h" -#include "commands/explain.h" -#include "commands/vacuum.h" -#include "foreign/fdwapi.h" -#include "foreign/foreign.h" -#include "nodes/makefuncs.h" -#include "optimizer/cost.h" -#include "optimizer/pathnode.h" -#include "optimizer/plancat.h" -#include "optimizer/planmain.h" -#include "optimizer/restrictinfo.h" -#include "utils/array.h" -#include "utils/builtins.h" -#include "utils/date.h" -#include "utils/hsearch.h" -#include "utils/lsyscache.h" -#include "utils/rel.h" -#include "utils/memutils.h" - -#if PG_VERSION_NUM >= 90300 - #include "access/htup_details.h" +#if PG_VERSION_NUM >= 130000 +#include "common/hashfn.h" +#include "common/jsonapi.h" #endif +#include "miscadmin.h" +#include "mongo_fdw.h" +#include "mongo_query.h" +#include "nodes/nodeFuncs.h" +#if PG_VERSION_NUM >= 140000 +#include "optimizer/appendinfo.h" +#endif +#include "optimizer/optimizer.h" +#include "optimizer/paths.h" +#include "optimizer/tlist.h" +#include "parser/parsetree.h" +#if PG_VERSION_NUM >= 160000 +#include "parser/parse_relation.h" +#endif +#include "storage/ipc.h" +#include "utils/guc.h" +#include "utils/jsonb.h" +#if PG_VERSION_NUM < 130000 +#include "utils/jsonapi.h" +#else +#include "utils/jsonfuncs.h" +#endif +#include "utils/rel.h" +#include "utils/selfuncs.h" +#include "utils/syscache.h" +#include "utils/typcache.h" - -/* Local functions forward declarations */ -static StringInfo OptionNamesString(Oid currentContextId); -static void MongoGetForeignRelSize(PlannerInfo *root, RelOptInfo *baserel, - Oid foreignTableId); -static void MongoGetForeignPaths(PlannerInfo *root, RelOptInfo *baserel, - Oid foreignTableId); -static ForeignScan * MongoGetForeignPlan(PlannerInfo *root, RelOptInfo *baserel, - Oid foreignTableId, ForeignPath *bestPath, - List *targetList, List *restrictionClauses); -static void MongoExplainForeignScan(ForeignScanState *scanState, - ExplainState *explainState); -static void MongoBeginForeignScan(ForeignScanState *scanState, int executorFlags); -static TupleTableSlot * MongoIterateForeignScan(ForeignScanState *scanState); -static void MongoEndForeignScan(ForeignScanState *scanState); -static void MongoReScanForeignScan(ForeignScanState *scanState); -static Const * SerializeDocument(bson *document); -static bson * DeserializeDocument(Const *constant); -static double ForeignTableDocumentCount(Oid foreignTableId); -static MongoFdwOptions * MongoGetOptions(Oid foreignTableId); -static char * MongoGetOptionValue(Oid foreignTableId, const char *optionName); -static HTAB * ColumnMappingHash(Oid foreignTableId, List *columnList); -static void FillTupleSlot(const bson *bsonDocument, const char *bsonDocumentKey, - HTAB *columnMappingHash, Datum *columnValues, - bool *columnNulls); -static bool ColumnTypesCompatible(bson_type bsonType, Oid columnTypeId); -static Datum ColumnValueArray(bson_iterator *bsonIterator, Oid valueTypeId); -static Datum ColumnValue(bson_iterator *bsonIterator, Oid columnTypeId, - int32 columnTypeMod); -static void MongoFreeScanState(MongoFdwExecState *executionState); -static bool MongoAnalyzeForeignTable(Relation relation, - AcquireSampleRowsFunc *acquireSampleRowsFunc, 
- BlockNumber *totalPageCount);
-static int MongoAcquireSampleRows(Relation relation, int errorLevel,
- HeapTuple *sampleRows, int targetRowCount,
- double *totalRowCount, double *totalDeadRowCount);
-
-
-/* declarations for dynamic loading */
+/* Declarations for dynamic loading */
 PG_MODULE_MAGIC;
-PG_FUNCTION_INFO_V1(mongo_fdw_handler);
-PG_FUNCTION_INFO_V1(mongo_fdw_validator);
-
+/*
+ * In PG 9.5.1 the number will be 90501,
+ * our version is 5.5.2, so the number will be 50502
+ */
+#define CODE_VERSION 50502
 /*
- * mongo_fdw_handler creates and returns a struct with pointers to foreign table
- * callback functions.
+ * Macro to check unsupported sorting methods. Currently, ASC NULLS FIRST and
+ * DESC NULLS LAST give the same sorting result on MongoDB and Postgres. So,
+ * sorting methods other than these are not pushed down.
 */
-Datum
-mongo_fdw_handler(PG_FUNCTION_ARGS)
-{
- FdwRoutine *fdwRoutine = makeNode(FdwRoutine);
+#define IS_PATHKEY_PUSHABLE(pathkey) \
+ ((pathkey->pk_strategy == BTLessStrategyNumber && pathkey->pk_nulls_first) || \
+ (pathkey->pk_strategy != BTLessStrategyNumber && !pathkey->pk_nulls_first))
- fdwRoutine->GetForeignRelSize = MongoGetForeignRelSize;
- fdwRoutine->GetForeignPaths = MongoGetForeignPaths;
- fdwRoutine->GetForeignPlan = MongoGetForeignPlan;
- fdwRoutine->ExplainForeignScan = MongoExplainForeignScan;
- fdwRoutine->BeginForeignScan = MongoBeginForeignScan;
- fdwRoutine->IterateForeignScan = MongoIterateForeignScan;
- fdwRoutine->ReScanForeignScan = MongoReScanForeignScan;
- fdwRoutine->EndForeignScan = MongoEndForeignScan;
- fdwRoutine->AnalyzeForeignTable = MongoAnalyzeForeignTable;
+/* Maximum path keys supported by MongoDB */
+#define MAX_PATHKEYS 32
- PG_RETURN_POINTER(fdwRoutine);
-}
+/*
+ * The number of rows in a foreign relation is estimated to be so small that
+ * an in-memory sort on those rows wouldn't cost noticeably more than the
+ * underlying scan. Hence, for now, sorts are costed the same as the
+ * underlying scans.
+ */
+#define DEFAULT_MONGO_SORT_MULTIPLIER 1
+/* GUC variables. */
+static bool enable_join_pushdown = true;
+static bool enable_order_by_pushdown = true;
+static bool enable_aggregate_pushdown = true;
 /*
- * mongo_fdw_validator validates options given to one of the following commands:
- * foreign data wrapper, server, user mapping, or foreign table. This function
- * errors out if the given option name or its value is considered invalid.
+ * This enum describes what's kept in the fdw_private list for a ForeignPath.
+ * We store: + * + * 1) Boolean flag showing if the remote query has the final sort + * 2) Boolean flag showing if the remote query has the LIMIT clause */ -Datum -mongo_fdw_validator(PG_FUNCTION_ARGS) +enum FdwPathPrivateIndex { - Datum optionArray = PG_GETARG_DATUM(0); - Oid optionContextId = PG_GETARG_OID(1); - List *optionList = untransformRelOptions(optionArray); - ListCell *optionCell = NULL; - - foreach(optionCell, optionList) - { - DefElem *optionDef = (DefElem *) lfirst(optionCell); - char *optionName = optionDef->defname; - bool optionValid = false; - - int32 optionIndex = 0; - for (optionIndex = 0; optionIndex < ValidOptionCount; optionIndex++) - { - const MongoValidOption *validOption = &(ValidOptionArray[optionIndex]); - - if ((optionContextId == validOption->optionContextId) && - (strncmp(optionName, validOption->optionName, NAMEDATALEN) == 0)) - { - optionValid = true; - break; - } - } + /* has-final-sort flag (as an integer Value node) */ + FdwPathPrivateHasFinalSort, + /* has-limit flag (as an integer Value node) */ + FdwPathPrivateHasLimit +}; - /* if invalid option, display an informative error message */ - if (!optionValid) - { - StringInfo optionNamesString = OptionNamesString(optionContextId); +extern PGDLLEXPORT void _PG_init(void); - ereport(ERROR, (errcode(ERRCODE_FDW_INVALID_OPTION_NAME), - errmsg("invalid option \"%s\"", optionName), - errhint("Valid options in this context are: %s", - optionNamesString->data))); - } +PG_FUNCTION_INFO_V1(mongo_fdw_handler); +PG_FUNCTION_INFO_V1(mongo_fdw_version); + +/* FDW callback routines */ +static void mongoGetForeignRelSize(PlannerInfo *root, + RelOptInfo *baserel, + Oid foreigntableid); +static void mongoGetForeignPaths(PlannerInfo *root, + RelOptInfo *baserel, + Oid foreigntableid); +static ForeignScan *mongoGetForeignPlan(PlannerInfo *root, + RelOptInfo *foreignrel, + Oid foreigntableid, + ForeignPath *best_path, + List *targetlist, + List *restrictionClauses, + Plan *outer_plan); +static void mongoExplainForeignScan(ForeignScanState *node, ExplainState *es); +static void mongoBeginForeignScan(ForeignScanState *node, int eflags); +static TupleTableSlot *mongoIterateForeignScan(ForeignScanState *node); +static void mongoEndForeignScan(ForeignScanState *node); +static void mongoReScanForeignScan(ForeignScanState *node); +static TupleTableSlot *mongoExecForeignUpdate(EState *estate, + ResultRelInfo *resultRelInfo, + TupleTableSlot *slot, + TupleTableSlot *planSlot); +static TupleTableSlot *mongoExecForeignDelete(EState *estate, + ResultRelInfo *resultRelInfo, + TupleTableSlot *slot, + TupleTableSlot *planSlot); +static void mongoEndForeignModify(EState *estate, + ResultRelInfo *resultRelInfo); +#if PG_VERSION_NUM >= 140000 +static void mongoAddForeignUpdateTargets(PlannerInfo *root, + Index rtindex, + RangeTblEntry *target_rte, + Relation target_relation); +#else +static void mongoAddForeignUpdateTargets(Query *parsetree, + RangeTblEntry *target_rte, + Relation target_relation); +#endif +static void mongoBeginForeignModify(ModifyTableState *mtstate, + ResultRelInfo *resultRelInfo, + List *fdw_private, + int subplan_index, + int eflags); +static TupleTableSlot *mongoExecForeignInsert(EState *estate, + ResultRelInfo *resultRelInfo, + TupleTableSlot *slot, + TupleTableSlot *planSlot); +static List *mongoPlanForeignModify(PlannerInfo *root, + ModifyTable *plan, + Index resultRelation, + int subplan_index); +static void mongoExplainForeignModify(ModifyTableState *mtstate, + ResultRelInfo *rinfo, + List *fdw_private, + int 
subplan_index, + ExplainState *es); +static bool mongoAnalyzeForeignTable(Relation relation, + AcquireSampleRowsFunc *func, + BlockNumber *totalpages); +static void mongoBeginForeignInsert(ModifyTableState *mtstate, + ResultRelInfo *resultRelInfo); +static void mongoEndForeignInsert(EState *estate, + ResultRelInfo *resultRelInfo); +static void mongoGetForeignJoinPaths(PlannerInfo *root, RelOptInfo *joinrel, + RelOptInfo *outerrel, + RelOptInfo *innerrel, + JoinType jointype, + JoinPathExtraData *extra); +static void mongoGetForeignUpperPaths(PlannerInfo *root, + UpperRelationKind stage, + RelOptInfo *input_rel, + RelOptInfo *output_rel, + void *extra); - /* if port option is given, error out if its value isn't an integer */ - if (strncmp(optionName, OPTION_NAME_PORT, NAMEDATALEN) == 0) - { - char *optionValue = defGetString(optionDef); - int32 portNumber = pg_atoi(optionValue, sizeof(int32), 0); - (void) portNumber; - } - } +/* + * Helper functions + */ +static double foreign_table_document_count(Oid foreignTableId); +static HTAB *column_mapping_hash(Oid foreignTableId, List *columnList, + List *colNameList, List *colIsInnerList, + uint32 relType); +static void fill_tuple_slot(const BSON *bsonDocument, + const char *bsonDocumentKey, + HTAB *columnMappingHash, + Datum *columnValues, + bool *columnNulls, + uint32 relType); +static bool column_types_compatible(BSON_TYPE bsonType, Oid columnTypeId); +static Datum column_value_array(BSON_ITERATOR *bsonIterator, Oid valueTypeId); +static Datum column_value(BSON_ITERATOR *bsonIterator, + Oid columnTypeId, + int32 columnTypeMod); +static void mongo_free_scan_state(MongoFdwModifyState *fmstate); +static int mongo_acquire_sample_rows(Relation relation, + int errorLevel, + HeapTuple *sampleRows, + int targetRowCount, + double *totalRowCount, + double *totalDeadRowCount); +static void mongo_fdw_exit(int code, Datum arg); +static bool mongo_foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel, + JoinType jointype, RelOptInfo *outerrel, + RelOptInfo *innerrel, + JoinPathExtraData *extra); +static void mongo_prepare_qual_info(List *quals, MongoRelQualInfo *qual_info); +static bool mongo_foreign_grouping_ok(PlannerInfo *root, + RelOptInfo *grouped_rel, + Node *havingQual); +static void mongo_add_foreign_grouping_paths(PlannerInfo *root, + RelOptInfo *input_rel, + RelOptInfo *grouped_rel, + GroupPathExtraData *extra); +static void mongo_add_foreign_final_paths(PlannerInfo *root, + RelOptInfo *input_rel, + RelOptInfo *final_rel, + FinalPathExtraData *extra); +static void mongoEstimateCosts(RelOptInfo *baserel, Cost *startup_cost, + Cost *total_cost, Oid foreigntableid); + +static List *mongo_get_useful_ecs_for_relation(PlannerInfo *root, + RelOptInfo *rel); +static List *mongo_get_useful_pathkeys_for_relation(PlannerInfo *root, + RelOptInfo *rel); +#if PG_VERSION_NUM >= 170000 +static void mongo_add_paths_with_pathkeys(PlannerInfo *root, + RelOptInfo *rel, + Path *epq_path, + Cost base_startup_cost, + Cost base_total_cost, + List *restrictlist); +#else +static void mongo_add_paths_with_pathkeys(PlannerInfo *root, + RelOptInfo *rel, + Path *epq_path, + Cost base_startup_cost, + Cost base_total_cost); +#endif +static EquivalenceMember *mongo_find_em_for_rel_target(PlannerInfo *root, + EquivalenceClass *ec, + RelOptInfo *rel); +static void mongo_add_foreign_ordered_paths(PlannerInfo *root, + RelOptInfo *input_rel, + RelOptInfo *ordered_rel); + +/* The null action object used for pure validation */ +#if PG_VERSION_NUM < 130000 +static JsonSemAction 
nullSemAction =
+{
+ NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL
+};
+#else
+JsonSemAction nullSemAction =
+{
+ NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL
+};
+#endif
-
 /*
+ * Library load-time initialization, sets on_proc_exit() callback for
+ * backend shutdown.
+ */
+void
+_PG_init(void)
+{
+ /*
+ * Sometimes getting a join or sorted result from MongoDB server is slower
+ * than performing those operations locally. To have that flexibility add
+ * a few GUCs to control those push-downs.
+ */
+ DefineCustomBoolVariable("mongo_fdw.enable_join_pushdown",
+ "enable/disable join pushdown",
+ NULL,
+ &enable_join_pushdown,
+ true,
+ PGC_SUSET,
+ 0,
+ NULL,
+ NULL,
+ NULL);
+
+ DefineCustomBoolVariable("mongo_fdw.enable_order_by_pushdown",
+ "Enable/Disable ORDER BY push down",
+ NULL,
+ &enable_order_by_pushdown,
+ true,
+ PGC_SUSET,
+ 0,
+ NULL,
+ NULL,
+ NULL);
+
+ DefineCustomBoolVariable("mongo_fdw.enable_aggregate_pushdown",
+ "Enable/Disable aggregate push down",
+ NULL,
+ &enable_aggregate_pushdown,
+ true,
+ PGC_SUSET,
+ 0,
+ NULL,
+ NULL,
+ NULL);
+
+ /* Initialize MongoDB C driver */
+ mongoc_init();
+
+ on_proc_exit(&mongo_fdw_exit, PointerGetDatum(NULL));
 }
-
 /*
- * OptionNamesString finds all options that are valid for the current context,
- * and concatenates these option names in a comma separated string.
+ * mongo_fdw_handler
+ * Creates and returns a struct with pointers to foreign table callback
+ * functions.
 */
-static StringInfo
-OptionNamesString(Oid currentContextId)
+Datum
+mongo_fdw_handler(PG_FUNCTION_ARGS)
 {
- StringInfo optionNamesString = makeStringInfo();
- bool firstOptionPrinted = false;
-
- int32 optionIndex = 0;
- for (optionIndex = 0; optionIndex < ValidOptionCount; optionIndex++)
- {
- const MongoValidOption *validOption = &(ValidOptionArray[optionIndex]);
-
- /* if option belongs to current context, append option name */
- if (currentContextId == validOption->optionContextId)
- {
- if (firstOptionPrinted)
- {
- appendStringInfoString(optionNamesString, ", ");
- }
+ FdwRoutine *fdwRoutine = makeNode(FdwRoutine);
- appendStringInfoString(optionNamesString, validOption->optionName);
- firstOptionPrinted = true;
- }
- }
+ /* Functions for scanning foreign tables */
+ fdwRoutine->GetForeignRelSize = mongoGetForeignRelSize;
+ fdwRoutine->GetForeignPaths = mongoGetForeignPaths;
+ fdwRoutine->GetForeignPlan = mongoGetForeignPlan;
+ fdwRoutine->BeginForeignScan = mongoBeginForeignScan;
+ fdwRoutine->IterateForeignScan = mongoIterateForeignScan;
+ fdwRoutine->ReScanForeignScan = mongoReScanForeignScan;
+ fdwRoutine->EndForeignScan = mongoEndForeignScan;
+
+ /* Support for insert/update/delete */
+ fdwRoutine->AddForeignUpdateTargets = mongoAddForeignUpdateTargets;
+ fdwRoutine->PlanForeignModify = mongoPlanForeignModify;
+ fdwRoutine->BeginForeignModify = mongoBeginForeignModify;
+ fdwRoutine->ExecForeignInsert = mongoExecForeignInsert;
+ fdwRoutine->ExecForeignUpdate = mongoExecForeignUpdate;
+ fdwRoutine->ExecForeignDelete = mongoExecForeignDelete;
+ fdwRoutine->EndForeignModify = mongoEndForeignModify;
+
+ /* Support for EXPLAIN */
+ fdwRoutine->ExplainForeignScan = mongoExplainForeignScan;
+ fdwRoutine->ExplainForeignModify = mongoExplainForeignModify;
+
+ /* Support for ANALYZE */
+ fdwRoutine->AnalyzeForeignTable = mongoAnalyzeForeignTable;
+
+ /* Partition routing and/or COPY from */
+ fdwRoutine->BeginForeignInsert = mongoBeginForeignInsert;
+ fdwRoutine->EndForeignInsert = mongoEndForeignInsert;
+
+ /*
Support function for join push-down */ + fdwRoutine->GetForeignJoinPaths = mongoGetForeignJoinPaths; + + /* Support functions for upper relation push-down */ + fdwRoutine->GetForeignUpperPaths = mongoGetForeignUpperPaths; - return optionNamesString; + PG_RETURN_POINTER(fdwRoutine); } - /* - * MongoGetForeignRelSize obtains relation size estimates for mongo foreign table. + * mongo_fdw_exit + * Exit callback function. */ static void -MongoGetForeignRelSize(PlannerInfo *root, RelOptInfo *baserel, Oid foreignTableId) +mongo_fdw_exit(int code, Datum arg) { - double documentCount = ForeignTableDocumentCount(foreignTableId); - if (documentCount > 0.0) - { - /* - * We estimate the number of rows returned after restriction qualifiers - * are applied. This will be more accurate if analyze is run on this - * relation. - */ - List *rowClauseList = baserel->baserestrictinfo; - double rowSelectivity = clauselist_selectivity(root, rowClauseList, - 0, JOIN_INNER, NULL); - - double outputRowCount = clamp_row_est(documentCount * rowSelectivity); - baserel->rows = outputRowCount; - } - else - { - ereport(DEBUG1, (errmsg("could not retrieve document count for collection"), - errhint("Falling back to default estimates in planning"))); - } + mongo_cleanup_connection(); + /* Release all memory and other resources allocated by the driver */ + mongoc_cleanup(); } - /* - * MongoGetForeignPaths creates the only scan path used to execute the query. - * Note that MongoDB may decide to use an underlying index for this scan, but - * that decision isn't deterministic or visible to us. We therefore create a - * single table scan path. + * MongoGetForeignRelSize + * Obtains relation size estimates for mongo foreign table. */ static void -MongoGetForeignPaths(PlannerInfo *root, RelOptInfo *baserel, Oid foreignTableId) +mongoGetForeignRelSize(PlannerInfo *root, + RelOptInfo *baserel, + Oid foreigntableid) { - double tupleFilterCost = baserel->baserestrictcost.per_tuple; - double inputRowCount = 0.0; - double documentSelectivity = 0.0; - double foreignTableSize = 0; - int32 documentWidth = 0; - BlockNumber pageCount = 0; - double totalDiskAccessCost = 0.0; - double cpuCostPerDoc = 0.0; - double cpuCostPerRow = 0.0; - double totalCpuCost = 0.0; - double connectionCost = 0.0; - double documentCount = 0.0; - List *opExpressionList = NIL; - Cost startupCost = 0.0; - Cost totalCost = 0.0; - Path *foreignPath = NULL; - - documentCount = ForeignTableDocumentCount(foreignTableId); - if (documentCount > 0.0) - { - /* - * We estimate the number of rows returned after restriction qualifiers - * are applied by MongoDB. - */ - opExpressionList = ApplicableOpExpressionList(baserel); - documentSelectivity = clauselist_selectivity(root, opExpressionList, - 0, JOIN_INNER, NULL); - inputRowCount = clamp_row_est(documentCount * documentSelectivity); - - /* - * We estimate disk costs assuming a sequential scan over the data. This is - * an inaccurate assumption as Mongo scatters the data over disk pages, and - * may rely on an index to retrieve the data. Still, this should at least - * give us a relative cost. 
- */ - documentWidth = get_relation_data_width(foreignTableId, baserel->attr_widths); - foreignTableSize = documentCount * documentWidth; - - pageCount = (BlockNumber) rint(foreignTableSize / BLCKSZ); - totalDiskAccessCost = seq_page_cost * pageCount; + RangeTblEntry *rte = planner_rt_fetch(baserel->relid, root); + MongoFdwRelationInfo *fpinfo; + MongoFdwOptions *options; + ListCell *lc; + char *relname; + char *database; + char *refname; - /* - * The cost of processing a document returned by Mongo (input row) is 5x the - * cost of processing a regular row. - */ - cpuCostPerDoc = cpu_tuple_cost; - cpuCostPerRow = (cpu_tuple_cost * MONGO_TUPLE_COST_MULTIPLIER) + tupleFilterCost; - totalCpuCost = (cpuCostPerDoc * documentCount) + (cpuCostPerRow * inputRowCount); + /* + * We use MongoFdwRelationInfo to pass various information to subsequent + * functions. + */ + fpinfo = (MongoFdwRelationInfo *) palloc0(sizeof(MongoFdwRelationInfo)); + baserel->fdw_private = (void *) fpinfo; - connectionCost = MONGO_CONNECTION_COST_MULTIPLIER * seq_page_cost; - startupCost = baserel->baserestrictcost.startup + connectionCost; - totalCost = startupCost + totalDiskAccessCost + totalCpuCost; - } - else + /* + * Identify which baserestrictinfo clauses can be sent to the remote + * server and which can't. Only the OpExpr clauses are sent to the remote + * server. + */ + foreach(lc, baserel->baserestrictinfo) { - ereport(DEBUG1, (errmsg("could not retrieve document count for collection"), - errhint("Falling back to default estimates in planning"))); + RestrictInfo *ri = (RestrictInfo *) lfirst(lc); + + if (mongo_is_foreign_expr(root, baserel, ri->clause, false)) + fpinfo->remote_conds = lappend(fpinfo->remote_conds, ri); + else + fpinfo->local_conds = lappend(fpinfo->local_conds, ri); } - /* create a foreign path node */ - foreignPath = (Path *) create_foreignscan_path(root, baserel, baserel->rows, - startupCost, totalCost, - NIL, /* no pathkeys */ - NULL, /* no outer rel either */ - NIL); /* no fdw_private data */ + /* Base foreign tables need to be pushed down always. */ + fpinfo->pushdown_safe = true; - /* add foreign path as the only possible path */ - add_path(baserel, foreignPath); -} + /* Fetch options */ + options = mongo_get_options(foreigntableid); + /* + * Retrieve exact document count for remote collection if asked, + * otherwise, use default estimate in planning. + */ + if (options->use_remote_estimate) + { + double documentCount = foreign_table_document_count(foreigntableid); -/* - * MongoGetForeignPlan creates a foreign scan plan node for scanning the MongoDB - * collection. Note that MongoDB may decide to use an underlying index for this - * scan, but that decision isn't deterministic or visible to us. - */ -static ForeignScan * -MongoGetForeignPlan(PlannerInfo *root, RelOptInfo *baserel, Oid foreignTableId, - ForeignPath *bestPath, List *targetList, List *restrictionClauses) -{ - Index scanRangeTableIndex = baserel->relid; - ForeignScan *foreignScan = NULL; - List *foreignPrivateList = NIL; - List *opExpressionList = NIL; - bson *queryDocument = NULL; - Const *queryBuffer = NULL; - List *columnList = NIL; + if (documentCount > 0.0) + { + double rowSelectivity; + + /* + * We estimate the number of rows returned after restriction + * qualifiers are applied. This will be more accurate if analyze + * is run on this relation. 
+ */ + rowSelectivity = clauselist_selectivity(root, + baserel->baserestrictinfo, + 0, JOIN_INNER, NULL); + baserel->rows = clamp_row_est(documentCount * rowSelectivity); + } + else + ereport(DEBUG1, + (errmsg("could not retrieve document count for collection"), + errhint("Falling back to default estimates in planning."))); + } + + relname = options->collectionName; + database = options->svr_database; + fpinfo->base_relname = relname; /* - * We push down applicable restriction clauses to MongoDB, but for simplicity - * we currently put all the restrictionClauses into the plan node's qual - * list for the executor to re-check. So all we have to do here is strip - * RestrictInfo nodes from the clauses and ignore pseudoconstants (which - * will be handled elsewhere). + * Set the name of relation in fpinfo, while we are constructing it here. + * It will be used to build the string describing the join relation in + * EXPLAIN output. We can't know whether the VERBOSE option is specified + * or not, so always schema-qualify the foreign table name. */ - restrictionClauses = extract_actual_clauses(restrictionClauses, false); + fpinfo->relation_name = makeStringInfo(); + refname = rte->eref->aliasname; + appendStringInfo(fpinfo->relation_name, "%s.%s", + quote_identifier(database), + quote_identifier(relname)); + if (*refname && strcmp(refname, relname) != 0) + appendStringInfo(fpinfo->relation_name, " %s", + quote_identifier(rte->eref->aliasname)); + + /* Also store the options in fpinfo for further use */ + fpinfo->options = options; /* - * We construct the query document to have MongoDB filter its rows. We could - * also construct a column name document here to retrieve only the needed - * columns. However, we found this optimization to degrade performance on - * the MongoDB server-side, so we instead filter out columns on our side. + * Store aggregation enable/disable option in the fpinfo directly for + * further use. This flag can be useful when options are not accessible + * in the recursive cases. */ - opExpressionList = ApplicableOpExpressionList(baserel); - queryDocument = QueryDocument(foreignTableId, opExpressionList); - queryBuffer = SerializeDocument(queryDocument); - - /* only clean up the query struct, but not its data */ - bson_dispose(queryDocument); + fpinfo->is_agg_scanrel_pushable = options->enable_aggregate_pushdown; - /* we don't need to serialize column list as lists are copiable */ - columnList = ColumnList(baserel); - - /* construct foreign plan with query document and column list */ - foreignPrivateList = list_make2(queryBuffer, columnList); - - /* create the foreign scan node */ - foreignScan = make_foreignscan(targetList, restrictionClauses, - scanRangeTableIndex, - NIL, /* no expressions to evaluate */ - foreignPrivateList); - return foreignScan; + /* Set the flag is_order_by_pushable of the base relation */ + fpinfo->is_order_by_pushable = options->enable_order_by_pushdown; } - /* - * MongoExplainForeignScan produces extra output for the Explain command. + * mongoGetForeignPaths + * Creates the only scan path used to execute the query. + * + * Note that MongoDB may decide to use an underlying index for this scan, but + * that decision isn't deterministic or visible to us. We therefore create a + * single table scan path. 
*/ static void -MongoExplainForeignScan(ForeignScanState *scanState, ExplainState *explainState) +mongoGetForeignPaths(PlannerInfo *root, + RelOptInfo *baserel, + Oid foreigntableid) { - MongoFdwOptions *mongoFdwOptions = NULL; - StringInfo namespaceName = NULL; - Oid foreignTableId = InvalidOid; - - foreignTableId = RelationGetRelid(scanState->ss.ss_currentRelation); - mongoFdwOptions = MongoGetOptions(foreignTableId); - - /* construct fully qualified collection name */ - namespaceName = makeStringInfo(); - appendStringInfo(namespaceName, "%s.%s", mongoFdwOptions->databaseName, - mongoFdwOptions->collectionName); - - ExplainPropertyText("Foreign Namespace", namespaceName->data, explainState); -} + Path *foreignPath; + MongoFdwOptions *options; + Cost startupCost = 0; + Cost totalCost = 0; + /* Fetch options */ + options = mongo_get_options(foreigntableid); -/* - * MongoBeginForeignScan connects to the MongoDB server, and opens a cursor that - * uses the database name, collection name, and the remote query to send to the - * server. The function also creates a hash table that maps referenced column - * names to column index and type information. - */ -static void -MongoBeginForeignScan(ForeignScanState *scanState, int executorFlags) -{ - mongo *mongoConnection = NULL; - mongo_cursor *mongoCursor = NULL; - int32 connectStatus = MONGO_ERROR; - Oid foreignTableId = InvalidOid; - List *columnList = NIL; - HTAB *columnMappingHash = NULL; - char *addressName = NULL; - int32 portNumber = 0; - int32 errorCode = 0; - StringInfo namespaceName = NULL; - ForeignScan *foreignScan = NULL; - List *foreignPrivateList = NIL; - Const *queryBuffer = NULL; - bson *queryDocument = NULL; - MongoFdwOptions *mongoFdwOptions = NULL; - MongoFdwExecState *executionState = NULL; - - /* if Explain with no Analyze, do nothing */ - if (executorFlags & EXEC_FLAG_EXPLAIN_ONLY) + /* + * Retrieve exact document count for remote collection if asked, + * otherwise, use default estimate in planning. + */ + if (options->use_remote_estimate) { - return; - } + double documentCount = foreign_table_document_count(foreigntableid); - foreignTableId = RelationGetRelid(scanState->ss.ss_currentRelation); - mongoFdwOptions = MongoGetOptions(foreignTableId); + if (documentCount > 0.0) + { + MongoFdwRelationInfo *fpinfo = (MongoFdwRelationInfo *) baserel->fdw_private; + double tupleFilterCost = baserel->baserestrictcost.per_tuple; + double inputRowCount; + double documentSelectivity; + double foreignTableSize; + int32 documentWidth; + BlockNumber pageCount; + double totalDiskAccessCost; + double cpuCostPerDoc; + double cpuCostPerRow; + double totalCpuCost; + double connectionCost; + List *opExpressionList; - /* resolve hostname and port number; and connect to mongo server */ - addressName = mongoFdwOptions->addressName; - portNumber = mongoFdwOptions->portNumber; + /* + * We estimate the number of rows returned after restriction + * qualifiers are applied by MongoDB. + */ + opExpressionList = fpinfo->remote_conds; + documentSelectivity = clauselist_selectivity(root, + opExpressionList, 0, + JOIN_INNER, NULL); + inputRowCount = clamp_row_est(documentCount * documentSelectivity); - mongoConnection = mongo_create(); - mongo_init(mongoConnection); + /* + * We estimate disk costs assuming a sequential scan over the + * data. This is an inaccurate assumption as Mongo scatters the + * data over disk pages, and may rely on an index to retrieve the + * data. Still, this should at least give us a relative cost. 
+ */ + documentWidth = get_relation_data_width(foreigntableid, + baserel->attr_widths); + foreignTableSize = documentCount * documentWidth; - connectStatus = mongo_connect(mongoConnection, addressName, portNumber); - if (connectStatus != MONGO_OK) - { - errorCode = (int32) mongoConnection->err; + pageCount = (BlockNumber) rint(foreignTableSize / BLCKSZ); + totalDiskAccessCost = seq_page_cost * pageCount; - mongo_destroy(mongoConnection); - mongo_dispose(mongoConnection); + /* + * The cost of processing a document returned by Mongo (input row) + * is 5x the cost of processing a regular row. + */ + cpuCostPerDoc = cpu_tuple_cost; + cpuCostPerRow = (cpu_tuple_cost * MONGO_TUPLE_COST_MULTIPLIER) + tupleFilterCost; + totalCpuCost = (cpuCostPerDoc * documentCount) + (cpuCostPerRow * inputRowCount); - ereport(ERROR, (errmsg("could not connect to %s:%d", addressName, portNumber), - errhint("Mongo driver connection error: %d", errorCode))); + connectionCost = MONGO_CONNECTION_COST_MULTIPLIER * seq_page_cost; + startupCost = baserel->baserestrictcost.startup + connectionCost; + totalCost = startupCost + totalDiskAccessCost + totalCpuCost; + } + else + ereport(DEBUG1, + (errmsg("could not retrieve document count for collection"), + errhint("Falling back to default estimates in planning."))); + } + else + { + /* Estimate default costs */ + mongoEstimateCosts(baserel, &startupCost, &totalCost, foreigntableid); } - /* deserialize query document; and create column info hash */ - foreignScan = (ForeignScan *) scanState->ss.ps.plan; - foreignPrivateList = foreignScan->fdw_private; - Assert(list_length(foreignPrivateList) == 2); - - queryBuffer = (Const *) linitial(foreignPrivateList); - queryDocument = DeserializeDocument(queryBuffer); + /* Create a foreign path node */ +#if PG_VERSION_NUM >= 170000 + foreignPath = (Path *) create_foreignscan_path(root, baserel, + NULL, /* default pathtarget */ + baserel->rows, + startupCost, + totalCost, + NIL, /* no pathkeys */ + baserel->lateral_relids, + NULL, /* no extra plan */ + NIL, /* no fdw_restrictinfo list */ + NIL); /* no fdw_private data */ +#else + foreignPath = (Path *) create_foreignscan_path(root, baserel, + NULL, /* default pathtarget */ + baserel->rows, + startupCost, + totalCost, + NIL, /* no pathkeys */ + baserel->lateral_relids, + NULL, /* no extra plan */ + NIL); /* no fdw_private list */ +#endif - columnList = (List *) lsecond(foreignPrivateList); - columnMappingHash = ColumnMappingHash(foreignTableId, columnList); + /* Add foreign path as the only possible path */ + add_path(baserel, foreignPath); - namespaceName = makeStringInfo(); - appendStringInfo(namespaceName, "%s.%s", mongoFdwOptions->databaseName, - mongoFdwOptions->collectionName); - - /* create cursor for collection name and set query */ - mongoCursor = mongo_cursor_create(); - mongo_cursor_init(mongoCursor, mongoConnection, namespaceName->data); - mongo_cursor_set_query(mongoCursor, queryDocument); - - /* create and set foreign execution state */ - executionState = (MongoFdwExecState *) palloc0(sizeof(MongoFdwExecState)); - executionState->columnMappingHash = columnMappingHash; - executionState->mongoConnection = mongoConnection; - executionState->mongoCursor = mongoCursor; - executionState->queryDocument = queryDocument; - - scanState->fdw_state = (void *) executionState; + /* Add paths with pathkeys */ +#if PG_VERSION_NUM >= 170000 + mongo_add_paths_with_pathkeys(root, baserel, NULL, startupCost, totalCost, + NIL); +#else + mongo_add_paths_with_pathkeys(root, baserel, NULL, 
startupCost, totalCost); +#endif } - /* - * MongoIterateForeignScan reads the next document from MongoDB, converts it to - * a PostgreSQL tuple, and stores the converted tuple into the ScanTupleSlot as - * a virtual tuple. + * mongoGetForeignPlan + * Creates a foreign scan plan node for scanning the MongoDB collection. + * + * Note that MongoDB may decide to use an underlying index for this + * scan, but that decision isn't deterministic or visible to us. */ -static TupleTableSlot * -MongoIterateForeignScan(ForeignScanState *scanState) +static ForeignScan * +mongoGetForeignPlan(PlannerInfo *root, + RelOptInfo *foreignrel, + Oid foreigntableid, + ForeignPath *best_path, + List *targetList, + List *restrictionClauses, + Plan *outer_plan) { - MongoFdwExecState *executionState = (MongoFdwExecState *) scanState->fdw_state; - TupleTableSlot *tupleSlot = scanState->ss.ss_ScanTupleSlot; - mongo_cursor *mongoCursor = executionState->mongoCursor; - HTAB *columnMappingHash = executionState->columnMappingHash; - int32 cursorStatus = MONGO_ERROR; - - TupleDesc tupleDescriptor = tupleSlot->tts_tupleDescriptor; - Datum *columnValues = tupleSlot->tts_values; - bool *columnNulls = tupleSlot->tts_isnull; - int32 columnCount = tupleDescriptor->natts; + MongoFdwRelationInfo *fpinfo = (MongoFdwRelationInfo *) foreignrel->fdw_private; + Index scan_relid = foreignrel->relid; + ForeignScan *foreignScan; + List *fdw_private; + List *columnList; + List *scan_var_list; + ListCell *lc; + List *local_exprs = NIL; + List *remote_exprs = NIL; + List *fdw_scan_tlist = NIL; + List *column_name_list = NIL; + List *is_inner_column_list = NIL; + List *quals = NIL; + MongoFdwRelType mongofdwreltype; + MongoRelQualInfo *qual_info; + MongoFdwRelationInfo *ofpinfo = NULL; + List *pathKeyList = NIL; + List *isAscSortList = NIL; + bool has_final_sort = false; + bool has_limit = false; + int64 limit_value; + int64 offset_value; /* - * We execute the protocol to load a virtual tuple into a slot. We first - * call ExecClearTuple, then fill in values / isnull arrays, and last call - * ExecStoreVirtualTuple. If we are done fetching documents from Mongo, we - * just return an empty slot as required. + * Get FDW private data created by mongoGetForeignUpperPaths(), if any. */ - ExecClearTuple(tupleSlot); - - /* initialize all values for this row to null */ - memset(columnValues, 0, columnCount * sizeof(Datum)); - memset(columnNulls, true, columnCount * sizeof(bool)); - - cursorStatus = mongo_cursor_next(mongoCursor); - if (cursorStatus == MONGO_OK) + if (best_path->fdw_private) { - const bson *bsonDocument = mongo_cursor_bson(mongoCursor); - const char *bsonDocumentKey = NULL; /* top level document */ - - FillTupleSlot(bsonDocument, bsonDocumentKey, - columnMappingHash, columnValues, columnNulls); - - ExecStoreVirtualTuple(tupleSlot); + has_final_sort = intVal(list_nth(best_path->fdw_private, + FdwPathPrivateHasFinalSort)); + has_limit = intVal(list_nth(best_path->fdw_private, + FdwPathPrivateHasLimit)); } + + /* Set scan relation id */ + if (IS_SIMPLE_REL(foreignrel)) + scan_relid = foreignrel->relid; else { - /* - * The following is a courtesy check. In practice when Mongo shuts down, - * mongo_cursor_next() could possibly crash. This function first frees - * cursor->reply, and then references reply in mongo_cursor_destroy(). - */ - mongo_cursor_error_t errorCode = mongoCursor->err; - if (errorCode != MONGO_CURSOR_EXHAUSTED) - { - MongoFreeScanState(executionState); + /* Join/Upper relation - set scan_relid to 0. 
*/ + scan_relid = 0; - ereport(ERROR, (errmsg("could not iterate over mongo collection"), - errhint("Mongo driver cursor error code: %d", errorCode))); - } - } + Assert(!restrictionClauses); - return tupleSlot; -} + /* Extract local expressions from local conditions */ + foreach(lc, fpinfo->local_conds) + { + RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc); + Assert(IsA(rinfo, RestrictInfo)); + local_exprs = lappend(local_exprs, rinfo->clause); + } -/* - * MongoEndForeignScan finishes scanning the foreign table, closes the cursor - * and the connection to MongoDB, and reclaims scan related resources. - */ -static void -MongoEndForeignScan(ForeignScanState *scanState) -{ - MongoFdwExecState *executionState = (MongoFdwExecState *) scanState->fdw_state; + /* Extract remote expressions from remote conditions */ + foreach(lc, fpinfo->remote_conds) + { + RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc); - /* if we executed a query, reclaim mongo related resources */ - if (executionState != NULL) - { - MongoFreeScanState(executionState); + Assert(IsA(rinfo, RestrictInfo)); + remote_exprs = lappend(remote_exprs, rinfo->clause); + } } -} - - -/* - * MongoReScanForeignScan rescans the foreign table. Note that rescans in Mongo - * end up being notably more expensive than what the planner expects them to be, - * since MongoDB cursors don't provide reset/rewind functionality. - */ -static void -MongoReScanForeignScan(ForeignScanState *scanState) -{ - MongoFdwExecState *executionState = (MongoFdwExecState *) scanState->fdw_state; - mongo *mongoConnection = executionState->mongoConnection; - MongoFdwOptions *mongoFdwOptions = NULL; - mongo_cursor *mongoCursor = NULL; - StringInfo namespaceName = NULL; - Oid foreignTableId = InvalidOid; - /* close down the old cursor */ - mongo_cursor_destroy(executionState->mongoCursor); - mongo_cursor_dispose(executionState->mongoCursor); - - /* reconstruct full collection name */ - foreignTableId = RelationGetRelid(scanState->ss.ss_currentRelation); - mongoFdwOptions = MongoGetOptions(foreignTableId); - - namespaceName = makeStringInfo(); - appendStringInfo(namespaceName, "%s.%s", mongoFdwOptions->databaseName, - mongoFdwOptions->collectionName); + if (IS_UPPER_REL(foreignrel)) + scan_var_list = pull_var_clause((Node *) fpinfo->grouped_tlist, + PVC_RECURSE_AGGREGATES); + else + scan_var_list = pull_var_clause((Node *) foreignrel->reltarget->exprs, + PVC_RECURSE_PLACEHOLDERS); - /* reconstruct cursor for collection name and set query */ - mongoCursor = mongo_cursor_create(); - mongo_cursor_init(mongoCursor, mongoConnection, namespaceName->data); - mongo_cursor_set_query(mongoCursor, executionState->queryDocument); + /* System attributes are not allowed. */ + foreach(lc, scan_var_list) + { + Var *var = lfirst(lc); + const FormData_pg_attribute *attr; - executionState->mongoCursor = mongoCursor; -} + Assert(IsA(var, Var)); + if (var->varattno >= 0) + continue; -/* - * SerializeDocument serializes the document's data to a constant, as advised in - * foreign/fdwapi.h. Note that this function shallow-copies the document's data; - * and the caller should therefore not free it. - */ -static Const * -SerializeDocument(bson *document) -{ - Const *serializedDocument = NULL; - Datum documentDatum = 0; + attr = SystemAttributeDefinition(var->varattno); + ereport(ERROR, + (errcode(ERRCODE_FDW_COLUMN_NAME_NOT_FOUND), + errmsg("system attribute \"%s\" can't be fetched from remote relation", + attr->attname.data))); + } /* - * We access document data and wrap a datum around it. 
Note that even when - * we have an empty document, the document size can't be zero according to - * bson apis. + * Separate the restrictionClauses into those that can be executed + * remotely and those that can't. baserestrictinfo clauses that were + * previously determined to be safe or unsafe are shown in + * fpinfo->remote_conds and fpinfo->local_conds. Anything else in the + * restrictionClauses list will be a join clause, which we have to check + * for remote-safety. Only the OpExpr clauses are sent to the remote + * server. */ - const char *documentData = bson_data(document); - int32 documentSize = bson_buffer_size(document); - Assert(documentSize != 0); - - documentDatum = CStringGetDatum(documentData); - serializedDocument = makeConst(CSTRINGOID, -1, InvalidOid, documentSize, - documentDatum, false, false); - - return serializedDocument; -} - + foreach(lc, restrictionClauses) + { + RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc); -/* - * DeserializeDocument deserializes the constant to a bson document. For this, - * the function creates a document, and explicitly sets the document's data. - */ -static bson * -DeserializeDocument(Const *constant) -{ - bson *document = NULL; - Datum documentDatum = constant->constvalue; - char *documentData = DatumGetCString(documentDatum); + Assert(IsA(rinfo, RestrictInfo)); - Assert(constant->constlen > 0); - Assert(constant->constisnull == false); + /* Ignore pseudoconstants, they are dealt with elsewhere */ + if (rinfo->pseudoconstant) + continue; - document = bson_create(); - bson_init_size(document, 0); - bson_init_finished_data(document, documentData); + if (list_member_ptr(fpinfo->remote_conds, rinfo)) + remote_exprs = lappend(remote_exprs, rinfo->clause); + else if (list_member_ptr(fpinfo->local_conds, rinfo)) + local_exprs = lappend(local_exprs, rinfo->clause); + else if (IsA(rinfo->clause, OpExpr) && + mongo_is_foreign_expr(root, foreignrel, rinfo->clause, false)) + remote_exprs = lappend(remote_exprs, rinfo->clause); + else + local_exprs = lappend(local_exprs, rinfo->clause); + } - return document; -} + /* Add local expression Var nodes to scan_var_list. */ + scan_var_list = list_concat_unique(NIL, scan_var_list); + if (IS_UPPER_REL(foreignrel)) + scan_var_list = list_concat_unique(scan_var_list, + pull_var_clause((Node *) local_exprs, + PVC_RECURSE_AGGREGATES)); + else + scan_var_list = list_concat_unique(scan_var_list, + pull_var_clause((Node *) local_exprs, + PVC_RECURSE_PLACEHOLDERS)); + if (IS_JOIN_REL(foreignrel)) + { + /* + * For join relations, the planner needs a targetlist, which + * represents the output of the ForeignScan node. + */ + fdw_scan_tlist = add_to_flat_tlist(NIL, scan_var_list); -/* - * ForeignTableDocumentCount connects to the MongoDB server, and queries it for - * the number of documents in the foreign collection. On success, the function - * returns the document count. On failure, the function returns -1.0. - */ -static double -ForeignTableDocumentCount(Oid foreignTableId) -{ - MongoFdwOptions *options = NULL; - mongo *mongoConnection = NULL; - const bson *emptyQuery = NULL; - int32 status = MONGO_ERROR; - double documentCount = 0.0; + /* + * Ensure that the outer plan produces a tuple whose descriptor + * matches our scan tuple slot. Also, remove the local conditions + * from the outer plan's quals, lest they be evaluated twice, once by + * the local plan and once by the scan. + */ + if (outer_plan) + { + /* + * First, update the plan's qual list if possible. 
In some cases, + * the quals might be enforced below the topmost plan level, in + * which case we'll fail to remove them; it's not worth working + * harder than this. + */ + foreach(lc, local_exprs) + { + Node *qual = lfirst(lc); - /* resolve foreign table options; and connect to mongo server */ - options = MongoGetOptions(foreignTableId); + outer_plan->qual = list_delete(outer_plan->qual, qual); - mongoConnection = mongo_create(); - mongo_init(mongoConnection); + /* + * For an inner join, the local conditions of the foreign scan + * plan can be part of the joinquals as well. (They might + * also be in the mergequals or hashquals, but we can't touch + * those without breaking the plan.) + */ + if (IsA(outer_plan, NestLoop) || + IsA(outer_plan, MergeJoin) || + IsA(outer_plan, HashJoin)) + { + Join *join_plan = (Join *) outer_plan; + + if (join_plan->jointype == JOIN_INNER) + join_plan->joinqual = list_delete(join_plan->joinqual, + qual); + } + } - status = mongo_connect(mongoConnection, options->addressName, options->portNumber); - if (status == MONGO_OK) - { - documentCount = mongo_count(mongoConnection, options->databaseName, - options->collectionName, emptyQuery); + /* + * Now fix the subplan's tlist --- this might result in inserting + * a Result node atop the plan tree. + */ + outer_plan = change_plan_targetlist(outer_plan, fdw_scan_tlist, + best_path->path.parallel_safe); + } } - else + else if (IS_UPPER_REL(foreignrel)) { - documentCount = -1.0; + /* + * scan_var_list should have expressions and not TargetEntry nodes. + * However, grouped_tlist created has TLEs, and thus retrieve them + * into scan_var_list. + */ + scan_var_list = list_concat_unique(NIL, + get_tlist_exprs(fpinfo->grouped_tlist, + false)); + + /* + * The targetlist computed while assessing push-down safety represents + * the result we expect from the foreign server. + */ + fdw_scan_tlist = fpinfo->grouped_tlist; + local_exprs = extract_actual_clauses(fpinfo->local_conds, false); } - mongo_destroy(mongoConnection); - mongo_dispose(mongoConnection); + /* Form column list required for query execution from scan_var_list. */ + columnList = mongo_get_column_list(root, foreignrel, scan_var_list, + &column_name_list, + &is_inner_column_list); - return documentCount; -} + /* + * Identify the relation type. We can have a simple base rel, join rel, + * upper rel, and upper rel with join rel inside. Find out that. + */ + if (IS_UPPER_REL(foreignrel) && IS_JOIN_REL(fpinfo->outerrel)) + mongofdwreltype = UPPER_JOIN_REL; + else if (IS_UPPER_REL(foreignrel)) + mongofdwreltype = UPPER_REL; + else if (IS_JOIN_REL(foreignrel)) + mongofdwreltype = JOIN_REL; + else + mongofdwreltype = BASE_REL; -/* - * MongoGetOptions returns the option values to be used when connecting to and - * querying MongoDB. To resolve these values, the function checks the foreign - * table's options, and if not present, falls back to default values. - */ -static MongoFdwOptions * -MongoGetOptions(Oid foreignTableId) -{ - MongoFdwOptions *mongoFdwOptions = NULL; - char *addressName = NULL; - char *portName = NULL; - int32 portNumber = 0; - char *databaseName = NULL; - char *collectionName = NULL; + /* + * We use MongoRelQualInfo to pass various information related to joining + * quals and grouping target to fdw_private which is used to form + * equivalent MongoDB query during the execution phase. 
+ */ + qual_info = (MongoRelQualInfo *) palloc(sizeof(MongoRelQualInfo)); + + qual_info->root = root; + qual_info->foreignRel = foreignrel; + qual_info->exprColHash = NULL; + qual_info->colNameList = NIL; + qual_info->colNumList = NIL; + qual_info->rtiList = NIL; + qual_info->isOuterList = NIL; + qual_info->is_having = false; + qual_info->is_agg_column = false; + qual_info->aggTypeList = NIL; + qual_info->aggColList = NIL; + qual_info->isHavingList = NIL; - addressName = MongoGetOptionValue(foreignTableId, OPTION_NAME_ADDRESS); - if (addressName == NULL) + /* + * Prepare separate lists of information. This information would be + * useful at the time of execution to prepare the MongoDB query. + */ + if (IS_JOIN_REL(foreignrel) || IS_UPPER_REL(foreignrel)) { - addressName = pstrdup(DEFAULT_IP_ADDRESS); - } + ofpinfo = (MongoFdwRelationInfo *) fpinfo->outerrel->fdw_private; - portName = MongoGetOptionValue(foreignTableId, OPTION_NAME_PORT); - if (portName == NULL) - { - portNumber = DEFAULT_PORT_NUMBER; + /* + * Save foreign relation and relid's of an outer relation involved in + * the join depending on the relation type. + */ + if (mongofdwreltype == UPPER_JOIN_REL) + { + /* For aggregation over join relation */ + qual_info->foreignRel = fpinfo->outerrel; + qual_info->outerRelids = ofpinfo->outerrel->relids; + } + else if (mongofdwreltype == UPPER_REL) + { + /* For aggregation relation */ + qual_info->foreignRel = fpinfo->outerrel; + qual_info->outerRelids = fpinfo->outerrel->relids; + } + else + { + Assert(mongofdwreltype == JOIN_REL); + qual_info->foreignRel = foreignrel; + qual_info->outerRelids = fpinfo->outerrel->relids; + } + + /* + * Extract required data of columns involved in join clauses and + * append it into the various lists required to pass it to the + * executor. + * + * Check and extract data for outer relation and its join clauses in + * case of aggregation on top of the join operation. + */ + if (IS_JOIN_REL(foreignrel) && fpinfo->joinclauses) + mongo_prepare_qual_info(fpinfo->joinclauses, qual_info); + else if (IS_JOIN_REL(fpinfo->outerrel) && ofpinfo->joinclauses) + mongo_prepare_qual_info(ofpinfo->joinclauses, qual_info); + + /* + * Extract required data of columns involved in the WHERE clause and + * append it into the various lists required to pass it to the + * executor. + */ + if (IS_JOIN_REL(foreignrel) && fpinfo->remote_conds) + mongo_prepare_qual_info(fpinfo->remote_conds, qual_info); + + /* Gather required information of an upper relation */ + if (IS_UPPER_REL(foreignrel)) + { + /* Extract remote expressions from the remote conditions */ + foreach(lc, ofpinfo->remote_conds) + { + RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc); + + Assert(IsA(rinfo, RestrictInfo)); + quals = lappend(quals, rinfo->clause); + } + + /* Extract WHERE clause column information */ + mongo_prepare_qual_info(quals, qual_info); + + /* + * Extract grouping target information i.e grouping operation and + * grouping clause. + */ + mongo_prepare_qual_info(scan_var_list, qual_info); + + /* Extract HAVING clause information */ + if (fpinfo->remote_conds) + { + qual_info->is_having = true; + mongo_prepare_qual_info(fpinfo->remote_conds, qual_info); + } + } + else + quals = remote_exprs; } else { - portNumber = pg_atoi(portName, sizeof(int32), 0); + quals = remote_exprs; + + /* For baserel */ + qual_info->foreignRel = foreignrel; + qual_info->outerRelids = NULL; + + /* + * Extract required data of columns involved in WHERE clause of the + * simple relation. 
+ */ + mongo_prepare_qual_info(quals, qual_info); } - databaseName = MongoGetOptionValue(foreignTableId, OPTION_NAME_DATABASE); - if (databaseName == NULL) + /* + * Check the ORDER BY clause, and if we found any useful pathkeys, then + * store the required information. + */ + foreach(lc, best_path->path.pathkeys) { - databaseName = pstrdup(DEFAULT_DATABASE_NAME); + EquivalenceMember *em; + PathKey *pathkey = lfirst(lc); + Expr *em_expr; + + if (has_final_sort) + { + /* + * By construction, foreignrel is the input relation to the final + * sort. + */ + em = mongo_find_em_for_rel_target(root, pathkey->pk_eclass, + foreignrel); + } + else + em = mongo_find_em_for_rel(root, pathkey->pk_eclass, + qual_info->foreignRel); + + /* + * We don't expect any error here; it would mean that shippability + * wasn't verified earlier. For the same reason, we don't recheck + * shippability of the sort operator. + */ + if (em == NULL) + elog(ERROR, "could not find pathkey item to sort"); + + /* Ignore binary-compatible relabeling */ + em_expr = em->em_expr; + while (IsA(em_expr, RelabelType)) + em_expr = ((RelabelType *) em_expr)->arg; + + Assert(IsA(em_expr, Var)); + pathKeyList = list_append_unique(pathKeyList, (Var *) em_expr); + + if (pathkey->pk_strategy == BTLessStrategyNumber) + isAscSortList = lappend_int(isAscSortList, 1); + else + isAscSortList = lappend_int(isAscSortList, -1); } - collectionName = MongoGetOptionValue(foreignTableId, OPTION_NAME_COLLECTION); - if (collectionName == NULL) + /* Extract the required data of columns involved in the ORDER BY clause */ + mongo_prepare_qual_info(pathKeyList, qual_info); + + /* Destroy hash table used to get unique column info */ + hash_destroy(qual_info->exprColHash); + + /* + * Retrieve limit and offset values, which needs to be passed to the + * executor. If any of the two clauses (limit or offset) is missing from + * the query, then default value -1 is used to indicate the same. + */ + limit_value = offset_value = -1; + if (has_limit) { - collectionName = get_rel_name(foreignTableId); + Node *node; + + node = root->parse->limitCount; + if (node) + { + Assert(nodeTag(node) == T_Const && + ((Const *) node)->consttype == INT8OID); + + /* Treat NULL as no limit */ + if (!((Const *) node)->constisnull) + limit_value = DatumGetInt64(((Const *) node)->constvalue); + } + + node = root->parse->limitOffset; + if (node) + { + Assert(nodeTag(node) == T_Const && + ((Const *) node)->consttype == INT8OID); + + /* Treat NULL as no offset */ + if (!((Const *) node)->constisnull) + offset_value = DatumGetInt64(((Const *) node)->constvalue); + } } - mongoFdwOptions = (MongoFdwOptions *) palloc0(sizeof(MongoFdwOptions)); - mongoFdwOptions->addressName = addressName; - mongoFdwOptions->portNumber = portNumber; - mongoFdwOptions->databaseName = databaseName; - mongoFdwOptions->collectionName = collectionName; + /* + * Unlike postgres_fdw, remote query formation is done in the execution + * state. There is NO way to get the correct information required to form + * a remote query during the execution state. So, we are gathering + * information required to form a MongoDB query in the planning state and + * passing it to the execution state through fdw_private. + */ - return mongoFdwOptions; -} + /* + * Build the fdw_private list that will be available to the executor. + * Items in the list must match enum mongoFdwScanPrivateIndex. 
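+	 * (As appended below: the column list, the remote quals and the
+	 * relation type come first, followed by the qual column information,
+	 * the pathkey/sort lists and the LIMIT/OFFSET values, with extra
+	 * join/aggregation items added for join and upper relations.)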
+ */ + fdw_private = list_make2(columnList, quals); + + /* Append relation type */ + fdw_private = lappend(fdw_private, makeInteger(mongofdwreltype)); + + fdw_private = lappend(fdw_private, qual_info->colNameList); + fdw_private = lappend(fdw_private, qual_info->colNumList); + fdw_private = lappend(fdw_private, qual_info->rtiList); + fdw_private = lappend(fdw_private, qual_info->isOuterList); + fdw_private = lappend(fdw_private, pathKeyList); + fdw_private = lappend(fdw_private, isAscSortList); + fdw_private = lappend(fdw_private, makeInteger(has_limit)); + fdw_private = lappend(fdw_private, makeInteger(limit_value)); + fdw_private = lappend(fdw_private, makeInteger(offset_value)); + + if (IS_JOIN_REL(foreignrel) || IS_UPPER_REL(foreignrel)) + { + MongoFdwRelationInfo *tfpinfo = NULL; + + fdw_private = lappend(fdw_private, qual_info->aggTypeList); + fdw_private = lappend(fdw_private, qual_info->aggColList); + fdw_private = lappend(fdw_private, ofpinfo->groupbyColList); + fdw_private = lappend(fdw_private, remote_exprs); + fdw_private = lappend(fdw_private, qual_info->isHavingList); + fdw_private = lappend(fdw_private, + makeString(fpinfo->relation_name->data)); + fdw_private = lappend(fdw_private, column_name_list); + fdw_private = lappend(fdw_private, is_inner_column_list); + + if (mongofdwreltype == JOIN_REL) + tfpinfo = fpinfo; + else if (mongofdwreltype == UPPER_JOIN_REL) + tfpinfo = ofpinfo; + + if (tfpinfo) + { + fdw_private = lappend(fdw_private, + list_make2(makeString(tfpinfo->inner_relname), + makeString(tfpinfo->outer_relname))); + fdw_private = lappend(fdw_private, tfpinfo->joinclauses); + fdw_private = lappend(fdw_private, makeInteger(tfpinfo->jointype)); + } + } + + /* Create the foreign scan node */ + foreignScan = make_foreignscan(targetList, local_exprs, + scan_relid, + NIL, /* No expressions to evaluate */ + fdw_private + ,fdw_scan_tlist + ,NIL + ,outer_plan + ); + return foreignScan; +} /* - * MongoGetOptionValue walks over foreign table and foreign server options, and - * looks for the option with the given name. If found, the function returns the - * option's value. + * mongoExplainForeignScan + * Produces extra output for the Explain command. 
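+ *
+ * For a scan of a single foreign table this adds the fully qualified
+ * collection name to the plan output, e.g. (hypothetical names):
+ *
+ *     Foreign Namespace: mydb.mycollection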
*/ -static char * -MongoGetOptionValue(Oid foreignTableId, const char *optionName) +static void +mongoExplainForeignScan(ForeignScanState *node, ExplainState *es) { - ForeignTable *foreignTable = NULL; - ForeignServer *foreignServer = NULL; - List *optionList = NIL; - ListCell *optionCell = NULL; - char *optionValue = NULL; - - foreignTable = GetForeignTable(foreignTableId); - foreignServer = GetForeignServer(foreignTable->serverid); - - optionList = list_concat(optionList, foreignTable->options); - optionList = list_concat(optionList, foreignServer->options); + ForeignScan *fsplan = (ForeignScan *) node->ss.ps.plan; + RangeTblEntry *rte; + EState *estate = node->ss.ps.state; + List *fdw_private = fsplan->fdw_private; + int rtindex; + + if (fsplan->scan.scanrelid > 0) + rtindex = fsplan->scan.scanrelid; + else +#if PG_VERSION_NUM >= 160000 + rtindex = bms_next_member(fsplan->fs_base_relids, -1); +#else + rtindex = bms_next_member(fsplan->fs_relids, -1); +#endif + rte = rt_fetch(rtindex, estate->es_range_table); - foreach(optionCell, optionList) + if (list_length(fdw_private) > mongoFdwPrivateRelations) { - DefElem *optionDef = (DefElem *) lfirst(optionCell); - char *optionDefName = optionDef->defname; + char *relations = strVal(list_nth(fdw_private, + mongoFdwPrivateRelations)); - if (strncmp(optionDefName, optionName, NAMEDATALEN) == 0) - { - optionValue = defGetString(optionDef); - break; - } + ExplainPropertyText("Foreign Namespace", relations, es); } + else + { + StringInfo namespaceName; + MongoFdwOptions *options; - return optionValue; + options = mongo_get_options(rte->relid); + + /* Construct fully qualified collection name */ + namespaceName = makeStringInfo(); + appendStringInfo(namespaceName, "%s.%s", options->svr_database, + options->collectionName); + ExplainPropertyText("Foreign Namespace", namespaceName->data, es); + mongo_free_options(options); + } } +static void +mongoExplainForeignModify(ModifyTableState *mtstate, + ResultRelInfo *rinfo, + List *fdw_private, + int subplan_index, + ExplainState *es) +{ + MongoFdwOptions *options; + StringInfo namespaceName; + Oid foreignTableId; + + foreignTableId = RelationGetRelid(rinfo->ri_RelationDesc); + options = mongo_get_options(foreignTableId); + + /* Construct fully qualified collection name */ + namespaceName = makeStringInfo(); + appendStringInfo(namespaceName, "%s.%s", options->svr_database, + options->collectionName); + + mongo_free_options(options); + ExplainPropertyText("Foreign Namespace", namespaceName->data, es); +} /* - * ColumnMappingHash creates a hash table that maps column names to column index - * and types. This table helps us quickly translate BSON document key/values to - * the corresponding PostgreSQL columns. + * mongoBeginForeignScan + * Connects to the MongoDB server, and opens a cursor that uses the + * database name, collection name, and the remote query to send to the + * server. + * + * The function also creates a hash table that maps referenced + * column names to column index and type information. 
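+ *
+ * For illustration only (names are placeholders), a query such as
+ *
+ *     SELECT name FROM mongo_table WHERE age > 21;
+ *
+ * leads this function to look up the user mapping, obtain (or reuse) a
+ * connection to the configured MongoDB server, and build the column
+ * mapping used later to translate returned BSON documents into
+ * PostgreSQL tuples.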
*/ -static HTAB * -ColumnMappingHash(Oid foreignTableId, List *columnList) +static void +mongoBeginForeignScan(ForeignScanState *node, int eflags) { - ListCell *columnCell = NULL; - const long hashTableSize = 2048; - HTAB *columnMappingHash = NULL; + MONGO_CONN *mongoConnection; + List *columnList; + HTAB *columnMappingHash; + MongoFdwOptions *options; + MongoFdwModifyState *fmstate; + RangeTblEntry *rte; + EState *estate = node->ss.ps.state; + ForeignScan *fsplan = (ForeignScan *) node->ss.ps.plan; + List *fdw_private = fsplan->fdw_private; + Oid userid; + ForeignServer *server; + UserMapping *user; + ForeignTable *table; + int rtindex; + List *colNameList = NIL; + List *colIsInnerList = NIL; + + /* If Explain with no Analyze, do nothing */ + if (eflags & EXEC_FLAG_EXPLAIN_ONLY) + return; - /* create hash table */ - HASHCTL hashInfo; - memset(&hashInfo, 0, sizeof(hashInfo)); - hashInfo.keysize = NAMEDATALEN; - hashInfo.entrysize = sizeof(ColumnMapping); - hashInfo.hash = string_hash; - hashInfo.hcxt = CurrentMemoryContext; + fmstate = (MongoFdwModifyState *) palloc0(sizeof(MongoFdwModifyState)); - columnMappingHash = hash_create("Column Mapping Hash", hashTableSize, &hashInfo, - (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT)); - Assert(columnMappingHash != NULL); + /* + * Identify which user to do the remote access as. This should match what + * ExecCheckRTEPerms() does. In the case of a join, use the + * lowest-numbered member RTE as a representative; we would get the same + * result from any. + */ + if (fsplan->scan.scanrelid > 0) + rtindex = fsplan->scan.scanrelid; + else +#if PG_VERSION_NUM >= 160000 + rtindex = bms_next_member(fsplan->fs_base_relids, -1); +#else + rtindex = bms_next_member(fsplan->fs_relids, -1); +#endif - foreach(columnCell, columnList) - { - Var *column = (Var *) lfirst(columnCell); - AttrNumber columnId = column->varattno; +#if PG_VERSION_NUM >= 160000 + rte = exec_rt_fetch(rtindex, estate); + userid = fsplan->checkAsUser ? fsplan->checkAsUser : GetUserId(); +#else + rte = rt_fetch(rtindex, estate->es_range_table); + userid = rte->checkAsUser ? rte->checkAsUser : GetUserId(); +#endif - ColumnMapping *columnMapping = NULL; - char *columnName = NULL; - bool handleFound = false; - void *hashKey = NULL; + /* Get info about foreign table. */ + fmstate->rel = node->ss.ss_currentRelation; + table = GetForeignTable(rte->relid); + server = GetForeignServer(table->serverid); + user = GetUserMapping(userid, server->serverid); - columnName = get_relid_attribute_name(foreignTableId, columnId); - hashKey = (void *) columnName; + options = mongo_get_options(rte->relid); - columnMapping = (ColumnMapping *) hash_search(columnMappingHash, hashKey, - HASH_ENTER, &handleFound); - Assert(columnMapping != NULL); + /* + * Get connection to the foreign server. Connection manager will + * establish new connection if necessary. 
+ */ + mongoConnection = mongo_get_connection(server, user, options); - columnMapping->columnIndex = columnId - 1; - columnMapping->columnTypeId = column->vartype; - columnMapping->columnTypeMod = column->vartypmod; - columnMapping->columnArrayTypeId = get_element_type(column->vartype); + columnList = list_nth(fdw_private, mongoFdwPrivateColumnList); + fmstate->relType = intVal(list_nth(fdw_private, mongoFdwPrivateRelType)); + + if (fmstate->relType == JOIN_REL || fmstate->relType == UPPER_JOIN_REL) + { + colNameList = list_nth(fdw_private, mongoFdwPrivateColNameList); + colIsInnerList = list_nth(fdw_private, mongoFdwPrivateColIsInnerList); } - return columnMappingHash; -} + columnMappingHash = column_mapping_hash(rte->relid, columnList, + colNameList, colIsInnerList, + fmstate->relType); + /* Create and set foreign execution state */ + fmstate->columnMappingHash = columnMappingHash; + fmstate->mongoConnection = mongoConnection; + fmstate->options = options; + + node->fdw_state = (void *) fmstate; +} /* - * FillTupleSlot walks over all key/value pairs in the given document. For each - * pair, the function checks if the key appears in the column mapping hash, and - * if the value type is compatible with the one specified for the column. If so, - * the function converts the value and fills the corresponding tuple position. - * The bsonDocumentKey parameter is used for recursion, and should always be - * passed as NULL. + * mongoIterateForeignScan + * Opens a Mongo cursor that uses the database name, collection name, and + * the remote query to send to the server. + * + * Reads the next document from MongoDB, converts it to a PostgreSQL tuple, + * and stores the converted tuple into the ScanTupleSlot as a virtual tuple. */ -static void -FillTupleSlot(const bson *bsonDocument, const char *bsonDocumentKey, - HTAB *columnMappingHash, Datum *columnValues, bool *columnNulls) +static TupleTableSlot * +mongoIterateForeignScan(ForeignScanState *node) { - bson_iterator bsonIterator = { NULL, 0 }; - bson_iterator_init(&bsonIterator, bsonDocument); - - while (bson_iterator_next(&bsonIterator)) + MongoFdwModifyState *fmstate = (MongoFdwModifyState *) node->fdw_state; + TupleTableSlot *tupleSlot = node->ss.ss_ScanTupleSlot; + MONGO_CURSOR *mongoCursor = fmstate->mongoCursor; + HTAB *columnMappingHash = fmstate->columnMappingHash; + TupleDesc tupleDescriptor = tupleSlot->tts_tupleDescriptor; + Datum *columnValues = tupleSlot->tts_values; + bool *columnNulls = tupleSlot->tts_isnull; + int32 columnCount = tupleDescriptor->natts; + + /* Create cursor for collection name and set query */ + if (mongoCursor == NULL) { - const char *bsonKey = bson_iterator_key(&bsonIterator); - bson_type bsonType = bson_iterator_type(&bsonIterator); + BSON *queryDocument; + char *collectionName; - ColumnMapping *columnMapping = NULL; - Oid columnTypeId = InvalidOid; - Oid columnArrayTypeId = InvalidOid; - bool compatibleTypes = false; - bool handleFound = false; - const char *bsonFullKey = NULL; - void *hashKey = NULL; + /* + * We construct the query document to have MongoDB filter its rows. We + * could also construct a column name document here to retrieve only + * the needed columns. However, we found this optimization to degrade + * performance on the MongoDB server-side, so we instead filter out + * columns on our side. + */ + queryDocument = mongo_query_document(node); - if (bsonDocumentKey != NULL) - { - /* - * For fields in nested BSON objects, we use fully qualified field - * name to check the column mapping. 
- */ - StringInfo bsonFullKeyString = makeStringInfo(); - appendStringInfo(bsonFullKeyString, "%s.%s", bsonDocumentKey, bsonKey); - bsonFullKey = bsonFullKeyString->data; - } + /* + * Decide input collection to the aggregation. In case of join, outer + * relation should be given as input collection to the aggregation. + */ + if (fmstate->relType == JOIN_REL || + fmstate->relType == UPPER_JOIN_REL) + collectionName = fmstate->outerRelName; else - { - bsonFullKey = bsonKey; - } - - /* recurse into nested objects */ - if (bsonType == BSON_OBJECT) - { - bson subObject; - bson_iterator_subobject(&bsonIterator, &subObject); - FillTupleSlot(&subObject, bsonFullKey, - columnMappingHash, columnValues, columnNulls); - continue; - } - - /* look up the corresponding column for this bson key */ - hashKey = (void *) bsonFullKey; - columnMapping = (ColumnMapping *) hash_search(columnMappingHash, hashKey, - HASH_FIND, &handleFound); + collectionName = fmstate->options->collectionName; - /* if no corresponding column or null bson value, continue */ - if (columnMapping == NULL || bsonType == BSON_NULL) - { - continue; - } + mongoCursor = mongoCursorCreate(fmstate->mongoConnection, + fmstate->options->svr_database, + collectionName, + queryDocument); - /* check if columns have compatible types */ - columnTypeId = columnMapping->columnTypeId; - columnArrayTypeId = columnMapping->columnArrayTypeId; + /* Save mongoCursor */ + fmstate->mongoCursor = mongoCursor; + } - if (OidIsValid(columnArrayTypeId) && bsonType == BSON_ARRAY) - { - compatibleTypes = true; - } - else - { - compatibleTypes = ColumnTypesCompatible(bsonType, columnTypeId); - } + /* + * We execute the protocol to load a virtual tuple into a slot. We first + * call ExecClearTuple, then fill in values / isnull arrays, and last call + * ExecStoreVirtualTuple. If we are done fetching documents from Mongo, + * we just return an empty slot as required. + */ + ExecClearTuple(tupleSlot); - /* if types are incompatible, leave this column null */ - if (!compatibleTypes) - { - continue; - } + /* Initialize all values for this row to null */ + memset(columnValues, 0, columnCount * sizeof(Datum)); + memset(columnNulls, true, columnCount * sizeof(bool)); - /* fill in corresponding column value and null flag */ - if (OidIsValid(columnArrayTypeId)) - { - int32 columnIndex = columnMapping->columnIndex; + if (mongoCursorNext(mongoCursor, NULL)) + { + const BSON *bsonDocument = mongoCursorBson(mongoCursor); + const char *bsonDocumentKey = NULL; /* Top level document */ - columnValues[columnIndex] = ColumnValueArray(&bsonIterator, - columnArrayTypeId); - columnNulls[columnIndex] = false; - } - else - { - int32 columnIndex = columnMapping->columnIndex; - Oid columnTypeMod = columnMapping->columnTypeMod; + fill_tuple_slot(bsonDocument, bsonDocumentKey, columnMappingHash, + columnValues, columnNulls, fmstate->relType); - columnValues[columnIndex] = ColumnValue(&bsonIterator, - columnTypeId, columnTypeMod); - columnNulls[columnIndex] = false; - } + ExecStoreVirtualTuple(tupleSlot); } -} + return tupleSlot; +} /* - * ColumnTypesCompatible checks if the given BSON type can be converted to the - * given PostgreSQL type. In this check, the function also uses its knowledge of - * internal conversions applied by BSON APIs. + * mongoEndForeignScan + * Finishes scanning the foreign table, closes the cursor and the + * connection to MongoDB, and reclaims scan related resources. 
*/ -static bool -ColumnTypesCompatible(bson_type bsonType, Oid columnTypeId) +static void +mongoEndForeignScan(ForeignScanState *node) { - bool compatibleTypes = false; + MongoFdwModifyState *fmstate; - /* we consider the PostgreSQL column type as authoritative */ - switch(columnTypeId) + fmstate = (MongoFdwModifyState *) node->fdw_state; + /* If we executed a query, reclaim mongo related resources */ + if (fmstate != NULL) { - case INT2OID: case INT4OID: - case INT8OID: case FLOAT4OID: - case FLOAT8OID: case NUMERICOID: - { - if (bsonType == BSON_INT || bsonType == BSON_LONG || - bsonType == BSON_DOUBLE) - { - compatibleTypes = true; - } - break; - } - case BOOLOID: - { - if (bsonType == BSON_INT || bsonType == BSON_LONG || - bsonType == BSON_DOUBLE || bsonType == BSON_BOOL) - { - compatibleTypes = true; - } - break; - } - case BPCHAROID: - case VARCHAROID: - case TEXTOID: + if (fmstate->options) { - if (bsonType == BSON_STRING) - { - compatibleTypes = true; - } - break; - } - case NAMEOID: - { - /* - * We currently overload the NAMEOID type to represent the BSON - * object identifier. We can safely overload this 64-byte data type - * since it's reserved for internal use in PostgreSQL. - */ - if (bsonType == BSON_OID) - { - compatibleTypes = true; - } - break; - } - case DATEOID: - case TIMESTAMPOID: - case TIMESTAMPTZOID: - { - if (bsonType == BSON_DATE) - { - compatibleTypes = true; - } - break; - } - default: - { - /* - * We currently error out on other data types. Some types such as - * byte arrays are easy to add, but they need testing. Other types - * such as money or inet, do not have equivalents in MongoDB. - */ - ereport(ERROR, (errcode(ERRCODE_FDW_INVALID_DATA_TYPE), - errmsg("cannot convert bson type to column type"), - errhint("Column type: %u", (uint32) columnTypeId))); - break; + mongo_free_options(fmstate->options); + fmstate->options = NULL; } + mongo_free_scan_state(fmstate); } - - return compatibleTypes; } - /* - * ColumnValueArray uses array element type id to read the current array pointed - * to by the BSON iterator, and converts each array element (with matching type) - * to the corresponding PostgreSQL datum. Then, the function constructs an array - * datum from element datums, and returns the array datum. + * mongoReScanForeignScan + * Rescans the foreign table. + * + * Note that rescans in Mongo end up being notably more expensive than what the + * planner expects them to be, since MongoDB cursors don't provide reset/rewind + * functionality. 
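+ *
+ * A rescan is typically requested when the foreign scan sits on the
+ * inner side of a nested-loop join; the cursor is simply dropped here
+ * and re-created by the next call to mongoIterateForeignScan.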
*/ -static Datum -ColumnValueArray(bson_iterator *bsonIterator, Oid valueTypeId) +static void +mongoReScanForeignScan(ForeignScanState *node) { - Datum *columnValueArray = palloc0(INITIAL_ARRAY_CAPACITY * sizeof(Datum)); - uint32 arrayCapacity = INITIAL_ARRAY_CAPACITY; - uint32 arrayGrowthFactor = 2; - uint32 arrayIndex = 0; + MongoFdwModifyState *fmstate = (MongoFdwModifyState *) node->fdw_state; - ArrayType *columnValueObject = NULL; - Datum columnValueDatum = 0; - bool typeByValue = false; - char typeAlignment = 0; - int16 typeLength = 0; + /* Close down the old cursor */ + if (fmstate->mongoCursor) + { + mongoCursorDestroy(fmstate->mongoCursor); + fmstate->mongoCursor = NULL; + } +} - bson_iterator bsonSubIterator = { NULL, 0 }; - bson_iterator_subiterator(bsonIterator, &bsonSubIterator); +static List * +mongoPlanForeignModify(PlannerInfo *root, + ModifyTable *plan, + Index resultRelation, + int subplan_index) +{ + CmdType operation = plan->operation; + RangeTblEntry *rte = planner_rt_fetch(resultRelation, root); + Relation rel; + List *targetAttrs = NIL; - while (bson_iterator_next(&bsonSubIterator)) + /* + * Core code already has some lock on each rel being planned, so we can + * use NoLock here. + */ +#if PG_VERSION_NUM < 130000 + rel = heap_open(rte->relid, NoLock); +#else + rel = table_open(rte->relid, NoLock); +#endif + if (operation == CMD_INSERT) { - bson_type bsonType = bson_iterator_type(&bsonSubIterator); - bool compatibleTypes = false; + TupleDesc tupdesc = RelationGetDescr(rel); + int attnum; - compatibleTypes = ColumnTypesCompatible(bsonType, valueTypeId); - if (bsonType == BSON_NULL || !compatibleTypes) + for (attnum = 1; attnum <= tupdesc->natts; attnum++) { - continue; + Form_pg_attribute attr = TupleDescAttr(tupdesc, attnum - 1); + + if (!attr->attisdropped) + targetAttrs = lappend_int(targetAttrs, attnum); } + } + else if (operation == CMD_UPDATE) + { + Bitmapset *tmpset; +#if PG_VERSION_NUM >= 160000 + RTEPermissionInfo *perminfo; + int attidx; +#endif + AttrNumber col; + +#if PG_VERSION_NUM >= 160000 + perminfo = getRTEPermissionInfo(root->parse->rteperminfos, rte); + tmpset = bms_copy(perminfo->updatedCols); + attidx = -1; +#else + tmpset = bms_copy(rte->updatedCols); +#endif - if (arrayIndex >= arrayCapacity) +#if PG_VERSION_NUM >= 160000 + while ((attidx = bms_next_member(tmpset, attidx)) >= 0) +#else + while ((col = bms_first_member(tmpset)) >= 0) +#endif { - arrayCapacity *= arrayGrowthFactor; - columnValueArray = repalloc(columnValueArray, arrayCapacity * sizeof(Datum)); - } +#if PG_VERSION_NUM >= 160000 + col = attidx + FirstLowInvalidHeapAttributeNumber; +#else + col += FirstLowInvalidHeapAttributeNumber; +#endif + if (col <= InvalidAttrNumber) /* Shouldn't happen */ + elog(ERROR, "system-column update is not supported"); - /* use default type modifier (0) to convert column value */ - columnValueArray[arrayIndex] = ColumnValue(&bsonSubIterator, valueTypeId, 0); - arrayIndex++; + /* + * We also disallow updates to the first column which happens to + * be the row identifier in MongoDb (_id) + */ + if (col == 1) /* Shouldn't happen */ + elog(ERROR, "row identifier column update is not supported"); + + targetAttrs = lappend_int(targetAttrs, col); + } + /* We also want the rowid column to be available for the update */ + targetAttrs = lcons_int(1, targetAttrs); } + else + targetAttrs = lcons_int(1, targetAttrs); - get_typlenbyvalalign(valueTypeId, &typeLength, &typeByValue, &typeAlignment); - columnValueObject = construct_array(columnValueArray, arrayIndex, 
valueTypeId, - typeLength, typeByValue, typeAlignment); + /* + * RETURNING list not supported + */ + if (plan->returningLists) + elog(ERROR, "RETURNING is not supported by this FDW"); - columnValueDatum = PointerGetDatum(columnValueObject); - return columnValueDatum; -} +#if PG_VERSION_NUM < 130000 + heap_close(rel, NoLock); +#else + table_close(rel, NoLock); +#endif + return list_make1(targetAttrs); +} /* - * ColumnValue uses column type information to read the current value pointed to - * by the BSON iterator, and converts this value to the corresponding PostgreSQL - * datum. The function then returns this datum. + * mongoBeginForeignModify + * Begin an insert/update/delete operation on a foreign table. + */ +static void +mongoBeginForeignModify(ModifyTableState *mtstate, + ResultRelInfo *resultRelInfo, + List *fdw_private, + int subplan_index, + int eflags) +{ + MongoFdwModifyState *fmstate; + Relation rel = resultRelInfo->ri_RelationDesc; + AttrNumber n_params; + Oid typefnoid = InvalidOid; + bool isvarlena = false; + ListCell *lc; + Oid foreignTableId; + Oid userid; + ForeignServer *server; + UserMapping *user; + ForeignTable *table; +#if PG_VERSION_NUM >= 160000 + ForeignScan *fsplan = (ForeignScan *) mtstate->ps.plan; +#else + EState *estate = mtstate->ps.state; + RangeTblEntry *rte; +#endif + + /* + * Do nothing in EXPLAIN (no ANALYZE) case. resultRelInfo->ri_FdwState + * stays NULL. + */ + if (eflags & EXEC_FLAG_EXPLAIN_ONLY) + return; + +#if PG_VERSION_NUM >= 160000 + userid = fsplan->checkAsUser ? fsplan->checkAsUser : GetUserId(); +#else + rte = rt_fetch(resultRelInfo->ri_RangeTableIndex, estate->es_range_table); + userid = rte->checkAsUser ? rte->checkAsUser : GetUserId(); +#endif + + foreignTableId = RelationGetRelid(rel); + + /* Get info about foreign table. */ + table = GetForeignTable(foreignTableId); + server = GetForeignServer(table->serverid); + user = GetUserMapping(userid, server->serverid); + + /* Begin constructing MongoFdwModifyState. */ + fmstate = (MongoFdwModifyState *) palloc0(sizeof(MongoFdwModifyState)); + + fmstate->rel = rel; + fmstate->options = mongo_get_options(foreignTableId); + + /* + * Get connection to the foreign server. Connection manager will + * establish new connection if necessary. 
+ */ + fmstate->mongoConnection = mongo_get_connection(server, user, + fmstate->options); + + fmstate->target_attrs = (List *) list_nth(fdw_private, 0); + + n_params = list_length(fmstate->target_attrs) + 1; + fmstate->p_flinfo = (FmgrInfo *) palloc(sizeof(FmgrInfo) * n_params); + fmstate->p_nums = 0; + + if (mtstate->operation == CMD_UPDATE) + { + Form_pg_attribute attr; +#if PG_VERSION_NUM >= 140000 + Plan *subplan = outerPlanState(mtstate)->plan; +#else + Plan *subplan = mtstate->mt_plans[subplan_index]->plan; +#endif + + Assert(subplan != NULL); + + attr = TupleDescAttr(RelationGetDescr(rel), 0); + + /* Find the rowid resjunk column in the subplan's result */ + fmstate->rowidAttno = ExecFindJunkAttributeInTlist(subplan->targetlist, + NameStr(attr->attname)); + if (!AttributeNumberIsValid(fmstate->rowidAttno)) + elog(ERROR, "could not find junk row identifier column"); + } + + /* Set up for remaining transmittable parameters */ + foreach(lc, fmstate->target_attrs) + { + int attnum = lfirst_int(lc); + Form_pg_attribute attr = TupleDescAttr(RelationGetDescr(rel), + attnum - 1); + + Assert(!attr->attisdropped); + + getTypeOutputInfo(attr->atttypid, &typefnoid, &isvarlena); + fmgr_info(typefnoid, &fmstate->p_flinfo[fmstate->p_nums]); + fmstate->p_nums++; + } + Assert(fmstate->p_nums <= n_params); + + resultRelInfo->ri_FdwState = fmstate; +} + +/* + * mongoExecForeignInsert + * Insert one row into a foreign table. + */ +static TupleTableSlot * +mongoExecForeignInsert(EState *estate, + ResultRelInfo *resultRelInfo, + TupleTableSlot *slot, + TupleTableSlot *planSlot) +{ + BSON *bsonDoc; + Oid typoid; + Datum value; + bool isnull = false; + MongoFdwModifyState *fmstate; + + fmstate = (MongoFdwModifyState *) resultRelInfo->ri_FdwState; + + bsonDoc = bsonCreate(); + + typoid = get_atttype(RelationGetRelid(resultRelInfo->ri_RelationDesc), 1); + + /* Get following parameters from slot */ + if (slot != NULL && fmstate->target_attrs != NIL) + { + ListCell *lc; + + foreach(lc, fmstate->target_attrs) + { + int attnum = lfirst_int(lc); + + value = slot_getattr(slot, attnum, &isnull); + + /* First column of MongoDB's foreign table must be _id */ + if (strcmp(TupleDescAttr(slot->tts_tupleDescriptor, 0)->attname.data, "_id") != 0) + elog(ERROR, "first column of MongoDB's foreign table must be \"_id\""); + + if (typoid != NAMEOID) + elog(ERROR, "type of first column of MongoDB's foreign table must be \"NAME\""); + if (strcmp(TupleDescAttr(slot->tts_tupleDescriptor, 0)->attname.data, "__doc") == 0) + continue; + + /* + * Ignore the value of first column which is row identifier in + * MongoDb (_id) and let MongoDB to insert the unique value for + * that column. 
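+			 * For example (hypothetical table definition), in
+			 *
+			 *     INSERT INTO mongo_table (_id, name) VALUES (NULL, 'foo');
+			 *
+			 * the supplied _id value is skipped and MongoDB assigns the
+			 * ObjectId itself.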
+ */ + if (attnum == 1) + { + ereport(DEBUG1, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot insert given data for \"_id\" column, skipping"), + errhint("Let MongoDB insert the unique value for \"_id\" column."))); + + continue; + } + + append_mongo_value(bsonDoc, + TupleDescAttr(slot->tts_tupleDescriptor, attnum - 1)->attname.data, + value, + isnull, + TupleDescAttr(slot->tts_tupleDescriptor, attnum - 1)->atttypid); + } + } + + /* Now we are ready to insert tuple/document into MongoDB */ + mongoInsert(fmstate->mongoConnection, fmstate->options->svr_database, + fmstate->options->collectionName, bsonDoc); + + bsonDestroy(bsonDoc); + + return slot; +} + +/* + * mongoAddForeignUpdateTargets + * Add column(s) needed for update/delete on a foreign table, we are using + * first column as row identification column, so we are adding that into + * target list. + */ +#if PG_VERSION_NUM >= 140000 +static void +mongoAddForeignUpdateTargets(PlannerInfo *root, + Index rtindex, + RangeTblEntry *target_rte, + Relation target_relation) +#else +static void +mongoAddForeignUpdateTargets(Query *parsetree, + RangeTblEntry *target_rte, + Relation target_relation) +#endif +{ + Var *var; + const char *attrname; +#if PG_VERSION_NUM < 140000 + TargetEntry *tle; +#endif + + /* + * What we need is the rowid which is the first column + */ + Form_pg_attribute attr = TupleDescAttr(RelationGetDescr(target_relation), + 0); + + /* Make a Var representing the desired value */ +#if PG_VERSION_NUM >= 140000 + var = makeVar(rtindex, +#else + var = makeVar(parsetree->resultRelation, +#endif + 1, + attr->atttypid, + attr->atttypmod, + InvalidOid, + 0); + + /* Get name of the row identifier column */ + attrname = NameStr(attr->attname); + +#if PG_VERSION_NUM >= 140000 + /* Register it as a row-identity column needed by this target rel */ + add_row_identity_var(root, var, rtindex, attrname); +#else + /* Wrap it in a TLE with the right name ... */ + tle = makeTargetEntry((Expr *) var, + list_length(parsetree->targetList) + 1, + pstrdup(attrname), + true); + + /* ... 
And add it to the query's targetlist */ + parsetree->targetList = lappend(parsetree->targetList, tle); +#endif +} + +static TupleTableSlot * +mongoExecForeignUpdate(EState *estate, + ResultRelInfo *resultRelInfo, + TupleTableSlot *slot, + TupleTableSlot *planSlot) +{ + Datum datum; + bool isNull = false; + Oid foreignTableId; + char *columnName; + Oid typoid; + BSON *document; + BSON *op = NULL; + BSON set; + MongoFdwModifyState *fmstate; + + fmstate = (MongoFdwModifyState *) resultRelInfo->ri_FdwState; + foreignTableId = RelationGetRelid(resultRelInfo->ri_RelationDesc); + + /* Get the id that was passed up as a resjunk column */ + datum = ExecGetJunkAttribute(planSlot, fmstate->rowidAttno, &isNull); + + columnName = get_attname(foreignTableId, 1, false); + + /* First column of MongoDB's foreign table must be _id */ + if (strcmp(columnName, "_id") != 0) + elog(ERROR, "first column of MongoDB's foreign table must be \"_id\""); + + typoid = get_atttype(foreignTableId, 1); + + /* The type of first column of MongoDB's foreign table must be NAME */ + if (typoid != NAMEOID) + elog(ERROR, "type of first column of MongoDB's foreign table must be \"NAME\""); + + document = bsonCreate(); + bsonAppendStartObject(document, "$set", &set); + + /* Get following parameters from slot */ + if (slot != NULL && fmstate->target_attrs != NIL) + { + ListCell *lc; + + foreach(lc, fmstate->target_attrs) + { + int attnum = lfirst_int(lc); + Form_pg_attribute attr = TupleDescAttr(slot->tts_tupleDescriptor, + attnum - 1); + Datum value; + bool isnull; + + if (strcmp("_id", attr->attname.data) == 0) + continue; + + if (strcmp("__doc", attr->attname.data) == 0) + elog(ERROR, "system column '__doc' update is not supported"); + + value = slot_getattr(slot, attnum, &isnull); + append_mongo_value(&set, attr->attname.data, value, + isnull ? 
true : false, attr->atttypid); + } + } + bsonAppendFinishObject(document, &set); + + op = bsonCreate(); + if (!append_mongo_value(op, columnName, datum, isNull, typoid)) + { + bsonDestroy(document); + return NULL; + } + + /* We are ready to update the row into MongoDB */ + mongoUpdate(fmstate->mongoConnection, fmstate->options->svr_database, + fmstate->options->collectionName, op, document); + + bsonDestroy(op); + bsonDestroy(document); + + /* Return NULL if nothing was updated on the remote end */ + return slot; +} + +/* + * mongoExecForeignDelete + * Delete one row from a foreign table + */ +static TupleTableSlot * +mongoExecForeignDelete(EState *estate, + ResultRelInfo *resultRelInfo, + TupleTableSlot *slot, + TupleTableSlot *planSlot) +{ + Datum datum; + bool isNull = false; + Oid foreignTableId; + char *columnName = NULL; + Oid typoid; + BSON *document; + MongoFdwModifyState *fmstate; + + fmstate = (MongoFdwModifyState *) resultRelInfo->ri_FdwState; + + foreignTableId = RelationGetRelid(resultRelInfo->ri_RelationDesc); + + /* Get the id that was passed up as a resjunk column */ + datum = ExecGetJunkAttribute(planSlot, 1, &isNull); + + columnName = get_attname(foreignTableId, 1, false); + + /* First column of MongoDB's foreign table must be _id */ + if (strcmp(columnName, "_id") != 0) + elog(ERROR, "first column of MongoDB's foreign table must be \"_id\""); + + typoid = get_atttype(foreignTableId, 1); + + /* The type of first column of MongoDB's foreign table must be NAME */ + if (typoid != NAMEOID) + elog(ERROR, "type of first column of MongoDB's foreign table must be \"NAME\""); + + document = bsonCreate(); + if (!append_mongo_value(document, columnName, datum, isNull, typoid)) + { + bsonDestroy(document); + return NULL; + } + + /* Now we are ready to delete a single document from MongoDB */ + mongoDelete(fmstate->mongoConnection, fmstate->options->svr_database, + fmstate->options->collectionName, document); + + bsonDestroy(document); + + /* Return NULL if nothing was updated on the remote end */ + return slot; +} + +/* + * mongoEndForeignModify + * Finish an insert/update/delete operation on a foreign table + */ +static void +mongoEndForeignModify(EState *estate, ResultRelInfo *resultRelInfo) +{ + MongoFdwModifyState *fmstate; + + fmstate = (MongoFdwModifyState *) resultRelInfo->ri_FdwState; + if (fmstate) + { + if (fmstate->options) + { + mongo_free_options(fmstate->options); + fmstate->options = NULL; + } + mongo_free_scan_state(fmstate); + pfree(fmstate); + } +} + +/* + * foreign_table_document_count + * Connects to the MongoDB server, and queries it for the number of + * documents in the foreign collection. On success, the function returns + * the document count. On failure, the function returns -1.0. + */ +static double +foreign_table_document_count(Oid foreignTableId) +{ + MongoFdwOptions *options; + MONGO_CONN *mongoConnection; + const BSON *emptyQuery = NULL; + double documentCount; + Oid userid = GetUserId(); + ForeignServer *server; + UserMapping *user; + ForeignTable *table; + + /* Get info about foreign table. */ + table = GetForeignTable(foreignTableId); + server = GetForeignServer(table->serverid); + user = GetUserMapping(userid, server->serverid); + + /* Resolve foreign table options; and connect to mongo server */ + options = mongo_get_options(foreignTableId); + + /* + * Get connection to the foreign server. Connection manager will + * establish new connection if necessary. 
+ */ + mongoConnection = mongo_get_connection(server, user, options); + + documentCount = mongoAggregateCount(mongoConnection, options->svr_database, + options->collectionName, emptyQuery); + + mongo_free_options(options); + + return documentCount; +} + +/* + * column_mapping_hash + * Creates a hash table that maps column names to column index and types. + * + * This table helps us quickly translate BSON document key/values to the + * corresponding PostgreSQL columns. + */ +static HTAB * +column_mapping_hash(Oid foreignTableId, List *columnList, List *colNameList, + List *colIsInnerList, uint32 relType) +{ + ListCell *columnCell; + HTAB *columnMappingHash; + HASHCTL hashInfo; + uint32 attnum = 0; + Index listIndex = 0; + Index aggIndex = 0; + + memset(&hashInfo, 0, sizeof(hashInfo)); + hashInfo.keysize = NAMEDATALEN; + hashInfo.entrysize = sizeof(ColumnMapping); + hashInfo.hash = string_hash; + hashInfo.hcxt = CurrentMemoryContext; + + columnMappingHash = hash_create("Column Mapping Hash", MaxHashTableSize, + &hashInfo, + (HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT)); + Assert(columnMappingHash != NULL); + + foreach(columnCell, columnList) + { + Var *column = (Var *) lfirst(columnCell); + ColumnMapping *columnMapping; + char *columnName; + bool handleFound = false; + void *hashKey; + + if (relType == JOIN_REL) + { + int is_innerrel = list_nth_int(colIsInnerList, listIndex); + + columnName = strVal(list_nth(colNameList, listIndex++)); + + /* + * In MongoDB, columns involved in join result-set from inner + * table prefixed with Join result name. Uses hard-coded string + * "Join Result" to be prefixed to form the hash key to read the + * joined result set. This same prefix needs to be given as + * joined result set name in the $lookup stage when building the + * remote query. + * + * For a simple relation scan, the column name would be the hash + * key. + */ + if (is_innerrel) + { + const char *resultName = "Join_Result"; + StringInfo KeyString = makeStringInfo(); + + appendStringInfo(KeyString, "%s.%s", resultName, columnName); + + hashKey = (void *) KeyString->data; + } + else + hashKey = (void *) columnName; + } + + /* + * In MongoDB, columns involved in upper result-set named as + * "_id.column_name_variable" not the actual column names. Use this + * as hashKey to match the bson key we get at the time of fetching the + * column values. + * + * Use the hard-coded string v_agg* to get the aggregation result. + * This same name needs to be given as an aggregation result name + * while building the remote query. + */ + else if (relType == UPPER_REL || relType == UPPER_JOIN_REL) + { + if (IsA(column, Var)) + { + if (relType == UPPER_REL) + columnName = get_attname(foreignTableId, column->varattno, + false); + else + columnName = strVal(list_nth(colNameList, listIndex++)); + + /* + * Keep variable name same as a column name. Use the same + * name while building the MongoDB query in the + * mongo_query_document function. + */ + hashKey = psprintf("_id.%s", columnName); + } + else + hashKey = psprintf("AGG_RESULT_KEY%d", aggIndex++); + } + else + { + columnName = get_attname(foreignTableId, column->varattno, false); + hashKey = (void *) columnName; + } + + columnMapping = (ColumnMapping *) hash_search(columnMappingHash, + hashKey, + HASH_ENTER, + &handleFound); + Assert(columnMapping != NULL); + + /* + * Save attribute number of the current column in the resulting tuple. + * For join/upper relation, it is continuously increasing integers + * starting from 0, and for simple relation, it's varattno. 
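+		 * For example (hypothetical columns), a base relation
+		 * (_id, name, age) maps "age" to index 2 (varattno - 1), whereas
+		 * a join or aggregate target list containing (name, age) maps
+		 * them to indexes 0 and 1 in the order they appear in columnList.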
+ */ + if (relType != BASE_REL) + { + columnMapping->columnIndex = attnum; + attnum++; + } + else + columnMapping->columnIndex = column->varattno - 1; + + /* Save other information */ + if ((relType == UPPER_REL || relType == UPPER_JOIN_REL) && + !strncmp(hashKey, "AGG_RESULT_KEY", 5)) + { + Aggref *agg = (Aggref *) lfirst(columnCell); + + columnMapping->columnTypeId = agg->aggtype; + columnMapping->columnTypeMod = agg->aggcollid; + columnMapping->columnArrayTypeId = InvalidOid; + } + else + { + columnMapping->columnTypeId = column->vartype; + columnMapping->columnTypeMod = column->vartypmod; + columnMapping->columnArrayTypeId = get_element_type(column->vartype); + } + } + + return columnMappingHash; +} + +/* + * fill_tuple_slot + * Walks over all key/value pairs in the given document. + * + * For each pair, the function checks if the key appears in the column mapping + * hash, and if the value type is compatible with the one specified for the + * column. If so, the function converts the value and fills the corresponding + * tuple position. The bsonDocumentKey parameter is used for recursion, and + * should always be passed as NULL. + */ +static void +fill_tuple_slot(const BSON *bsonDocument, const char *bsonDocumentKey, + HTAB *columnMappingHash, Datum *columnValues, + bool *columnNulls, uint32 relType) +{ + ColumnMapping *columnMapping; + bool handleFound = false; + void *hashKey; + BSON_ITERATOR bsonIterator = {NULL, 0}; + + if (bsonIterInit(&bsonIterator, (BSON *) bsonDocument) == false) + elog(ERROR, "failed to initialize BSON iterator"); + + hashKey = "__doc"; + columnMapping = (ColumnMapping *) hash_search(columnMappingHash, hashKey, + HASH_FIND, &handleFound); + + if (columnMapping != NULL && handleFound == true && + columnValues[columnMapping->columnIndex] == 0) + { + JsonLexContext *lex; + text *result; + Datum columnValue; + char *str; + + str = bsonAsJson(bsonDocument); + result = cstring_to_text_with_len(str, strlen(str)); +#if PG_VERSION_NUM >= 170000 + lex = makeJsonLexContext(NULL, result, false); +#else + lex = makeJsonLexContext(result, false); +#endif + pg_parse_json(lex, &nullSemAction); + columnValue = PointerGetDatum(result); + + switch (columnMapping->columnTypeId) + { + case BOOLOID: + case INT2OID: + case INT4OID: + case INT8OID: + case BOXOID: + case BYTEAOID: + case CHAROID: + case VARCHAROID: + case NAMEOID: + case JSONOID: + case XMLOID: + case POINTOID: + case LSEGOID: + case LINEOID: + case UUIDOID: + case LSNOID: + case TEXTOID: + case CASHOID: + case DATEOID: + case MACADDROID: + case TIMESTAMPOID: + case TIMESTAMPTZOID: + case BPCHAROID: + columnValue = PointerGetDatum(result); + break; + case JSONBOID: + columnValue = DirectFunctionCall1(jsonb_in, + PointerGetDatum(str)); + break; + default: + ereport(ERROR, + (errcode(ERRCODE_FDW_INVALID_DATA_TYPE), + errmsg("unsupported type for column __doc"), + errhint("Column type: %u", + (uint32) columnMapping->columnTypeId))); + break; + } + + columnValues[columnMapping->columnIndex] = columnValue; + columnNulls[columnMapping->columnIndex] = false; + + return; + } + + while (bsonIterNext(&bsonIterator)) + { + const char *bsonKey = bsonIterKey(&bsonIterator); + BSON_TYPE bsonType = bsonIterType(&bsonIterator); + Oid columnTypeId = InvalidOid; + Oid columnArrayTypeId = InvalidOid; + bool compatibleTypes = false; + const char *bsonFullKey; + int32 attnum = 0; + bool is_agg = false; + + if (!strncmp(bsonKey, "AGG_RESULT_KEY", 5) && bsonType == BSON_TYPE_INT32) + is_agg = true; + + columnMapping = NULL; + if 
(bsonDocumentKey != NULL) + { + /* + * For fields in nested BSON objects, we use fully qualified field + * name to check the column mapping. + */ + StringInfo bsonFullKeyString = makeStringInfo(); + + appendStringInfo(bsonFullKeyString, "%s.%s", bsonDocumentKey, + bsonKey); + bsonFullKey = bsonFullKeyString->data; + } + else + bsonFullKey = bsonKey; + + /* Look up the corresponding column for this bson key */ + hashKey = (void *) bsonFullKey; + columnMapping = (ColumnMapping *) hash_search(columnMappingHash, + hashKey, + HASH_FIND, + &handleFound); + if (columnMapping != NULL) + { + columnTypeId = columnMapping->columnTypeId; + columnArrayTypeId = columnMapping->columnArrayTypeId; + } + + /* Recurse into nested objects */ + if (bsonType == BSON_TYPE_DOCUMENT) + { + if (columnTypeId != JSONOID) + { + BSON subObject; + + bsonIterSubObject(&bsonIterator, &subObject); + fill_tuple_slot(&subObject, bsonFullKey, columnMappingHash, + columnValues, columnNulls, relType); + continue; + } + } + + /* If no corresponding column or null BSON value, continue */ + if (!is_agg && (columnMapping == NULL || bsonType == BSON_TYPE_NULL)) + continue; + + /* Check if columns have compatible types */ + if ((OidIsValid(columnArrayTypeId) && bsonType == BSON_TYPE_ARRAY)) + compatibleTypes = true; + else + compatibleTypes = column_types_compatible(bsonType, columnTypeId); + + /* If types are incompatible, leave this column null */ + if (!compatibleTypes) + continue; + + if (columnMapping != NULL) + attnum = columnMapping->columnIndex; + + /* Fill in corresponding column value and null flag */ + if (OidIsValid(columnArrayTypeId)) + columnValues[attnum] = column_value_array(&bsonIterator, + columnArrayTypeId); + else + columnValues[attnum] = column_value(&bsonIterator, columnTypeId, + columnMapping->columnTypeMod); + columnNulls[attnum] = false; + } +} + +/* + * column_types_compatible + * Checks if the given BSON type can be converted to the given PostgreSQL + * type. + * + * In this check, the function also uses its knowledge of internal conversions + * applied by BSON APIs. + */ +static bool +column_types_compatible(BSON_TYPE bsonType, Oid columnTypeId) +{ + bool compatibleTypes = false; + + /* We consider the PostgreSQL column type as authoritative */ + switch (columnTypeId) + { + case INT2OID: + case INT4OID: + case INT8OID: + case FLOAT4OID: + case FLOAT8OID: + case NUMERICOID: + if (bsonType == BSON_TYPE_INT32 || bsonType == BSON_TYPE_INT64 || + bsonType == BSON_TYPE_DOUBLE) + compatibleTypes = true; + if (bsonType == BSON_TYPE_BOOL) + compatibleTypes = true; + break; + case BOOLOID: + if (bsonType == BSON_TYPE_INT32 || bsonType == BSON_TYPE_INT64 || + bsonType == BSON_TYPE_DOUBLE || bsonType == BSON_TYPE_BOOL) + compatibleTypes = true; + break; + case BPCHAROID: + case VARCHAROID: + case TEXTOID: + if (bsonType == BSON_TYPE_UTF8) + compatibleTypes = true; + break; + case BYTEAOID: + if (bsonType == BSON_TYPE_BINDATA) + compatibleTypes = true; + if (bsonType == BSON_TYPE_OID) + compatibleTypes = true; + break; + case NAMEOID: + + /* + * We currently error out on data types other than object + * identifier. MongoDB supports more data types for the _id field + * but those are not yet handled in mongo_fdw. 
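+ *
+ * For example, a collection whose _id values are plain integers or
+ * strings cannot currently be read through a NAME column; such values
+ * trigger the error below.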
+ */ + if (bsonType != BSON_TYPE_OID) + ereport(ERROR, + (errcode(ERRCODE_FDW_INVALID_DATA_TYPE), + errmsg("cannot convert BSON type to column type"), + errhint("Column type \"NAME\" is compatible only with BSON type \"ObjectId\"."))); + + /* + * We currently overload the NAMEOID type to represent the BSON + * object identifier. We can safely overload this 64-byte data + * type since it's reserved for internal use in PostgreSQL. + */ + compatibleTypes = true; + break; + case DATEOID: + case TIMESTAMPOID: + case TIMESTAMPTZOID: + if (bsonType == BSON_TYPE_DATE_TIME) + compatibleTypes = true; + break; + case NUMERICARRAY_OID: + if (bsonType == BSON_TYPE_ARRAY) + compatibleTypes = true; + break; + case JSONOID: + if (bsonType == BSON_TYPE_DOCUMENT || + bsonType == BSON_TYPE_ARRAY) + compatibleTypes = true; + break; + default: + + /* + * We currently error out on other data types. Some types such as + * byte arrays are easy to add, but they need testing. + * + * Other types such as money or inet, do not have equivalents in + * MongoDB. + */ + ereport(ERROR, + (errcode(ERRCODE_FDW_INVALID_DATA_TYPE), + errmsg("cannot convert BSON type to column type"), + errhint("Column type: %u", (uint32) columnTypeId))); + break; + } + + return compatibleTypes; +} + +/* + * column_value_array + * Uses array element type id to read the current array pointed to by the + * BSON iterator, and converts each array element (with matching type) to + * the corresponding PostgreSQL datum. + * + * Then, the function constructs an array datum from element datums, and + * returns the array datum. */ static Datum -ColumnValue(bson_iterator *bsonIterator, Oid columnTypeId, int32 columnTypeMod) +column_value_array(BSON_ITERATOR *bsonIterator, Oid valueTypeId) +{ + Datum *columnValueArray = palloc(INITIAL_ARRAY_CAPACITY * sizeof(Datum)); + uint32 arrayCapacity = INITIAL_ARRAY_CAPACITY; + uint32 arrayIndex = 0; + ArrayType *columnValueObject; + Datum columnValueDatum; + bool typeByValue; + char typeAlignment; + int16 typeLength; + + BSON_ITERATOR bsonSubIterator = {NULL, 0}; + + bsonIterSubIter(bsonIterator, &bsonSubIterator); + while (bsonIterNext(&bsonSubIterator)) + { + BSON_TYPE bsonType = bsonIterType(&bsonSubIterator); + bool compatibleTypes = false; + + compatibleTypes = column_types_compatible(bsonType, valueTypeId); + if (bsonType == BSON_TYPE_NULL || !compatibleTypes) + continue; + + if (arrayIndex >= arrayCapacity) + { + /* Double the array capacity. */ + arrayCapacity *= 2; + columnValueArray = repalloc(columnValueArray, + arrayCapacity * sizeof(Datum)); + } + + /* Use default type modifier (0) to convert column value */ + columnValueArray[arrayIndex] = column_value(&bsonSubIterator, + valueTypeId, 0); + arrayIndex++; + } + + get_typlenbyvalalign(valueTypeId, &typeLength, &typeByValue, + &typeAlignment); + columnValueObject = construct_array(columnValueArray, + arrayIndex, + valueTypeId, + typeLength, + typeByValue, + typeAlignment); + + columnValueDatum = PointerGetDatum(columnValueObject); + + pfree(columnValueArray); + + return columnValueDatum; +} + +/* + * column_value + * Uses column type information to read the current value pointed to by + * the BSON iterator, and converts this value to the corresponding + * PostgreSQL datum. The function then returns this datum. 
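+ *
+ * For example, a BSON 32-bit integer fetched for an INT2 column is
+ * narrowed to int16 and returned via Int16GetDatum(), while a BSON UTF-8
+ * string fetched for a TEXT column is returned via CStringGetTextDatum().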
+ */ +static Datum +column_value(BSON_ITERATOR *bsonIterator, Oid columnTypeId, + int32 columnTypeMod) +{ + Datum columnValue; + + switch (columnTypeId) + { + case INT2OID: + { + int16 value = (int16) bsonIterInt32(bsonIterator); + + columnValue = Int16GetDatum(value); + } + break; + case INT4OID: + { + int32 value = bsonIterInt32(bsonIterator); + + columnValue = Int32GetDatum(value); + } + break; + case INT8OID: + { + int64 value = bsonIterInt64(bsonIterator); + + columnValue = Int64GetDatum(value); + } + break; + case FLOAT4OID: + { + float4 value = (float4) bsonIterDouble(bsonIterator); + + columnValue = Float4GetDatum(value); + } + break; + case FLOAT8OID: + { + float8 value = bsonIterDouble(bsonIterator); + + columnValue = Float8GetDatum(value); + } + break; + case NUMERICOID: + { + float8 value = bsonIterDouble(bsonIterator); + Datum valueDatum = DirectFunctionCall1(float8_numeric, + Float8GetDatum(value)); + + /* + * Since we have a Numeric value, using numeric() here instead + * of numeric_in() input function for typmod conversion. + */ + columnValue = DirectFunctionCall2(numeric, valueDatum, + Int32GetDatum(columnTypeMod)); + } + break; + case BOOLOID: + { + bool value = bsonIterBool(bsonIterator); + + columnValue = BoolGetDatum(value); + } + break; + case BPCHAROID: + { + const char *value = bsonIterString(bsonIterator); + Datum valueDatum = CStringGetDatum(value); + + columnValue = DirectFunctionCall3(bpcharin, valueDatum, + ObjectIdGetDatum(InvalidOid), + Int32GetDatum(columnTypeMod)); + } + break; + case VARCHAROID: + { + const char *value = bsonIterString(bsonIterator); + Datum valueDatum = CStringGetDatum(value); + + columnValue = DirectFunctionCall3(varcharin, valueDatum, + ObjectIdGetDatum(InvalidOid), + Int32GetDatum(columnTypeMod)); + } + break; + case TEXTOID: + { + const char *value = bsonIterString(bsonIterator); + + columnValue = CStringGetTextDatum(value); + } + break; + case NAMEOID: + { + char value[NAMEDATALEN]; + Datum valueDatum = 0; + + bson_oid_t *bsonObjectId = (bson_oid_t *) bsonIterOid(bsonIterator); + + bson_oid_to_string(bsonObjectId, value); + + valueDatum = CStringGetDatum(value); + columnValue = DirectFunctionCall3(namein, valueDatum, + ObjectIdGetDatum(InvalidOid), + Int32GetDatum(columnTypeMod)); + } + break; + case BYTEAOID: + { + int value_len; + char *value; + bytea *result; + + switch (bsonIterType(bsonIterator)) + { + case BSON_TYPE_OID: + value = (char *) bsonIterOid(bsonIterator); + value_len = 12; + break; + default: + value = (char *) bsonIterBinData(bsonIterator, + (uint32_t *) &value_len); + break; + } + result = (bytea *) palloc(value_len + VARHDRSZ); + memcpy(VARDATA(result), value, value_len); + SET_VARSIZE(result, value_len + VARHDRSZ); + columnValue = PointerGetDatum(result); + } + break; + case DATEOID: + { + int64 valueMillis = bsonIterDate(bsonIterator); + int64 timestamp = (valueMillis * 1000L) - POSTGRES_TO_UNIX_EPOCH_USECS; + Datum timestampDatum = TimestampGetDatum(timestamp); + + columnValue = DirectFunctionCall1(timestamp_date, + timestampDatum); + } + break; + case TIMESTAMPOID: + case TIMESTAMPTZOID: + { + int64 valueMillis = bsonIterDate(bsonIterator); + int64 timestamp = (valueMillis * 1000L) - POSTGRES_TO_UNIX_EPOCH_USECS; + + /* Overlook type modifiers for timestamp */ + columnValue = TimestampGetDatum(timestamp); + } + break; + case JSONOID: + { + JsonLexContext *lex; + text *result; + StringInfo buffer = makeStringInfo(); + + BSON_TYPE type = BSON_ITER_TYPE(bsonIterator); + + if (type != BSON_TYPE_ARRAY && type != 
BSON_TYPE_DOCUMENT) + ereport(ERROR, + (errmsg("cannot convert to json"))); + + /* Convert BSON to JSON value */ + bsonToJsonStringValue(buffer, bsonIterator, + BSON_TYPE_ARRAY == type); + result = cstring_to_text_with_len(buffer->data, buffer->len); +#if PG_VERSION_NUM >= 170000 + lex = makeJsonLexContext(NULL, result, false); +#else + lex = makeJsonLexContext(result, false); +#endif + pg_parse_json(lex, &nullSemAction); + columnValue = PointerGetDatum(result); + } + break; + default: + ereport(ERROR, + (errcode(ERRCODE_FDW_INVALID_DATA_TYPE), + errmsg("cannot convert BSON type to column type"), + errhint("Column type: %u", (uint32) columnTypeId))); + break; + } + + return columnValue; +} + +/* + * mongo_free_scan_state + * Closes the cursor and connection to MongoDB, and reclaims all Mongo + * related resources allocated for the foreign scan. + */ +static void +mongo_free_scan_state(MongoFdwModifyState *fmstate) +{ + if (fmstate == NULL) + return; + + if (fmstate->queryDocument) + { + bsonDestroy(fmstate->queryDocument); + fmstate->queryDocument = NULL; + } + + if (fmstate->mongoCursor) + { + mongoCursorDestroy(fmstate->mongoCursor); + fmstate->mongoCursor = NULL; + } + + /* Release remote connection */ + mongo_release_connection(fmstate->mongoConnection); +} + +/* + * mongoAnalyzeForeignTable + * Collects statistics for the given foreign table. + */ +static bool +mongoAnalyzeForeignTable(Relation relation, + AcquireSampleRowsFunc *func, + BlockNumber *totalpages) +{ + BlockNumber pageCount = 0; + int attributeCount; + int32 *attributeWidths; + Oid foreignTableId; + int32 documentWidth; + double documentCount; + double foreignTableSize; + + foreignTableId = RelationGetRelid(relation); + documentCount = foreign_table_document_count(foreignTableId); + + if (documentCount > 0.0) + { + attributeCount = RelationGetNumberOfAttributes(relation); + attributeWidths = (int32 *) palloc0((attributeCount + 1) * sizeof(int32)); + + /* + * We estimate disk costs assuming a sequential scan over the data. + * This is an inaccurate assumption as Mongo scatters the data over + * disk pages, and may rely on an index to retrieve the data. Still, + * this should at least give us a relative cost. + */ + documentWidth = get_relation_data_width(foreignTableId, + attributeWidths); + foreignTableSize = documentCount * documentWidth; + + pageCount = (BlockNumber) rint(foreignTableSize / BLCKSZ); + } + else + ereport(DEBUG1, + (errmsg("could not retrieve document count for collection"), + errhint("Could not collect statistics about foreign table."))); + + (*totalpages) = pageCount; + (*func) = mongo_acquire_sample_rows; + + return true; +} + +/* + * mongo_acquire_sample_rows + * Acquires a random sample of rows from the foreign table. + * + * Selected rows are returned in the caller allocated sampleRows array, + * which must have at least target row count entries. The actual number of + * rows selected is returned as the function result. We also count the number + * of rows in the collection and return it in total row count. We also always + * set dead row count to zero. + * + * Note that the returned list of rows is not always in order by physical + * position in the MongoDB collection. Therefore, correlation estimates + * derived later may be meaningless, but it's OK because we don't use the + * estimates currently (the planner only pays attention to correlation for + * index scans). 
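+ *
+ * Rows are selected with Vitter's reservoir-sampling technique while the
+ * cursor iterates over the whole collection; see the sampling loop below.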
+ */ +static int +mongo_acquire_sample_rows(Relation relation, + int errorLevel, + HeapTuple *sampleRows, + int targetRowCount, + double *totalRowCount, + double *totalDeadRowCount) +{ + MONGO_CONN *mongoConnection; + int sampleRowCount = 0; + double rowCount = 0; + double rowCountToSkip = -1; /* -1 means not set yet */ + double randomState; + Datum *columnValues; + bool *columnNulls; + Oid foreignTableId; + TupleDesc tupleDescriptor; + AttrNumber columnCount; + AttrNumber columnId; + HTAB *columnMappingHash; + MONGO_CURSOR *mongoCursor; + BSON *queryDocument = bsonCreate(); + List *columnList = NIL; + char *relationName; + MemoryContext oldContext = CurrentMemoryContext; + MemoryContext tupleContext; + MongoFdwOptions *options; + ForeignServer *server; + UserMapping *user; + ForeignTable *table; + + /* Create list of columns in the relation */ + tupleDescriptor = RelationGetDescr(relation); + columnCount = tupleDescriptor->natts; + + for (columnId = 1; columnId <= columnCount; columnId++) + { + Var *column = (Var *) palloc0(sizeof(Var)); + Form_pg_attribute attr = TupleDescAttr(tupleDescriptor, columnId - 1); + + column->varattno = columnId; + column->vartype = attr->atttypid; + column->vartypmod = attr->atttypmod; + + columnList = lappend(columnList, column); + } + + foreignTableId = RelationGetRelid(relation); + table = GetForeignTable(foreignTableId); + server = GetForeignServer(table->serverid); + user = GetUserMapping(GetUserId(), server->serverid); + options = mongo_get_options(foreignTableId); + + /* + * Get connection to the foreign server. Connection manager will + * establish new connection if necessary. + */ + mongoConnection = mongo_get_connection(server, user, options); + + /* Create cursor for collection name and set query */ + mongoCursor = mongoCursorCreate(mongoConnection, options->svr_database, + options->collectionName, queryDocument); + columnMappingHash = column_mapping_hash(foreignTableId, columnList, NIL, + NIL, BASE_REL); + + /* + * Use per-tuple memory context to prevent leak of memory used to read + * rows from the file with copy routines. + */ + tupleContext = AllocSetContextCreate(CurrentMemoryContext, + "mongo_fdw temporary context", + ALLOCSET_DEFAULT_SIZES); + + /* Prepare for sampling rows */ + randomState = anl_init_selection_state(targetRowCount); + + columnValues = (Datum *) palloc(columnCount * sizeof(Datum)); + columnNulls = (bool *) palloc(columnCount * sizeof(bool)); + + for (;;) + { + /* Check for user-requested abort or sleep */ + vacuum_delay_point(); + + /* Initialize all values for this row to null */ + memset(columnValues, 0, columnCount * sizeof(Datum)); + memset(columnNulls, true, columnCount * sizeof(bool)); + + if (mongoCursorNext(mongoCursor, NULL)) + { + const BSON *bsonDocument = mongoCursorBson(mongoCursor); + const char *bsonDocumentKey = NULL; /* Top level document */ + + /* Fetch next tuple */ + MemoryContextReset(tupleContext); + MemoryContextSwitchTo(tupleContext); + + fill_tuple_slot(bsonDocument, bsonDocumentKey, columnMappingHash, + columnValues, columnNulls, BASE_REL); + + MemoryContextSwitchTo(oldContext); + } + else + { + bson_error_t error; + + if (mongoc_cursor_error(mongoCursor, &error)) + ereport(ERROR, + (errmsg("could not iterate over mongo collection"), + errhint("Mongo driver error: %s", error.message))); + break; + } + + /* + * The first targetRowCount sample rows are simply copied into the + * reservoir. Then we start replacing tuples in the sample until we + * reach the end of the relation. 
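+ * Each later row is kept with decreasing probability and, when kept,
+ * replaces a randomly chosen entry in the reservoir, so every row in
+ * the collection ends up in the sample with equal probability.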
This algorithm is from Jeff + * Vitter's paper (see more info in commands/analyze.c). + */ + if (sampleRowCount < targetRowCount) + sampleRows[sampleRowCount++] = heap_form_tuple(tupleDescriptor, + columnValues, + columnNulls); + else + { + /* + * t in Vitter's paper is the number of records already processed. + * If we need to compute a new S value, we must use the "not yet + * incremented" value of rowCount as t. + */ + if (rowCountToSkip < 0) + rowCountToSkip = anl_get_next_S(rowCount, targetRowCount, + &randomState); + + if (rowCountToSkip <= 0) + { + /* + * Found a suitable tuple, so save it, replacing one old tuple + * at random. + */ + int rowIndex = (int) (targetRowCount * anl_random_fract()); + + Assert(rowIndex >= 0); + Assert(rowIndex < targetRowCount); + + heap_freetuple(sampleRows[rowIndex]); + sampleRows[rowIndex] = heap_form_tuple(tupleDescriptor, + columnValues, + columnNulls); + } + + rowCountToSkip -= 1; + } + + rowCount += 1; + } + + /* Only clean up the query struct, but not its data */ + bsonDestroy(queryDocument); + + /* Clean up */ + MemoryContextDelete(tupleContext); + + pfree(columnValues); + pfree(columnNulls); + + /* Emit some interesting relation info */ + relationName = RelationGetRelationName(relation); + ereport(errorLevel, + (errmsg("\"%s\": collection contains %.0f rows; %d rows in sample", + relationName, rowCount, sampleRowCount))); + + (*totalRowCount) = rowCount; + (*totalDeadRowCount) = 0; + + return sampleRowCount; +} + +Datum +mongo_fdw_version(PG_FUNCTION_ARGS) +{ + PG_RETURN_INT32(CODE_VERSION); +} + +/* + * mongoBeginForeignInsert + * Prepare for an insert operation triggered by partition routing + * or COPY FROM. + * + * This is not yet supported, so raise an error. + */ +static void +mongoBeginForeignInsert(ModifyTableState *mtstate, + ResultRelInfo *resultRelInfo) +{ + ereport(ERROR, + (errcode(ERRCODE_FDW_UNABLE_TO_CREATE_EXECUTION), + errmsg("COPY and foreign partition routing not supported in mongo_fdw"))); +} + +/* + * mongoEndForeignInsert + * BeginForeignInsert() is not yet implemented, hence we do not + * have anything to cleanup as of now. We throw an error here just + * to make sure when we do that we do not forget to cleanup + * resources. + */ +static void +mongoEndForeignInsert(EState *estate, ResultRelInfo *resultRelInfo) +{ + ereport(ERROR, + (errcode(ERRCODE_FDW_UNABLE_TO_CREATE_EXECUTION), + errmsg("COPY and foreign partition routing not supported in mongo_fdw"))); +} + +/* + * mongoGetForeignJoinPaths + * Add possible ForeignPath to joinrel, if the join is safe to push down. + */ +static void +mongoGetForeignJoinPaths(PlannerInfo *root, RelOptInfo *joinrel, + RelOptInfo *outerrel, RelOptInfo *innerrel, + JoinType jointype, JoinPathExtraData *extra) +{ + MongoFdwRelationInfo *fpinfo; + ForeignPath *joinpath; + Cost startup_cost; + Cost total_cost; + Path *epq_path = NULL; /* Path to create plan to be executed when + * EvalPlanQual gets triggered. */ + MongoFdwRelationInfo *fpinfo_o; + MongoFdwRelationInfo *fpinfo_i; + + /* + * Skip if this join combination has been considered already. + */ + if (joinrel->fdw_private) + return; + + fpinfo_o = (MongoFdwRelationInfo *) outerrel->fdw_private; + fpinfo_i = (MongoFdwRelationInfo *) innerrel->fdw_private; + + /* If join pushdown is not enabled, honor it. 
*/ + if ((!IS_JOIN_REL(outerrel) && !fpinfo_o->options->enable_join_pushdown) || + (!IS_JOIN_REL(innerrel) && !fpinfo_i->options->enable_join_pushdown) || + !enable_join_pushdown) + return; + + /* + * Create unfinished MongoFdwRelationInfo entry which is used to indicate + * that the join relation is already considered, so that we won't waste + * time in judging the safety of join pushdown and adding the same paths + * again if found safe. Once we know that this join can be pushed down, + * we fill the entry. + */ + fpinfo = (MongoFdwRelationInfo *) palloc0(sizeof(MongoFdwRelationInfo)); + fpinfo->pushdown_safe = false; + joinrel->fdw_private = fpinfo; + + /* + * In case there is a possibility that EvalPlanQual will be executed, we + * should be able to reconstruct the row, from base relations applying all + * the conditions. We create a local plan from a suitable local path + * available in the path list. In case such a path doesn't exist, we can + * not push the join to the foreign server since we won't be able to + * reconstruct the row for EvalPlanQual(). Find an alternative local path + * before we add ForeignPath, lest the new path would kick possibly the + * only local path. Do this before calling mongo_foreign_join_ok(), since + * that function updates fpinfo and marks it as pushable if the join is + * found to be pushable. + */ + if (root->parse->commandType == CMD_DELETE || + root->parse->commandType == CMD_UPDATE || + root->rowMarks) + { + epq_path = GetExistingLocalJoinPath(joinrel); + if (!epq_path) + { + elog(DEBUG3, "could not push down foreign join because a local path suitable for EPQ checks was not found"); + return; + } + } + else + epq_path = NULL; + + if (!mongo_foreign_join_ok(root, joinrel, jointype, outerrel, innerrel, + extra)) + { + /* Free path required for EPQ if we copied one; we don't need it now */ + if (epq_path) + pfree(epq_path); + return; + } + + /* TODO: Put accurate estimates here */ + startup_cost = 15.0; + total_cost = 20 + startup_cost; + + /* + * Create a new join path and add it to the joinrel which represents a + * join between foreign tables. + */ +#if PG_VERSION_NUM >= 170000 + joinpath = create_foreign_join_path(root, + joinrel, + NULL, + joinrel->rows, + startup_cost, + total_cost, + NIL, /* no pathkeys */ + joinrel->lateral_relids, + epq_path, + extra->restrictlist, + NIL); /* no fdw_private */ +#else + joinpath = create_foreign_join_path(root, + joinrel, + NULL, + joinrel->rows, + startup_cost, + total_cost, + NIL, /* no pathkeys */ + joinrel->lateral_relids, + epq_path, + NIL); /* no fdw_private */ +#endif + + /* Add generated path into joinrel by add_path(). */ + add_path(joinrel, (Path *) joinpath); + + /* Add paths with pathkeys */ +#if PG_VERSION_NUM >= 170000 + mongo_add_paths_with_pathkeys(root, joinrel, epq_path, startup_cost, + total_cost, extra->restrictlist); +#else + mongo_add_paths_with_pathkeys(root, joinrel, epq_path, startup_cost, + total_cost); +#endif + + /* XXX Consider parameterized paths for the join relation */ +} + +/* + * mongo_foreign_join_ok + * Assess whether the join between inner and outer relations can be + * pushed down to the foreign server. 
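+ *
+ * The join is treated as pushable only for INNER, LEFT, and RIGHT joins
+ * between two pushdown-safe base relations that carry no local
+ * conditions, and only when the target list needs neither whole-row
+ * references nor the __doc column.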
+ */ +static bool +mongo_foreign_join_ok(PlannerInfo *root, RelOptInfo *joinrel, + JoinType jointype, RelOptInfo *outerrel, + RelOptInfo *innerrel, JoinPathExtraData *extra) { - Datum columnValue = 0; + MongoFdwRelationInfo *fpinfo; + MongoFdwRelationInfo *fpinfo_o; + MongoFdwRelationInfo *fpinfo_i; + ListCell *lc; + List *joinclauses = NIL; + List *scan_var_list; + RangeTblEntry *rte; + char *colname; + + /* We support pushing down only INNER, LEFT, RIGHT OUTER join */ + if (jointype != JOIN_INNER && jointype != JOIN_LEFT && + jointype != JOIN_RIGHT) + return false; + + fpinfo = (MongoFdwRelationInfo *) joinrel->fdw_private; + fpinfo_o = (MongoFdwRelationInfo *) outerrel->fdw_private; + fpinfo_i = (MongoFdwRelationInfo *) innerrel->fdw_private; + + /* Recursive joins can't be pushed down */ + if (IS_JOIN_REL(outerrel) || IS_JOIN_REL(innerrel)) + return false; + + /* + * If either of the joining relations is marked as unsafe to pushdown, the + * join can not be pushed down. + */ + if (!fpinfo_o || !fpinfo_o->pushdown_safe || + !fpinfo_i || !fpinfo_i->pushdown_safe) + return false; + + /* + * If joining relations have local conditions, those conditions are + * required to be applied before joining the relations. Hence the join + * can not be pushed down. + */ + if (fpinfo_o->local_conds || fpinfo_i->local_conds) + return false; + + scan_var_list = pull_var_clause((Node *) joinrel->reltarget->exprs, + PVC_RECURSE_PLACEHOLDERS); - switch(columnTypeId) + /* + * Don't push-down join when whole row reference and/or full document + * retrieval is involved in the target list. + */ + foreach(lc, scan_var_list) { - case INT2OID: + Var *var = lfirst(lc); + + Assert(IsA(var, Var)); + + /* Don't support whole row reference. */ + if (var->varattno == 0) + return false; + + rte = planner_rt_fetch(var->varno, root); + colname = get_attname(rte->relid, var->varattno, false); + + /* Don't support full document retrieval */ + if (strcmp("__doc", colname) == 0) + return false; + } + + /* + * Separate restrict list into join quals and pushed-down (other) quals. + * + * Join quals belonging to an outer join must all be shippable, else we + * cannot execute the join remotely. Add such quals to 'joinclauses'. + * + * Add other quals to fpinfo->remote_conds if they are shippable, else to + * fpinfo->local_conds. In an inner join it's okay to execute conditions + * either locally or remotely; the same is true for pushed-down conditions + * at an outer join. + * + * Note we might return failure after having already scribbled on + * fpinfo->remote_conds and fpinfo->local_conds. That's okay because we + * won't consult those lists again if we deem the join unshippable. + */ + joinclauses = NIL; + foreach(lc, extra->restrictlist) + { + RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc); + bool is_remote_clause = mongo_is_foreign_expr(root, + joinrel, + rinfo->clause, + false); + + if (IS_OUTER_JOIN(jointype) && + !RINFO_IS_PUSHED_DOWN(rinfo, joinrel->relids)) { - int16 value = (int16) bson_iterator_int(bsonIterator); - columnValue = Int16GetDatum(value); - break; + if (!is_remote_clause) + return false; + joinclauses = lappend(joinclauses, rinfo); } - case INT4OID: + else { - int32 value = bson_iterator_int(bsonIterator); - columnValue = Int32GetDatum(value); - break; + if (is_remote_clause && jointype == JOIN_INNER) + { + /* + * Unlike postgres_fdw, for inner join, don't append the join + * clauses to remote_conds, instead keep the join clauses + * separate. 
Currently, we are providing limited operator + * push-ability support for join pushdown, hence we keep those + * clauses separate to avoid INNER JOIN not getting pushdown + * if any of the WHERE clauses are not shippable as per join + * pushdown shippability. + */ + joinclauses = lappend(joinclauses, rinfo); + } + else + fpinfo->local_conds = lappend(fpinfo->local_conds, rinfo); } - case INT8OID: - { - int64 value = bson_iterator_long(bsonIterator); - columnValue = Int64GetDatum(value); + } + + /* + * If there's some PlaceHolderVar that would need to be evaluated within + * this join tree (because there's an upper reference to a quantity that + * may go to NULL as a result of an outer join), then we can't try to push + * the join down. + */ + foreach(lc, root->placeholder_list) + { + PlaceHolderInfo *phinfo = lfirst(lc); + Relids relids; + + /* PlaceHolderInfo refers to parent relids, not child relids. */ + relids = IS_OTHER_REL(joinrel) ? + joinrel->top_parent_relids : joinrel->relids; + + if (bms_is_subset(phinfo->ph_eval_at, relids) && + bms_nonempty_difference(relids, phinfo->ph_eval_at)) + return false; + } + + /* Save the join clauses, for later use. */ + fpinfo->joinclauses = joinclauses; + fpinfo->outerrel = outerrel; + fpinfo->innerrel = innerrel; + fpinfo->jointype = jointype; + + /* + * Pull the other remote conditions from the joining relations into join + * clauses or other remote clauses (remote_conds) of this relation. This + * avoids building sub-queries at every join step. + * + * For an INNER and OUTER join, the clauses from the outer side are added + * to remote_conds since those can be evaluated after the join is + * evaluated. The clauses from the inner side are added to the + * joinclauses, since they need to be evaluated while constructing the + * join. + * + * The joining sides cannot have local conditions, thus no need to test + * the shippability of the clauses being pulled up. + */ + switch (jointype) + { + case JOIN_INNER: + case JOIN_LEFT: + fpinfo->joinclauses = mongo_list_concat(fpinfo->joinclauses, + fpinfo_i->remote_conds); + fpinfo->remote_conds = mongo_list_concat(fpinfo->remote_conds, + fpinfo_o->remote_conds); break; - } - case FLOAT4OID: - { - float4 value = (float4) bson_iterator_double(bsonIterator); - columnValue = Float4GetDatum(value); + case JOIN_RIGHT: + fpinfo->joinclauses = mongo_list_concat(fpinfo->joinclauses, + fpinfo_o->remote_conds); + fpinfo->remote_conds = mongo_list_concat(fpinfo->remote_conds, + fpinfo_i->remote_conds); break; + default: + /* Should not happen, we have just checked this above */ + elog(ERROR, "unsupported join type %d", jointype); + } + + fpinfo->outer_relname = fpinfo_o->base_relname; + fpinfo->inner_relname = fpinfo_i->base_relname; + + /* Mark that this join can be pushed down safely */ + fpinfo->pushdown_safe = true; + + /* Joinrel's aggregation flag depends on each joining relation's flag. */ + fpinfo->is_agg_scanrel_pushable = fpinfo_o->is_agg_scanrel_pushable && + fpinfo_i->is_agg_scanrel_pushable; + + /* Set the flag is_order_by_pushable of the join relation */ + fpinfo->is_order_by_pushable = fpinfo_o->is_order_by_pushable && + fpinfo_i->is_order_by_pushable; + + /* + * Set the string describing this join relation to be used in EXPLAIN + * output of the corresponding ForeignScan. 
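+ * The string takes the form "(<outer relation>) <join type> JOIN
+ * (<inner relation>)".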
+ */ + fpinfo->relation_name = makeStringInfo(); + appendStringInfo(fpinfo->relation_name, "(%s) %s JOIN (%s)", + fpinfo_o->relation_name->data, + mongo_get_jointype_name(fpinfo->jointype), + fpinfo_i->relation_name->data); + + return true; +} + +/* + * mongo_prepare_qual_info + * Gather information of columns involved in the quals by extracting + * clause from each qual and process it further using mongo_check_qual(). + */ +static void +mongo_prepare_qual_info(List *quals, MongoRelQualInfo *qual_info) +{ + ListCell *lc; + + foreach(lc, quals) + { + Expr *expr = (Expr *) lfirst(lc); + + /* Extract clause from RestrictInfo */ + if (IsA(expr, RestrictInfo)) + { + RestrictInfo *ri = (RestrictInfo *) expr; + + expr = ri->clause; } - case FLOAT8OID: + + mongo_check_qual(expr, qual_info); + } +} + +/* + * mongo_foreign_grouping_ok + * Assess whether the aggregation, grouping and having operations can + * be pushed down to the foreign server. As a side effect, save + * information we obtain in this function to MongoFdwRelationInfo of + * the input relation. + */ +static bool +mongo_foreign_grouping_ok(PlannerInfo *root, RelOptInfo *grouped_rel, + Node *havingQual) +{ + Query *query = root->parse; + PathTarget *grouping_target = grouped_rel->reltarget; + MongoFdwRelationInfo *fpinfo = (MongoFdwRelationInfo *) grouped_rel->fdw_private; + MongoFdwRelationInfo *ofpinfo = (MongoFdwRelationInfo *) fpinfo->outerrel->fdw_private; + ListCell *lc; + int i; + List *tlist = NIL; + + /* Grouping Sets are not pushable */ + if (query->groupingSets) + return false; + + /* + * If underneath input relation has any local conditions, those conditions + * are required to be applied before performing aggregation. Hence the + * aggregate cannot be pushed down. + */ + if (ofpinfo->local_conds) + return false; + + /* + * Evaluate grouping targets and check whether they are safe to push down + * to the foreign side. All GROUP BY expressions will be part of the + * grouping target and thus there is no need to evaluate them separately. + * While doing so, add required expressions into the target list which can + * then be used to pass to a foreign server. + */ + i = 0; + foreach(lc, grouping_target->exprs) + { + Expr *expr = (Expr *) lfirst(lc); + Index sgref = get_pathtarget_sortgroupref(grouping_target, i); + ListCell *l; + + /* Check whether this expression is part of GROUP BY clause */ + if (sgref && get_sortgroupref_clause_noerr(sgref, query->groupClause)) { - float8 value = bson_iterator_double(bsonIterator); - columnValue = Float8GetDatum(value); - break; + TargetEntry *tle; + + /* + * If any of the GROUP BY expression is not shippable we can not + * push down aggregation to the foreign server. + */ + if (!mongo_is_foreign_expr(root, grouped_rel, expr, false)) + return false; + + /* Add column in group by column list */ + ofpinfo->groupbyColList = lappend(ofpinfo->groupbyColList, expr); + + /* + * If it would be a foreign param, we can't put it into the tlist, + * so we have to fail. + */ + if (mongo_is_foreign_param(root, grouped_rel, expr)) + return false; + + /* + * Pushable, so add to tlist. We need to create a TLE for this + * expression and apply the sortgroupref to it. We cannot use + * add_to_flat_tlist() here because that avoids making duplicate + * entries in the tlist. If there are duplicate entries with + * distinct sortgrouprefs, we have to duplicate that situation in + * the output tlist. 
+ */ + tle = makeTargetEntry(expr, list_length(tlist) + 1, NULL, false); + tle->ressortgroupref = sgref; + tlist = lappend(tlist, tle); } - case NUMERICOID: + else { - float8 value = bson_iterator_double(bsonIterator); - Datum valueDatum = Float8GetDatum(value); + /* Check entire expression whether it is pushable or not */ + if (mongo_is_foreign_expr(root, grouped_rel, expr, false) && + !mongo_is_foreign_param(root, grouped_rel, expr)) + { + /* Pushable, add to tlist */ + tlist = add_to_flat_tlist(tlist, list_make1(expr)); + } + else + { + List *aggvars; - /* overlook type modifiers for numeric */ - columnValue = DirectFunctionCall1(float8_numeric, valueDatum); - break; + /* Not matched exactly, pull the var with aggregates then */ + aggvars = pull_var_clause((Node *) expr, + PVC_INCLUDE_AGGREGATES); + + /* + * If any aggregate expression is not shippable, then we + * cannot push down aggregation to the foreign server. + */ + if (!mongo_is_foreign_expr(root, grouped_rel, (Expr *) aggvars, + false)) + return false; + + /* + * Add aggregates, if any, into the targetlist. Plain var + * nodes should be either same as some GROUP BY expression or + * part of some GROUP BY expression. In later case, the query + * cannot refer plain var nodes without the surrounding + * expression. In both the cases, they are already part of + * the targetlist and thus no need to add them again. In fact + * adding pulled plain var nodes in SELECT clause will cause + * an error on the foreign server if they are not same as some + * GROUP BY expression. + */ + foreach(l, aggvars) + { + expr = (Expr *) lfirst(l); + + if (IsA(expr, Aggref)) + tlist = add_to_flat_tlist(tlist, list_make1(expr)); + } + } } - case BOOLOID: + + i++; + } + + /* + * Classify the pushable and non-pushable having clauses and save them in + * remote_conds and local_conds of the grouped rel's fpinfo. + */ + if (havingQual) + { + foreach(lc, (List *) havingQual) { - bool value = bson_iterator_bool(bsonIterator); - columnValue = BoolGetDatum(value); - break; + Expr *expr = (Expr *) lfirst(lc); + RestrictInfo *rinfo; + + /* + * Currently, the core code doesn't wrap havingQuals in + * RestrictInfos, so we must make our own. + */ + Assert(!IsA(expr, RestrictInfo)); +#if PG_VERSION_NUM >= 160000 + rinfo = make_restrictinfo(root, + expr, + true, + false, + false, + false, + root->qual_security_level, + grouped_rel->relids, + NULL, + NULL); +#elif PG_VERSION_NUM >= 140000 + rinfo = make_restrictinfo(root, + expr, + true, + false, + false, + root->qual_security_level, + grouped_rel->relids, + NULL, + NULL); +#else + rinfo = make_restrictinfo(expr, + true, + false, + false, + root->qual_security_level, + grouped_rel->relids, + NULL, + NULL); +#endif + + if (!mongo_is_foreign_expr(root, grouped_rel, expr, true)) + fpinfo->local_conds = lappend(fpinfo->local_conds, rinfo); + else + fpinfo->remote_conds = lappend(fpinfo->remote_conds, rinfo); } - case BPCHAROID: + } + + /* + * If there are any local conditions, pull Vars and aggregates from it and + * check whether they are safe to pushdown or not. 
+ */ + if (fpinfo->local_conds) + { + List *aggvars = NIL; + + foreach(lc, fpinfo->local_conds) { - const char *value = bson_iterator_string(bsonIterator); - Datum valueDatum = CStringGetDatum(value); + RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc); - columnValue = DirectFunctionCall3(bpcharin, valueDatum, - ObjectIdGetDatum(InvalidOid), - Int32GetDatum(columnTypeMod)); - break; + aggvars = list_concat(aggvars, + pull_var_clause((Node *) rinfo->clause, + PVC_INCLUDE_AGGREGATES)); } - case VARCHAROID: + + foreach(lc, aggvars) { - const char *value = bson_iterator_string(bsonIterator); - Datum valueDatum = CStringGetDatum(value); + Expr *expr = (Expr *) lfirst(lc); + + /* + * If aggregates within local conditions are not safe to push + * down, then we cannot push down the query. Vars are already + * part of GROUP BY clause which are checked above, so no need to + * access them again here. + */ + if (IsA(expr, Aggref)) + { + if (!mongo_is_foreign_expr(root, grouped_rel, expr, false)) + return false; + + tlist = add_to_flat_tlist(tlist, list_make1(expr)); + } + } + } + + /* Store generated targetlist */ + fpinfo->grouped_tlist = tlist; + + /* Safe to pushdown */ + fpinfo->pushdown_safe = true; + + /* + * Set the string describing this grouped relation to be used in EXPLAIN + * output of corresponding ForeignScan. + */ + fpinfo->relation_name = makeStringInfo(); + appendStringInfo(fpinfo->relation_name, "Aggregate on (%s)", + ofpinfo->relation_name->data); + + return true; +} + +/* + * mongoGetForeignUpperPaths + * Add paths for post-join operations like aggregation, grouping etc. if + * corresponding operations are safe to push down. + */ +static void +mongoGetForeignUpperPaths(PlannerInfo *root, UpperRelationKind stage, + RelOptInfo *input_rel, RelOptInfo *output_rel, + void *extra) +{ + MongoFdwRelationInfo *fpinfo; + + /* + * If input rel is not safe to pushdown, then simply return as we cannot + * perform any post-join operations on the foreign server. + */ + if (!input_rel->fdw_private || + !((MongoFdwRelationInfo *) input_rel->fdw_private)->pushdown_safe) + return; + + /* Ignore stages we don't support; and skip any duplicate calls. */ + if ((stage != UPPERREL_GROUP_AGG && stage != UPPERREL_ORDERED && + stage != UPPERREL_FINAL) || + output_rel->fdw_private) + return; - columnValue = DirectFunctionCall3(varcharin, valueDatum, - ObjectIdGetDatum(InvalidOid), - Int32GetDatum(columnTypeMod)); + fpinfo = (MongoFdwRelationInfo *) palloc0(sizeof(MongoFdwRelationInfo)); + fpinfo->pushdown_safe = false; + fpinfo->stage = stage; + output_rel->fdw_private = fpinfo; + + switch (stage) + { + case UPPERREL_GROUP_AGG: + mongo_add_foreign_grouping_paths(root, input_rel, output_rel, + (GroupPathExtraData *) extra); + break; + case UPPERREL_ORDERED: + mongo_add_foreign_ordered_paths(root, input_rel, output_rel); + break; + case UPPERREL_FINAL: + mongo_add_foreign_final_paths(root, input_rel, output_rel, + (FinalPathExtraData *) extra); break; + default: + elog(ERROR, "unexpected upper relation: %d", (int) stage); + break; + } +} + +/* + * mongo_add_foreign_grouping_paths + * Add foreign path for grouping and/or aggregation. + * + * Given input_rel represents the underlying scan. The paths are added to the + * given grouped_rel. 
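+ *
+ * A path is added only when aggregate pushdown is enabled for the
+ * underlying relation and mongo_foreign_grouping_ok() finds the grouping
+ * and HAVING clauses safe to evaluate remotely.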
+ */ +static void +mongo_add_foreign_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel, + RelOptInfo *grouped_rel, + GroupPathExtraData *extra) +{ + Query *parse = root->parse; + MongoFdwRelationInfo *fpinfo = grouped_rel->fdw_private; + ForeignPath *grouppath; + Cost startup_cost; + Cost total_cost; + double num_groups; + + /* Nothing to be done, if there is no grouping or aggregation required. */ + if (!parse->groupClause && !parse->groupingSets && !parse->hasAggs && + !root->hasHavingQual) + return; + + /* Save the input_rel as outerrel in fpinfo */ + fpinfo->outerrel = input_rel; + + /* Set aggregation flag of aggregate relation */ + fpinfo->is_agg_scanrel_pushable = + ((MongoFdwRelationInfo *) input_rel->fdw_private)->is_agg_scanrel_pushable; + + /* If aggregate pushdown is not enabled, honor it. */ + if (!enable_aggregate_pushdown || !fpinfo->is_agg_scanrel_pushable) + return; + + /* Assess if it is safe to push down aggregation and grouping. */ + if (!mongo_foreign_grouping_ok(root, grouped_rel, extra->havingQual)) + return; + + fpinfo->is_order_by_pushable = + ((MongoFdwRelationInfo *) input_rel->fdw_private)->is_order_by_pushable; + + /* + * TODO: Put accurate estimates here. + * + * Cost used here is minimum of the cost estimated for base and join + * relation. + */ + startup_cost = 15; + total_cost = 10 + startup_cost; + + /* Estimate output tuples which should be same as number of groups */ +#if PG_VERSION_NUM >= 140000 + num_groups = estimate_num_groups(root, + get_sortgrouplist_exprs(root->parse->groupClause, + fpinfo->grouped_tlist), + input_rel->rows, NULL, NULL); +#else + num_groups = estimate_num_groups(root, + get_sortgrouplist_exprs(root->parse->groupClause, + fpinfo->grouped_tlist), + input_rel->rows, NULL); +#endif + + /* Create and add foreign path to the grouping relation. */ +#if PG_VERSION_NUM >= 170000 + grouppath = create_foreign_upper_path(root, + grouped_rel, + grouped_rel->reltarget, + num_groups, + startup_cost, + total_cost, + NIL, /* no pathkeys */ + NULL, + NIL, /* no fdw_restrictinfo list */ + NIL); /* no fdw_private */ +#else + grouppath = create_foreign_upper_path(root, + grouped_rel, + grouped_rel->reltarget, + num_groups, + startup_cost, + total_cost, + NIL, /* no pathkeys */ + NULL, + NIL); /* no fdw_private */ +#endif + + /* Add generated path into grouped_rel by add_path(). */ + add_path(grouped_rel, (Path *) grouppath); +} + +/* + * mongoEstimateCosts + * Estimate the remote query cost + */ +static void +mongoEstimateCosts(RelOptInfo *baserel, Cost *startup_cost, Cost *total_cost, + Oid foreigntableid) +{ + MongoFdwOptions *options; + + /* Fetch options */ + options = mongo_get_options(foreigntableid); + + /* Local databases are probably faster */ + if (strcmp(options->svr_address, "127.0.0.1") == 0 || + strcmp(options->svr_address, "localhost") == 0) + *startup_cost = 10; + else + *startup_cost = 25; + + *total_cost = baserel->rows + *startup_cost; +} + +/* + * mongo_get_useful_ecs_for_relation + * Determine which EquivalenceClasses might be involved in useful + * orderings of this relation. + * + * This function is in some respects a mirror image of the core function + * pathkeys_useful_for_merging: for a regular table, we know what indexes + * we have and want to test whether any of them are useful. For a foreign + * table, we don't know what indexes are present on the remote side but + * want to speculate about which ones we'd like to use if they existed. 
+ * + * This function returns a list of potentially-useful equivalence classes, + * but it does not guarantee that an EquivalenceMember exists which contains + * Vars only from the given relation. For example, given ft1 JOIN t1 ON + * ft1.x + t1.x = 0, this function will say that the equivalence class + * containing ft1.x + t1.x is potentially useful. Supposing ft1 is remote and + * t1 is local (or on a different server), it will turn out that no useful + * ORDER BY clause can be generated. It's not our job to figure that out + * here; we're only interested in identifying relevant ECs. + */ +static List * +mongo_get_useful_ecs_for_relation(PlannerInfo *root, RelOptInfo *rel) +{ + List *useful_eclass_list = NIL; + ListCell *lc; + Relids relids; + + /* + * First, consider whether any active EC is potentially useful for a merge + * join against this relation. + */ + if (rel->has_eclass_joins) + { + foreach(lc, root->eq_classes) + { + EquivalenceClass *cur_ec = (EquivalenceClass *) lfirst(lc); + + if (eclass_useful_for_merging(root, cur_ec, rel)) + useful_eclass_list = lappend(useful_eclass_list, cur_ec); } - case TEXTOID: + } + + /* + * Next, consider whether there are any non-EC derivable join clauses that + * are merge-joinable. If the joininfo list is empty, we can exit + * quickly. + */ + if (rel->joininfo == NIL) + return useful_eclass_list; + + /* If this is a child rel, we must use the topmost parent rel to search. */ + if (IS_OTHER_REL(rel)) + { + Assert(!bms_is_empty(rel->top_parent_relids)); + relids = rel->top_parent_relids; + } + else + relids = rel->relids; + + /* Check each join clause in turn. */ + foreach(lc, rel->joininfo) + { + RestrictInfo *restrictinfo = (RestrictInfo *) lfirst(lc); + + /* Consider only mergejoinable clauses */ + if (restrictinfo->mergeopfamilies == NIL) + continue; + + /* Make sure we've got canonical ECs. */ + update_mergeclause_eclasses(root, restrictinfo); + + /* + * restrictinfo->mergeopfamilies != NIL is sufficient to guarantee + * that left_ec and right_ec will be initialized, per comments in + * distribute_qual_to_rels. + * + * We want to identify which side of this merge-joinable clause + * contains columns from the relation produced by this RelOptInfo. We + * test for overlap, not containment, because there could be extra + * relations on either side. For example, suppose we've got something + * like ((A JOIN B ON A.x = B.x) JOIN C ON A.y = C.y) LEFT JOIN D ON + * A.y = D.y. The input rel might be the joinrel between A and B, and + * we'll consider the join clause A.y = D.y. relids contains a + * relation not involved in the join class (B) and the equivalence + * class for the left-hand side of the clause contains a relation not + * involved in the input rel (C). Despite the fact that we have only + * overlap and not containment in either direction, A.y is potentially + * useful as a sort column. + * + * Note that it's even possible that relids overlaps neither side of + * the join clause. For example, consider A LEFT JOIN B ON A.x = B.x + * AND A.x = 1. The clause A.x = 1 will appear in B's joininfo list, + * but overlaps neither side of B. In that case, we just skip this + * join clause, since it doesn't suggest a useful sort order for this + * relation. 
+ */ + if (bms_overlap(relids, restrictinfo->right_ec->ec_relids)) + useful_eclass_list = list_append_unique_ptr(useful_eclass_list, + restrictinfo->right_ec); + else if (bms_overlap(relids, restrictinfo->left_ec->ec_relids)) + useful_eclass_list = list_append_unique_ptr(useful_eclass_list, + restrictinfo->left_ec); + } + + return useful_eclass_list; +} + +/* + * mongo_get_useful_pathkeys_for_relation + * Determine which orderings of a relation might be useful. + * + * Getting data in sorted order can be useful either because the requested + * order matches the final output ordering for the overall query we're + * planning, or because it enables an efficient merge join. Here, we try + * to figure out which pathkeys to consider. + * + * MongoDB considers null values as the "smallest" ones, so they appear first + * when sorting in ascending order, and appear last when sorting in descending + * order. MongoDB doesn't have provision for "NULLS FIRST" and "NULLS LAST" + * like syntaxes. So, by considering all these restrictions from MongoDB, we + * can support push-down of only below two cases of the ORDER BY clause: + * + * 1. ORDER BY ASC NULLS FIRST + * 2. ORDER BY DESC NULLS LAST + * + * Where, expr can only be a column and not any expression because MongoDB + * sorts only on fields. Multiple columns can be provided. + */ +static List * +mongo_get_useful_pathkeys_for_relation(PlannerInfo *root, RelOptInfo *rel) +{ + List *useful_pathkeys_list = NIL; + List *useful_eclass_list; + MongoFdwRelationInfo *fpinfo = (MongoFdwRelationInfo *) rel->fdw_private; + EquivalenceClass *query_ec = NULL; + ListCell *lc; + + /* + * Pushing the query_pathkeys to the remote server is always worth + * considering, because it might let us avoid a local sort. + */ + fpinfo->qp_is_pushdown_safe = false; + if (root->query_pathkeys) + { + bool query_pathkeys_ok = true; + + foreach(lc, root->query_pathkeys) { - const char *value = bson_iterator_string(bsonIterator); - columnValue = CStringGetTextDatum(value); - break; + PathKey *pathkey = (PathKey *) lfirst(lc); + + /* Only ASC NULLS FIRST and DESC NULLS LAST can be pushed down */ + if (!IS_PATHKEY_PUSHABLE(pathkey)) + { + query_pathkeys_ok = false; + break; + } + + /* + * The planner and executor don't have any clever strategy for + * taking data sorted by a prefix of the query's pathkeys and + * getting it to be sorted by all of those pathkeys. We'll just + * end up resorting the entire data set. So, unless we can push + * down all of the query pathkeys, forget it. + */ + if (!mongo_is_foreign_pathkey(root, rel, pathkey)) + { + query_pathkeys_ok = false; + break; + } } - case NAMEOID: + + if (query_pathkeys_ok) { - char value[NAMEDATALEN]; - Datum valueDatum = 0; + useful_pathkeys_list = list_make1(list_copy(root->query_pathkeys)); + fpinfo->qp_is_pushdown_safe = true; + } + } - bson_oid_t *bsonObjectId = bson_iterator_oid(bsonIterator); - bson_oid_to_string(bsonObjectId, value); + /* Get the list of interesting EquivalenceClasses. */ + useful_eclass_list = mongo_get_useful_ecs_for_relation(root, rel); - valueDatum = CStringGetDatum(value); - columnValue = DirectFunctionCall3(namein, valueDatum, - ObjectIdGetDatum(InvalidOid), - Int32GetDatum(columnTypeMod)); - break; - } - case DATEOID: - { - int64 valueMillis = bson_iterator_date(bsonIterator); - int64 timestamp = (valueMillis * 1000L) - POSTGRES_TO_UNIX_EPOCH_USECS; - Datum timestampDatum = TimestampGetDatum(timestamp); + /* Extract unique EC for query, if any, so we don't consider it again. 
*/ + if (list_length(root->query_pathkeys) == 1) + { + PathKey *query_pathkey = linitial(root->query_pathkeys); - columnValue = DirectFunctionCall1(timestamp_date, timestampDatum); - break; - } - case TIMESTAMPOID: - case TIMESTAMPTZOID: - { - int64 valueMillis = bson_iterator_date(bsonIterator); - int64 timestamp = (valueMillis * 1000L) - POSTGRES_TO_UNIX_EPOCH_USECS; + query_ec = query_pathkey->pk_eclass; + } - /* overlook type modifiers for timestamp */ - columnValue = TimestampGetDatum(timestamp); - break; - } - default: - { - ereport(ERROR, (errcode(ERRCODE_FDW_INVALID_DATA_TYPE), - errmsg("cannot convert bson type to column type"), - errhint("Column type: %u", (uint32) columnTypeId))); - break; - } + /* + * As a heuristic, the only pathkeys we consider here are those of length + * one. It's surely possible to consider more, but since each one we + * choose to consider will generate a round-trip to the remote side, we + * need to be a bit cautious here. It would sure be nice to have a local + * cache of information about remote index definitions... + */ + foreach(lc, useful_eclass_list) + { + EquivalenceClass *cur_ec = lfirst(lc); + EquivalenceMember *em; + Expr *em_expr; + PathKey *pathkey; + + /* If redundant with what we did above, skip it. */ + if (cur_ec == query_ec) + continue; + + /* Can't push down the sort if the EC's opfamily is not shippable. */ + if (!mongo_is_builtin(linitial_oid(cur_ec->ec_opfamilies))) + continue; + + /* If no pushable expression for this rel, skip it. */ + if (!(em = mongo_find_em_for_rel(root, cur_ec, rel))) + continue; + + /* Ignore binary-compatible relabeling */ + em_expr = em->em_expr; + while (em_expr && IsA(em_expr, RelabelType)) + em_expr = ((RelabelType *) em_expr)->arg; + + /* Only Vars are allowed per MongoDB. */ + if (!IsA(em_expr, Var)) + continue; + + /* Looks like we can generate a pathkey, so let's do it. */ + pathkey = make_canonical_pathkey(root, cur_ec, + linitial_oid(cur_ec->ec_opfamilies), + BTLessStrategyNumber, + false); + if (!IS_PATHKEY_PUSHABLE(pathkey)) + continue; + + /* Check for sort operator pushability. */ + if (!mongo_is_default_sort_operator(em, pathkey)) + continue; + + useful_pathkeys_list = lappend(useful_pathkeys_list, + list_make1(pathkey)); } - return columnValue; + return useful_pathkeys_list; } - /* - * MongoFreeScanState closes the cursor and connection to MongoDB, and reclaims - * all Mongo related resources allocated for the foreign scan. + * mongo_add_paths_with_pathkeys + * Add path with root->query_pathkeys if that's pushable. + * + * Pushing down query_pathkeys to the foreign server might let us avoid a + * local sort. */ +#if PG_VERSION_NUM >= 170000 +static void +mongo_add_paths_with_pathkeys(PlannerInfo *root, RelOptInfo *rel, + Path *epq_path, Cost base_startup_cost, + Cost base_total_cost, List *restrictlist) +#else static void -MongoFreeScanState(MongoFdwExecState *executionState) +mongo_add_paths_with_pathkeys(PlannerInfo *root, RelOptInfo *rel, + Path *epq_path, Cost base_startup_cost, + Cost base_total_cost) +#endif { - if (executionState == NULL) - { + ListCell *lc; + List *useful_pathkeys_list = NIL; /* List of all pathkeys */ + + /* If orderby pushdown is not enabled, honor it. */ + if (!enable_order_by_pushdown || + !((MongoFdwRelationInfo *) rel->fdw_private)->is_order_by_pushable) + return; + + /* + * Check the query pathkeys length. Don't push when exceeding the limit + * set by MongoDB. 
+ */ + if (list_length(root->query_pathkeys) > MAX_PATHKEYS) return; - } - bson_destroy(executionState->queryDocument); - bson_dispose(executionState->queryDocument); + useful_pathkeys_list = mongo_get_useful_pathkeys_for_relation(root, rel); - mongo_cursor_destroy(executionState->mongoCursor); - mongo_cursor_dispose(executionState->mongoCursor); + /* Create one path for each set of pathkeys we found above. */ + foreach(lc, useful_pathkeys_list) + { + Cost startup_cost; + Cost total_cost; + List *useful_pathkeys = lfirst(lc); + Path *sorted_epq_path; - /* also close the connection to mongo server */ - mongo_destroy(executionState->mongoConnection); - mongo_dispose(executionState->mongoConnection); -} + /* TODO put accurate estimates. */ + startup_cost = base_startup_cost * DEFAULT_MONGO_SORT_MULTIPLIER; + total_cost = base_total_cost * DEFAULT_MONGO_SORT_MULTIPLIER; + /* + * The EPQ path must be at least as well sorted as the path itself, in + * case it gets used as input to a mergejoin. + */ + sorted_epq_path = epq_path; + if (sorted_epq_path != NULL && + !pathkeys_contained_in(useful_pathkeys, + sorted_epq_path->pathkeys)) + sorted_epq_path = (Path *) + create_sort_path(root, + rel, + sorted_epq_path, + useful_pathkeys, + -1.0); + + if (IS_SIMPLE_REL(rel)) +#if PG_VERSION_NUM >= 170000 + add_path(rel, (Path *) + create_foreignscan_path(root, rel, + NULL, + rel->rows, + startup_cost, + total_cost, + useful_pathkeys, + rel->lateral_relids, + sorted_epq_path, + NIL, /* no fdw_restrictinfo list */ + NIL)); /* no fdw_private list */ +#else + add_path(rel, (Path *) + create_foreignscan_path(root, rel, + NULL, + rel->rows, + startup_cost, + total_cost, + useful_pathkeys, + rel->lateral_relids, + sorted_epq_path, + NIL)); /* no fdw_private list */ +#endif + else +#if PG_VERSION_NUM >= 170000 + add_path(rel, (Path *) + create_foreign_join_path(root, rel, + NULL, + rel->rows, + startup_cost, + total_cost, + useful_pathkeys, + rel->lateral_relids, + sorted_epq_path, + restrictlist, + NIL)); /* no fdw_private */ +#else + add_path(rel, (Path *) + create_foreign_join_path(root, rel, + NULL, + rel->rows, + startup_cost, + total_cost, + useful_pathkeys, + rel->lateral_relids, + sorted_epq_path, + NIL)); /* no fdw_private */ +#endif + } +} /* - * MongoAnalyzeForeignTable collects statistics for the given foreign table. + * mongo_find_em_for_rel + * Find an equivalence class member expression, all of whose Vars, come + * from the indicated relation. */ -static bool -MongoAnalyzeForeignTable(Relation relation, - AcquireSampleRowsFunc *acquireSampleRowsFunc, - BlockNumber *totalPageCount) +EquivalenceMember * +mongo_find_em_for_rel(PlannerInfo *root, EquivalenceClass *ec, RelOptInfo *rel) { - BlockNumber pageCount = 0; - int attributeCount = 0; - int32 *attributeWidths = NULL; - Oid foreignTableId = InvalidOid; - int32 documentWidth = 0; - double documentCount = 0.0; - double foreignTableSize = 0; - - foreignTableId = RelationGetRelid(relation); - documentCount = ForeignTableDocumentCount(foreignTableId); + ListCell *lc_em; - if (documentCount > 0.0) + foreach(lc_em, ec->ec_members) { - attributeCount = RelationGetNumberOfAttributes(relation); - attributeWidths = (int32 *) palloc0((attributeCount + 1) * sizeof(int32)); + EquivalenceMember *em = (EquivalenceMember *) lfirst(lc_em); /* - * We estimate disk costs assuming a sequential scan over the data. This is - * an inaccurate assumption as Mongo scatters the data over disk pages, and - * may rely on an index to retrieve the data. 
Still, this should at least - * give us a relative cost. + * Note we require !bms_is_empty, else we'd accept constant + * expressions which are not suitable for the purpose. */ - documentWidth = get_relation_data_width(foreignTableId, attributeWidths); - foreignTableSize = documentCount * documentWidth; - - pageCount = (BlockNumber) rint(foreignTableSize / BLCKSZ); - } - else - { - ereport(ERROR, (errmsg("could not retrieve document count for collection"), - errhint("could not collect statistics about foreign table"))); + if (bms_is_subset(em->em_relids, rel->relids) && + !bms_is_empty(em->em_relids) && + mongo_is_foreign_expr(root, rel, em->em_expr, false)) + { + /* + * If there is more than one equivalence member whose Vars are + * taken entirely from this relation, we'll be content to choose + * any one of those. + */ + return em; + } } - (*totalPageCount) = pageCount; - (*acquireSampleRowsFunc) = MongoAcquireSampleRows; - - return true; + /* We didn't find any suitable equivalence class expression */ + return NULL; } - /* - * MongoAcquireSampleRows acquires a random sample of rows from the foreign - * table. Selected rows are returned in the caller allocated sampleRows array, - * which must have at least target row count entries. The actual number of rows - * selected is returned as the function result. We also count the number of rows - * in the collection and return it in total row count. We also always set dead - * row count to zero. + * mongo_add_foreign_ordered_paths + * Add foreign paths for performing the final sort remotely. * - * Note that the returned list of rows is not always in order by physical - * position in the MongoDB collection. Therefore, correlation estimates - * derived later may be meaningless, but it's OK because we don't use the - * estimates currently (the planner only pays attention to correlation for - * index scans). + * Given input_rel contains the source-data Paths. The paths are added to the + * given ordered_rel. 
*/ -static int -MongoAcquireSampleRows(Relation relation, int errorLevel, - HeapTuple *sampleRows, int targetRowCount, - double *totalRowCount, double *totalDeadRowCount) +static void +mongo_add_foreign_ordered_paths(PlannerInfo *root, RelOptInfo *input_rel, + RelOptInfo *ordered_rel) { - int sampleRowCount = 0; - double rowCount = 0; - double rowCountToSkip = -1; /* -1 means not set yet */ - double randomState = 0; - Datum *columnValues = NULL; - bool *columnNulls = NULL; - Oid foreignTableId = InvalidOid; - TupleDesc tupleDescriptor = NULL; - Form_pg_attribute *attributesPtr = NULL; - AttrNumber columnCount = 0; - AttrNumber columnId = 0; - HTAB *columnMappingHash = NULL; - mongo_cursor *mongoCursor = NULL; - bson *queryDocument = NULL; - Const *queryBuffer = NULL; - List *columnList = NIL; - ForeignScanState *scanState = NULL; - List *foreignPrivateList = NIL; - ForeignScan *foreignScan = NULL; - MongoFdwExecState *executionState = NULL; - char *relationName = NULL; - int executorFlags = 0; - MemoryContext oldContext = CurrentMemoryContext; - MemoryContext tupleContext = NULL; + Query *parse = root->parse; + MongoFdwRelationInfo *ifpinfo = input_rel->fdw_private; + MongoFdwRelationInfo *fpinfo = ordered_rel->fdw_private; + double rows; + Cost startup_cost; + Cost total_cost; + List *fdw_private; + ForeignPath *ordered_path; + ListCell *lc; + + /* Set the flag is_order_by_pushable of the ordered relation */ + fpinfo->is_order_by_pushable = + ((MongoFdwRelationInfo *) input_rel->fdw_private)->is_order_by_pushable; + + /* If orderby pushdown is not enabled, honor it. */ + if (!enable_order_by_pushdown || !fpinfo->is_order_by_pushable) + return; - /* create list of columns in the relation */ - tupleDescriptor = RelationGetDescr(relation); - columnCount = tupleDescriptor->natts; - attributesPtr = tupleDescriptor->attrs; + /* Shouldn't get here unless the query has ORDER BY */ + Assert(parse->sortClause); - for (columnId = 1; columnId <= columnCount; columnId++) + /* We don't support cases where there are any SRFs in the targetlist */ + if (parse->hasTargetSRFs) + return; + + /* + * Check the query pathkeys length. Don't push when exceeding the limit + * set by MongoDB. + */ + if (list_length(root->query_pathkeys) > MAX_PATHKEYS) + return; + + /* Save the input_rel as outerrel in fpinfo */ + fpinfo->outerrel = input_rel; + + /* + * If the input_rel is a base or join relation, we would already have + * considered pushing down the final sort to the remote server when + * creating pre-sorted foreign paths for that relation, because the + * query_pathkeys is set to the root->sort_pathkeys in that case (see + * standard_qp_callback()). 
+ */ + if (input_rel->reloptkind == RELOPT_BASEREL || + input_rel->reloptkind == RELOPT_JOINREL) { - Var *column = (Var *) palloc0(sizeof(Var)); + Assert(root->query_pathkeys == root->sort_pathkeys); - /* only assign required fields for column mapping hash */ - column->varattno = columnId; - column->vartype = attributesPtr[columnId-1]->atttypid; - column->vartypmod = attributesPtr[columnId-1]->atttypmod; + /* Safe to push down */ + fpinfo->pushdown_safe = ifpinfo->qp_is_pushdown_safe; - columnList = lappend(columnList, column); + return; } - /* create state structure */ - scanState = makeNode(ForeignScanState); - scanState->ss.ss_currentRelation = relation; + /* The input_rel should be a grouping relation */ + Assert(input_rel->reloptkind == RELOPT_UPPER_REL && + ifpinfo->stage == UPPERREL_GROUP_AGG); - foreignTableId = RelationGetRelid(relation); - queryDocument = QueryDocument(foreignTableId, NIL); - queryBuffer = SerializeDocument(queryDocument); + /* + * We try to create a path below by extending a simple foreign path for + * the underlying grouping relation to perform the final sort remotely, + * which is stored into the fdw_private list of the resulting path. + */ - /* only clean up the query struct, but not its data */ - bson_dispose(queryDocument); + /* Assess if it is safe to push down the final sort */ + foreach(lc, root->sort_pathkeys) + { + PathKey *pathkey = (PathKey *) lfirst(lc); + EquivalenceClass *pathkey_ec = pathkey->pk_eclass; + EquivalenceMember *em = NULL; + Expr *sort_expr; + + /* + * mongo_is_foreign_expr would detect volatile expressions as well, + * but checking ec_has_volatile here saves some cycles. + */ + if (pathkey_ec->ec_has_volatile) + return; + + if (!IS_PATHKEY_PUSHABLE(pathkey)) + return; + + /* + * Get the sort expression for the pathkey_ec. The EC must contain a + * shippable EM that is computed in input_rel's reltarget, else we + * can't push down the sort. + */ + em = mongo_find_em_for_rel_target(root, pathkey_ec, input_rel); - /* construct foreign plan with query document and column list */ - foreignPrivateList = list_make2(queryBuffer, columnList); + /* Check for sort operator pushability. */ + if (!mongo_is_default_sort_operator(em, pathkey)) + return; - foreignScan = makeNode(ForeignScan); - foreignScan->fdw_private = foreignPrivateList; + /* Ignore binary-compatible relabeling */ + sort_expr = em->em_expr; + while (sort_expr && IsA(sort_expr, RelabelType)) + sort_expr = ((RelabelType *) sort_expr)->arg; - scanState->ss.ps.plan = (Plan *) foreignScan; + /* Only Vars are allowed per MongoDB. */ + if (!IsA(sort_expr, Var)) + return; + } - MongoBeginForeignScan(scanState, executorFlags); + /* Safe to push down */ + fpinfo->pushdown_safe = true; - executionState = (MongoFdwExecState *) scanState->fdw_state; - mongoCursor = executionState->mongoCursor; - columnMappingHash = executionState->columnMappingHash; + /* TODO: Put accurate estimates */ + startup_cost = 10; + total_cost = 10 + startup_cost; + rows = 10; /* - * Use per-tuple memory context to prevent leak of memory used to read - * rows from the file with copy routines. + * Build the fdw_private list that will be used by mongoGetForeignPlan. + * Items in the list must match the order in the enum FdwPathPrivateIndex. 
*/ - tupleContext = AllocSetContextCreate(CurrentMemoryContext, - "mongo_fdw temporary context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + fdw_private = list_make2(makeInteger(true), makeInteger(false)); + + /* Create foreign ordering path */ +#if PG_VERSION_NUM >= 170000 + ordered_path = create_foreign_upper_path(root, + input_rel, + root->upper_targets[UPPERREL_ORDERED], + rows, + startup_cost, + total_cost, + root->sort_pathkeys, + NULL, /* no extra plan */ + NIL, /* no fdw_restrictinfo list */ + fdw_private); +#else + ordered_path = create_foreign_upper_path(root, + input_rel, + root->upper_targets[UPPERREL_ORDERED], + rows, + startup_cost, + total_cost, + root->sort_pathkeys, + NULL, /* no extra plan */ + fdw_private); +#endif - /* prepare for sampling rows */ - randomState = anl_init_selection_state(targetRowCount); + /* and add it to the ordered_rel */ + add_path(ordered_rel, (Path *) ordered_path); +} - columnValues = (Datum *) palloc0(columnCount * sizeof(Datum)); - columnNulls = (bool *) palloc0(columnCount * sizeof(bool)); +/* + * mongo_add_foreign_final_paths + * Add foreign paths for performing the final processing remotely. + * + * Given input_rel contains the source-data Paths. The paths are added to the + * given final_rel. + */ +static void +mongo_add_foreign_final_paths(PlannerInfo *root, RelOptInfo *input_rel, + RelOptInfo *final_rel, FinalPathExtraData *extra) +{ + Query *parse = root->parse; + MongoFdwRelationInfo *ifpinfo = (MongoFdwRelationInfo *) input_rel->fdw_private; + MongoFdwRelationInfo *fpinfo = (MongoFdwRelationInfo *) final_rel->fdw_private; + bool has_final_sort = false; + List *pathkeys = NIL; + double rows; + Cost startup_cost; + Cost total_cost; + List *fdw_private; + ForeignPath *final_path; - for (;;) - { - int32 cursorStatus = MONGO_ERROR; + /* + * Currently, we only support this for SELECT commands + */ + if (parse->commandType != CMD_SELECT) + return; - /* check for user-requested abort or sleep */ - vacuum_delay_point(); + /* + * We do not support LIMIT with FOR UPDATE/SHARE. Also, if there is no + * FOR UPDATE/SHARE clause and there is no LIMIT, don't need to add + * Foreign final path. + */ + if (parse->rowMarks || !extra->limit_needed) + return; - /* initialize all values for this row to null */ - memset(columnValues, 0, columnCount * sizeof(Datum)); - memset(columnNulls, true, columnCount * sizeof(bool)); + /* We don't support cases where there are any SRFs in the targetlist */ + if (parse->hasTargetSRFs) + return; - cursorStatus = mongo_cursor_next(mongoCursor); - if (cursorStatus == MONGO_OK) - { - const bson *bsonDocument = mongo_cursor_bson(mongoCursor); - const char *bsonDocumentKey = NULL; /* top level document */ + /* Save the input_rel as outerrel in fpinfo */ + fpinfo->outerrel = input_rel; - /* fetch next tuple */ - MemoryContextReset(tupleContext); - MemoryContextSwitchTo(tupleContext); + /* + * If there is no need to add a LIMIT node, there might be a ForeignPath + * in the input_rel's pathlist that implements all behavior of the query. + * Note: we would already have accounted for the query's FOR UPDATE/SHARE + * (if any) before we get here. 
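[Editor's note, illustration only] The fdw_private list built just above is purely positional: the producer packs two Integer flags and mongoGetForeignPlan has to read them back by the same indexes. The enum FdwPathPrivateIndex named in the comment is not part of this hunk, so the sketch below only shows the usual pattern; the member names are borrowed from the postgres_fdw convention and are assumptions, not necessarily the identifiers mongo_fdw defines, and the snippet presumes the PostgreSQL server headers rather than being standalone.

```c
/*
 * Illustrative sketch only; assumes the PostgreSQL server development
 * headers.  The enum member names are placeholders for whatever
 * FdwPathPrivateIndex actually defines elsewhere in this patch.
 */
#include "postgres.h"
#include "nodes/pg_list.h"
#include "nodes/value.h"

typedef enum FdwPathPrivateIndex
{
	FdwPathPrivateHasFinalSort,	/* has-final-sort flag (Integer) */
	FdwPathPrivateHasLimit		/* has-limit flag (Integer) */
} FdwPathPrivateIndex;

/* Consumer side: unpack the flags in the same order they were packed. */
static void
unpack_path_private(List *fdw_private, bool *has_final_sort, bool *has_limit)
{
	*has_final_sort = (bool) intVal(list_nth(fdw_private,
											 FdwPathPrivateHasFinalSort));
	*has_limit = (bool) intVal(list_nth(fdw_private,
										FdwPathPrivateHasLimit));
}
```

The producer side is the list_make2(makeInteger(...), makeInteger(...)) call above; keying both ends off one enum avoids silent index drift when more items are added later.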
+ */ + if (!extra->limit_needed) + { + ListCell *lc; - FillTupleSlot(bsonDocument, bsonDocumentKey, - columnMappingHash, columnValues, columnNulls); + Assert(parse->rowMarks); - MemoryContextSwitchTo(oldContext); - } - else + /* + * Grouping and aggregation are not supported with FOR UPDATE/SHARE, + * so the input_rel should be a base, join, or ordered relation; and + * if it's an ordered relation, its input relation should be a base or + * join relation. + */ + Assert(input_rel->reloptkind == RELOPT_BASEREL || + input_rel->reloptkind == RELOPT_JOINREL || + (input_rel->reloptkind == RELOPT_UPPER_REL && + ifpinfo->stage == UPPERREL_ORDERED && + (ifpinfo->outerrel->reloptkind == RELOPT_BASEREL || + ifpinfo->outerrel->reloptkind == RELOPT_JOINREL))); + + foreach(lc, input_rel->pathlist) { + Path *path = (Path *) lfirst(lc); + /* - * The following is a courtesy check. In practice when Mongo shuts down, - * mongo_cursor_next() could possibly crash. + * apply_scanjoin_target_to_paths() uses create_projection_path() + * to adjust each of its input paths if needed, whereas + * create_ordered_paths() uses apply_projection_to_path() to do + * that. So the former might have put a ProjectionPath on top of + * the ForeignPath; look through ProjectionPath and see if the + * path underneath it is ForeignPath. */ - mongo_cursor_error_t errorCode = mongoCursor->err; - if (errorCode != MONGO_CURSOR_EXHAUSTED) + if (IsA(path, ForeignPath) || + (IsA(path, ProjectionPath) && + IsA(((ProjectionPath *) path)->subpath, ForeignPath))) { - MongoFreeScanState(executionState); - ereport(ERROR, (errmsg("could not iterate over mongo collection"), - errhint("Mongo driver cursor error code: %d", - errorCode))); - } + /* + * Create foreign final path; this gets rid of a + * no-longer-needed outer plan (if any), which makes the + * EXPLAIN output look cleaner + */ +#if PG_VERSION_NUM >= 170000 + final_path = create_foreign_upper_path(root, + path->parent, + path->pathtarget, + path->rows, + path->startup_cost, + path->total_cost, + path->pathkeys, + NULL, /* no extra plan */ + NIL, /* no fdw_restrictinfo list */ + NIL); /* no fdw_private */ +#else + final_path = create_foreign_upper_path(root, + path->parent, + path->pathtarget, + path->rows, + path->startup_cost, + path->total_cost, + path->pathkeys, + NULL, /* no extra plan */ + NIL); /* no fdw_private */ +#endif - break; - } + /* and add it to the final_rel */ + add_path(final_rel, (Path *) final_path); + + /* Safe to push down */ + fpinfo->pushdown_safe = true; + + return; + } + } /* - * The first targetRowCount sample rows are simply copied into the - * reservoir. Then we start replacing tuples in the sample until we - * reach the end of the relation. This algorithm is from Jeff Vitter's - * paper (see more info in commands/analyze.c). + * If we get here it means no ForeignPaths; since we would already + * have considered pushing down all operations for the query to the + * remote server, give up on it. 
*/ - if (sampleRowCount < targetRowCount) + return; + } + + Assert(extra->limit_needed); + + /* + * If the input_rel is an ordered relation, replace the input_rel with its + * input relation + */ + if (input_rel->reloptkind == RELOPT_UPPER_REL && + ifpinfo->stage == UPPERREL_ORDERED) + { + /* Do not push down LIMIT if ORDER BY push down is disabled */ + if (!enable_order_by_pushdown) + return; + + input_rel = ifpinfo->outerrel; + ifpinfo = (MongoFdwRelationInfo *) input_rel->fdw_private; + has_final_sort = true; + pathkeys = root->sort_pathkeys; + } + + /* The input_rel should be a base, join, or grouping relation */ + Assert(input_rel->reloptkind == RELOPT_BASEREL || + input_rel->reloptkind == RELOPT_JOINREL || + (input_rel->reloptkind == RELOPT_UPPER_REL && + ifpinfo->stage == UPPERREL_GROUP_AGG)); + + /* + * We try to create a path below by extending a simple foreign path for + * the underlying base, join, or grouping relation to perform the final + * sort (if has_final_sort) and the LIMIT restriction remotely, which is + * stored into the fdw_private list of the resulting path. (We + * re-estimate the costs of sorting the underlying relation, if + * has_final_sort.) + */ + + /* + * Assess if it is safe to push down the LIMIT and OFFSET to the remote + * server + */ + + /* + * If the underlying relation has any local conditions, the LIMIT/OFFSET + * cannot be pushed down. + */ + if (ifpinfo->local_conds) + return; + + /* + * Support only Const nodes as expressions are NOT supported on MongoDB. + * Also, MongoDB supports only positive 64-bit integer values, so don't + * pushdown in case of -ve values given for LIMIT/OFFSET clauses. + */ + if (parse->limitCount) + { + Node *node = parse->limitCount; + + if (nodeTag(node) != T_Const || + (((Const *) node)->consttype != INT8OID)) + return; + + if (!((Const *) node)->constisnull && + (DatumGetInt64(((Const *) node)->constvalue) < 0)) + return; + } + if (parse->limitOffset) + { + Node *node = parse->limitOffset; + + if (nodeTag(node) != T_Const || + (((Const *) node)->consttype != INT8OID)) + return; + + if (!((Const *) node)->constisnull && + (DatumGetInt64(((Const *) node)->constvalue) < 0)) + return; + } + + /* Safe to push down */ + fpinfo->pushdown_safe = true; + + /* TODO: Put accurate estimates */ + startup_cost = 1; + total_cost = 1 + startup_cost; + rows = 1; + + /* + * Build the fdw_private list that will be used by mongoGetForeignPlan. + * Items in the list must match order in enum FdwPathPrivateIndex. + */ + fdw_private = list_make2(makeInteger(has_final_sort), + makeInteger(extra->limit_needed)); + + /* + * Create foreign final path; this gets rid of a no-longer-needed outer + * plan (if any), which makes the EXPLAIN output look cleaner + */ +#if PG_VERSION_NUM >= 170000 + final_path = create_foreign_upper_path(root, + input_rel, + root->upper_targets[UPPERREL_FINAL], + rows, + startup_cost, + total_cost, + pathkeys, + NULL, /* no extra plan */ + NIL, /* no fdw_restrictinfo list */ + fdw_private); +#else + final_path = create_foreign_upper_path(root, + input_rel, + root->upper_targets[UPPERREL_FINAL], + rows, + startup_cost, + total_cost, + pathkeys, + NULL, /* no extra plan */ + fdw_private); +#endif + + /* and add it to the final_rel */ + add_path(final_rel, (Path *) final_path); +} + +/* + * mongo_find_em_for_rel_target + * Find an equivalence class member expression to be computed as a sort + * column in the given target. 
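[Editor's note, illustration only] Once the LIMIT and OFFSET constants survive the checks above (a Const of type INT8, not NULL, not negative), only the has_final_sort and limit_needed flags travel through fdw_private; the hunk does not show how the values end up in the MongoDB request. The snippet below is therefore a hedged sketch of the pipeline stages an OFFSET/LIMIT pair would plausibly map to ($skip followed by $limit), written against the stock libbson API rather than the project's bsonAppend* wrappers, with the literal values chosen only for the example.

```c
/* cc limit_demo.c $(pkg-config --cflags --libs libbson-1.0) */
#include <stdio.h>
#include <bson/bson.h>

int
main(void)
{
	bson_t	doc = BSON_INITIALIZER;
	bson_t	pipeline;
	bson_t	stage;
	char   *json;

	/* Hypothetical translation of "OFFSET 2 LIMIT 5" */
	bson_append_array_begin(&doc, "pipeline", -1, &pipeline);

	bson_append_document_begin(&pipeline, "0", -1, &stage);
	BSON_APPEND_INT64(&stage, "$skip", 2);
	bson_append_document_end(&pipeline, &stage);

	bson_append_document_begin(&pipeline, "1", -1, &stage);
	BSON_APPEND_INT64(&stage, "$limit", 5);
	bson_append_document_end(&pipeline, &stage);

	bson_append_array_end(&doc, &pipeline);

	json = bson_as_relaxed_extended_json(&doc, NULL);
	printf("%s\n", json);	/* { "pipeline" : [ { "$skip" : 2 }, { "$limit" : 5 } ] } */

	bson_free(json);
	bson_destroy(&doc);
	return 0;
}
```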
+ */ +static EquivalenceMember * +mongo_find_em_for_rel_target(PlannerInfo *root, EquivalenceClass *ec, + RelOptInfo *rel) +{ + PathTarget *target = rel->reltarget; + ListCell *lc1; + int i; + + i = 0; + foreach(lc1, target->exprs) + { + Expr *expr = (Expr *) lfirst(lc1); + Index sgref = get_pathtarget_sortgroupref(target, i); + ListCell *lc2; + + /* Ignore non-sort expressions */ + if (sgref == 0 || + get_sortgroupref_clause_noerr(sgref, + root->parse->sortClause) == NULL) { - sampleRows[sampleRowCount++] = heap_form_tuple(tupleDescriptor, - columnValues, - columnNulls); + i++; + continue; } - else + + /* We ignore binary-compatible relabeling */ + while (expr && IsA(expr, RelabelType)) + expr = ((RelabelType *) expr)->arg; + + /* Locate an EquivalenceClass member matching this expr, if any */ + foreach(lc2, ec->ec_members) { - /* - * t in Vitter's paper is the number of records already processed. - * If we need to compute a new S value, we must use the "not yet - * incremented" value of rowCount as t. - */ - if (rowCountToSkip < 0) - { - rowCountToSkip = anl_get_next_S(rowCount, targetRowCount, - &randomState); - } + EquivalenceMember *em = (EquivalenceMember *) lfirst(lc2); + Expr *em_expr; - if (rowCountToSkip <= 0) - { - /* - * Found a suitable tuple, so save it, replacing one old tuple - * at random. - */ - int rowIndex = (int) (targetRowCount * anl_random_fract()); - Assert(rowIndex >= 0); - Assert(rowIndex < targetRowCount); + /* Don't match constants */ + if (em->em_is_const) + continue; - heap_freetuple(sampleRows[rowIndex]); - sampleRows[rowIndex] = heap_form_tuple(tupleDescriptor, - columnValues, - columnNulls); - } + /* Ignore child members */ + if (em->em_is_child) + continue; - rowCountToSkip -= 1; + /* Match if same expression (after stripping relabel) */ + em_expr = em->em_expr; + while (em_expr && IsA(em_expr, RelabelType)) + em_expr = ((RelabelType *) em_expr)->arg; + + if (!equal(em_expr, expr)) + continue; + + /* + * Check that expression (including relabels!) is shippable. If + * it's unsafe to remote, we cannot push down the final sort. + */ + if (mongo_is_foreign_expr(root, rel, em->em_expr, false)) + return em; } - rowCount += 1; + i++; } - /* clean up */ - MemoryContextDelete(tupleContext); - MongoFreeScanState(executionState); - - pfree(columnValues); - pfree(columnNulls); + return NULL; /* keep compiler quiet */ +} - /* emit some interesting relation info */ - relationName = RelationGetRelationName(relation); - ereport(errorLevel, (errmsg("\"%s\": collection contains %.0f rows; %d rows in sample", - relationName, rowCount, sampleRowCount))); +/* + * mongo_is_default_sort_operator + * Returns true if default sort operator is provided. + */ +bool +mongo_is_default_sort_operator(EquivalenceMember *em, PathKey *pathkey) +{ + Oid oprid; + char *oprname; + TypeCacheEntry *typentry; + + if (em == NULL) + return false; + + /* Can't push down the sort if pathkey's opfamily is not shippable. */ + if (!mongo_is_builtin(pathkey->pk_opfamily)) + return NULL; + + oprid = get_opfamily_member(pathkey->pk_opfamily, + em->em_datatype, + em->em_datatype, + pathkey->pk_strategy); + if (!OidIsValid(oprid)) + elog(ERROR, "missing operator %d(%u,%u) in opfamily %u", + pathkey->pk_strategy, em->em_datatype, em->em_datatype, + pathkey->pk_opfamily); + + /* Can't push down the sort if the operator is not shippable. 
*/ + oprname = get_opname(oprid); + if (!((strncmp(oprname, "<", NAMEDATALEN) == 0) || + (strncmp(oprname, ">", NAMEDATALEN) == 0))) + return false; - (*totalRowCount) = rowCount; - (*totalDeadRowCount) = 0; + /* + * See whether the operator is default < or > for sort expr's datatype. + * Here we need to use the expression's actual type to discover whether + * the desired operator will be the default or not. + */ + typentry = lookup_type_cache(exprType((Node *) em->em_expr), + TYPECACHE_LT_OPR | TYPECACHE_GT_OPR); + if (oprid == typentry->lt_opr || oprid == typentry->gt_opr) + return true; - return sampleRowCount; + return false; } diff --git a/mongo_fdw.control b/mongo_fdw.control index c0fbe86..311c587 100644 --- a/mongo_fdw.control +++ b/mongo_fdw.control @@ -1,8 +1,9 @@ # mongo_fdw extension # -# Copyright (c) 2012-2014 Citus Data, Inc. +# Portions Copyright (c) 2004-2024, EnterpriseDB Corporation. +# Portions Copyright © 2012–2014 Citus Data, Inc. # comment = 'foreign data wrapper for MongoDB access' -default_version = '1.0' +default_version = '1.1' module_pathname = '$libdir/mongo_fdw' relocatable = true diff --git a/mongo_fdw.h b/mongo_fdw.h index 60947a2..4f615bc 100644 --- a/mongo_fdw.h +++ b/mongo_fdw.h @@ -1,75 +1,190 @@ /*------------------------------------------------------------------------- * * mongo_fdw.h + * Foreign-data wrapper for remote MongoDB servers * - * Type and function declarations for MongoDB foreign data wrapper. + * Portions Copyright (c) 2012-2014, PostgreSQL Global Development Group + * Portions Copyright (c) 2004-2024, EnterpriseDB Corporation. + * Portions Copyright (c) 2012–2014 Citus Data, Inc. * - * Copyright (c) 2012-2014 Citus Data, Inc. + * IDENTIFICATION + * mongo_fdw.h * *------------------------------------------------------------------------- */ - #ifndef MONGO_FDW_H #define MONGO_FDW_H -#include "bson.h" -#include "mongo.h" +#include "mongo_wrapper.h" -#include "fmgr.h" +#include "mongoc.h" +#include "access/reloptions.h" #include "catalog/pg_foreign_server.h" #include "catalog/pg_foreign_table.h" -#include "utils/datetime.h" +#include "catalog/pg_user_mapping.h" +#include "catalog/pg_type.h" +#include "commands/defrem.h" +#include "commands/explain.h" +#include "commands/vacuum.h" +#include "fmgr.h" +#include "foreign/fdwapi.h" +#include "foreign/foreign.h" +#include "nodes/makefuncs.h" #include "nodes/pg_list.h" -#include "nodes/relation.h" +#include "optimizer/cost.h" +#include "optimizer/pathnode.h" +#include "optimizer/plancat.h" +#include "optimizer/planmain.h" +#include "optimizer/restrictinfo.h" +#include "utils/array.h" +#include "utils/builtins.h" +#include "utils/date.h" +#include "utils/datetime.h" +#include "utils/hsearch.h" +#include "utils/lsyscache.h" +#include "utils/memutils.h" #include "utils/timestamp.h" +#define BSON bson_t +#define BSON_TYPE bson_type_t +#define BSON_ITERATOR bson_iter_t +#define MONGO_CONN mongoc_client_t +#define MONGO_CURSOR mongoc_cursor_t +#define BSON_TYPE_DOCUMENT BSON_TYPE_DOCUMENT +#define BSON_TYPE_NULL BSON_TYPE_NULL +#define BSON_TYPE_ARRAY BSON_TYPE_ARRAY +#define BSON_TYPE_INT32 BSON_TYPE_INT32 +#define BSON_TYPE_INT64 BSON_TYPE_INT64 +#define BSON_TYPE_DOUBLE BSON_TYPE_DOUBLE +#define BSON_TYPE_BINDATA BSON_TYPE_BINARY +#define BSON_TYPE_BOOL BSON_TYPE_BOOL +#define BSON_TYPE_UTF8 BSON_TYPE_UTF8 +#define BSON_TYPE_OID BSON_TYPE_OID +#define BSON_TYPE_DATE_TIME BSON_TYPE_DATE_TIME +#define BSON_TYPE_SYMBOL BSON_TYPE_SYMBOL +#define BSON_TYPE_UNDEFINED BSON_TYPE_UNDEFINED +#define 
BSON_TYPE_REGEX BSON_TYPE_REGEX +#define BSON_TYPE_CODE BSON_TYPE_CODE +#define BSON_TYPE_CODEWSCOPE BSON_TYPE_CODEWSCOPE +#define BSON_TYPE_TIMESTAMP BSON_TYPE_TIMESTAMP + +#define PREF_READ_PRIMARY_NAME "readPrimary" +#define PREF_READ_SECONDARY_NAME "readSecondary" +#define PREF_READ_PRIMARY_PREFERRED_NAME "readPrimaryPreferred" +#define PREF_READ_SECONDARY_PREFERRED_NAME "readSecondaryPreferred" +#define PREF_READ_NEAREST_NAME "readNearest" + +#define BSON_ITER_BOOL bson_iter_bool +#define BSON_ITER_DOUBLE bson_iter_double +#define BSON_ITER_INT32 bson_iter_int32 +#define BSON_ITER_INT64 bson_iter_int64 +#define BSON_ITER_OID bson_iter_oid +#define BSON_ITER_UTF8 bson_iter_utf8 +#define BSON_ITER_REGEX bson_iter_regex +#define BSON_ITER_DATE_TIME bson_iter_date_time +#define BSON_ITER_CODE bson_iter_code +#define BSON_ITER_VALUE bson_iter_value +#define BSON_ITER_KEY bson_iter_key +#define BSON_ITER_NEXT bson_iter_next +#define BSON_ITER_TYPE bson_iter_type +#define BSON_ITER_BINARY bson_iter_binary /* Defines for valid option names */ -#define OPTION_NAME_ADDRESS "address" -#define OPTION_NAME_PORT "port" -#define OPTION_NAME_DATABASE "database" -#define OPTION_NAME_COLLECTION "collection" +#define OPTION_NAME_ADDRESS "address" +#define OPTION_NAME_PORT "port" +#define OPTION_NAME_DATABASE "database" +#define OPTION_NAME_COLLECTION "collection" +#define OPTION_NAME_USERNAME "username" +#define OPTION_NAME_PASSWORD "password" +#define OPTION_NAME_USE_REMOTE_ESTIMATE "use_remote_estimate" +#define OPTION_NAME_READ_PREFERENCE "read_preference" +#define OPTION_NAME_AUTHENTICATION_DATABASE "authentication_database" +#define OPTION_NAME_REPLICA_SET "replica_set" +#define OPTION_NAME_SSL "ssl" +#define OPTION_NAME_PEM_FILE "pem_file" +#define OPTION_NAME_PEM_PWD "pem_pwd" +#define OPTION_NAME_CA_FILE "ca_file" +#define OPTION_NAME_CA_DIR "ca_dir" +#define OPTION_NAME_CRL_FILE "crl_file" +#define OPTION_NAME_WEAK_CERT "weak_cert_validation" +#define OPTION_NAME_ENABLE_JOIN_PUSHDOWN "enable_join_pushdown" +#define OPTION_NAME_ENABLE_AGGREGATE_PUSHDOWN "enable_aggregate_pushdown" +#define OPTION_NAME_ENABLE_ORDER_BY_PUSHDOWN "enable_order_by_pushdown" /* Default values for option parameters */ -#define DEFAULT_IP_ADDRESS "127.0.0.1" -#define DEFAULT_PORT_NUMBER 27017 -#define DEFAULT_DATABASE_NAME "test" +#define DEFAULT_IP_ADDRESS "127.0.0.1" +#define DEFAULT_PORT_NUMBER 27017 +#define DEFAULT_DATABASE_NAME "test" /* Defines for sending queries and converting types */ -#define EQUALITY_OPERATOR_NAME "=" -#define INITIAL_ARRAY_CAPACITY 8 -#define MONGO_TUPLE_COST_MULTIPLIER 5 -#define MONGO_CONNECTION_COST_MULTIPLIER 5 -#define POSTGRES_TO_UNIX_EPOCH_DAYS (POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) -#define POSTGRES_TO_UNIX_EPOCH_USECS (POSTGRES_TO_UNIX_EPOCH_DAYS * USECS_PER_DAY) +#define EQUALITY_OPERATOR_NAME "=" +#define INITIAL_ARRAY_CAPACITY 8 +#define MONGO_TUPLE_COST_MULTIPLIER 5 +#define MONGO_CONNECTION_COST_MULTIPLIER 5 +#define POSTGRES_TO_UNIX_EPOCH_DAYS (POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) +#define POSTGRES_TO_UNIX_EPOCH_USECS (POSTGRES_TO_UNIX_EPOCH_DAYS * USECS_PER_DAY) + +/* Macro for list API backporting. */ +#if PG_VERSION_NUM < 130000 +#define mongo_list_concat(l1, l2) list_concat(l1, list_copy(l2)) +#else +#define mongo_list_concat(l1, l2) list_concat((l1), (l2)) +#endif +/* Macro for hard-coded aggregation result key */ +#define AGG_RESULT_KEY "v_agg" /* - * MongoValidOption keeps an option name and a context. 
When an option is passed - * into mongo_fdw objects (server and foreign table), we compare this option's - * name and context against those of valid options. + * We build a hash table that stores the column details. However, a table can + * have maximum MaxHeapAttributeNumber columns. And since we allow join only + * on two tables, we set the max hash table size to twice that limit. + */ +#define MaxHashTableSize (MaxHeapAttributeNumber * 2) + +/* + * MongoValidOption keeps an option name and a context. When an option is + * passed into mongo_fdw objects (server and foreign table), we compare this + * option's name and context against those of valid options. */ typedef struct MongoValidOption { const char *optionName; - Oid optionContextId; - + Oid optionContextId; } MongoValidOption; - /* Array of options that are valid for mongo_fdw */ -static const uint32 ValidOptionCount = 4; +static const uint32 ValidOptionCount = 23; static const MongoValidOption ValidOptionArray[] = { - /* foreign server options */ - { OPTION_NAME_ADDRESS, ForeignServerRelationId }, - { OPTION_NAME_PORT, ForeignServerRelationId }, + /* Foreign server options */ + {OPTION_NAME_ADDRESS, ForeignServerRelationId}, + {OPTION_NAME_PORT, ForeignServerRelationId}, + {OPTION_NAME_USE_REMOTE_ESTIMATE, ForeignServerRelationId}, + {OPTION_NAME_READ_PREFERENCE, ForeignServerRelationId}, + {OPTION_NAME_AUTHENTICATION_DATABASE, ForeignServerRelationId}, + {OPTION_NAME_REPLICA_SET, ForeignServerRelationId}, + {OPTION_NAME_SSL, ForeignServerRelationId}, + {OPTION_NAME_PEM_FILE, ForeignServerRelationId}, + {OPTION_NAME_PEM_PWD, ForeignServerRelationId}, + {OPTION_NAME_CA_FILE, ForeignServerRelationId}, + {OPTION_NAME_CA_DIR, ForeignServerRelationId}, + {OPTION_NAME_CRL_FILE, ForeignServerRelationId}, + {OPTION_NAME_WEAK_CERT, ForeignServerRelationId}, + {OPTION_NAME_ENABLE_JOIN_PUSHDOWN, ForeignServerRelationId}, + {OPTION_NAME_ENABLE_AGGREGATE_PUSHDOWN, ForeignServerRelationId}, + {OPTION_NAME_ENABLE_ORDER_BY_PUSHDOWN, ForeignServerRelationId}, - /* foreign table options */ - { OPTION_NAME_DATABASE, ForeignTableRelationId }, - { OPTION_NAME_COLLECTION, ForeignTableRelationId } -}; + /* Foreign table options */ + {OPTION_NAME_DATABASE, ForeignTableRelationId}, + {OPTION_NAME_COLLECTION, ForeignTableRelationId}, + {OPTION_NAME_ENABLE_JOIN_PUSHDOWN, ForeignTableRelationId}, + {OPTION_NAME_ENABLE_AGGREGATE_PUSHDOWN, ForeignTableRelationId}, + {OPTION_NAME_ENABLE_ORDER_BY_PUSHDOWN, ForeignTableRelationId}, + /* User mapping options */ + {OPTION_NAME_USERNAME, UserMappingRelationId}, + {OPTION_NAME_PASSWORD, UserMappingRelationId} +}; /* * MongoFdwOptions holds the option values to be used when connecting to Mongo. 
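[Editor's note, illustration only] Each ValidOptionArray entry pairs an option name with the catalog relation OID of the object class it may be attached to, which is what lets the validator accept username on a user mapping while rejecting it on a foreign table. The standalone sketch below shows that lookup shape in plain C; the enum stands in for the catalog OIDs and the table is abbreviated, so this is an illustration of the idea, not the actual mongo_fdw_validator logic.

```c
#include <stdio.h>
#include <string.h>

/* Stand-ins for the catalog relation OIDs used as option contexts. */
typedef enum { SERVER_CTX, TABLE_CTX, USER_MAPPING_CTX } OptionContext;

typedef struct { const char *name; OptionContext context; } ValidOption;

static const ValidOption valid_options[] = {
	{"address", SERVER_CTX},
	{"port", SERVER_CTX},
	{"database", TABLE_CTX},
	{"collection", TABLE_CTX},
	{"username", USER_MAPPING_CTX},
	{"password", USER_MAPPING_CTX},
};

/* Accept an option only if its name and context both match a table entry. */
static int
is_valid_option(const char *name, OptionContext context)
{
	size_t	i;

	for (i = 0; i < sizeof(valid_options) / sizeof(valid_options[0]); i++)
		if (context == valid_options[i].context &&
			strcmp(name, valid_options[i].name) == 0)
			return 1;
	return 0;
}

int
main(void)
{
	printf("username on table: %d\n", is_valid_option("username", TABLE_CTX));
	printf("username on user mapping: %d\n",
		   is_valid_option("username", USER_MAPPING_CTX));
	return 0;
}
```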
@@ -78,53 +193,268 @@ static const MongoValidOption ValidOptionArray[] = */ typedef struct MongoFdwOptions { - char *addressName; - int32 portNumber; - char *databaseName; - char *collectionName; - + char *svr_address; + uint16 svr_port; + char *svr_database; + char *collectionName; + char *svr_username; + char *svr_password; + bool use_remote_estimate; /* use remote estimate for rows */ + char *readPreference; + char *authenticationDatabase; + char *replicaSet; + bool ssl; + char *pem_file; + char *pem_pwd; + char *ca_file; + char *ca_dir; + char *crl_file; + bool weak_cert_validation; + bool enable_join_pushdown; + bool enable_aggregate_pushdown; + bool enable_order_by_pushdown; } MongoFdwOptions; - /* - * MongoFdwExecState keeps foreign data wrapper specific execution state that we - * create and hold onto when executing the query. + * MongoFdwExecState keeps foreign data wrapper specific execution state that + * we create and hold onto when executing the query. + * + * Execution state of a foreign insert/update/delete operation. */ -typedef struct MongoFdwExecState +typedef struct MongoFdwModifyState { + Relation rel; /* relcache entry for the foreign table */ + List *target_attrs; /* list of target attribute numbers */ + + /* Info about parameters for prepared statement */ + int p_nums; /* number of parameters to transmit */ + FmgrInfo *p_flinfo; /* output conversion functions for them */ + struct HTAB *columnMappingHash; - mongo *mongoConnection; - mongo_cursor *mongoCursor; - bson *queryDocument; -} MongoFdwExecState; + MONGO_CONN *mongoConnection; /* MongoDB connection */ + MONGO_CURSOR *mongoCursor; /* MongoDB cursor */ + BSON *queryDocument; /* Bson Document */ + + MongoFdwOptions *options; + AttrNumber rowidAttno; /* attnum of resjunk rowid column */ + /* Join/Upper relation information */ + uint32 relType; /* relation type. Base, Join, Upper, or Upper + * on join */ + char *outerRelName; /* Outer relation name */ +} MongoFdwModifyState; /* - * ColumnMapping reprents a hash table entry that maps a column name to column - * related information. We construct these hash table entries to speed up the - * conversion from BSON documents to PostgreSQL tuples; and each hash entry maps - * the column name to the column's tuple index and its type-related information. + * ColumnMapping represents a hash table entry that maps a column name to + * column-related information. We construct these hash table entries to speed + * up the conversion from BSON documents to PostgreSQL tuples, and each hash + * entry maps the column name to the column's tuple index and its type-related + * information. */ typedef struct ColumnMapping { - char columnName[NAMEDATALEN]; - uint32 columnIndex; - Oid columnTypeId; - int32 columnTypeMod; - Oid columnArrayTypeId; - + char columnName[NAMEDATALEN]; + uint32 columnIndex; + Oid columnTypeId; + int32 columnTypeMod; + Oid columnArrayTypeId; + /* Column serial number in target list (set only for join rel) */ + uint32 columnSerialNo; } ColumnMapping; +/* + * FDW-specific planner information kept in RelOptInfo.fdw_private for a + * mongo_fdw foreign table. For a baserel, this struct is created by + * MongoGetForeignRelSize. + */ +typedef struct MongoFdwRelationInfo +{ + /* + * True means that the relation can be pushed down. Always true for simple + * foreign scan. + */ + bool pushdown_safe; + + /* baserestrictinfo clauses, broken down into safe and unsafe subsets. */ + List *local_conds; + List *remote_conds; + + /* Name of the base rel (not set for join rels!) 
*/ + char *base_relname; + + /* + * Name of the relation while EXPLAINing ForeignScan. It is used for join + * relations but is set for all relations. For join relation, the name + * indicates which foreign tables are being joined and the join type used. + */ + StringInfo relation_name; + + /* True means that the query_pathkeys is safe to push down */ + bool qp_is_pushdown_safe; + + /* Join information */ + RelOptInfo *outerrel; + RelOptInfo *innerrel; + JoinType jointype; + List *joinclauses; + char *inner_relname; + char *outer_relname; + + MongoFdwOptions *options; /* Options applicable for this relation */ + + /* Grouping information */ + List *grouped_tlist; + List *groupbyColList; + + /* Upper relation information */ + UpperRelationKind stage; + + /* + * True if the underlying scan relation involved in aggregation is + * pushable. + */ + bool is_agg_scanrel_pushable; + + /* Inherit required flags from MongoFdwOptions */ + bool is_order_by_pushable; +} MongoFdwRelationInfo; + +/* + * MongoRelQualInfo contains column name, varno, varattno, and its relation + * name of columns involved in the join quals which is passed to the execution + * state through fdw_private. For upper relation, it also includes aggregate + * type, aggregate column name, and whether the aggregate is in target or in + * having clause details. + * + * Unlike postgres_fdw, remote query formation is done in the execution state. + * The information, mainly the varno i.e. range table index, we get at the + * execution time is different than the planning state. That may result in + * fetching incorrect data. So, to avoid this, we are gathering information + * required to form a MongoDB query in the planning state and passing it to the + * executor. + * + * For join relation: + * Assume, we have the following two tables with RTI 1 and 2 respectively: + * T1(a int, b int) + * T2(x int, y int) + * + * and if the join clause is like below with T1 as inner relation and T2 outer + * relation: + * (T1.a = T2.x AND T1.b > T2.y) + * + * then as columns a, b, x, and y are involved in the join clause, we need to + * form the following 4 lists as part of MongoRelQualInfo: + * + * 1. colNameList: List of column names + * a->x->b->y + * 2. colNumList: List of column attribute number + * 1->1->2->2 + * 3. rtiList: Range table index of the column + * 1->2->1->2 + * 4. isOuterList: Is it a column of an outer relation? + * 1->0->1->0 + * + * If we want information related to column 'a', then look for information + * available at the zeroth index of all four lists. + * + * To avoid duplicate entry of columns, we use a hash table having a unique + * hash key as a set of varno and varattno. + * + * For upper relation: + * Assume, we have to calculate the sum of column 'a' and the average of column + * 'b' of the above table 'T1' where the minimum of a is greater than 1. This + * can be done by the following SQL query: + * + * SELECT SUM(a), AVG(b) FROM T1 HAVING MIN(a) > 1; + * + * Here, there are two aggregation types SUM and MIN, and two aggregation + * columns i.e. 'a' and 'b'. To differentiate between two aggregation + * operations, we need to save information about whether the aggregation + * operation is part of a target list or having clause. So, we need to form + * the following three lists as a part of MongoRelQualInfo: + * + * 1. aggTypeList: List of aggregation operations + * SUM->AVG->MIN + * 2. aggColList: List of aggregated columns + * a->b->a + * 3. isHavingList: Is aggregation operation part of HAVING clause or not? 
+ * 0->0->1 + */ +typedef struct MongoRelQualInfo +{ + PlannerInfo *root; /* global planner state */ + RelOptInfo *foreignRel; /* the foreign relation we are planning for */ + Relids outerRelids; /* set of base relids of outer relation */ + List *colNameList; + List *colNumList; + List *rtiList; + List *isOuterList; + struct HTAB *exprColHash; + /* For upper-relation */ + bool is_agg_column; /* is column aggregated or not? */ + bool is_having; /* is it part of HAVING clause or not? */ + List *aggTypeList; + List *aggColList; + List *isHavingList; +} MongoRelQualInfo; + +typedef struct ColumnHashKey +{ + int varno; + int varattno; +} ColumnHashKey; + +/* + * Indexes for relation type. The RelOptKind could be used but there is no + * kind called UPPER_JOIN_REL. The UPPER_JOIN_REL is nothing but UPPER_REL but + * for our use case, we are differentiating these two types. + */ +typedef enum MongoFdwRelType +{ + BASE_REL, + JOIN_REL, + UPPER_REL, + UPPER_JOIN_REL +} MongoFdwRelType; + +/* options.c */ +extern MongoFdwOptions *mongo_get_options(Oid foreignTableId); +extern void mongo_free_options(MongoFdwOptions *options); +extern StringInfo mongo_option_names_string(Oid currentContextId); + +/* connection.c */ +MONGO_CONN *mongo_get_connection(ForeignServer *server, + UserMapping *user, + MongoFdwOptions *opt); + +extern void mongo_cleanup_connection(void); +extern void mongo_release_connection(MONGO_CONN *conn); /* Function declarations related to creating the mongo query */ -extern List * ApplicableOpExpressionList(RelOptInfo *baserel); -extern bson * QueryDocument(Oid relationId, List *opExpressionList); -extern List * ColumnList(RelOptInfo *baserel); +extern BSON *mongo_query_document(ForeignScanState *scanStateNode); +extern List *mongo_get_column_list(PlannerInfo *root, RelOptInfo *foreignrel, + List *scan_var_list, List **colNameList, + List **colIsInnerList); +extern bool mongo_is_foreign_expr(PlannerInfo *root, RelOptInfo *baserel, + Expr *expression, bool is_having_cond); +extern bool mongo_is_foreign_param(PlannerInfo *root, RelOptInfo *baserel, + Expr *expr); /* Function declarations for foreign data wrapper */ extern Datum mongo_fdw_handler(PG_FUNCTION_ARGS); extern Datum mongo_fdw_validator(PG_FUNCTION_ARGS); +/* deparse.c headers */ +extern void mongo_check_qual(Expr *node, MongoRelQualInfo *qual_info); +extern const char *mongo_get_jointype_name(JoinType jointype); +extern EquivalenceMember *mongo_find_em_for_rel(PlannerInfo *root, + EquivalenceClass *ec, + RelOptInfo *rel); +extern bool mongo_is_builtin(Oid oid); +extern bool mongo_is_default_sort_operator(EquivalenceMember *em, + PathKey *pathkey); +extern bool mongo_is_foreign_pathkey(PlannerInfo *root, RelOptInfo *baserel, + PathKey *pathkey); -#endif /* MONGO_FDW_H */ +#endif /* MONGO_FDW_H */ diff --git a/mongo_query.c b/mongo_query.c index fb2ac33..1a7595a 100644 --- a/mongo_query.c +++ b/mongo_query.c @@ -1,272 +1,663 @@ /*------------------------------------------------------------------------- * * mongo_query.c + * FDW query handling for mongo_fdw * - * Function definitions for sending queries to MongoDB. These functions assume - * that queries are sent through the official MongoDB C driver, and apply query - * optimizations to reduce the amount of data fetched from the driver. + * Portions Copyright (c) 2012-2014, PostgreSQL Global Development Group + * Portions Copyright (c) 2004-2024, EnterpriseDB Corporation. + * Portions Copyright (c) 2012–2014 Citus Data, Inc. * - * Copyright (c) 2012-2014 Citus Data, Inc. 
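[Editor's note, illustration only] To make the parallel-list layout described above concrete, the small standalone program below prints the four lists exactly as the comment gives them for the clause (T1.a = T2.x AND T1.b > T2.y); entry i of every array describes the same column. In the FDW these are PostgreSQL Lists, and duplicates are filtered through a hash table keyed on (varno, varattno) as defined by ColumnHashKey; the plain arrays here are only a simplified stand-in.

```c
#include <stdio.h>

int
main(void)
{
	const char *colNameList[] = {"a", "x", "b", "y"};
	const int	colNumList[] = {1, 1, 2, 2};	/* attribute numbers */
	const int	rtiList[] = {1, 2, 1, 2};		/* range table indexes */
	const int	isOuterList[] = {1, 0, 1, 0};	/* 1 = outer-side column */
	int			i;

	for (i = 0; i < 4; i++)
		printf("col %-2s attno=%d rti=%d outer=%d\n",
			   colNameList[i], colNumList[i], rtiList[i], isOuterList[i]);
	return 0;
}
```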
+ * IDENTIFICATION + * mongo_query.c * *------------------------------------------------------------------------- */ - #include "postgres.h" -#include "mongo_fdw.h" +#include "mongo_wrapper.h" + +#include +#include + +#include "access/htup_details.h" +#include "access/table.h" +#include "catalog/heap.h" +#include "catalog/pg_collation.h" +#include "catalog/pg_operator.h" +#if PG_VERSION_NUM >= 130000 +#include "common/hashfn.h" +#endif +#include "mongoc.h" +#include "mongo_query.h" +#include "optimizer/optimizer.h" +#include "parser/parsetree.h" +#include "utils/rel.h" +#include "utils/syscache.h" -#include "catalog/pg_type.h" -#include "nodes/makefuncs.h" -#include "nodes/relation.h" -#include "optimizer/var.h" -#include "utils/array.h" -#include "utils/builtins.h" -#include "utils/date.h" -#include "utils/lsyscache.h" -#include "utils/numeric.h" -#include "utils/timestamp.h" +/* + * Global context for foreign_expr_walker's search of an expression tree. + */ +typedef struct foreign_glob_cxt +{ + PlannerInfo *root; /* global planner state */ + RelOptInfo *foreignrel; /* the foreign relation we are planning for */ + Relids relids; /* relids of base relations in the underlying + * scan */ + bool is_having_cond; /* "true" for HAVING clause condition */ +} foreign_glob_cxt; +/* + * Local (per-tree-level) context for foreign_expr_walker's search. + * This is concerned with identifying collations used in the expression. + */ +typedef enum +{ + FDW_COLLATE_NONE, /* expression is of a noncollatable type */ + FDW_COLLATE_SAFE, /* collation derives from a foreign Var */ + FDW_COLLATE_UNSAFE /* collation derives from something else */ +} FDWCollateState; -/* Local functions forward declarations */ -static Expr * FindArgumentOfType(List *argumentList, NodeTag argumentType); -static char * MongoOperatorName(const char *operatorName); -static List * EqualityOperatorList(List *operatorList); -static List * UniqueColumnList(List *operatorList); -static List * ColumnOperatorList(Var *column, List *operatorList); -static void AppendConstantValue(bson *queryDocument, const char *keyName, - Const *constant); +typedef struct foreign_loc_cxt +{ + Oid collation; /* OID of current collation, if any */ + FDWCollateState state; /* state of current collation choice */ +} foreign_loc_cxt; +/* Local functions forward declarations */ +static bool foreign_expr_walker(Node *node, + foreign_glob_cxt *glob_cxt, + foreign_loc_cxt *outer_cxt); +static List *prepare_var_list_for_baserel(Oid relid, Index varno, + Bitmapset *attrs_used); +static HTAB *column_info_hash(List *colname_list, List *colnum_list, + List *rti_list, List *isouter_list); +static void mongo_prepare_pipeline(List *clause, BSON *inner_pipeline, + pipeline_cxt *context); +static void mongo_append_clauses_to_pipeline(List *clause, BSON *child_doc, + pipeline_cxt *context); + +#if PG_VERSION_NUM >= 160000 +static List *mongo_append_unique_var(List *varlist, Var *var); +#endif /* - * ApplicableOpExpressionList walks over all filter clauses that relate to this - * foreign table, and chooses applicable clauses that we know we can translate - * into Mongo queries. Currently, these clauses include comparison expressions - * that have a column and a constant as arguments. For example, "o_orderdate >= - * date '1994-01-01' + interval '1' year" is an applicable expression. 
+ * mongo_query_document + * Takes in the applicable operator expressions for relation, the join + * clauses for join relation, and grouping targets for upper relation and + * converts these expressions, join clauses, and grouping targets into + * equivalent queries in MongoDB. + * + * For join clauses, transforms simple comparison expressions along with a + * comparison between two vars and nested operator expressions as well. + * + * Example: Consider the following two foreign tables: + * t1(_id NAME, age INT, name VARCHAR) + * t2(_id NAME, old INT, alias VARCHAR) + * + * SQL query: + * SELECT * FROM t1 LEFT JOIN t2 ON (t1.age = t2.old) + * WHERE (t1.age % 2) = 1 + * ORDER BY t1.age ASC NULLS FIRST; + + * Equivalent MongoDB query: + * + * db.t1.aggregate([ + * { + * "$lookup": + * { + * "from": "t2", + * "let": { "v_age": "$age" }, + * "pipeline": [ + * { + * "$match": + * { + * "$expr": + * { + * "$and": [ + * { "$eq": [ "$$v_age", "$old" ] } + * { "$ne": [ "$$v_age", null ] }, + * { "$ne": [ "$old", null ] }, + * ] + * } + * } + * } + * ], + * "as": "Join_Result" + * } + * }, + * { "$match" : + * { + * "$expr" : + * { "$and" : [ + * { "$eq" : [ { "$mod" : [ "$age", 2] }, 1]}, + * { "$ne" : [ "$age", null ] } + * ] + * } + * } + * } + * { + * "$unwind": + * { + * "path": "$Join_Result", + * "preserveNullAndEmptyArrays": true + * } + * }, + * { "$sort": { "age" : 1 } } + * ]) + * + * Any MongoDB query would have the following three main arrays: + * 1. Root pipeline array (first square bracket): + * This has three elements called $lookup, $unwind, and $match stages. + * 2. Inner pipeline array (starting with "pipeline" keyword above): + * It has one element that is $match. + * 3. "$and" expression inside inner pipeline: + * These elements depend on the join clauses available. + * + * The outer $match stage (2nd element of root pipeline array) represents + * remote_exprs, and $match inside $lookup stage represents the join clauses. + * + * For grouping target, add $group stage on the base relation or join relation. + * The HAVING clause is nothing but a post $match stage. + * + * Example: Consider above table t1: + * + * SQL query: + * SELECT name, SUM(age) FROM t1 GROUP BY name HAVING MIN(name) = 'xyz' + * ORDER BY name DESC NULLS LAST; + * + * Equivalent MongoDB query: + * + * db.t1.aggregate([ + * { + * "$group": + * { + * "_id": {"name": "$name"}, + * "v_agg0": {"$sum": "$age"}, + * "v_having": {"$min": "$name"} + * } + * }, + * { + * "$match": {"v_having": "xyz"} + * } + * { "$sort": { "name" : -1 } } + * ]) + * + * For ORDER BY, add $sort stage on the base relation or join or grouping + * relation as shown in the above examples of join and grouping relations. 
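[Editor's note, illustration only] For readers who have not worked with the mongo-c-driver, the standalone sketch below assembles the WHERE/ORDER BY part of the first example above, that is the outer $match on (age % 2) = 1 with its NULL guard followed by {"$sort": {"age": 1}}, using the stock libbson calls, and prints the result as JSON. It only illustrates the document shape being aimed at; the FDW itself builds these documents through its own bsonAppend* wrappers in the code that follows.

```c
/* cc pipeline_demo.c $(pkg-config --cflags --libs libbson-1.0) */
#include <stdio.h>
#include <bson/bson.h>

int
main(void)
{
	bson_t	doc = BSON_INITIALIZER;
	bson_t	pipeline, stage, match, expr, and_arr, clause, args;
	bson_t	mod_doc, mod_args, sort_stage, sort;
	char   *json;

	bson_append_array_begin(&doc, "pipeline", -1, &pipeline);

	/* Stage 0: {"$match": {"$expr": {"$and": [...]}}} */
	bson_append_document_begin(&pipeline, "0", -1, &stage);
	bson_append_document_begin(&stage, "$match", -1, &match);
	bson_append_document_begin(&match, "$expr", -1, &expr);
	bson_append_array_begin(&expr, "$and", -1, &and_arr);

	/* {"$eq": [{"$mod": ["$age", 2]}, 1]}  i.e. (age % 2) = 1 */
	bson_append_document_begin(&and_arr, "0", -1, &clause);
	bson_append_array_begin(&clause, "$eq", -1, &args);
	bson_append_document_begin(&args, "0", -1, &mod_doc);
	bson_append_array_begin(&mod_doc, "$mod", -1, &mod_args);
	BSON_APPEND_UTF8(&mod_args, "0", "$age");
	BSON_APPEND_INT32(&mod_args, "1", 2);
	bson_append_array_end(&mod_doc, &mod_args);
	bson_append_document_end(&args, &mod_doc);
	BSON_APPEND_INT32(&args, "1", 1);
	bson_append_array_end(&clause, &args);
	bson_append_document_end(&and_arr, &clause);

	/* {"$ne": ["$age", null]}  NULL guard added alongside the clause */
	bson_append_document_begin(&and_arr, "1", -1, &clause);
	bson_append_array_begin(&clause, "$ne", -1, &args);
	BSON_APPEND_UTF8(&args, "0", "$age");
	BSON_APPEND_NULL(&args, "1");
	bson_append_array_end(&clause, &args);
	bson_append_document_end(&and_arr, &clause);

	bson_append_array_end(&expr, &and_arr);
	bson_append_document_end(&match, &expr);
	bson_append_document_end(&stage, &match);
	bson_append_document_end(&pipeline, &stage);

	/* Stage 1: {"$sort": {"age": 1}}  (1 = ascending, -1 = descending) */
	bson_append_document_begin(&pipeline, "1", -1, &sort_stage);
	bson_append_document_begin(&sort_stage, "$sort", -1, &sort);
	BSON_APPEND_INT32(&sort, "age", 1);
	bson_append_document_end(&sort_stage, &sort);
	bson_append_document_end(&pipeline, &sort_stage);

	bson_append_array_end(&doc, &pipeline);

	json = bson_as_relaxed_extended_json(&doc, NULL);
	printf("%s\n", json);
	bson_free(json);
	bson_destroy(&doc);
	return 0;
}
```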
*/ -List * -ApplicableOpExpressionList(RelOptInfo *baserel) +BSON * +mongo_query_document(ForeignScanState *scanStateNode) { - List *opExpressionList = NIL; - List *restrictInfoList = baserel->baserestrictinfo; - ListCell *restrictInfoCell = NULL; + ForeignScan *fsplan = (ForeignScan *) scanStateNode->ss.ps.plan; + BSON *queryDocument = bsonCreate(); + BSON *filter = bsonCreate(); + List *PrivateList = fsplan->fdw_private; + List *opExpressionList = list_nth(PrivateList, + mongoFdwPrivateRemoteExprList); + MongoFdwModifyState *fmstate = (MongoFdwModifyState *) scanStateNode->fdw_state; + BSON root_pipeline; + BSON match_stage; + int root_index = 0; + List *joinclauses = NIL; + List *colnum_list; + List *colname_list = NIL; + List *isouter_list = NIL; + List *rti_list; + List *pathkey_list; + List *is_ascsort_list; + char *inner_relname = NULL; + char *outer_relname = NULL; + HTAB *columnInfoHash; + int jointype = 0; + bool has_limit; + + /* Retrieve data passed by planning phase */ + colname_list = list_nth(PrivateList, mongoFdwPrivateJoinClauseColNameList); + colnum_list = list_nth(PrivateList, mongoFdwPrivareJoinClauseColNumList); + rti_list = list_nth(PrivateList, mongoFdwPrivateJoinClauseRtiList); + isouter_list = list_nth(PrivateList, mongoFdwPrivateJoinClauseIsOuterList); + +#ifdef USE_ASSERT_CHECKING + { + /* Length should be same for all lists of column information */ + int natts = list_length(colname_list); + + Assert(natts == list_length(colnum_list) && + natts == list_length(rti_list) && + natts == list_length(isouter_list)); + } +#endif + + /* Store information in the hash-table */ + columnInfoHash = column_info_hash(colname_list, colnum_list, rti_list, + isouter_list); + + /* Retrieve information related to ORDER BY clause */ + pathkey_list = list_nth(PrivateList, mongoFdwPrivatePathKeyList); + is_ascsort_list = list_nth(PrivateList, mongoFdwPrivateIsAscSortList); + + /* Retrieve information related to LIMIT/OFFSET clause */ + has_limit = intVal(list_nth(PrivateList, mongoFdwPrivateHasLimitClause)); - foreach(restrictInfoCell, restrictInfoList) + if (fmstate->relType == JOIN_REL || fmstate->relType == UPPER_JOIN_REL) { - RestrictInfo *restrictInfo = (RestrictInfo *) lfirst(restrictInfoCell); - Expr *expression = restrictInfo->clause; - NodeTag expressionType = 0; - - OpExpr *opExpression = NULL; - char *operatorName = NULL; - char *mongoOperatorName = NULL; - List *argumentList = NIL; - Var *column = NULL; - Const *constant = NULL; - bool equalsOperator = false; - bool constantIsArray = false; - - /* we only support operator expressions */ - expressionType = nodeTag(expression); - if (expressionType != T_OpExpr) - { - continue; - } + List *innerouter_relname; + + joinclauses = list_nth(PrivateList, mongoFdwPrivateJoinClauseList); + if (joinclauses) + jointype = intVal(list_nth(PrivateList, mongoFdwPrivateJoinType)); + + innerouter_relname = list_nth(PrivateList, + mongoFdwPrivateJoinInnerOuterRelName); + inner_relname = strVal(list_nth(innerouter_relname, 0)); + outer_relname = strVal(list_nth(innerouter_relname, 1)); + } + + /* Prepare array of stages */ + bsonAppendStartArray(queryDocument, "pipeline", &root_pipeline); + + /* + * Add filter into query pipeline if available. These are remote_exprs + * i.e. clauses available in WHERE and those are push-able to the remote + * side. 
+ */ + if (opExpressionList) + { + pipeline_cxt context; + + context.colInfoHash = columnInfoHash; + context.isBoolExpr = false; + context.isJoinClause = false; + context.scanStateNode = scanStateNode; + + bsonAppendStartArray(filter, "pipeline", &match_stage); - opExpression = (OpExpr *) expression; - operatorName = get_opname(opExpression->opno); + /* Form equivalent WHERE clauses in MongoDB */ + mongo_prepare_pipeline(opExpressionList, &match_stage, &context); - /* we only support =, <, >, <=, >=, and <> operators */ - if (strncmp(operatorName, EQUALITY_OPERATOR_NAME, NAMEDATALEN) == 0) + bsonAppendFinishArray(filter, &match_stage); + } + + if (fmstate->relType == JOIN_REL || fmstate->relType == UPPER_JOIN_REL) + { + BSON inner_pipeline; + BSON lookup_object; + BSON lookup; + BSON let_exprs; + BSON unwind_stage; + BSON unwind; + BSON *inner_pipeline_doc = bsonCreate(); + ListCell *cell1; + ListCell *cell2; + + /* $lookup stage. This is to perform JOIN */ + bsonAppendStartObject(&root_pipeline, psprintf("%d", root_index++), + &lookup_object); + bsonAppendStartObject(&lookup_object, "$lookup", &lookup); + bsonAppendUTF8(&lookup, "from", inner_relname); + + /* + * Start "let" operator: Specifies variables to use in the pipeline + * stages. To access columns of outer relation, those need to be + * defined in terms of a variable using "let". + */ + bsonAppendStartObject(&lookup, "let", &let_exprs); + forboth(cell1, colname_list, cell2, isouter_list) { - equalsOperator = true; + char *colname = strVal(lfirst(cell1)); + bool is_outer = lfirst_int(cell2); + + /* + * Ignore column name with "*" because this is not the name of any + * particular column and is not allowed in the let operator. While + * deparsing the COUNT(*) aggregation operation, this column name + * is added to lists to maintain the length of column information. + */ + if (is_outer && strcmp(colname, "*") != 0) + { + char *varname = psprintf("%s", + get_varname_for_outer_col(colname)); + char *field = psprintf("$%s", colname); + + bsonAppendUTF8(&let_exprs, varname, field); + } } + bsonAppendFinishObject(&lookup, &let_exprs); /* End "let" */ - mongoOperatorName = MongoOperatorName(operatorName); - if (!equalsOperator && mongoOperatorName == NULL) + /* Form inner pipeline required in $lookup stage to execute $match */ + bsonAppendStartArray(inner_pipeline_doc, "pipeline", &inner_pipeline); + if (joinclauses) { - continue; + pipeline_cxt context; + + context.colInfoHash = columnInfoHash; + context.isBoolExpr = false; + context.isJoinClause = true; + context.scanStateNode = scanStateNode; + + /* Form equivalent join qual clauses in MongoDB */ + mongo_prepare_pipeline(joinclauses, &inner_pipeline, &context); + bsonAppendFinishArray(inner_pipeline_doc, &inner_pipeline); } + /* Append inner pipeline to $lookup stage */ + bson_append_array(&lookup, "pipeline", (int) strlen("pipeline"), + &inner_pipeline); + + bsonAppendUTF8(&lookup, "as", "Join_Result"); + bsonAppendFinishObject(&lookup_object, &lookup); + bsonAppendFinishObject(&root_pipeline, &lookup_object); + + /* $match stage. This is to add a filter */ + if (opExpressionList) + bsonAppendBson(&root_pipeline, "$match", &match_stage); + /* - * We only support simple binary operators that compare a column against - * a constant. If the expression is a tree, we don't recurse into it. + * $unwind stage. This deconstructs an array field from the input + * documents to output a document for each element. 
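[Editor's note, illustration only] The $lookup construction above is the densest part of the join pushdown: the outer column is first exposed through "let" as a $$variable and then referenced inside the inner pipeline's $match. The standalone sketch below builds just that stage for the documented t1/t2 example with the stock libbson API; it shows the target document shape only and is not the FDW's own helper code.

```c
/* cc lookup_demo.c $(pkg-config --cflags --libs libbson-1.0) */
#include <stdio.h>
#include <bson/bson.h>

int
main(void)
{
	bson_t	stage = BSON_INITIALIZER;
	bson_t	lookup, let, inner, elem, match, expr, args;
	char   *json;

	bson_append_document_begin(&stage, "$lookup", -1, &lookup);
	BSON_APPEND_UTF8(&lookup, "from", "t2");

	/* "let" exposes outer-relation columns to the inner pipeline as $$vars */
	bson_append_document_begin(&lookup, "let", -1, &let);
	BSON_APPEND_UTF8(&let, "v_age", "$age");
	bson_append_document_end(&lookup, &let);

	/* pipeline: [{"$match": {"$expr": {"$eq": ["$$v_age", "$old"]}}}] */
	bson_append_array_begin(&lookup, "pipeline", -1, &inner);
	bson_append_document_begin(&inner, "0", -1, &elem);
	bson_append_document_begin(&elem, "$match", -1, &match);
	bson_append_document_begin(&match, "$expr", -1, &expr);
	bson_append_array_begin(&expr, "$eq", -1, &args);
	BSON_APPEND_UTF8(&args, "0", "$$v_age");
	BSON_APPEND_UTF8(&args, "1", "$old");
	bson_append_array_end(&expr, &args);
	bson_append_document_end(&match, &expr);
	bson_append_document_end(&elem, &match);
	bson_append_document_end(&inner, &elem);
	bson_append_array_end(&lookup, &inner);

	BSON_APPEND_UTF8(&lookup, "as", "Join_Result");
	bson_append_document_end(&stage, &lookup);

	json = bson_as_relaxed_extended_json(&stage, NULL);
	printf("%s\n", json);
	bson_free(json);
	bson_destroy(&stage);
	return 0;
}
```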
*/ - argumentList = opExpression->args; - column = (Var *) FindArgumentOfType(argumentList, T_Var); - constant = (Const *) FindArgumentOfType(argumentList, T_Const); + bsonAppendStartObject(&root_pipeline, psprintf("%d", root_index++), + &unwind_stage); + bsonAppendStartObject(&unwind_stage, "$unwind", &unwind); + bsonAppendUTF8(&unwind, "path", "$Join_Result"); + if (jointype == JOIN_INNER) + bsonAppendBool(&unwind, "preserveNullAndEmptyArrays", false); + else + bsonAppendBool(&unwind, "preserveNullAndEmptyArrays", true); + bsonAppendFinishObject(&unwind_stage, &unwind); + bsonAppendFinishObject(&root_pipeline, &unwind_stage); + + fmstate->outerRelName = outer_relname; + } + else if (opExpressionList) + bsonAppendBson(&root_pipeline, "$match", &match_stage); + + /* Add $group stage for upper relation */ + if (fmstate->relType == UPPER_JOIN_REL || fmstate->relType == UPPER_REL) + { + List *func_list; + List *agg_col_list; + List *groupby_col_list; + List *having_expr; + BSON groupby_expr; + BSON group_stage; + BSON group_expr; + BSON group; + ListCell *cell1; + ListCell *cell2; + ListCell *cell3; + List *is_having_list; + Index aggIndex = 0; + + func_list = list_nth(PrivateList, mongoFdwPrivateAggType); + agg_col_list = list_nth(PrivateList, mongoFdwPrivateAggColList); + groupby_col_list = list_nth(PrivateList, mongoFdwPrivateGroupByColList); + having_expr = list_nth(PrivateList, mongoFdwPrivateHavingExpr); + is_having_list = list_nth(PrivateList, mongoFdwPrivateIsHavingList); + + /* $group stage. */ + bsonAppendStartObject(&root_pipeline, psprintf("%d", root_index++), + &group_stage); + bsonAppendStartObject(&group_stage, "$group", &group); /* - * We don't push down operators where the constant is an array, since - * conditional operators for arrays in MongoDB aren't properly defined. - * For example, {similar_products : [ "B0009S4IJW", "6301964144" ]} - * finds results that are equal to the array, but {similar_products: - * {$gte: [ "B0009S4IJW", "6301964144" ]}} returns an empty set. + * Add columns from the GROUP BY clause in the "_id" field of $group + * stage. In case of aggregation on join result, a column of the + * inner table needs to be accessed by prefixing it using + * "Join_Result", which is been hardcoded. */ - if (constant != NULL) + if (groupby_col_list) { - Oid constantArrayTypeId = get_element_type(constant->consttype); - if (constantArrayTypeId != InvalidOid) + ListCell *columnCell; + + bsonAppendStartObject(&group, "_id", &groupby_expr); + foreach(columnCell, groupby_col_list) { - constantIsArray = true; + Var *column = (Var *) lfirst(columnCell); + bool found = false; + ColInfoHashKey key; + ColInfoHashEntry *columnInfo; + + key.varNo = column->varno; + key.varAttno = column->varattno; + + columnInfo = (ColInfoHashEntry *) hash_search(columnInfoHash, + (void *) &key, + HASH_FIND, + &found); + if (found) + { + if (columnInfo->isOuter) + bsonAppendUTF8(&groupby_expr, columnInfo->colName, + psprintf("$%s", columnInfo->colName)); + else + bsonAppendUTF8(&groupby_expr, columnInfo->colName, + psprintf("$Join_Result.%s", + columnInfo->colName)); + } } + bsonAppendFinishObject(&group, &groupby_expr); /* End "_id" */ } - - if (column != NULL && constant != NULL && !constantIsArray) + else { - opExpressionList = lappend(opExpressionList, opExpression); + /* If no GROUP BY clause then append null to the _id. 
*/ + bsonAppendNull(&group, "_id"); } - } - - return opExpressionList; -} + /* Add grouping operation */ + forthree(cell1, func_list, cell2, agg_col_list, cell3, is_having_list) + { + ColInfoHashKey key; + ColInfoHashEntry *columnInfo; + bool found = false; + char *func_name = strVal(lfirst(cell1)); + Var *column = (Var *) lfirst(cell2); + bool is_having_agg = lfirst_int(cell3); + + if (is_having_agg) + bsonAppendStartObject(&group, "v_having", &group_expr); + else + bsonAppendStartObject(&group, + psprintf("AGG_RESULT_KEY%d", + aggIndex++), + &group_expr); + + key.varNo = column->varno; + key.varAttno = column->varattno; + + columnInfo = (ColInfoHashEntry *) hash_search(columnInfoHash, + (void *) &key, + HASH_FIND, + &found); -/* - * FindArgumentOfType walks over the given argument list, looks for an argument - * with the given type, and returns the argument if it is found. - */ -static Expr * -FindArgumentOfType(List *argumentList, NodeTag argumentType) -{ - Expr *foundArgument = NULL; - ListCell *argumentCell = NULL; + /* + * The aggregation operation in MongoDB other than COUNT has the + * same name as PostgreSQL but COUNT needs to be performed using + * the $sum operator because MongoDB doesn't have a direct $count + * operator for the currently supported version (i.e. v4.4). + * + * There is no syntax in MongoDB to provide column names for COUNT + * operation but for other supported operations, we can do so. + * + * In case of aggregation over the join, the resulted columns of + * inner relation need to be accessed by prefixing it with + * "Join_Result". + */ + if (found && strcmp(func_name, "count") != 0) + { + if (columnInfo->isOuter) + bsonAppendUTF8(&group_expr, psprintf("$%s", func_name), + psprintf("$%s", columnInfo->colName)); + else + bsonAppendUTF8(&group_expr, psprintf("$%s", func_name), + psprintf("$Join_Result.%s", + columnInfo->colName)); + } + else + { + /* + * The COUNT(*) in PostgreSQL is equivalent to {$sum: 1} in + * the MongoDB. + */ + bsonAppendInt32(&group_expr, psprintf("$%s", "sum"), 1); + } - foreach(argumentCell, argumentList) - { - Expr *argument = (Expr *) lfirst(argumentCell); - if (nodeTag(argument) == argumentType) - { - foundArgument = argument; - break; + bsonAppendFinishObject(&group, &group_expr); } - } - return foundArgument; -} + bsonAppendFinishObject(&group_stage, &group); + bsonAppendFinishObject(&root_pipeline, &group_stage); + /* Add HAVING operation */ + if (having_expr) + { + pipeline_cxt context; -/* - * QueryDocument takes in the applicable operator expressions for a relation and - * converts these expressions into equivalent queries in MongoDB. For now, this - * function can only transform simple comparison expressions, and returns these - * transformed expressions in a BSON document. For example, simple expressions - * "l_shipdate >= date '1994-01-01' AND l_shipdate < date '1995-01-01'" become - * "l_shipdate: { $gte: new Date(757382400000), $lt: new Date(788918400000) }". - */ -bson * -QueryDocument(Oid relationId, List *opExpressionList) -{ - List *equalityOperatorList = NIL; - List *comparisonOperatorList = NIL; - List *columnList = NIL; - ListCell *equalityOperatorCell = NULL; - ListCell *columnCell = NULL; - bson *queryDocument = NULL; - int documentStatus = BSON_OK; + context.colInfoHash = columnInfoHash; + context.isBoolExpr = false; + context.isJoinClause = false; + context.scanStateNode = scanStateNode; - queryDocument = bson_create(); - bson_init(queryDocument); + /* $match stage. 
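As a sketch of the $group stage assembled here, a hypothetical aggregate such as SELECT name, min(age), count(*) ... GROUP BY name on a simple foreign table would come out roughly as follows (column names are assumptions; the key names follow the AGG_RESULT_KEY%d convention above):

    { "$group": {
        "_id": { "name": "$name" },
        "AGG_RESULT_KEY0": { "$min": "$age" },
        "AGG_RESULT_KEY1": { "$sum": 1 }
    } }

Without a GROUP BY clause the _id field is simply null, and an aggregate that appears only in HAVING is emitted under the v_having key so that the $match stage added below can filter on it.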
Add a filter for the HAVING clause */ + bsonAppendStartObject(&root_pipeline, psprintf("%d", root_index++), + &match_stage); + /* Form equivalent HAVING clauses in MongoDB */ + mongo_prepare_pipeline(having_expr, &match_stage, &context); - /* - * We distinguish between equality expressions and others since we need to - * insert the latter (<, >, <=, >=, <>) as separate sub-documents into the - * BSON query object. - */ - equalityOperatorList = EqualityOperatorList(opExpressionList); - comparisonOperatorList = list_difference(opExpressionList, equalityOperatorList); + bsonAppendFinishObject(&root_pipeline, &match_stage); + } + } - /* append equality expressions to the query */ - foreach(equalityOperatorCell, equalityOperatorList) + /* Add sort stage */ + if (pathkey_list) { - OpExpr *equalityOperator = (OpExpr *) lfirst(equalityOperatorCell); - Oid columnId = InvalidOid; - char *columnName = NULL; - - List *argumentList = equalityOperator->args; - Var *column = (Var *) FindArgumentOfType(argumentList, T_Var); - Const *constant = (Const *) FindArgumentOfType(argumentList, T_Const); + BSON sort_stage; + BSON sort; + ListCell *cell1; + ListCell *cell2; - columnId = column->varattno; - columnName = get_relid_attribute_name(relationId, columnId); + bsonAppendStartObject(&root_pipeline, psprintf("%d", root_index++), + &sort_stage); + bsonAppendStartObject(&sort_stage, "$sort", &sort); - AppendConstantValue(queryDocument, columnName, constant); + forboth(cell1, pathkey_list, cell2, is_ascsort_list) + { + Var *column = (Var *) lfirst(cell1); + int is_asc_sort = lfirst_int(cell2); + bool found = false; + ColInfoHashKey key; + ColInfoHashEntry *columnInfo; + + /* Find column name */ + key.varNo = column->varno; + key.varAttno = column->varattno; + + columnInfo = (ColInfoHashEntry *) hash_search(columnInfoHash, + (void *) &key, + HASH_FIND, + &found); + if (found) + { + /* + * In the case of upper rel, access the column by prefixing it + * with "_id". To access the column of the inner relation in + * the join operation, use the prefix "Join_result" because + * direct access is not possible. However, columns of the + * simple relation and outer relation of the join can be + * accessed directly. + */ + if (fmstate->relType == UPPER_JOIN_REL || + fmstate->relType == UPPER_REL) + bsonAppendInt32(&sort, + psprintf("_id.%s", columnInfo->colName), + is_asc_sort); + else if (!columnInfo->isOuter && fmstate->relType != BASE_REL) + bsonAppendInt32(&sort, + psprintf("Join_result.%s", + columnInfo->colName), + is_asc_sort); + else + bsonAppendInt32(&sort, columnInfo->colName, is_asc_sort); + } + } + bsonAppendFinishObject(&sort_stage, &sort); + bsonAppendFinishObject(&root_pipeline, &sort_stage); /* End sort */ } - /* - * For comparison expressions, we need to group them by their columns and - * append all expressions that correspond to a column as one sub-document. - * Otherwise, even when we have two expressions to define the upper- and - * lower-bound of a range, Mongo uses only one of these expressions during - * an index search. 
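The prefix used in the $sort keys therefore depends on the relation type. For example (column names assumed), an ORDER BY on a grouped column of an aggregated relation and one on an inner-side column of a join would produce, respectively:

    { "$sort": { "_id.name": 1 } }
    { "$sort": { "Join_result.old": -1 } }

where 1 and -1 are the ascending/descending markers carried in is_ascsort_list; columns of a base relation, or of the outer side of a join, are referenced by their bare names.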
- */ - columnList = UniqueColumnList(comparisonOperatorList); - - /* append comparison expressions, grouped by columns, to the query */ - foreach(columnCell, columnList) + /* Add LIMIT/SKIP stage */ + if (has_limit) { - Var *column = (Var *) lfirst(columnCell); - Oid columnId = InvalidOid; - char *columnName = NULL; - List *columnOperatorList = NIL; - ListCell *columnOperatorCell = NULL; + int64 limit_value; + int64 offset_value; - columnId = column->varattno; - columnName = get_relid_attribute_name(relationId, columnId); + /* + * Add skip stage for OFFSET clause. However, don't add the same if + * either offset is not provided or the offset value is zero. + */ + offset_value = (int64) intVal(list_nth(PrivateList, + mongoFdwPrivateLimitOffsetList)); + if (offset_value != -1 && offset_value != 0) + { + BSON skip_stage; - /* find all expressions that correspond to the column */ - columnOperatorList = ColumnOperatorList(column, comparisonOperatorList); + bsonAppendStartObject(&root_pipeline, psprintf("%d", root_index++), + &skip_stage); + bsonAppendInt64(&skip_stage, "$skip", offset_value); + bsonAppendFinishObject(&root_pipeline, &skip_stage); + } - /* for comparison expressions, start a sub-document */ - bson_append_start_object(queryDocument, columnName); + /* + * Add limit stage for LIMIT clause. However, don't add the same if + * the limit is not provided. + */ + limit_value = (int64) intVal(list_nth(PrivateList, + mongoFdwPrivateLimitCountList)); - foreach(columnOperatorCell, columnOperatorList) + if (limit_value != -1) { - OpExpr *columnOperator = (OpExpr *) lfirst(columnOperatorCell); - char *operatorName = NULL; - char *mongoOperatorName = NULL; - - List *argumentList = columnOperator->args; - Const *constant = (Const *) FindArgumentOfType(argumentList, T_Const); + BSON limit_stage; - operatorName = get_opname(columnOperator->opno); - mongoOperatorName = MongoOperatorName(operatorName); - - AppendConstantValue(queryDocument, mongoOperatorName, constant); + bsonAppendStartObject(&root_pipeline, psprintf("%d", root_index++), + &limit_stage); + bsonAppendInt64(&limit_stage, "$limit", limit_value); + bsonAppendFinishObject(&root_pipeline, &limit_stage); } - - bson_append_finish_object(queryDocument); } - documentStatus = bson_finish(queryDocument); - if (documentStatus != BSON_OK) - { - ereport(ERROR, (errmsg("could not create document for query"), - errhint("BSON error: %s", queryDocument->errstr))); - } + bsonAppendFinishArray(queryDocument, &root_pipeline); return queryDocument; } - /* - * MongoOperatorName takes in the given PostgreSQL comparison operator name, and - * returns its equivalent in MongoDB. + * mongo_operator_name + * Takes in the given PostgreSQL comparison operator name, and returns its + * equivalent in MongoDB. 
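So a pushed-down LIMIT 10 OFFSET 5 would append two trailing stages of the form shown below; a value of -1 in the private lists means the corresponding clause was absent, and a zero offset is simply not emitted:

    { "$skip": 5 },
    { "$limit": 10 }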
*/ -static char * -MongoOperatorName(const char *operatorName) +char * +mongo_operator_name(const char *operatorName) { const char *mongoOperatorName = NULL; - const int32 nameCount = 5; - static const char *nameMappings[][2] = { { "<", "$lt" }, - { ">", "$gt" }, - { "<=", "$lte" }, - { ">=", "$gte" }, - { "<>", "$ne" } }; - - int32 nameIndex = 0; + const int32 nameCount = 14; + static const char *nameMappings[][2] = {{"<", "$lt"}, + {">", "$gt"}, + {"<=", "$lte"}, + {">=", "$gte"}, + {"<>", "$ne"}, + {"=", "$eq"}, + {"+", "$add"}, + {"-", "$subtract"}, + {"*", "$multiply"}, + {"/", "$divide"}, + {"%", "$mod"}, + {"^", "$pow"}, + {"|/", "$sqrt"}, + {"@", "$abs"}}; + int32 nameIndex; + for (nameIndex = 0; nameIndex < nameCount; nameIndex++) { const char *pgOperatorName = nameMappings[nameIndex][0]; + if (strncmp(pgOperatorName, operatorName, NAMEDATALEN) == 0) { mongoOperatorName = nameMappings[nameIndex][1]; @@ -277,269 +668,1187 @@ MongoOperatorName(const char *operatorName) return (char *) mongoOperatorName; } - -/* - * EqualityOperatorList finds the equality (=) operators in the given list, and - * returns these operators in a new list. - */ -static List * -EqualityOperatorList(List *operatorList) +void +append_param_value(BSON *queryDocument, const char *keyName, Param *paramNode, + ForeignScanState *scanStateNode) { - List *equalityOperatorList = NIL; - ListCell *operatorCell = NULL; - - foreach(operatorCell, operatorList) - { - OpExpr *operator = (OpExpr *) lfirst(operatorCell); - char *operatorName = NULL; - - operatorName = get_opname(operator->opno); - if (strncmp(operatorName, EQUALITY_OPERATOR_NAME, NAMEDATALEN) == 0) - { - equalityOperatorList = lappend(equalityOperatorList, operator); - } - } - - return equalityOperatorList; -} + ExprState *param_expr; + Datum param_value; + bool isNull; + ExprContext *econtext; + if (scanStateNode == NULL) + return; -/* - * UniqueColumnList walks over the given operator list, and extracts the column - * argument in each operator. The function then de-duplicates extracted columns, - * and returns them in a new list. - */ -static List * -UniqueColumnList(List *operatorList) -{ - List *uniqueColumnList = NIL; - ListCell *operatorCell = NULL; + econtext = scanStateNode->ss.ps.ps_ExprContext; - foreach(operatorCell, operatorList) - { - OpExpr *operator = (OpExpr *) lfirst(operatorCell); - List *argumentList = operator->args; - Var *column = (Var *) FindArgumentOfType(argumentList, T_Var); + /* Prepare for parameter expression evaluation */ + param_expr = ExecInitExpr((Expr *) paramNode, (PlanState *) scanStateNode); - /* list membership is determined via column's equal() function */ - uniqueColumnList = list_append_unique(uniqueColumnList, column); - } + /* Evaluate the parameter expression */ + param_value = ExecEvalExpr(param_expr, econtext, &isNull); - return uniqueColumnList; + append_mongo_value(queryDocument, keyName, param_value, isNull, + paramNode->paramtype); } - /* - * ColumnOperatorList finds all expressions that correspond to the given column, - * and returns them in a new list. + * append_constant_value + * Appends to the query document the key name and constant value. + * + * The function translates the constant value from its PostgreSQL type + * to its MongoDB equivalent. 
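A quick usage sketch of the mapping table above (the operator names here are just examples): known PostgreSQL operator names come back as their MongoDB counterparts, anything else as NULL, which keeps unsupported operators from being pushed down.

    Assert(strcmp(mongo_operator_name(">="), "$gte") == 0);
    Assert(strcmp(mongo_operator_name("%"), "$mod") == 0);
    Assert(mongo_operator_name("~~") == NULL);    /* LIKE has no MongoDB mapping */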
*/ -static List * -ColumnOperatorList(Var *column, List *operatorList) +void +append_constant_value(BSON *queryDocument, const char *keyName, Const *constant) { - List *columnOperatorList = NIL; - ListCell *operatorCell = NULL; - - foreach(operatorCell, operatorList) + if (constant->constisnull) { - OpExpr *operator = (OpExpr *) lfirst(operatorCell); - List *argumentList = operator->args; - - Var *foundColumn = (Var *) FindArgumentOfType(argumentList, T_Var); - if (equal(column, foundColumn)) - { - columnOperatorList = lappend(columnOperatorList, operator); - } + bsonAppendNull(queryDocument, keyName); + return; } - return columnOperatorList; + append_mongo_value(queryDocument, keyName, constant->constvalue, false, + constant->consttype); } - -/* - * AppendConstantValue appends to the query document the key name and constant - * value. The function translates the constant value from its PostgreSQL type to - * its MongoDB equivalent. - */ -static void -AppendConstantValue(bson *queryDocument, const char *keyName, Const *constant) +bool +append_mongo_value(BSON *queryDocument, const char *keyName, Datum value, + bool isnull, Oid id) { - Datum constantValue = constant->constvalue; - Oid constantTypeId = constant->consttype; + bool status = false; - bool constantNull = constant->constisnull; - if (constantNull) + if (isnull) { - bson_append_null(queryDocument, keyName); - return; + status = bsonAppendNull(queryDocument, keyName); + return status; } - switch(constantTypeId) + switch (id) { case INT2OID: - { - int16 value = DatumGetInt16(constantValue); - bson_append_int(queryDocument, keyName, (int) value); + { + int16 valueInt = DatumGetInt16(value); + + status = bsonAppendInt32(queryDocument, keyName, + (int) valueInt); + } break; - } case INT4OID: - { - int32 value = DatumGetInt32(constantValue); - bson_append_int(queryDocument, keyName, value); + { + int32 valueInt = DatumGetInt32(value); + + status = bsonAppendInt32(queryDocument, keyName, valueInt); + } break; - } case INT8OID: - { - int64 value = DatumGetInt64(constantValue); - bson_append_long(queryDocument, keyName, value); + { + int64 valueLong = DatumGetInt64(value); + + status = bsonAppendInt64(queryDocument, keyName, valueLong); + } break; - } case FLOAT4OID: - { - float4 value = DatumGetFloat4(constantValue); - bson_append_double(queryDocument, keyName, (double) value); + { + float4 valueFloat = DatumGetFloat4(value); + + status = bsonAppendDouble(queryDocument, keyName, + (double) valueFloat); + } break; - } case FLOAT8OID: - { - float8 value = DatumGetFloat8(constantValue); - bson_append_double(queryDocument, keyName, value); + { + float8 valueFloat = DatumGetFloat8(value); + + status = bsonAppendDouble(queryDocument, keyName, valueFloat); + } break; - } case NUMERICOID: - { - Datum valueDatum = DirectFunctionCall1(numeric_float8, constantValue); - float8 value = DatumGetFloat8(valueDatum); - bson_append_double(queryDocument, keyName, value); + { + Datum valueDatum = DirectFunctionCall1(numeric_float8, + value); + float8 valueFloat = DatumGetFloat8(valueDatum); + + status = bsonAppendDouble(queryDocument, keyName, valueFloat); + } break; - } case BOOLOID: - { - bool value = DatumGetBool(constantValue); - bson_append_int(queryDocument, keyName, (int) value); + { + bool valueBool = DatumGetBool(value); + + status = bsonAppendBool(queryDocument, keyName, + (int) valueBool); + } break; - } case BPCHAROID: case VARCHAROID: case TEXTOID: - { - char *outputString = NULL; - Oid outputFunctionId = InvalidOid; - bool typeVarLength = 
false; - - getTypeOutputInfo(constantTypeId, &outputFunctionId, &typeVarLength); - outputString = OidOutputFunctionCall(outputFunctionId, constantValue); + { + char *outputString; + Oid outputFunctionId; + bool typeVarLength; - bson_append_string(queryDocument, keyName, outputString); + getTypeOutputInfo(id, &outputFunctionId, &typeVarLength); + outputString = OidOutputFunctionCall(outputFunctionId, value); + status = bsonAppendUTF8(queryDocument, keyName, outputString); + } break; - } - case NAMEOID: - { - char *outputString = NULL; - Oid outputFunctionId = InvalidOid; - bool typeVarLength = false; - bson_oid_t bsonObjectId; - memset(bsonObjectId.bytes, 0, sizeof(bsonObjectId.bytes)); + case BYTEAOID: + { + int len; + char *data; + char *result = DatumGetPointer(value); + + if (VARATT_IS_1B(result)) + { + len = VARSIZE_1B(result) - VARHDRSZ_SHORT; + data = VARDATA_1B(result); + } + else + { + len = VARSIZE_4B(result) - VARHDRSZ; + data = VARDATA_4B(result); + } + if (strcmp(keyName, "_id") == 0) + { + bson_oid_t oid; + + bson_oid_init_from_data(&oid, (const uint8_t *) data); + status = bsonAppendOid(queryDocument, keyName, &oid); + } + else + status = bsonAppendBinary(queryDocument, keyName, data, + len); + } + break; + case NAMEOID: + { + char *outputString; + Oid outputFunctionId; + bool typeVarLength; + bson_oid_t bsonObjectId; + + memset(bsonObjectId.bytes, 0, sizeof(bsonObjectId.bytes)); + getTypeOutputInfo(id, &outputFunctionId, &typeVarLength); + outputString = OidOutputFunctionCall(outputFunctionId, value); + bsonOidFromString(&bsonObjectId, outputString); + status = bsonAppendOid(queryDocument, keyName, &bsonObjectId); + } + break; + case DATEOID: + { + Datum valueDatum = DirectFunctionCall1(date_timestamp, + value); + Timestamp valueTimestamp = DatumGetTimestamp(valueDatum); + int64 valueMicroSecs = valueTimestamp + POSTGRES_TO_UNIX_EPOCH_USECS; + int64 valueMilliSecs = valueMicroSecs / 1000; + + status = bsonAppendDate(queryDocument, keyName, + valueMilliSecs); + } + break; + case TIMESTAMPOID: + case TIMESTAMPTZOID: + { + Timestamp valueTimestamp = DatumGetTimestamp(value); + int64 valueMicroSecs = valueTimestamp + POSTGRES_TO_UNIX_EPOCH_USECS; + int64 valueMilliSecs = valueMicroSecs / 1000; - getTypeOutputInfo(constantTypeId, &outputFunctionId, &typeVarLength); - outputString = OidOutputFunctionCall(outputFunctionId, constantValue); - bson_oid_from_string(&bsonObjectId, outputString); + status = bsonAppendDate(queryDocument, keyName, + valueMilliSecs); + } + break; + case NUMERICARRAY_OID: + { + ArrayType *array; + Oid elmtype; + int16 elmlen; + bool elmbyval; + char elmalign; + int num_elems; + Datum *elem_values; + bool *elem_nulls; + int i; + BSON childDocument; + + array = DatumGetArrayTypeP(value); + elmtype = ARR_ELEMTYPE(array); + get_typlenbyvalalign(elmtype, &elmlen, &elmbyval, &elmalign); + + deconstruct_array(array, elmtype, elmlen, elmbyval, elmalign, + &elem_values, &elem_nulls, &num_elems); + + bsonAppendStartArray(queryDocument, keyName, &childDocument); + for (i = 0; i < num_elems; i++) + { + Datum valueDatum; + float8 valueFloat; + + if (elem_nulls[i]) + continue; + + valueDatum = DirectFunctionCall1(numeric_float8, + elem_values[i]); + valueFloat = DatumGetFloat8(valueDatum); + status = bsonAppendDouble(&childDocument, keyName, + valueFloat); + } + bsonAppendFinishArray(queryDocument, &childDocument); + pfree(elem_values); + pfree(elem_nulls); + } + break; + case TEXTARRAYOID: + { + ArrayType *array; + Oid elmtype; + int16 elmlen; + bool elmbyval; + char 
elmalign; + int num_elems; + Datum *elem_values; + bool *elem_nulls; + int i; + BSON childDocument; + + array = DatumGetArrayTypeP(value); + elmtype = ARR_ELEMTYPE(array); + get_typlenbyvalalign(elmtype, &elmlen, &elmbyval, &elmalign); + + deconstruct_array(array, elmtype, elmlen, elmbyval, elmalign, + &elem_values, &elem_nulls, &num_elems); + + bsonAppendStartArray(queryDocument, keyName, &childDocument); + for (i = 0; i < num_elems; i++) + { + char *valueString; + Oid outputFunctionId; + bool typeVarLength; + + if (elem_nulls[i]) + continue; + + getTypeOutputInfo(TEXTOID, &outputFunctionId, + &typeVarLength); + valueString = OidOutputFunctionCall(outputFunctionId, + elem_values[i]); + status = bsonAppendUTF8(&childDocument, keyName, + valueString); + } + bsonAppendFinishArray(queryDocument, &childDocument); + pfree(elem_values); + pfree(elem_nulls); + } + break; + case JSONOID: + { + char *outputString; + Oid outputFunctionId; + struct json_object *o; + bool typeVarLength; + + getTypeOutputInfo(id, &outputFunctionId, &typeVarLength); + outputString = OidOutputFunctionCall(outputFunctionId, value); + o = jsonTokenerPrase(outputString); + + if (o == NULL) + { + elog(WARNING, "cannot parse the document"); + status = 0; + break; + } + + status = jsonToBsonAppendElement(queryDocument, keyName, o); + } + break; + default: - bson_append_oid(queryDocument, keyName, &bsonObjectId); + /* + * We currently error out on other data types. Some types such as + * byte arrays are easy to add, but they need testing. + * + * Other types such as money or inet, do not have equivalents in + * MongoDB. + */ + ereport(ERROR, + (errcode(ERRCODE_FDW_INVALID_DATA_TYPE), + errmsg("cannot convert constant value to BSON value"), + errhint("Constant value data type: %u", id))); break; + } + + return status; +} + +/* + * mongo_get_column_list + * Process scan_var_list to find all columns needed for query execution + * and return them. + * + * Also, form two separate lists: + * 1. column_name_list: column names of needed columns. + * 2. is_inner_column_list: column is of inner relation or not. + */ +List * +mongo_get_column_list(PlannerInfo *root, RelOptInfo *foreignrel, + List *scan_var_list, List **column_name_list, + List **is_inner_column_list) +{ + List *columnList = NIL; + ListCell *lc; + RelOptInfo *scanrel; + MongoFdwRelationInfo *fpinfo = (MongoFdwRelationInfo *) foreignrel->fdw_private; + MongoFdwRelationInfo *ofpinfo = NULL; + + scanrel = IS_UPPER_REL(foreignrel) ? fpinfo->outerrel : foreignrel; + + if (IS_UPPER_REL(foreignrel) && IS_JOIN_REL(scanrel)) + ofpinfo = (MongoFdwRelationInfo *) fpinfo->outerrel->fdw_private; + + foreach(lc, scan_var_list) + { + Var *var = (Var *) lfirst(lc); + RangeTblEntry *rte = planner_rt_fetch(var->varno, root); + int is_innerrel = false; + + /* + * Add aggregation target also in the needed column list. This would + * be handled in the function column_mapping_hash. + */ + if (IsA(var, Aggref)) + { +#if PG_VERSION_NUM >= 160000 + columnList = mongo_append_unique_var(columnList, var); +#else + columnList = list_append_unique(columnList, var); +#endif + continue; } - case DATEOID: + + if (!IsA(var, Var)) + continue; + + /* Var belongs to foreign table? */ + if (!bms_is_member(var->varno, scanrel->relids)) + continue; + + /* Is whole-row reference requested? 
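Referring back to append_mongo_value above, a minimal sketch of how a few PostgreSQL datums land in a BSON document (the key names and values are hypothetical):

    BSON *doc = bsonCreate();

    append_mongo_value(doc, "age", Int32GetDatum(32), false, INT4OID);          /* 32-bit integer */
    append_mongo_value(doc, "name", CStringGetTextDatum("Ed"), false, TEXTOID); /* UTF-8 string */
    append_mongo_value(doc, "missing", (Datum) 0, true, INT4OID);               /* SQL NULL becomes BSON null */
    bsonDestroy(doc);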
*/ + if (var->varattno == 0) { - Datum valueDatum = DirectFunctionCall1(date_timestamp, constantValue); - Timestamp valueTimestamp = DatumGetTimestamp(valueDatum); - int64 valueMicroSecs = valueTimestamp + POSTGRES_TO_UNIX_EPOCH_USECS; - int64 valueMilliSecs = valueMicroSecs / 1000; + List *wr_var_list; + Bitmapset *attrs_used; +#if PG_VERSION_NUM >= 160000 + ListCell *cell; +#endif - bson_append_date(queryDocument, keyName, valueMilliSecs); - break; + Assert(OidIsValid(rte->relid)); + + /* + * Get list of Var nodes for all undropped attributes of the base + * relation. + */ + attrs_used = bms_make_singleton(0 - + FirstLowInvalidHeapAttributeNumber); + + wr_var_list = prepare_var_list_for_baserel(rte->relid, var->varno, + attrs_used); + +#if PG_VERSION_NUM >= 160000 + foreach(cell, wr_var_list) + { + Var *tlvar = (Var *) lfirst(cell); + + columnList = mongo_append_unique_var(columnList, tlvar); + } +#else + columnList = list_concat_unique(columnList, wr_var_list); +#endif + bms_free(attrs_used); } - case TIMESTAMPOID: - case TIMESTAMPTZOID: + else +#if PG_VERSION_NUM >= 160000 + columnList = mongo_append_unique_var(columnList, var); +#else + columnList = list_append_unique(columnList, var); +#endif + + if (IS_JOIN_REL(foreignrel) || + (IS_UPPER_REL(foreignrel) && IS_JOIN_REL(scanrel))) { - Timestamp valueTimestamp = DatumGetTimestamp(constantValue); - int64 valueMicroSecs = valueTimestamp + POSTGRES_TO_UNIX_EPOCH_USECS; - int64 valueMilliSecs = valueMicroSecs / 1000; + char *columnName; + + columnName = get_attname(rte->relid, var->varattno, false); + *column_name_list = lappend(*column_name_list, + makeString(columnName)); + if (IS_UPPER_REL(foreignrel) && IS_JOIN_REL(scanrel) && + bms_is_member(var->varno, ofpinfo->innerrel->relids)) + is_innerrel = true; + else if (IS_JOIN_REL(foreignrel) && + bms_is_member(var->varno, fpinfo->innerrel->relids)) + is_innerrel = true; + + *is_inner_column_list = lappend_int(*is_inner_column_list, + is_innerrel); + } + } - bson_append_date(queryDocument, keyName, valueMilliSecs); + return columnList; +} + +/* + * Check if expression is safe to execute remotely, and return true if so. + * + * In addition, *outer_cxt is updated with collation information. + * + * We must check that the expression contains only node types we can deparse, + * that all types/operators are safe to send (which we approximate + * as being built-in), and that all collations used in the expression derive + * from Vars of the foreign table. + * + * For WHERE as well as JOIN clauses, in the case of operator expression, we do + * support arithmetic (=, <, >, <=, >=, <>, +, -, *, /, %, ^, @ and |/) + * operators. Also, both operands of the binary operator can be a column. If + * the expression is a tree, we do recurse into it. Supports Boolean + * expression as well. + */ +static bool +foreign_expr_walker(Node *node, foreign_glob_cxt *glob_cxt, + foreign_loc_cxt *outer_cxt) +{ + foreign_loc_cxt inner_cxt; + Oid collation; + FDWCollateState state; + + /* Need do nothing for empty subexpressions */ + if (node == NULL) + return true; + + /* Set up inner_cxt for possible recursion to child nodes */ + inner_cxt.collation = InvalidOid; + inner_cxt.state = FDW_COLLATE_NONE; + + switch (nodeTag(node)) + { + case T_Var: + { + Var *var = (Var *) node; + + /* + * If the Var is from the foreign table, we consider its + * collation (if any) safe to use. If it is from another + * table, don't push it down. 
+ */ + if (bms_is_member(var->varno, glob_cxt->relids) && + var->varlevelsup == 0) + { + /* Var belongs to foreign table */ + collation = var->varcollid; + state = OidIsValid(collation) ? FDW_COLLATE_SAFE : FDW_COLLATE_NONE; + } + else + { + /* + * Var belongs to some other table. Unlike postgres_fdw, + * can't be treated like Param because MongoDB doesn't + * have corresponding syntax to represent it in the query + * pipeline. + */ + return false; + } + } + break; + case T_Const: + { + Const *c = (Const *) node; + + /* + * We don't push down operators where the constant is an + * array, since conditional operators for arrays in MongoDB + * aren't properly defined. + */ + if (OidIsValid(get_element_type(c->consttype))) + return false; + + /* + * If the constant has nondefault collation, either it's of a + * non-builtin type, or it reflects folding of a CollateExpr. + * It's unsafe to send to the remote unless it's used in a + * non-collation-sensitive context. + */ + collation = c->constcollid; + if (collation == InvalidOid || + collation == DEFAULT_COLLATION_OID) + state = FDW_COLLATE_NONE; + else + state = FDW_COLLATE_UNSAFE; + } + break; + case T_Param: + { + Param *p = (Param *) node; + + /* + * Bail out on planner internal params. We could perhaps pass + * them to the remote server as regular params, but we don't + * have the machinery to do that at the moment. + */ + if (p->paramkind != PARAM_EXTERN) + return false; + + /* + * Collation rule is same as for Consts and non-foreign Vars. + */ + collation = p->paramcollid; + if (collation == InvalidOid || + collation == DEFAULT_COLLATION_OID) + state = FDW_COLLATE_NONE; + else + state = FDW_COLLATE_UNSAFE; + } + break; + case T_OpExpr: + { + OpExpr *oe = (OpExpr *) node; + char *oname = get_opname(oe->opno); + + /* Don't support operator expression in grouping targets */ + if (IS_UPPER_REL(glob_cxt->foreignrel) && + !glob_cxt->is_having_cond) + return false; + + /* + * We support =, <, >, <=, >=, <>, +, -, *, /, %, ^, |/, and @ + * operators for joinclause of join relation. + */ + if (!(strncmp(oname, EQUALITY_OPERATOR_NAME, NAMEDATALEN) == 0) && + (mongo_operator_name(oname) == NULL)) + return false; + + /* + * Recurse to input subexpressions. + * + * We support same operators as joinclause for WHERE + * conditions of simple as well as join relation. + */ + if (!foreign_expr_walker((Node *) oe->args, glob_cxt, + &inner_cxt)) + return false; + + /* + * If operator's input collation is not derived from a foreign + * Var, it can't be sent to remote. + */ + if (oe->inputcollid == InvalidOid) + /* OK, inputs are all noncollatable */ ; + else if (inner_cxt.state != FDW_COLLATE_SAFE || + oe->inputcollid != inner_cxt.collation) + return false; + + /* Result-collation handling */ + collation = oe->opcollid; + if (collation == InvalidOid) + state = FDW_COLLATE_NONE; + else if (inner_cxt.state == FDW_COLLATE_SAFE && + collation == inner_cxt.collation) + state = FDW_COLLATE_SAFE; + else if (collation == DEFAULT_COLLATION_OID) + state = FDW_COLLATE_NONE; + else + state = FDW_COLLATE_UNSAFE; + } + break; + case T_RelabelType: + { + RelabelType *r = (RelabelType *) node; + + /* + * Recurse to input subexpression. + */ + if (!foreign_expr_walker((Node *) r->arg, + glob_cxt, &inner_cxt)) + return false; + + /* + * RelabelType must not introduce a collation not derived from + * an input foreign Var. 
+ */ + collation = r->resultcollid; + if (collation == InvalidOid) + state = FDW_COLLATE_NONE; + else if (inner_cxt.state == FDW_COLLATE_SAFE && + collation == inner_cxt.collation) + state = FDW_COLLATE_SAFE; + else if (collation == DEFAULT_COLLATION_OID) + state = FDW_COLLATE_NONE; + else + state = FDW_COLLATE_UNSAFE; + } + break; + case T_List: + { + List *l = (List *) node; + ListCell *lc; + + /* + * Recurse to component subexpressions. + * + * For simple relation, if the comparison is between two + * columns of the same table, then we don't push down because + * building corresponding MongoDB query is not possible with + * the cirrent MongoC driver. + */ + foreach(lc, l) + { + if ((!foreign_expr_walker((Node *) lfirst(lc), + glob_cxt, &inner_cxt))) + return false; + } + + /* + * When processing a list, collation state just bubbles up + * from the list elements. + */ + collation = inner_cxt.collation; + state = inner_cxt.state; + } + break; + case T_BoolExpr: + { + BoolExpr *b = (BoolExpr *) node; + + /* + * Recurse to input sub-expressions. + */ + if (!foreign_expr_walker((Node *) b->args, + glob_cxt, &inner_cxt)) + return false; + + /* Output is always boolean and so noncollatable. */ + collation = InvalidOid; + state = FDW_COLLATE_NONE; + } + break; + case T_Aggref: + { + Aggref *agg = (Aggref *) node; + ListCell *lc; + const char *func_name = get_func_name(agg->aggfnoid); + + /* Not safe to pushdown when not in a grouping context */ + if (!IS_UPPER_REL(glob_cxt->foreignrel)) + return false; + + /* Only non-split aggregates are pushable. */ + if (agg->aggsplit != AGGSPLIT_SIMPLE) + return false; + + /* + * Aggregates with the order, FILTER, VARIADIC, and DISTINCT + * are not supported on MongoDB. + */ + if (agg->aggorder || agg->aggfilter || agg->aggvariadic || + agg->aggdistinct) + return false; + + if (!(strcmp(func_name, "min") == 0 || + strcmp(func_name, "max") == 0 || + strcmp(func_name, "sum") == 0 || + strcmp(func_name, "avg") == 0 || + strcmp(func_name, "count") == 0)) + return false; + + /* + * Don't push down when the count is on the column. This + * restriction is due to the unavailability of syntax in the + * MongoDB to provide a count of the particular column. + */ + if (!strcmp(func_name, "count") && agg->args) + return false; + + /* + * Recurse to input args. aggdirectargs, aggorder, and + * aggdistinct are all present in args, so no need to check + * their shippability explicitly. + */ + foreach(lc, agg->args) + { + Node *n = (Node *) lfirst(lc); + + /* If TargetEntry, extract the expression from it. */ + if (IsA(n, TargetEntry)) + { + TargetEntry *tle = (TargetEntry *) n; + + n = (Node *) tle->expr; + } + + if (!IsA(n, Var) || !foreign_expr_walker(n, glob_cxt, + &inner_cxt)) + return false; + } + + /* + * If aggregate's input collation is not derived from a + * foreign Var, it can't be sent to remote. + */ + if (agg->inputcollid == InvalidOid) + /* OK, inputs are all noncollatable */ ; + else if (inner_cxt.state != FDW_COLLATE_SAFE || + agg->inputcollid != inner_cxt.collation) + return false; + + /* + * Detect whether the node is introducing a collation not + * derived from a foreign Var. (If so, we just mark it unsafe + * for now rather than immediately returning false, since th e + * parent node might not care.) 
+ */ + collation = agg->aggcollid; + if (collation == InvalidOid) + state = FDW_COLLATE_NONE; + else if (inner_cxt.state == FDW_COLLATE_SAFE && + collation == inner_cxt.collation) + state = FDW_COLLATE_SAFE; + else if (collation == DEFAULT_COLLATION_OID) + state = FDW_COLLATE_NONE; + else + state = FDW_COLLATE_UNSAFE; + } break; - } default: - { + /* - * We currently error out on other data types. Some types such as - * byte arrays are easy to add, but they need testing. Other types - * such as money or inet, do not have equivalents in MongoDB. + * If it's anything else, assume it's unsafe. This list can be + * expanded later, but don't forget to add deparse support. */ - ereport(ERROR, (errcode(ERRCODE_FDW_INVALID_DATA_TYPE), - errmsg("cannot convert constant value to BSON value"), - errhint("Constant value data type: %u", constantTypeId))); - break; + return false; + } + + /* + * Now, merge my collation information into my parent's state. + */ + if (state > outer_cxt->state) + { + /* Override previous parent state */ + outer_cxt->collation = collation; + outer_cxt->state = state; + } + else if (state == outer_cxt->state) + { + /* Merge, or detect error if there's a collation conflict */ + switch (state) + { + case FDW_COLLATE_NONE: + /* Nothing + nothing is still nothing */ + break; + case FDW_COLLATE_SAFE: + if (collation != outer_cxt->collation) + { + /* + * Non-default collation always beats default. + */ + if (outer_cxt->collation == DEFAULT_COLLATION_OID) + { + /* Override previous parent state */ + outer_cxt->collation = collation; + } + else if (collation != DEFAULT_COLLATION_OID) + { + /* + * Conflict; show state as indeterminate. We don't + * want to "return false" right away, since parent + * node might not care about collation. + */ + outer_cxt->state = FDW_COLLATE_UNSAFE; + } + } + break; + case FDW_COLLATE_UNSAFE: + /* We're still conflicted ... */ + break; + } + } + + /* It looks OK */ + return true; +} + +/* + * mongo_is_foreign_expr + * Returns true if given expr is safe to evaluate on the foreign server. + */ +bool +mongo_is_foreign_expr(PlannerInfo *root, RelOptInfo *baserel, Expr *expression, + bool is_having_cond) +{ + foreign_glob_cxt glob_cxt; + foreign_loc_cxt loc_cxt; + MongoFdwRelationInfo *fpinfo = (MongoFdwRelationInfo *) baserel->fdw_private; + + /* + * Check that the expression consists of nodes that are safe to execute + * remotely. + */ + glob_cxt.root = root; + glob_cxt.foreignrel = baserel; + + /* + * For an upper relation, use relids from its underneath scan relation, + * because the upperrel's own relids currently aren't set to anything + * meaningful by the core code. For other relations, use their own + * relids. + */ + if (IS_UPPER_REL(baserel)) + glob_cxt.relids = fpinfo->outerrel->relids; + else + glob_cxt.relids = baserel->relids; + + glob_cxt.is_having_cond = is_having_cond; + loc_cxt.collation = InvalidOid; + loc_cxt.state = FDW_COLLATE_NONE; + if (!foreign_expr_walker((Node *) expression, &glob_cxt, &loc_cxt)) + return false; + + /* + * If the expression has a valid collation that does not arise from a + * foreign var, the expression can not be sent over. + */ + if (loc_cxt.state == FDW_COLLATE_UNSAFE) + return false; + + /* OK to evaluate on the remote server */ + return true; +} + +/* + * prepare_var_list_for_baserel + * Build list of nodes corresponding to the attributes requested for given + * base relation. + * + * The list contains Var nodes corresponding to the attributes specified in + * attrs_used. 
If whole-row reference is required, add Var nodes corresponding + * to all the attributes in the relation. + */ +static List * +prepare_var_list_for_baserel(Oid relid, Index varno, Bitmapset *attrs_used) +{ + int attno; + List *tlist = NIL; + Node *node; + bool wholerow_requested = false; + Relation relation; + TupleDesc tupdesc; + + Assert(OidIsValid(relid)); + + /* Planner must have taken a lock, so request no lock here */ +#if PG_VERSION_NUM < 130000 + relation = heap_open(relid, NoLock); +#else + relation = table_open(relid, NoLock); +#endif + + tupdesc = RelationGetDescr(relation); + + /* Is whole-row reference requested? */ + wholerow_requested = bms_is_member(0 - FirstLowInvalidHeapAttributeNumber, + attrs_used); + + /* Handle user defined attributes first. */ + for (attno = 1; attno <= tupdesc->natts; attno++) + { + Form_pg_attribute attr = TupleDescAttr(tupdesc, attno - 1); + + /* Ignore dropped attributes. */ + if (attr->attisdropped) + continue; + + /* For a required attribute create a Var node */ + if (wholerow_requested || + bms_is_member(attno - FirstLowInvalidHeapAttributeNumber, + attrs_used)) + { + node = (Node *) makeVar(varno, attno, attr->atttypid, + attr->atttypmod, attr->attcollation, 0); + tlist = lappend(tlist, node); + } } + +#if PG_VERSION_NUM < 130000 + heap_close(relation, NoLock); +#else + table_close(relation, NoLock); +#endif + + return tlist; } +/* + * column_info_hash + * Creates a hash table that maps varno and varattno to the column names, + * and also stores whether the column is part of outer relation or not. + * + * This table helps us to form the pipeline quickly. + */ +static HTAB * +column_info_hash(List *colname_list, List *colnum_list, List *rti_list, + List *isouter_list) +{ + HTAB *columnInfoHash; + ColInfoHashKey key; + HASHCTL hashInfo; + ListCell *l1; + ListCell *l2; + ListCell *l3; + ListCell *l4; + + memset(&hashInfo, 0, sizeof(hashInfo)); + hashInfo.keysize = sizeof(ColInfoHashKey); + hashInfo.entrysize = sizeof(ColInfoHashEntry); + hashInfo.hcxt = CurrentMemoryContext; + + columnInfoHash = hash_create("Column Information Hash", MaxHashTableSize, + &hashInfo, + (HASH_ELEM | HASH_BLOBS | HASH_CONTEXT)); + Assert(columnInfoHash != NULL); + + /* + * There's no forfour() in version 11 and below, so need to traverse one + * list the hard way. + */ + l4 = list_head(isouter_list); + forthree(l1, colname_list, l2, colnum_list, l3, rti_list) + { + ColInfoHashEntry *columnInfo; + char *columnName = strVal(lfirst(l1)); + int columnNum = lfirst_int(l2); + int varNo = lfirst_int(l3); + bool isOuter = lfirst_int(l4); + + key.varNo = varNo; + key.varAttno = columnNum; + + columnInfo = (ColInfoHashEntry *) hash_search(columnInfoHash, + (void *) &key, + HASH_ENTER, + NULL); + Assert(columnInfo != NULL); + + columnInfo->colName = columnName; + columnInfo->isOuter = isOuter; + +#if PG_VERSION_NUM >= 130000 + l4 = lnext(isouter_list, l4); +#else + l4 = lnext(l4); +#endif + } + + return columnInfoHash; +} /* - * ColumnList takes in the planner's information about this foreign table. The - * function then finds all columns needed for query execution, including those - * used in projections, joins, and filter clauses, de-duplicates these columns, - * and returns them in a new list. + * mongo_prepare_pipeline + * Form query pipeline syntax equivalent to postgresql. 
+ * + * From the example given on mongo_query_document, the following part of + * MongoDB query formed by this function: + * + * "pipeline": [ + * { + * "$match": + * { + * "$expr": + * { + * "$and": [ + * { "$eq": [ "$$v_age", "$old" ] } + * { "$ne": [ "$$v_age", null ] }, + * { "$ne": [ "$old", null ] }, + * ] + * } + * } + * } + * ] */ -List * -ColumnList(RelOptInfo *baserel) +static void +mongo_prepare_pipeline(List *clause, BSON *inner_pipeline, + pipeline_cxt *context) { - List *columnList = NIL; - List *neededColumnList = NIL; - AttrNumber columnIndex = 1; - AttrNumber columnCount = baserel->max_attr; - List *targetColumnList = baserel->reltargetlist; - List *restrictInfoList = baserel->baserestrictinfo; - ListCell *restrictInfoCell = NULL; - - /* first add the columns used in joins and projections */ - neededColumnList = list_copy(targetColumnList); - - /* then walk over all restriction clauses, and pull up any used columns */ - foreach(restrictInfoCell, restrictInfoList) + BSON *and_query_doc = bsonCreate(); + BSON match_object; + BSON match_stage; + BSON expr; + BSON and_op; + + if (context->isJoinClause) { - RestrictInfo *restrictInfo = (RestrictInfo *) lfirst(restrictInfoCell); - Node *restrictClause = (Node *) restrictInfo->clause; - List *clauseColumnList = NIL; + int inner_pipeline_index = 0; - /* recursively pull up any columns used in the restriction clause */ - clauseColumnList = pull_var_clause(restrictClause, - PVC_RECURSE_AGGREGATES, - PVC_RECURSE_PLACEHOLDERS); + bsonAppendStartObject(inner_pipeline, + psprintf("%d", inner_pipeline_index++), + &match_object); + bsonAppendStartObject(&match_object, "$match", &match_stage); + } + else + bsonAppendStartObject(inner_pipeline, "$match", &match_stage); - neededColumnList = list_union(neededColumnList, clauseColumnList); + bsonAppendStartObject(&match_stage, "$expr", &expr); + + bsonAppendStartArray(and_query_doc, "$and", &and_op); + + context->arrayIndex = 0; + context->opExprCount = 0; + + /* Append JOIN/WHERE/HAVING clause expression */ + mongo_append_clauses_to_pipeline(clause, &and_op, context); + + /* Append $and array to $expr */ + bson_append_array(&expr, "$and", (int) strlen("$and"), &and_op); + + bsonAppendFinishArray(and_query_doc, &and_op); + bsonAppendFinishObject(&match_stage, &expr); + if (context->isJoinClause) + { + bsonAppendFinishObject(&match_object, &match_stage); + bsonAppendFinishObject(inner_pipeline, &match_object); } + else + bsonAppendFinishObject(inner_pipeline, &match_stage); +} - /* walk over all column definitions, and de-duplicate column list */ - for (columnIndex = 1; columnIndex <= columnCount; columnIndex++) +/* + * mongo_append_clauses_to_pipeline + * Append all JOIN/WHERE/HAVING clauses to mongoDB's $and array. 
+ */ +static void +mongo_append_clauses_to_pipeline(List *clause, BSON *child_doc, + pipeline_cxt *context) +{ + ListCell *lc; + + /* loop through all clauses */ + foreach(lc, clause) { - ListCell *neededColumnCell = NULL; - Var *column = NULL; + Expr *expr = (Expr *) lfirst(lc); - /* look for this column in the needed column list */ - foreach(neededColumnCell, neededColumnList) + /* Extract clause from RestrictInfo */ + if (IsA(expr, RestrictInfo)) { - Var *neededColumn = (Var *) lfirst(neededColumnCell); - if (neededColumn->varattno == columnIndex) + RestrictInfo *ri = (RestrictInfo *) expr; + + expr = ri->clause; + } + + mongo_append_expr(expr, child_doc, context); + context->arrayIndex++; + } +} + +/* + * mongo_is_foreign_param + * Returns true if given expr is something we'd have to send the + * value of to the foreign server. + */ +bool +mongo_is_foreign_param(PlannerInfo *root, RelOptInfo *baserel, Expr *expr) +{ + if (expr == NULL) + return false; + + switch (nodeTag(expr)) + { + case T_Var: { - column = neededColumn; + /* It would have to be sent unless it's a foreign Var. */ + Var *var = (Var *) expr; + Relids relids; + MongoFdwRelationInfo *fpinfo = (MongoFdwRelationInfo *) (baserel->fdw_private); + + if (IS_UPPER_REL(baserel)) + relids = fpinfo->outerrel->relids; + else + relids = baserel->relids; + + if (bms_is_member(var->varno, relids) && var->varlevelsup == 0) + return false; /* foreign Var, so not a param. */ + else + return true; /* it'd have to be a param. */ break; } - } + case T_Param: + /* Params always have to be sent to the foreign server. */ + return true; + default: + break; + } + return false; +} - if (column != NULL) - { - columnList = lappend(columnList, column); - } +#if PG_VERSION_NUM >= 160000 +/* + * mongo_append_unique_var + * Append var to var list, but only if it isn't already in the list. + * + * Whether a var is already a member of list is determined using varno and + * varattno. + */ +static List * +mongo_append_unique_var(List *varlist, Var *var) +{ + ListCell *lc; + + foreach(lc, varlist) + { + Var *tlvar = (Var *) lfirst(lc); + + if (IsA(tlvar, Var) && + tlvar->varno == var->varno && + tlvar->varattno == var->varattno) + return varlist; } - return columnList; + return lappend(varlist, var); +} +#endif + +/* + * get_varname_for_outer_col + * Form variable name from outer relation column name. + */ +char * +get_varname_for_outer_col(const char *str) +{ + static char result[66]; + + /* + * Add prefix "v_" to column name to form variable name. Need to prefix + * with any lowercase letter because variable names must begin with only a + * lowercase ASCII letter or a non-ASCII character. + */ + sprintf(result, "v_%s", str); + + /* + * Also, replace occurences of dot (".") in the variable name with + * underscore ("_"), because special characters other than "_" are NOT + * allowed. + */ + mongo_replace_char(result + 2, '.', '_'); + + return result; +} + +/* + * mongo_replace_char + * Find and replace given character from the string. + */ +void +mongo_replace_char(char *str, char find, char replace) +{ + int i; + int len = strlen(str); + + for (i = 0; i < len; i++) + if (str[i] == find) + str[i] = replace; } diff --git a/mongo_query.h b/mongo_query.h index 0a75e98..0658f67 100644 --- a/mongo_query.h +++ b/mongo_query.h @@ -1,18 +1,150 @@ /*------------------------------------------------------------------------- * * mongo_query.h + * FDW query handling for mongo_fdw * - * Type and function declarations for constructing queries to send to MongoDB. 
+ * Portions Copyright (c) 2012-2014, PostgreSQL Global Development Group + * Portions Copyright (c) 2004-2024, EnterpriseDB Corporation. + * Portions Copyright (c) 2012–2014 Citus Data, Inc. * - * Copyright (c) 2012-2014 Citus Data, Inc. + * IDENTIFICATION + * mongo_query.h * *------------------------------------------------------------------------- */ - #ifndef MONGO_QUERY_H #define MONGO_QUERY_H +#define NUMERICARRAY_OID 1231 + +/* + * Context for aggregation pipeline formation. + */ +typedef struct pipeline_cxt +{ + struct HTAB *colInfoHash; /* columns information hash */ + unsigned int arrayIndex; /* Index of the various arrays in the + * pipeline, starting from zero */ + bool isBoolExpr; /* is join expression boolean? */ + bool isJoinClause; /* is join clause? This is to add null check + * only in case of join clause */ + uint32 opExprCount; /* count death of the expression */ + ForeignScanState *scanStateNode; /* To evaluate param expression */ +} pipeline_cxt; + +/* + * ColInfoEntry represents a hash table entry that maps a unique column's varno + * and varattno to the column name and related information. We construct these + * hash table entries to speed up the BSON query document formation. + */ +typedef struct ColInfoHashKey +{ + int varNo; + int varAttno; +} ColInfoHashKey; +typedef struct ColInfoEntry +{ + ColInfoHashKey key; /* Hash key */ + char *colName; + bool isOuter; +} ColInfoHashEntry; + +/* + * Indexes of FDW-private information stored in fdw_private lists. + * + * These items are indexed with the enum mongoFdwScanPrivateIndex, so an item + * can be fetched with list_nth(). For example, to get the column list: + * col_list = strVal(list_nth(fdw_private, mongoFdwPrivateColumnList)); + */ +enum mongoFdwScanPrivateIndex +{ + /* + * Column list to form column mapping hash i.e. to get only needed columns + * from all fetched columns from remote. + */ + mongoFdwPrivateColumnList, + + /* Expressions to execute remotely */ + mongoFdwPrivateRemoteExprList, + + /* Relation Type (BASE/JOIN/UPPER/UPPER_JOIN) */ + mongoFdwPrivateRelType, + + /* + * List of column name, attribute number, range table index, and whether + * this column is of outer relation or not. + * + * The columns which are part of the join clauses are listed. + */ + mongoFdwPrivateJoinClauseColNameList, + mongoFdwPrivareJoinClauseColNumList, + mongoFdwPrivateJoinClauseRtiList, + mongoFdwPrivateJoinClauseIsOuterList, + + /* ORDER BY clause information */ + mongoFdwPrivatePathKeyList, + mongoFdwPrivateIsAscSortList, + + /* LIMIT/OFFSET clause information */ + mongoFdwPrivateHasLimitClause, + mongoFdwPrivateLimitCountList, + mongoFdwPrivateLimitOffsetList, + + /* Upper relation information */ + + /* Upper relation grouping operation name list */ + mongoFdwPrivateAggType, + + /* List of column names involved in grouping operation list */ + mongoFdwPrivateAggColList, + + /* GROUP BY clause expression */ + mongoFdwPrivateGroupByColList, + + /* Having expression */ + mongoFdwPrivateHavingExpr, + + /* Is the grouping expression part of HAVING expression or not? */ + mongoFdwPrivateIsHavingList, + + /* + * String describing join i.e. names of relations being joined and types + * of join, added when the scan is join + */ + mongoFdwPrivateRelations, + + /* + * List of column names and whether those are part of inner or outer + * relation stored to form Column Mapping Hash. These are needed column + * means those are part of target and restriction columns. 
+ */ + mongoFdwPrivateColNameList, + mongoFdwPrivateColIsInnerList, + + /* Inner and Outer relation names */ + mongoFdwPrivateJoinInnerOuterRelName, + + /* List of join clauses to form a pipeline */ + mongoFdwPrivateJoinClauseList, + + /* Join-type */ + mongoFdwPrivateJoinType +}; +/* Function to be used in mongo_fdw.c */ +extern bool append_mongo_value(BSON *queryDocument, const char *keyName, + Datum value, bool isnull, Oid id); +/* Functions to be used in deparse.c */ +extern char *mongo_operator_name(const char *operatorName); +extern void append_constant_value(BSON *queryDocument, const char *keyName, + Const *constant); +extern void mongo_append_expr(Expr *node, BSON *child_doc, + pipeline_cxt *context); +extern void append_param_value(BSON *queryDocument, const char *keyName, + Param *paramNode, + ForeignScanState *scanStateNode); +extern char *get_varname_for_outer_col(const char *str); +extern void mongo_replace_char(char* str, char find, char replace); -#endif /* MONGO_QUERY_H */ +#endif /* MONGO_QUERY_H */ diff --git a/mongo_wrapper.c b/mongo_wrapper.c new file mode 100644 index 0000000..69ad65e --- /dev/null +++ b/mongo_wrapper.c @@ -0,0 +1,768 @@ +/*------------------------------------------------------------------------- + * + * mongo_wrapper.c + * Wrapper functions for remote MongoDB servers + * + * Portions Copyright (c) 2012-2014, PostgreSQL Global Development Group + * Portions Copyright (c) 2004-2024, EnterpriseDB Corporation. + * Portions Copyright (c) 2012–2014 Citus Data, Inc. + * + * IDENTIFICATION + * mongo_wrapper.c + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include +#include "mongo_wrapper.h" + +#define ITER_TYPE(i) ((bson_type_t) * ((i)->raw + (i)->type)) + +/* + * mongoConnect + * Connect to MongoDB server using Host/ip and Port number. + */ +MONGO_CONN * +mongoConnect(MongoFdwOptions *opt) +{ + MONGO_CONN *client; + char *uri; + + if (opt->svr_username && opt->svr_password) + { + if (opt->authenticationDatabase) + { + if (opt->replicaSet) + { + if (opt->readPreference) + uri = bson_strdup_printf("mongodb://%s:%s@%s:%hu/%s?readPreference=%s&ssl=%s&authSource=%s&replicaSet=%s", + opt->svr_username, + opt->svr_password, + opt->svr_address, opt->svr_port, + opt->svr_database, + opt->readPreference, + opt->ssl ? "true" : "false", + opt->authenticationDatabase, + opt->replicaSet); + else + uri = bson_strdup_printf("mongodb://%s:%s@%s:%hu/%s?ssl=%s&authSource=%s&replicaSet=%s", + opt->svr_username, + opt->svr_password, + opt->svr_address, opt->svr_port, + opt->svr_database, + opt->ssl ? "true" : "false", + opt->authenticationDatabase, + opt->replicaSet); + } + else if (opt->readPreference) + uri = bson_strdup_printf("mongodb://%s:%s@%s:%hu/%s?readPreference=%s&ssl=%s&authSource=%s", + opt->svr_username, opt->svr_password, + opt->svr_address, opt->svr_port, + opt->svr_database, + opt->readPreference, + opt->ssl ? "true" : "false", + opt->authenticationDatabase); + else + uri = bson_strdup_printf("mongodb://%s:%s@%s:%hu/%s?ssl=%s&authSource=%s", + opt->svr_username, opt->svr_password, + opt->svr_address, opt->svr_port, + opt->svr_database, + opt->ssl ? "true" : "false", + opt->authenticationDatabase); + } + else if (opt->replicaSet) + { + if (opt->readPreference) + uri = bson_strdup_printf("mongodb://%s:%s@%s:%hu/%s?readPreference=%s&ssl=%s&replicaSet=%s", + opt->svr_username, opt->svr_password, + opt->svr_address, opt->svr_port, + opt->svr_database, + opt->readPreference, + opt->ssl ? 
"true" : "false", + opt->replicaSet); + else + uri = bson_strdup_printf("mongodb://%s:%s@%s:%hu/%s?ssl=%s&replicaSet=%s", + opt->svr_username, opt->svr_password, + opt->svr_address, opt->svr_port, + opt->svr_database, + opt->ssl ? "true" : "false", + opt->replicaSet); + } + else if (opt->readPreference) + uri = bson_strdup_printf("mongodb://%s:%s@%s:%hu/%s?readPreference=%s&ssl=%s", + opt->svr_username, opt->svr_password, + opt->svr_address, opt->svr_port, + opt->svr_database, opt->readPreference, + opt->ssl ? "true" : "false"); + else + uri = bson_strdup_printf("mongodb://%s:%s@%s:%hu/%s?ssl=%s", + opt->svr_username, opt->svr_password, + opt->svr_address, + opt->svr_port, opt->svr_database, + opt->ssl ? "true" : "false"); + } + else if (opt->replicaSet) + { + if (opt->readPreference) + uri = bson_strdup_printf("mongodb://%s:%hu/%s?readPreference=%s&ssl=%s&replicaSet=%s", + opt->svr_address, opt->svr_port, + opt->svr_database, opt->readPreference, + opt->ssl ? "true" : "false", + opt->replicaSet); + else + uri = bson_strdup_printf("mongodb://%s:%hu/%s?ssl=%s&replicaSet=%s", + opt->svr_address, opt->svr_port, + opt->svr_database, + opt->ssl ? "true" : "false", + opt->replicaSet); + } + else if (opt->readPreference) + uri = bson_strdup_printf("mongodb://%s:%hu/%s?readPreference=%s&ssl=%s", + opt->svr_address, opt->svr_port, + opt->svr_database, opt->readPreference, + opt->ssl ? "true" : "false"); + else + uri = bson_strdup_printf("mongodb://%s:%hu/%s?ssl=%s", + opt->svr_address, opt->svr_port, + opt->svr_database, + opt->ssl ? "true" : "false"); + + + client = mongoc_client_new(uri); + + if (opt->ssl) + { + mongoc_ssl_opt_t *ssl_opts = (mongoc_ssl_opt_t *) malloc(sizeof(mongoc_ssl_opt_t)); + + ssl_opts->pem_file = opt->pem_file; + ssl_opts->pem_pwd = opt->pem_pwd; + ssl_opts->ca_file = opt->ca_file; + ssl_opts->ca_dir = opt->ca_dir; + ssl_opts->crl_file = opt->crl_file; + ssl_opts->weak_cert_validation = opt->weak_cert_validation; + mongoc_client_set_ssl_opts(client, ssl_opts); + free(ssl_opts); + } + + bson_free(uri); + + if (client == NULL) + ereport(ERROR, + (errmsg("could not connect to %s:%d", opt->svr_address, + opt->svr_port), + errhint("Mongo driver connection error."))); + + return client; +} + +/* + * mongoDisconnect + * Disconnect from MongoDB server. + */ +void +mongoDisconnect(MONGO_CONN *conn) +{ + if (conn) + mongoc_client_destroy(conn); +} + +/* + * mongoInsert + * Insert a document 'b' into MongoDB. + */ +bool +mongoInsert(MONGO_CONN *conn, char *database, char *collection, BSON *b) +{ + mongoc_collection_t *c; + bson_error_t error; + bool r = false; + + c = mongoc_client_get_collection(conn, database, collection); + + r = mongoc_collection_insert(c, MONGOC_INSERT_NONE, b, NULL, &error); + mongoc_collection_destroy(c); + if (!r) + ereport(ERROR, + (errmsg("failed to insert row"), + errhint("Mongo error: \"%s\"", error.message))); + + return true; +} + +/* + * mongoUpdate + * Update a document 'b' into MongoDB. + */ +bool +mongoUpdate(MONGO_CONN *conn, char *database, char *collection, BSON *b, + BSON *op) +{ + mongoc_collection_t *c; + bson_error_t error; + bool r = false; + + c = mongoc_client_get_collection(conn, database, collection); + + r = mongoc_collection_update(c, MONGOC_UPDATE_NONE, b, op, NULL, &error); + mongoc_collection_destroy(c); + if (!r) + ereport(ERROR, + (errmsg("failed to update row"), + errhint("Mongo error: \"%s\"", error.message))); + + return true; +} + +/* + * mongoDelete + * Delete MongoDB's document. 
+ */ +bool +mongoDelete(MONGO_CONN *conn, char *database, char *collection, BSON *b) +{ + mongoc_collection_t *c; + bson_error_t error; + bool r = false; + + c = mongoc_client_get_collection(conn, database, collection); + + r = mongoc_collection_remove(c, MONGOC_DELETE_SINGLE_REMOVE, b, NULL, + &error); + mongoc_collection_destroy(c); + if (!r) + ereport(ERROR, + (errmsg("failed to delete row"), + errhint("Mongo error: \"%s\"", error.message))); + + return true; +} + +/* + * mongoCursorCreate + * Performs a query against the configured MongoDB server and return + * cursor which can be destroyed by calling mongoc_cursor_current. + */ +MONGO_CURSOR * +mongoCursorCreate(MONGO_CONN *conn, char *database, char *collection, BSON *q) +{ + mongoc_collection_t *c; + MONGO_CURSOR *cur; + bson_error_t error; + + c = mongoc_client_get_collection(conn, database, collection); + cur = mongoc_collection_aggregate(c, MONGOC_QUERY_NONE, q, NULL, NULL); + mongoc_cursor_error(cur, &error); + if (!cur) + ereport(ERROR, + (errmsg("failed to create cursor"), + errhint("Mongo error: \"%s\"", error.message))); + + mongoc_collection_destroy(c); + + return cur; +} + +/* + * mongoCursorDestroy + * Destroy cursor created by calling mongoCursorCreate function. + */ +void +mongoCursorDestroy(MONGO_CURSOR *c) +{ + mongoc_cursor_destroy(c); +} + + +/* + * mongoCursorBson + * Get the current document from cursor. + */ +const BSON * +mongoCursorBson(MONGO_CURSOR *c) +{ + return mongoc_cursor_current(c); +} + +/* + * mongoCursorNext + * Get the next document from the cursor. + */ +bool +mongoCursorNext(MONGO_CURSOR *c, BSON *b) +{ + return mongoc_cursor_next(c, (const BSON **) &b); +} + +/* + * bsonCreate + * Allocates a new bson_t structure, and also initialize the bson object. + * + * After that point objects can be appended to that bson object and can be + * iterated. A newly allocated bson_t that should be freed with bson_destroy(). + */ +BSON * +bsonCreate(void) +{ + BSON *doc; + + doc = bson_new(); + bson_init(doc); + + return doc; +} + +/* + * bsonDestroy + * Destroy Bson object created by bsonCreate function. + */ +void +bsonDestroy(BSON *b) +{ + bson_destroy(b); +} + +/* + * bsonIterInit + * Initialize the bson Iterator. 
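+ *
+ * Returns false if the iterator cannot be initialized for the supplied
+ * document.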
+ */ +bool +bsonIterInit(BSON_ITERATOR *it, BSON *b) +{ + return bson_iter_init(it, b); +} + +bool +bsonIterSubObject(BSON_ITERATOR *it, BSON *b) +{ + const uint8_t *buffer; + uint32_t len; + + bson_iter_document(it, &len, &buffer); + bson_init_static(b, buffer, len); + + return true; +} + +int32_t +bsonIterInt32(BSON_ITERATOR *it) +{ + BSON_ASSERT(it); + switch ((int) ITER_TYPE(it)) + { + case BSON_TYPE_BOOL: + return (int32) bson_iter_bool(it); + case BSON_TYPE_DOUBLE: + { + double val = bson_iter_double(it); + + if (val < PG_INT32_MIN || val > PG_INT32_MAX) + ereport(ERROR, + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), + errmsg("value \"%f\" is out of range for type integer", + val))); + + return (int32) val; + } + case BSON_TYPE_INT64: + { + int64 val = bson_iter_int64(it); + + if (val < PG_INT32_MIN || val > PG_INT32_MAX) + ereport(ERROR, + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), + errmsg("value \"%ld\" is out of range for type integer", + val))); + + return (int32) val; + } + case BSON_TYPE_INT32: + return bson_iter_int32(it); + default: + return 0; + } +} + +int64_t +bsonIterInt64(BSON_ITERATOR *it) +{ + return bson_iter_as_int64(it); +} + +double +bsonIterDouble(BSON_ITERATOR *it) +{ + return bson_iter_as_double(it); +} + +bool +bsonIterBool(BSON_ITERATOR *it) +{ + return bson_iter_as_bool(it); +} + +const char * +bsonIterString(BSON_ITERATOR *it) +{ + uint32_t len = 0; + + return bson_iter_utf8(it, &len); +} + +const char * +bsonIterBinData(BSON_ITERATOR *it, uint32_t *len) +{ + const uint8_t *binary = NULL; + bson_subtype_t subtype = BSON_SUBTYPE_BINARY; + + bson_iter_binary(it, &subtype, len, &binary); + + return (char *) binary; +} + +const bson_oid_t * +bsonIterOid(BSON_ITERATOR *it) +{ + return bson_iter_oid(it); +} + +time_t +bsonIterDate(BSON_ITERATOR *it) +{ + return bson_iter_date_time(it); +} + +const char * +bsonIterKey(BSON_ITERATOR *it) +{ + return bson_iter_key(it); +} + +int +bsonIterType(BSON_ITERATOR *it) +{ + return bson_iter_type(it); +} + +int +bsonIterNext(BSON_ITERATOR *it) +{ + return bson_iter_next(it); +} + +bool +bsonIterSubIter(BSON_ITERATOR *it, BSON_ITERATOR *sub) +{ + return bson_iter_recurse(it, sub); +} + +void +bsonOidFromString(bson_oid_t *o, char *str) +{ + bson_oid_init_from_string(o, str); +} + +bool +bsonAppendOid(BSON *b, const char *key, bson_oid_t *v) +{ + return bson_append_oid(b, key, strlen(key), v); +} + +bool +bsonAppendBool(BSON *b, const char *key, bool v) +{ + return bson_append_bool(b, key, -1, v); +} + +bool +bsonAppendStartObject(BSON *b, char *key, BSON *r) +{ + return bson_append_document_begin(b, key, strlen(key), r); +} + +bool +bsonAppendFinishObject(BSON *b, BSON *r) +{ + return bson_append_document_end(b, r); +} + +bool +bsonAppendNull(BSON *b, const char *key) +{ + return bson_append_null(b, key, strlen(key)); +} + +bool +bsonAppendInt32(BSON *b, const char *key, int v) +{ + return bson_append_int32(b, key, strlen(key), v); +} + +bool +bsonAppendInt64(BSON *b, const char *key, int64_t v) +{ + return bson_append_int64(b, key, strlen(key), v); +} + +bool +bsonAppendDouble(BSON *b, const char *key, double v) +{ + return bson_append_double(b, key, strlen(key), v); +} + +bool +bsonAppendUTF8(BSON *b, const char *key, char *v) +{ + + return bson_append_utf8(b, key, strlen(key), v, strlen(v)); +} + +bool +bsonAppendBinary(BSON *b, const char *key, char *v, size_t len) +{ + return bson_append_binary(b, key, (int) strlen(key), BSON_SUBTYPE_BINARY, + (const uint8_t *) v, len); +} + +bool +bsonAppendDate(BSON *b, const char 
*key, time_t v) +{ + return bson_append_date_time(b, key, strlen(key), v); +} + +bool +bsonAppendBson(BSON *b, char *key, BSON *c) +{ + return bson_append_document(b, key, strlen(key), c); +} + +bool +bsonAppendStartArray(BSON *b, const char *key, BSON *c) +{ + return bson_append_array_begin(b, key, -1, c); +} + +bool +bsonAppendFinishArray(BSON *b, BSON *c) +{ + return bson_append_array_end(b, c); +} + +bool +jsonToBsonAppendElement(BSON *bb, const char *k, struct json_object *v) +{ + bool status = true; + + if (!v) + { + bsonAppendNull(bb, k); + return status; + } + + switch (json_object_get_type(v)) + { + case json_type_int: + bsonAppendInt32(bb, k, json_object_get_int(v)); + break; + case json_type_boolean: + bsonAppendBool(bb, k, json_object_get_boolean(v)); + break; + case json_type_double: + bsonAppendDouble(bb, k, json_object_get_double(v)); + break; + case json_type_string: + bsonAppendUTF8(bb, k, (char *) json_object_get_string(v)); + break; + case json_type_object: + { + BSON t; + struct json_object *joj; + + joj = json_object_object_get(v, "$oid"); + + if (joj != NULL) + { + bson_oid_t bsonObjectId; + + memset(bsonObjectId.bytes, 0, sizeof(bsonObjectId.bytes)); + bsonOidFromString(&bsonObjectId, (char *) json_object_get_string(joj)); + status = bsonAppendOid(bb, k, &bsonObjectId); + break; + } + joj = json_object_object_get(v, "$date"); + if (joj != NULL) + { + status = bsonAppendDate(bb, k, json_object_get_int64(joj)); + break; + } + bsonAppendStartObject(bb, (char *) k, &t); + + { + json_object_object_foreach(v, kk, vv) + jsonToBsonAppendElement(&t, kk, vv); + } + bsonAppendFinishObject(bb, &t); + } + break; + case json_type_array: + { + int i; + char buf[10]; + BSON t; + + bsonAppendStartArray(bb, k, &t); + for (i = 0; i < json_object_array_length(v); i++) + { + sprintf(buf, "%d", i); + jsonToBsonAppendElement(&t, buf, json_object_array_get_idx(v, i)); + } + bsonAppendFinishObject(bb, &t); + } + break; + default: + ereport(ERROR, + (errcode(ERRCODE_FDW_INVALID_DATA_TYPE), + errmsg("can't handle type for : %s", + json_object_to_json_string(v)))); + } + + return status; +} + +json_object * +jsonTokenerPrase(char *s) +{ + return json_tokener_parse(s); +} + +/* + * mongoAggregateCount + * Count the number of documents. 
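+ *
+ * Builds a "count" database command, attaches the optional "query"
+ * selector, runs it via mongoc_client_command(), and reads the "n" field
+ * from the reply document.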
+ */ +double +mongoAggregateCount(MONGO_CONN *conn, const char *database, + const char *collection, const BSON *b) +{ + BSON *command; + BSON *reply; + double count = 0; + mongoc_cursor_t *cursor; + + command = bsonCreate(); + reply = bsonCreate(); + bsonAppendUTF8(command, "count", (char *) collection); + if (b) /* Not empty */ + bsonAppendBson(command, "query", (BSON *) b); + + cursor = mongoc_client_command(conn, database, MONGOC_QUERY_SLAVE_OK, 0, 1, + 0, command, NULL, NULL); + if (cursor) + { + BSON *doc; + bool ret; + + ret = mongoc_cursor_next(cursor, (const BSON **) &doc); + if (ret) + { + bson_iter_t it; + + bson_copy_to(doc, reply); + if (bson_iter_init_find(&it, reply, "n")) + count = bsonIterDouble(&it); + } + mongoc_cursor_destroy(cursor); + } + bsonDestroy(reply); + bsonDestroy(command); + + return count; +} + +void +bsonOidToString(const bson_oid_t *o, char str[25]) +{ + bson_oid_to_string(o, str); +} + +const char * +bsonIterCode(BSON_ITERATOR *i) +{ + return bson_iter_code(i, NULL); +} + +const char * +bsonIterRegex(BSON_ITERATOR *i) +{ + return bson_iter_regex(i, NULL); +} + +const bson_value_t * +bsonIterValue(BSON_ITERATOR *i) +{ + return bson_iter_value(i); +} + +void +bsonToJsonStringValue(StringInfo output, BSON_ITERATOR *iter, bool isArray) +{ + if (isArray) + dumpJsonArray(output, iter); + else + dumpJsonObject(output, iter); +} + +/* + * dumpJsonObject + * Converts BSON document to a JSON string. + * + * isArray signifies if bsonData is contents of array or object. + * [Some of] special BSON datatypes are converted to JSON using + * "Strict MongoDB Extended JSON" [1]. + * + * [1] http://docs.mongodb.org/manual/reference/mongodb-extended-json/ + */ +void +dumpJsonObject(StringInfo output, BSON_ITERATOR *iter) +{ + uint32_t len; + const uint8_t *data; + BSON bson; + + bson_iter_document(iter, &len, &data); + if (bson_init_static(&bson, data, len)) + { + char *json = bson_as_json(&bson, NULL); + + if (json != NULL) + { + appendStringInfoString(output, json); + bson_free(json); + } + } +} + +void +dumpJsonArray(StringInfo output, BSON_ITERATOR *iter) +{ + uint32_t len; + const uint8_t *data; + BSON bson; + + bson_iter_array(iter, &len, &data); + if (bson_init_static(&bson, data, len)) + { + char *json; + + if ((json = bson_array_as_json(&bson, NULL))) + { + appendStringInfoString(output, json); + bson_free(json); + } + } +} + +char * +bsonAsJson(const BSON *bsonDocument) +{ + return bson_as_json(bsonDocument, NULL); +} diff --git a/mongo_wrapper.h b/mongo_wrapper.h new file mode 100644 index 0000000..30efe7f --- /dev/null +++ b/mongo_wrapper.h @@ -0,0 +1,94 @@ +/*------------------------------------------------------------------------- + * + * mongo_wrapper.h + * Wrapper functions for remote MongoDB servers + * + * Portions Copyright (c) 2012-2014, PostgreSQL Global Development Group + * Portions Copyright (c) 2004-2024, EnterpriseDB Corporation. + * Portions Copyright (c) 2012–2014 Citus Data, Inc. 
+ * + * IDENTIFICATION + * mongo_wrapper.h + * + *------------------------------------------------------------------------- + */ +#ifndef MONGO_WRAPPER_H +#define MONGO_WRAPPER_H + +#include "mongo_fdw.h" + +#include "mongoc.h" +#define json_object json_object_tmp + +#include + +MONGO_CONN *mongoConnect(MongoFdwOptions *opt); + +void mongoDisconnect(MONGO_CONN *conn); +bool mongoInsert(MONGO_CONN *conn, char *database, char *collection, + BSON *b); +bool mongoUpdate(MONGO_CONN *conn, char *database, char *collection, + BSON *b, BSON *op); +bool mongoDelete(MONGO_CONN *conn, char *database, char *collection, + BSON *b); +MONGO_CURSOR *mongoCursorCreate(MONGO_CONN *conn, char *database, + char *collection, BSON *q); +const BSON *mongoCursorBson(MONGO_CURSOR *c); +bool mongoCursorNext(MONGO_CURSOR *c, BSON *b); +void mongoCursorDestroy(MONGO_CURSOR *c); +double mongoAggregateCount(MONGO_CONN *conn, const char *database, + const char *collection, const BSON *b); + +BSON *bsonCreate(void); +void bsonDestroy(BSON *b); + +bool bsonIterInit(BSON_ITERATOR *it, BSON *b); +bool bsonIterSubObject(BSON_ITERATOR *it, BSON *b); +int32_t bsonIterInt32(BSON_ITERATOR *it); +int64_t bsonIterInt64(BSON_ITERATOR *it); +double bsonIterDouble(BSON_ITERATOR *it); +bool bsonIterBool(BSON_ITERATOR *it); +const char *bsonIterString(BSON_ITERATOR *it); +const char *bsonIterBinData(BSON_ITERATOR *it, uint32_t *len); +const bson_oid_t *bsonIterOid(BSON_ITERATOR *it); +time_t bsonIterDate(BSON_ITERATOR *it); +int bsonIterType(BSON_ITERATOR *it); +int bsonIterNext(BSON_ITERATOR *it); +bool bsonIterSubIter(BSON_ITERATOR *it, BSON_ITERATOR *sub); +void bsonOidFromString(bson_oid_t *o, char *str); +void bsonOidToString(const bson_oid_t *o, char str[25]); +const char *bsonIterCode(BSON_ITERATOR *i); +const char *bsonIterRegex(BSON_ITERATOR *i); +const char *bsonIterKey(BSON_ITERATOR *i); +const bson_value_t *bsonIterValue(BSON_ITERATOR *i); + +void bsonIteratorFromBuffer(BSON_ITERATOR *i, const char *buffer); + +BSON *bsonCreate(); +bool bsonAppendOid(BSON *b, const char *key, bson_oid_t *v); +bool bsonAppendBool(BSON *b, const char *key, bool v); +bool bsonAppendNull(BSON *b, const char *key); +bool bsonAppendInt32(BSON *b, const char *key, int v); +bool bsonAppendInt64(BSON *b, const char *key, int64_t v); +bool bsonAppendDouble(BSON *b, const char *key, double v); +bool bsonAppendUTF8(BSON *b, const char *key, char *v); +bool bsonAppendBinary(BSON *b, const char *key, char *v, size_t len); +bool bsonAppendDate(BSON *b, const char *key, time_t v); +bool bsonAppendStartArray(BSON *b, const char *key, BSON *c); +bool bsonAppendFinishArray(BSON *b, BSON *c); +bool bsonAppendStartObject(BSON *b, char *key, BSON *r); +bool bsonAppendFinishObject(BSON *b, BSON *r); +bool bsonAppendBson(BSON *b, char *key, BSON *c); +bool jsonToBsonAppendElement(BSON *bb, const char *k, + struct json_object *v); +json_object *jsonTokenerPrase(char *s); + +char *bsonAsJson(const BSON *bsonDocument); + +void bsonToJsonStringValue(StringInfo output, BSON_ITERATOR *iter, + bool isArray); +void dumpJsonObject(StringInfo output, BSON_ITERATOR *iter); +void dumpJsonArray(StringInfo output, BSON_ITERATOR *iter); + + +#endif /* MONGO_QUERY_H */ diff --git a/mongodb_init.sh b/mongodb_init.sh new file mode 100755 index 0000000..005707c --- /dev/null +++ b/mongodb_init.sh @@ -0,0 +1,21 @@ +#!/bin/sh +export MONGO_HOST="localhost" +export MONGO_PORT="27017" +export MONGO_USER_NAME="edb" +export MONGO_PWD="edb" + +# Below commands must be run in MongoDB to 
create mongo_fdw_regress and mongo_fdw_regress1 databases +# used in regression tests with edb user and edb password. + +# use mongo_fdw_regress +# db.createUser({user:"edb",pwd:"edb",roles:[{role:"dbOwner", db:"mongo_fdw_regress"},{role:"readWrite", db:"mongo_fdw_regress"}]}) +# use mongo_fdw_regress1 +# db.createUser({user:"edb",pwd:"edb",roles:[{role:"dbOwner", db:"mongo_fdw_regress1"},{role:"readWrite", db:"mongo_fdw_regress1"}]}) +# use mongo_fdw_regress2 +# db.createUser({user:"edb",pwd:"edb",roles:[{role:"dbOwner", db:"mongo_fdw_regress2"},{role:"readWrite", db:"mongo_fdw_regress2"}]}) + +mongoimport --host=$MONGO_HOST --port=$MONGO_PORT -u $MONGO_USER_NAME -p $MONGO_PWD --db mongo_fdw_regress --collection countries --jsonArray --drop --maintainInsertionOrder --quiet < data/mongo_fixture.json +mongoimport --host=$MONGO_HOST --port=$MONGO_PORT -u $MONGO_USER_NAME -p $MONGO_PWD --db mongo_fdw_regress --collection warehouse --jsonArray --drop --maintainInsertionOrder --quiet < data/mongo_warehouse.json +mongoimport --host=$MONGO_HOST --port=$MONGO_PORT -u $MONGO_USER_NAME -p $MONGO_PWD --db mongo_fdw_regress --collection testlog --jsonArray --drop --maintainInsertionOrder --quiet < data/mongo_testlog.json +mongoimport --host=$MONGO_HOST --port=$MONGO_PORT -u $MONGO_USER_NAME -p $MONGO_PWD --db mongo_fdw_regress --collection testdevice --jsonArray --drop --maintainInsertionOrder --quiet < data/mongo_testdevice.json +mongo --host=$MONGO_HOST --port=$MONGO_PORT -u $MONGO_USER_NAME -p $MONGO_PWD --authenticationDatabase "mongo_fdw_regress" < data/mongo_test_data.js > /dev/null diff --git a/option.c b/option.c new file mode 100644 index 0000000..0dba5d4 --- /dev/null +++ b/option.c @@ -0,0 +1,287 @@ +/*------------------------------------------------------------------------- + * + * option.c + * FDW option handling for mongo_fdw + * + * Portions Copyright (c) 2012-2014, PostgreSQL Global Development Group + * Portions Copyright (c) 2004-2024, EnterpriseDB Corporation. + * Portions Copyright (c) 2012–2014 Citus Data, Inc. + * + * IDENTIFICATION + * option.c + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "miscadmin.h" +#include "mongo_wrapper.h" + +/* + * Validate the generic options given to a FOREIGN DATA WRAPPER, SERVER, + * USER MAPPING or FOREIGN TABLE that uses postgres_fdw. + * + * Raise an ERROR if the option or its value is considered invalid. + */ +extern Datum mongo_fdw_validator(PG_FUNCTION_ARGS); + +PG_FUNCTION_INFO_V1(mongo_fdw_validator); + +/* + * mongo_fdw_validator + * Validates options given to one of the following commands: + * foreign data wrapper, server, user mapping, or foreign table. + * + * This function errors out if the given option name or its value is considered + * invalid. 
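+ *
+ * The port option must additionally parse as an unsigned short, and the
+ * options validated with defGetBoolean() below accept only boolean values.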
+ */ +Datum +mongo_fdw_validator(PG_FUNCTION_ARGS) +{ + Datum optionArray = PG_GETARG_DATUM(0); + Oid optionContextId = PG_GETARG_OID(1); + List *optionList = untransformRelOptions(optionArray); + ListCell *optionCell; + + foreach(optionCell, optionList) + { + DefElem *optionDef = (DefElem *) lfirst(optionCell); + char *optionName = optionDef->defname; + bool optionValid = false; + int32 optionIndex; + + for (optionIndex = 0; optionIndex < ValidOptionCount; optionIndex++) + { + const MongoValidOption *validOption; + + validOption = &(ValidOptionArray[optionIndex]); + + if ((optionContextId == validOption->optionContextId) && + (strncmp(optionName, validOption->optionName, NAMEDATALEN) == 0)) + { + optionValid = true; + break; + } + } + + /* If invalid option, display an informative error message */ + if (!optionValid) + { + StringInfo optionNamesString; + + optionNamesString = mongo_option_names_string(optionContextId); + ereport(ERROR, + (errcode(ERRCODE_FDW_INVALID_OPTION_NAME), + errmsg("invalid option \"%s\"", optionName), + errhint("Valid options in this context are: %s.", + optionNamesString->data))); + } + + /* If port option is given, error out if its value isn't an integer */ + if (strncmp(optionName, OPTION_NAME_PORT, NAMEDATALEN) == 0) + { + char *intString = defGetString(optionDef); + long port; + char *endp; + + errno = 0; + port = strtol(intString, &endp, 10); + + if (intString == endp) + ereport(ERROR, + (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), + errmsg("invalid input syntax for type %s: \"%s\"", + "unsigned short", intString))); + + if (errno == ERANGE || port < 0 || port > USHRT_MAX) + ereport(ERROR, + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), + errmsg("port value \"%s\" is out of range for type %s", + intString, "unsigned short"))); + + if (*endp && *endp != ' ') + ereport(ERROR, + (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), + errmsg("invalid input syntax for type %s: \"%s\"", + "unsigned short", intString))); + } + else if (strcmp(optionName, OPTION_NAME_USE_REMOTE_ESTIMATE) == 0 + || strcmp(optionName, OPTION_NAME_WEAK_CERT) == 0 || + strcmp(optionName, OPTION_NAME_ENABLE_JOIN_PUSHDOWN) == 0 || + strcmp(optionName, OPTION_NAME_SSL) == 0 || + strcmp(optionName, OPTION_NAME_ENABLE_AGGREGATE_PUSHDOWN) == 0 || + strcmp(optionName, OPTION_NAME_ENABLE_ORDER_BY_PUSHDOWN) == 0 + ) + { + /* These accept only boolean values */ + (void) defGetBoolean(optionDef); + } + } + + PG_RETURN_VOID(); +} + +/* + * mongo_option_names_string + * Finds all options that are valid for the current context, and + * concatenates these option names in a comma separated string. + */ +StringInfo +mongo_option_names_string(Oid currentContextId) +{ + StringInfo optionNamesString = makeStringInfo(); + bool firstOptionPrinted = false; + int32 optionIndex; + + for (optionIndex = 0; optionIndex < ValidOptionCount; optionIndex++) + { + const MongoValidOption *validOption; + + validOption = &(ValidOptionArray[optionIndex]); + + /* If option belongs to current context, append option name */ + if (currentContextId == validOption->optionContextId) + { + if (firstOptionPrinted) + appendStringInfoString(optionNamesString, ", "); + + appendStringInfoString(optionNamesString, validOption->optionName); + firstOptionPrinted = true; + } + } + + return optionNamesString; +} + +/* + * mongo_get_options + * Returns the option values to be used when connecting to and querying + * MongoDB. 
+ * + * To resolve these values, the function checks the foreign table's options, + * and if not present, falls back to default values. + */ +MongoFdwOptions * +mongo_get_options(Oid foreignTableId) +{ + ForeignTable *foreignTable; + ForeignServer *foreignServer; + UserMapping *mapping; + List *optionList = NIL; + MongoFdwOptions *options; + ListCell *lc; + + foreignTable = GetForeignTable(foreignTableId); + foreignServer = GetForeignServer(foreignTable->serverid); + mapping = GetUserMapping(GetUserId(), foreignTable->serverid); + + optionList = mongo_list_concat(optionList, foreignServer->options); + optionList = mongo_list_concat(optionList, foreignTable->options); + optionList = mongo_list_concat(optionList, mapping->options); + + options = (MongoFdwOptions *) palloc0(sizeof(MongoFdwOptions)); + + options->use_remote_estimate = false; + options->ssl = false; + options->weak_cert_validation = false; + options->enable_join_pushdown = true; + options->enable_aggregate_pushdown = true; + options->enable_order_by_pushdown = true; + + /* Loop through the options */ + foreach(lc, optionList) + { + DefElem *def = (DefElem *) lfirst(lc); + + if (strcmp(def->defname, OPTION_NAME_READ_PREFERENCE) == 0) + options->readPreference = defGetString(def); + + else if (strcmp(def->defname, OPTION_NAME_AUTHENTICATION_DATABASE) == 0) + options->authenticationDatabase = defGetString(def); + + else if (strcmp(def->defname, OPTION_NAME_REPLICA_SET) == 0) + options->replicaSet = defGetString(def); + + else if (strcmp(def->defname, OPTION_NAME_SSL) == 0) + options->ssl = defGetBoolean(def); + + else if (strcmp(def->defname, OPTION_NAME_PEM_FILE) == 0) + options->pem_file = defGetString(def); + + else if (strcmp(def->defname, OPTION_NAME_PEM_PWD) == 0) + options->pem_pwd = defGetString(def); + + else if (strcmp(def->defname, OPTION_NAME_CA_FILE) == 0) + options->ca_file = defGetString(def); + + else if (strcmp(def->defname, OPTION_NAME_CA_DIR) == 0) + options->ca_dir = defGetString(def); + + else if (strcmp(def->defname, OPTION_NAME_CRL_FILE) == 0) + options->crl_file = defGetString(def); + + else if (strcmp(def->defname, OPTION_NAME_WEAK_CERT) == 0) + options->weak_cert_validation = defGetBoolean(def); + + else if (strcmp(def->defname, OPTION_NAME_ENABLE_JOIN_PUSHDOWN) == 0) + options->enable_join_pushdown = defGetBoolean(def); + + else if (strcmp(def->defname, + OPTION_NAME_ENABLE_AGGREGATE_PUSHDOWN) == 0) + options->enable_aggregate_pushdown = defGetBoolean(def); + + else if (strcmp(def->defname, + OPTION_NAME_ENABLE_ORDER_BY_PUSHDOWN) == 0) + options->enable_order_by_pushdown = defGetBoolean(def); + + else /* This is for continuation */ + + if (strcmp(def->defname, OPTION_NAME_ADDRESS) == 0) + options->svr_address = pstrdup(defGetString(def)); + + else if (strcmp(def->defname, OPTION_NAME_PORT) == 0) + options->svr_port = atoi(defGetString(def)); + + else if (strcmp(def->defname, OPTION_NAME_DATABASE) == 0) + options->svr_database = pstrdup(defGetString(def)); + + else if (strcmp(def->defname, OPTION_NAME_COLLECTION) == 0) + options->collectionName = pstrdup(defGetString(def)); + + else if (strcmp(def->defname, OPTION_NAME_USERNAME) == 0) + options->svr_username = defGetString(def); + + else if (strcmp(def->defname, OPTION_NAME_PASSWORD) == 0) + options->svr_password = defGetString(def); + + else if (strcmp(def->defname, OPTION_NAME_USE_REMOTE_ESTIMATE) == 0) + options->use_remote_estimate = defGetBoolean(def); + } + + /* Default values, if required */ + if (!options->svr_address) + options->svr_address = 
pstrdup(DEFAULT_IP_ADDRESS); + + if (!options->svr_port) + options->svr_port = DEFAULT_PORT_NUMBER; + + if (!options->svr_database) + options->svr_database = pstrdup(DEFAULT_DATABASE_NAME); + + if (!options->collectionName) + options->collectionName = get_rel_name(foreignTableId); + + return options; +} + +void +mongo_free_options(MongoFdwOptions *options) +{ + if (options) + { + pfree(options->svr_address); + pfree(options->svr_database); + pfree(options->collectionName); + pfree(options); + } +} diff --git a/sql/aggregate_pushdown.sql b/sql/aggregate_pushdown.sql new file mode 100644 index 0000000..372cb49 --- /dev/null +++ b/sql/aggregate_pushdown.sql @@ -0,0 +1,536 @@ +\set MONGO_HOST `echo \'"$MONGO_HOST"\'` +\set MONGO_PORT `echo \'"$MONGO_PORT"\'` +\set MONGO_USER_NAME `echo \'"$MONGO_USER_NAME"\'` +\set MONGO_PASS `echo \'"$MONGO_PWD"\'` + +-- Before running this file user must create database mongo_fdw_regress on +-- MongoDB with all permission for MONGO_USER_NAME user with MONGO_PASS +-- password and ran mongodb_init.sh file to load collections. + +\c contrib_regression +CREATE EXTENSION IF NOT EXISTS mongo_fdw; +CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw + OPTIONS (address :MONGO_HOST, port :MONGO_PORT); +CREATE USER MAPPING FOR public SERVER mongo_server; + +-- Create foreign tables. +CREATE FOREIGN TABLE fdw137_t1 (_id NAME, c1 INTEGER, c2 TEXT, c3 CHAR(9), c4 INTEGER, c5 pg_catalog.Date, c6 DECIMAL, c7 INTEGER, c8 INTEGER) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl1'); +CREATE FOREIGN TABLE fdw137_t2 (_id NAME, c1 INTEGER, c2 TEXT, c3 TEXT) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl2'); + +INSERT INTO fdw137_t1 VALUES (0, 1500, 'EMP15', 'FINANCE', 1300, '2000-12-25', 950.0, 400, 60); +INSERT INTO fdw137_t1 VALUES (0, 1600, 'EMP16', 'ADMIN', 600); +INSERT INTO fdw137_t2 VALUES (0, 50, 'TESTING', 'NASHIK'); +INSERT INTO fdw137_t2 VALUES (0); + +-- Create local table. +CREATE TABLE fdw137_local AS + SELECT c1, c2, c3, c4, c5, c6, c7, c8 FROM fdw137_t1; + +-- Simple aggregates. ORDER BY push-down not possible because only column names allowed. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*), sum(c1), avg(c1), min(c4), max(c1), sum(c1) * (random() <= 1)::int AS sum2 FROM fdw137_t1 WHERE c4 > 600 GROUP BY c4 ORDER BY 1 ASC NULLS FIRST, 2 ASC NULLS FIRST; +SELECT count(*), sum(c1), avg(c1), min(c4), max(c1), sum(c1) * (random() <= 1)::int AS sum2 FROM fdw137_t1 WHERE c4 > 600 GROUP BY c4 ORDER BY 1 ASC NULLS FIRST, 2 ASC NULLS FIRST; + +-- GROUP BY clause HAVING expressions +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, sum(c1), count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY c1 ASC NULLS FIRST; +SELECT c1, sum(c1), count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY c1 ASC NULLS FIRST; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, min(c2) FROM fdw137_t1 WHERE c3 = 'ADMIN' GROUP BY c8 HAVING min(c8) = 20 ORDER BY c8 ASC NULLS FIRST; +SELECT c8, min(c2) FROM fdw137_t1 WHERE c3 = 'ADMIN' GROUP BY c8 HAVING min(c8) = 20 ORDER BY c8 ASC NULLS FIRST; + +-- Multi-column GROUP BY clause. Push-down. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; +-- With ORDER BY pushdown disabled. 
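+-- Disabling the mongo_fdw.enable_order_by_pushdown GUC should leave the
+-- aggregate push-down intact but force the sort to be performed locally.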
+SET mongo_fdw.enable_order_by_pushdown TO OFF; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; +SET mongo_fdw.enable_order_by_pushdown TO ON; + +-- Aggregation on expression. Don't push-down. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, sum(c1+2) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY c1 ASC NULLS FIRST; +SELECT c1, sum(c1+2) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY c1 ASC NULLS FIRST; + +-- Aggregate with unshippable GROUP BY clause are not pushed +EXPLAIN (VERBOSE, COSTS OFF) +SELECT avg(c4) FROM fdw137_t1 GROUP BY c4 * (random() <= 1)::int ORDER BY 1; +SELECT avg(c4) FROM fdw137_t1 GROUP BY c4 * (random() <= 1)::int ORDER BY 1; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, sum(c1) FROM fdw137_t1 GROUP BY c1 HAVING min(c1 * 3) > 500 ORDER BY c1; +SELECT c1, sum(c1) FROM fdw137_t1 GROUP BY c1 HAVING min(c1 * 3) > 500 ORDER BY c1; + +-- FDW-134: Test ORDER BY with COLLATE. Shouldn't push-down +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY c2 COLLATE "en_US" ASC NULLS FIRST; +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY c2 COLLATE "en_US" ASC NULLS FIRST; + +-- Using expressions in HAVING clause. Pushed down. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c3, count(*) FROM fdw137_t1 GROUP BY c3 HAVING abs(max(c8)) = abs(10) ORDER BY 1, 2; +SELECT c3, count(*) FROM fdw137_t1 GROUP BY c3 HAVING abs(max(c8)) = abs(10) ORDER BY 1, 2; + +-- Unshippable HAVING clause will be evaluated locally, and other qual in HAVING clause is pushed down +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*) FROM (SELECT c3, count(c1) FROM fdw137_t1 GROUP BY c3 HAVING (avg(c1) / avg(c1)) * random() <= 1 and min(c1) > 100) x; +SELECT count(*) FROM (SELECT c3, count(c1) FROM fdw137_t1 GROUP BY c3 HAVING (avg(c1) / avg(c1)) * random() <= 1 and min(c1) > 100) x; + +-- Aggregate over join query +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t1.c8), avg(t2.c1) FROM fdw137_t1 t1 INNER JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8%2 = 0 ORDER BY 1 DESC NULLS LAST; +SELECT sum(t1.c8), avg(t2.c1) FROM fdw137_t1 t1 INNER JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8%2 = 0 ORDER BY 1 DESC NULLS LAST; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.c1, count(*), t2.c4 FROM fdw137_t2 t1 INNER JOIN fdw137_t1 t2 ON (t1.c1 = t2.c8) GROUP BY t1.c1, t2.c4 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; +SELECT t1.c1, count(*), t2.c4 FROM fdw137_t2 t1 INNER JOIN fdw137_t1 t2 ON (t1.c1 = t2.c8) GROUP BY t1.c1, t2.c4 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 ORDER BY 2 ASC NULLS FIRST; +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 ORDER BY 2 ASC NULLS FIRST; +-- With ORDER BY pushdown disabled. 
+SET mongo_fdw.enable_order_by_pushdown TO OFF; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 ORDER BY 2 ASC NULLS FIRST; +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 ORDER BY 2 ASC NULLS FIRST; +SET mongo_fdw.enable_order_by_pushdown TO ON; + +-- Aggregate is not pushed down as aggregation contains random() +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1 * (random() <= 1)::int) AS sum, avg(c1) FROM fdw137_t1 ORDER BY 1; +SELECT sum(c1 * (random() <= 1)::int) AS sum, avg(c1) FROM fdw137_t1 ORDER BY 1; + +-- Not pushed down due to local conditions present in underneath input rel +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t1.c8) FROM fdw137_t1 t1 INNER JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE ((t1.c8 * t2.c1)/(t1.c8 * t2.c1)) * random() <= 1 ORDER BY 1; +SELECT sum(t1.c8) FROM fdw137_t1 t1 INNER JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE ((t1.c8 * t2.c1)/(t1.c8 * t2.c1)) * random() <= 1 ORDER BY 1; + +-- Aggregates in subquery are pushed down. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(x.a), sum(x.a) FROM (SELECT c8 a, sum(c1) b FROM fdw137_t1 GROUP BY c8 ORDER BY 1, 2) x; +SELECT count(x.a), sum(x.a) FROM (SELECT c8 a, sum(c1) b FROM fdw137_t1 GROUP BY c8 ORDER BY 1, 2) x; + +-- Aggregate is still pushed down by taking unshippable expression out +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c4 * (random() <= 1)::int AS sum1, sum(c1) AS sum2 FROM fdw137_t1 GROUP BY c4 ORDER BY 1, 2; +SELECT c4 * (random() <= 1)::int AS sum1, sum(c1) AS sum2 FROM fdw137_t1 GROUP BY c4 ORDER BY 1, 2; + +-- Testing ORDER BY, DISTINCT, FILTER and Ordered-sets within aggregates +-- ORDER BY within aggregates (same column used to order) are not pushed +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1 ORDER BY c1) FROM fdw137_t1 WHERE c1 < 500 GROUP BY c2 ORDER BY 1; +SELECT sum(c1 ORDER BY c1) FROM fdw137_t1 WHERE c1 < 500 GROUP BY c2 ORDER BY 1; + +-- ORDER BY within aggregate (different column used to order also using DESC) +-- are not pushed. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c8 ORDER BY c1 desc) FROM fdw137_t1 WHERE c1 > 1000 and c8 > 20; +SELECT sum(c8 ORDER BY c1 desc) FROM fdw137_t1 WHERE c1 > 1000 and c8 > 20; + +-- DISTINCT within aggregate. Don't push down. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(DISTINCT (c1)) FROM fdw137_t1 WHERE c4 = 600 and c1 < 500; +SELECT sum(DISTINCT (c1)) FROM fdw137_t1 WHERE c4 = 600 and c1 < 500; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(DISTINCT (t1.c1)) FROM fdw137_t1 t1 join fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 < 30 GROUP BY (t2.c1) ORDER BY 1; +SELECT sum(DISTINCT (t1.c1)) FROM fdw137_t1 t1 join fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 < 30 GROUP BY (t2.c1) ORDER BY 1; + +-- DISTINCT, ORDER BY and FILTER within aggregate, not pushed down. 
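+-- This combines the modifiers exercised individually above, so the
+-- aggregate is again expected to be evaluated locally.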
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1), sum(DISTINCT c1 ORDER BY c1) filter (WHERE c1%3 < 2), c4 FROM fdw137_t1 WHERE c4 = 600 GROUP BY c4; +SELECT sum(c1), sum(DISTINCT c1 ORDER BY c1) filter (WHERE c1%3 < 2), c4 FROM fdw137_t1 WHERE c4 = 600 GROUP BY c4; + +-- FILTER within aggregate, not pushed +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1) filter (WHERE c1 < 1000 and c4 > 500) FROM fdw137_t1 GROUP BY c4 ORDER BY 1 nulls last; +SELECT sum(c1) filter (WHERE c1 < 1000 and c4 > 500) FROM fdw137_t1 GROUP BY c4 ORDER BY 1 nulls last; + +-- Outer query is aggregation query +EXPLAIN (VERBOSE, COSTS OFF) +SELECT DISTINCT (SELECT count(*) filter (WHERE t2.c1 = 20 and t2.c1 < 30) FROM fdw137_t1 t1 WHERE t1.c1 = 500) FROM fdw137_t2 t2 ORDER BY 1; +SELECT DISTINCT (SELECT count(*) filter (WHERE t2.c1 = 20 and t2.c1 < 30) FROM fdw137_t1 t1 WHERE t1.c1 = 500) FROM fdw137_t2 t2 ORDER BY 1; + +-- Inner query is aggregation query +EXPLAIN (VERBOSE, COSTS OFF) +SELECT DISTINCT (SELECT count(t1.c1) filter (WHERE t2.c1 = 20 and t2.c1 < 30) FROM fdw137_t1 t1 WHERE t1.c1 > 600) FROM fdw137_t2 t2 ORDER BY 1; +SELECT DISTINCT (SELECT count(t1.c1) filter (WHERE t2.c1 = 20 and t2.c1 < 30) FROM fdw137_t1 t1 WHERE t1.c1 > 600) FROM fdw137_t2 t2 ORDER BY 1; + +-- Ordered-sets within aggregate, not pushed down. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, rank('10'::varchar) within group (ORDER BY c3), percentile_cont(c8/200::numeric) within group (ORDER BY c1) FROM fdw137_t1 GROUP BY c8 HAVING percentile_cont(c8/200::numeric) within group (ORDER BY c1) < 500 ORDER BY c8; +SELECT c8, rank('10'::varchar) within group (ORDER BY c3), percentile_cont(c8/200::numeric) within group (ORDER BY c1) FROM fdw137_t1 GROUP BY c8 HAVING percentile_cont(c8/200::numeric) within group (ORDER BY c1) < 500 ORDER BY c8; + +-- Subquery in FROM clause HAVING aggregate +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*), x.b FROM fdw137_t1, (SELECT c1 a, sum(c1) b FROM fdw137_t2 GROUP BY c1) x WHERE fdw137_t1.c8 = x.a GROUP BY x.b ORDER BY 1, 2; +SELECT count(*), x.b FROM fdw137_t1, (SELECT c1 a, sum(c1) b FROM fdw137_t2 GROUP BY c1) x WHERE fdw137_t1.c8 = x.a GROUP BY x.b ORDER BY 1, 2; + +-- Join with IS NULL check in HAVING +EXPLAIN (VERBOSE, COSTS OFF) +SELECT avg(t1.c1), sum(t2.c1) FROM fdw137_t1 t1 join fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t2.c1 HAVING avg(t1.c1) is null ORDER BY 1 nulls last, 2; +SELECT avg(t1.c1), sum(t2.c1) FROM fdw137_t1 t1 join fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t2.c1 HAVING avg(t1.c1) is null ORDER BY 1 nulls last, 2; + +-- ORDER BY expression is part of the target list but not pushed down to +-- foreign server. 
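+-- The sort key below is built from random(), a volatile function, so the
+-- expression cannot be evaluated on the remote side.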
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1) * (random() <= 1)::int AS sum FROM fdw137_t1 ORDER BY 1; +SELECT sum(c1) * (random() <= 1)::int AS sum FROM fdw137_t1 ORDER BY 1; + +-- LATERAL join, with parameterization +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, sum FROM fdw137_t1 t1, lateral (SELECT sum(t2.c1) sum FROM fdw137_t2 t2 GROUP BY t2.c1) qry WHERE t1.c8 * 2 = qry.sum ORDER BY 1; + +-- Check with placeHolderVars +EXPLAIN (VERBOSE, COSTS OFF) +SELECT q.b, count(fdw137_t1.c1), sum(q.a) FROM fdw137_t1 left join (SELECT min(13), avg(fdw137_t1.c1), sum(fdw137_t2.c1) FROM fdw137_t1 right join fdw137_t2 ON (fdw137_t1.c8 = fdw137_t2.c1) WHERE fdw137_t1.c8 = 20) q(a, b, c) ON (fdw137_t1.c8 = q.b) WHERE fdw137_t1.c1 between 100 and 500 GROUP BY q.b ORDER BY 1 nulls last, 2; +SELECT q.b, count(fdw137_t1.c1), sum(q.a) FROM fdw137_t1 left join (SELECT min(13), avg(fdw137_t1.c1), sum(fdw137_t2.c1) FROM fdw137_t1 right join fdw137_t2 ON (fdw137_t1.c8 = fdw137_t2.c1) WHERE fdw137_t1.c8 = 20) q(a, b, c) ON (fdw137_t1.c8 = q.b) WHERE fdw137_t1.c1 between 100 and 500 GROUP BY q.b ORDER BY 1 nulls last, 2; + +-- Not supported cases + +-- The COUNT of column +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(c8) FROM fdw137_t1 ; +SELECT count(c8) FROM fdw137_t1 ; + +-- Grouping sets +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, sum(c1) FROM fdw137_t1 WHERE c8 > 10 GROUP BY rollup(c8) ORDER BY 1 nulls last; +SELECT c8, sum(c1) FROM fdw137_t1 WHERE c8 > 10 GROUP BY rollup(c8) ORDER BY 1 nulls last; + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, sum(c1) FROM fdw137_t1 WHERE c8 > 3 GROUP BY cube(c8) ORDER BY 1 nulls last; +SELECT c8, sum(c1) FROM fdw137_t1 WHERE c8 > 3 GROUP BY cube(c8) ORDER BY 1 nulls last; + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, c4, sum(c1) FROM fdw137_t1 WHERE c8 > 20 GROUP BY grouping sets(c8, c4) ORDER BY 1 nulls last, 2 nulls last; +SELECT c8, c4, sum(c1) FROM fdw137_t1 WHERE c8 > 20 GROUP BY grouping sets(c8, c4) ORDER BY 1 nulls last, 2 nulls last; + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, sum(c1), grouping(c8) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1 nulls last; +SELECT c8, sum(c1), grouping(c8) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1 nulls last; + +-- DISTINCT itself is not pushed down, whereas underneath aggregate is pushed +EXPLAIN (VERBOSE, COSTS OFF) +SELECT DISTINCT sum(c1) s FROM fdw137_t1 WHERE c1 > 1000 GROUP BY c1 ORDER BY 1; +SELECT DISTINCT sum(c1) s FROM fdw137_t1 WHERE c1 > 1000 GROUP BY c1 ORDER BY 1; + +-- WindowAgg +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, sum(c8), count(c8) over (partition by c8%2) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; +SELECT c8, sum(c8), count(c8) over (partition by c8%2) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, array_agg(c8) over (partition by c8%2 ORDER BY c8 desc) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; +SELECT c8, array_agg(c8) over (partition by c8%2 ORDER BY c8 desc) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c8, array_agg(c8) over (partition by c8%2 ORDER BY c8 range between current row and unbounded following) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; +SELECT c8, array_agg(c8) over (partition by c8%2 ORDER BY c8 range between current row and unbounded following) FROM fdw137_t1 WHERE c8 > 10 GROUP BY c8 ORDER BY 1; + +-- User defined function for user defined aggregate, VARIADIC +CREATE FUNCTION least_accum(anyelement, variadic anyarray) +returns anyelement language sql AS + 
'SELECT least($1, min($2[i])) FROM generate_subscripts($2,2) g(i)'; +CREATE aggregate least_agg(variadic items anyarray) ( + stype = anyelement, sfunc = least_accum +); +-- Not pushed down due to user defined aggregate +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, least_agg(c1) FROM fdw137_t1 GROUP BY c2 ORDER BY c2; +SELECT c2, least_agg(c1) FROM fdw137_t1 GROUP BY c2 ORDER BY c2; + +-- Test partition-wise aggregate +SET enable_partitionwise_aggregate TO ON; + +-- Create the partition tables +CREATE TABLE fprt1 (_id NAME, c1 INTEGER, c2 INTEGER, c3 TEXT) PARTITION BY RANGE(c1); +CREATE FOREIGN TABLE ftprt1_p1 PARTITION OF fprt1 FOR VALUES FROM (1) TO (4) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test1'); +CREATE FOREIGN TABLE ftprt1_p2 PARTITION OF fprt1 FOR VALUES FROM (5) TO (8) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test2'); + +-- Plan with partitionwise aggregates is enabled +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, sum(c1) FROM fprt1 GROUP BY c1 ORDER BY 2; +SELECT c1, sum(c1) FROM fprt1 GROUP BY c1 ORDER BY 2; + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, sum(c2), min(c2), count(*) FROM fprt1 GROUP BY c1 HAVING avg(c2) < 22 ORDER BY 2; +SELECT c1, sum(c2), min(c2), count(*) FROM fprt1 GROUP BY c1 HAVING avg(c2) < 22 ORDER BY 2; + +-- Check with whole-row reference +-- Should have all the columns in the target list for the given relation +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c1, count(t1) FROM fprt1 t1 GROUP BY c1 HAVING avg(c2) < 22 ORDER BY 1; +SELECT c1, count(t1) FROM fprt1 t1 GROUP BY c1 HAVING avg(c2) < 22 ORDER BY 1; + +SET enable_partitionwise_aggregate TO OFF; + +-- Support enable_aggregate_pushdown option at server level and table level. +-- Check only boolean values are accepted. +ALTER SERVER mongo_server OPTIONS (ADD enable_aggregate_pushdown 'non-bolean'); + +-- Test the option at server level. +ALTER SERVER mongo_server OPTIONS (ADD enable_aggregate_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + +-- Test the option at table level. Setting option at table level does not +-- affect the setting at server level. +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (ADD enable_aggregate_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'false'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; + +-- Test option for aggregation over join. Allow aggregation only if enabled for +-- both the relations involved in the join. 
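+-- The effective setting for each table is its table-level option, falling
+-- back to the server-level option; both sides must end up true for the
+-- aggregate over the join to be pushed down.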
+ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'false'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (ADD enable_aggregate_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2; + +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (SET enable_aggregate_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2; + +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'false'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (SET enable_aggregate_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + +-- FDW-560: Aggregation over nested join. As nested join push down is not +-- supported, aggregation shouldn't get pushdown. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) INNER JOIN fdw137_t1 t3 ON (t3.c1 = t1.c1) GROUP BY t1.c8 ORDER BY 2; +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) INNER JOIN fdw137_t1 t3 ON (t3.c1 = t1.c1) GROUP BY t1.c8 ORDER BY 2; + +-- Check when enable_join_pushdown is OFF and enable_aggregate_pushdown is ON. +-- Shouldn't push down join as well as aggregation. +ALTER SERVER mongo_server OPTIONS (ADD enable_join_pushdown 'false'); +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2; + +-- FDW-134: Test with number of columns more than 32 +CREATE FOREIGN TABLE f_test_large (_id int, + a01 int, a02 int, a03 int, a04 int, a05 int, a06 int, a07 int, a08 int, a09 int, a10 int, + a11 int, a12 int, a13 int, a14 int, a15 int, a16 int, a17 int, a18 int, a19 int, a20 int, + a21 int, a22 int, a23 int, a24 int, a25 int, a26 int, a27 int, a28 int, a29 int, a30 int, + a31 int, a32 int, a33 int, a34 int, a35 int) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'mongo_test_large'); + +-- Shouldn't pushdown ORDERBY clause due to exceeded number of path keys limit. 
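+-- The query below sorts on 35 columns; the 32-column variant further down
+-- stays within the limit and should still be pushed down.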
+EXPLAIN (VERBOSE, COSTS FALSE) +SELECT a32, sum(a32) FROM f_test_large GROUP BY + a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, + a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, + a31, a32, a33, a34, a35 ORDER BY + a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST, + a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST, + a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST, + a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST, + a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST, + a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 ASC NULLS FIRST, + a31 ASC NULLS FIRST, a32 ASC NULLS FIRST, a33 ASC NULLS FIRST, a34 DESC NULLS LAST, a35 ASC NULLS FIRST; +SELECT a32, sum(a32) FROM f_test_large GROUP BY + a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, + a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, + a31, a32, a33, a34, a35 ORDER BY + a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST, + a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST, + a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST, + a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST, + a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST, + a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 ASC NULLS FIRST, + a31 ASC NULLS FIRST, a32 ASC NULLS FIRST, a33 ASC NULLS FIRST, a34 DESC NULLS LAST, a35 ASC NULLS FIRST; + +-- Should pushdown ORDERBY clause because number of path keys are in limit. 
+EXPLAIN (VERBOSE, COSTS FALSE) +SELECT a32, sum(a32) FROM f_test_large GROUP BY + a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, + a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, + a31, a32 ORDER BY + a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST, + a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST, + a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST, + a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST, + a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST, + a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 ASC NULLS FIRST, + a31 ASC NULLS FIRST, a32 ASC NULLS FIRST; +SELECT a32, sum(a32) FROM f_test_large GROUP BY + a01, a02, a03, a04, a05, a06, a07, a08, a09, a10, a11, a12, a13, a14, a15, + a16, a17, a18, a19, a20, a21, a22, a23, a24, a25, a26, a27, a28, a29, a30, + a31, a32 ORDER BY + a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST, + a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST, + a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST, + a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST, + a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST, + a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 ASC NULLS FIRST, + a31 ASC NULLS FIRST, a32 ASC NULLS FIRST; + +-- FDW-131: Limit and offset pushdown with Aggregate pushdown. +SELECT avg(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT 1 OFFSET 1; +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT 1 OFFSET 1; + +-- Limit 0, Offset 0 with aggregates. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT 0 OFFSET 0; +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT 0 OFFSET 0; + +-- Limit NULL +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT NULL OFFSET 2; +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT NULL OFFSET 2; + +-- Limit ALL +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT ALL OFFSET 2; +SELECT sum(c1), c1 FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT ALL OFFSET 2; + +-- Limit with -ve value. Shouldn't pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, sum(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY 1 ASC NULLS FIRST LIMIT -1; +-- Should throw an error. +SELECT c1, sum(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY 1 ASC NULLS FIRST LIMIT -1; + +-- Offset with -ve value. Shouldn't pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, sum(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY 1 ASC NULLS FIRST OFFSET -2; +-- Should throw an error. +SELECT c1, sum(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY 1 ASC NULLS FIRST OFFSET -2; + +-- Limit/Offset with -ve value. Shouldn't pushdown. 
+EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, avg(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT -1 OFFSET -2; +-- Should throw an error. +SELECT c1, avg(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT -1 OFFSET -2; + +-- Limit with expression evaluating to -ve value. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, avg(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT (1 - (SELECT COUNT(*) FROM fdw137_t2)); +SELECT c1, avg(c1) FROM fdw137_t2 GROUP BY c1 ORDER BY c1 ASC NULLS FIRST LIMIT (1 - (SELECT COUNT(*) FROM fdw137_t2)); + +-- FDW-559: Test mongo_fdw.enable_aggregate_pushdown GUC. +-- Check default value. Should be ON. +SHOW mongo_fdw.enable_aggregate_pushdown; +-- Negative testing for GUC value. +SET mongo_fdw.enable_aggregate_pushdown to 'abc'; +--Disable the GUC enable_aggregate_pushdown. +SET mongo_fdw.enable_aggregate_pushdown to false; +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +-- Shouldn't pushdown aggregate because GUC is OFF. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; +--Enable the GUC enable_aggregate_pushdown. +SET mongo_fdw.enable_aggregate_pushdown to on; +ALTER SERVER mongo_server OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +-- Should pushdown aggregate because GUC is ON. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; +SELECT count(*) FROM fdw137_t1 GROUP BY c1 HAVING min(c1) > 500 ORDER BY 1; +-- Test for aggregation over join when server and table options for both the +-- tables is true and guc is enabled. Should pushdown. +SET mongo_fdw.enable_aggregate_pushdown to on; +SET mongo_fdw.enable_join_pushdown to on; +ALTER SERVER mongo_server OPTIONS (SET enable_join_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (SET enable_aggregate_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; +--Disable the GUC enable_join_pushdown. Shouldn't pushdown aggregate. +SET mongo_fdw.enable_join_pushdown to off; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; +SET mongo_fdw.enable_join_pushdown to on; +--Disable the GUC enable_aggregate_pushdown. Shouldn't pushdown. +SET mongo_fdw.enable_aggregate_pushdown to false; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; +SELECT count(*), t1.c8 FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) GROUP BY t1.c8 ORDER BY 2 ASC NULLS FIRST; + +-- FDW-589: Test enable_order_by_pushdown option at server and table level. 
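+-- As with the other push-down options, a table-level setting is expected
+-- to take precedence over the server-level one for that table.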
+SET mongo_fdw.enable_join_pushdown to true; +SET mongo_fdw.enable_aggregate_pushdown to true; +SET mongo_fdw.enable_order_by_pushdown to true; +ALTER SERVER mongo_server OPTIONS (ADD enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (ADD enable_join_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (ADD enable_order_by_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (ADD enable_join_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (SET enable_aggregate_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (ADD enable_order_by_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_order_by_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; +-- Test that setting option at table level does not affect the setting at +-- server level. +ALTER SERVER mongo_server OPTIONS (SET enable_order_by_pushdown 'false'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_order_by_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; +ALTER FOREIGN TABLE fdw137_t2 OPTIONS (SET enable_order_by_pushdown 'true'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; +SELECT sum(t2.c1), t1.c8, avg(t1.c8) FROM fdw137_t1 t1 LEFT JOIN fdw137_t2 t2 ON (t1.c8 = t2.c1) WHERE t1.c8 > 10 GROUP BY t1.c8 HAVING avg(t1.c8)*1 > 10 + ORDER BY 2 ASC NULLS FIRST; +-- When option enable_aggregate_pushdown is disabled. Shouldn't pushdown +-- aggregate as well as ORDER BY too. 
+ALTER SERVER mongo_server OPTIONS (SET enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'false'); +EXPLAIN (VERBOSE, COSTS OFF) +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; +SELECT c2, sum(c1) FROM fdw137_t1 GROUP BY c1, c2 HAVING min(c1) > 500 ORDER BY 1 ASC NULLS FIRST; +ALTER FOREIGN TABLE fdw137_t1 OPTIONS (SET enable_aggregate_pushdown 'true'); + +-- Cleanup +DELETE FROM fdw137_t1 WHERE c8 IS NULL; +DELETE FROM fdw137_t1 WHERE c8 = 60; +DELETE FROM fdw137_t2 WHERE c1 IS NULL; +DELETE FROM fdw137_t2 WHERE c1 = 50; +DROP FOREIGN TABLE fdw137_t1; +DROP FOREIGN TABLE fdw137_t2; +DROP FOREIGN TABLE ftprt1_p1; +DROP FOREIGN TABLE ftprt1_p2; +DROP FOREIGN TABLE f_test_large; +DROP TABLE fprt1; +DROP USER MAPPING FOR public SERVER mongo_server; +DROP SERVER mongo_server; +DROP EXTENSION mongo_fdw; diff --git a/sql/connection_validation.sql b/sql/connection_validation.sql new file mode 100644 index 0000000..4d5ae79 --- /dev/null +++ b/sql/connection_validation.sql @@ -0,0 +1,65 @@ +\set VERBOSITY terse +\set MONGO_HOST `echo \'"$MONGO_HOST"\'` +\set MONGO_PORT `echo \'"$MONGO_PORT"\'` +\set MONGO_USER_NAME `echo \'"$MONGO_USER_NAME"\'` +\set MONGO_PASS `echo \'"$MONGO_PWD"\'` + +-- Before running this file user must create database mongo_fdw_regress on +-- MongoDB with all permission for MONGO_USER_NAME user with MONGO_PASS +-- password and ran mongodb_init.sh file to load collections. + +\c contrib_regression +CREATE EXTENSION IF NOT EXISTS mongo_fdw; +CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw + OPTIONS (address :MONGO_HOST, port :MONGO_PORT); +CREATE USER MAPPING FOR public SERVER mongo_server; + +-- Create foreign tables and validate +CREATE FOREIGN TABLE f_mongo_test (_id name, a int, b varchar) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'mongo_test'); +SELECT a, b FROM f_mongo_test ORDER BY 1, 2; + +-- +-- fdw-108: After a change to a pg_foreign_server or pg_user_mapping catalog +-- entry, connection should be invalidated. 
+--
+
+-- Alter one of the SERVER options
+-- Set wrong address for mongo_server
+ALTER SERVER mongo_server OPTIONS (SET address '127.0.0.10');
+ALTER SERVER mongo_server OPTIONS (SET port '9999');
+-- Should fail with an error
+INSERT INTO f_mongo_test VALUES ('0', 2, 'RECORD INSERTED');
+UPDATE f_mongo_test SET b = 'RECORD UPDATED' WHERE a = 2;
+DELETE FROM f_mongo_test WHERE a = 2;
+SELECT a, b FROM f_mongo_test ORDER BY 1, 2;
+-- Set correct address for mongo_server
+ALTER SERVER mongo_server OPTIONS (SET address :MONGO_HOST);
+ALTER SERVER mongo_server OPTIONS (SET port :MONGO_PORT);
+-- Should be able to insert the data
+INSERT INTO f_mongo_test VALUES ('0', 2, 'RECORD INSERTED');
+DELETE FROM f_mongo_test WHERE a = 2;
+
+-- Drop user mapping and create with invalid username and password for public
+-- user mapping
+DROP USER MAPPING FOR public SERVER mongo_server;
+CREATE USER MAPPING FOR public SERVER mongo_server
+  OPTIONS (username 'wrong', password 'wrong');
+-- Should fail with an error
+INSERT INTO f_mongo_test VALUES ('0', 3, 'RECORD INSERTED');
+UPDATE f_mongo_test SET b = 'RECORD UPDATED' WHERE a = 3;
+DELETE FROM f_mongo_test WHERE a = 3;
+SELECT a, b FROM f_mongo_test ORDER BY 1, 2;
+-- Drop user mapping and create without username and password for public
+-- user mapping
+DROP USER MAPPING FOR public SERVER mongo_server;
+CREATE USER MAPPING FOR public SERVER mongo_server;
+-- Should be able to insert the data
+INSERT INTO f_mongo_test VALUES ('0', 3, 'RECORD INSERTED');
+DELETE FROM f_mongo_test WHERE a = 3;
+
+-- Cleanup
+DROP FOREIGN TABLE f_mongo_test;
+DROP USER MAPPING FOR public SERVER mongo_server;
+DROP SERVER mongo_server;
+DROP EXTENSION mongo_fdw;
diff --git a/sql/dml.sql b/sql/dml.sql
new file mode 100644
index 0000000..aa61bff
--- /dev/null
+++ b/sql/dml.sql
@@ -0,0 +1,217 @@
+\set MONGO_HOST `echo \'"$MONGO_HOST"\'`
+\set MONGO_PORT `echo \'"$MONGO_PORT"\'`
+\set MONGO_USER_NAME `echo \'"$MONGO_USER_NAME"\'`
+\set MONGO_PASS `echo \'"$MONGO_PWD"\'`
+
+-- Before running this file, the user must create the mongo_fdw_regress,
+-- mongo_fdw_regress1, and mongo_fdw_regress2 databases on MongoDB with all
+-- permissions for the MONGO_USER_NAME user with the MONGO_PASS password, and
+-- run the mongodb_init.sh file to load the collections.
+
+\c contrib_regression
+CREATE EXTENSION IF NOT EXISTS mongo_fdw;
+CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw
+  OPTIONS (address :MONGO_HOST, port :MONGO_PORT);
+CREATE USER MAPPING FOR public SERVER mongo_server;
+
+-- Create foreign tables
+CREATE FOREIGN TABLE f_mongo_test (_id name, a int, b varchar) SERVER mongo_server
+  OPTIONS (database 'mongo_fdw_regress', collection 'mongo_test');
+CREATE FOREIGN TABLE f_mongo_test1 (_id name, a int, b varchar) SERVER mongo_server
+  OPTIONS (database 'mongo_fdw_regress1', collection 'mongo_test1');
+CREATE FOREIGN TABLE f_mongo_test2 (_id name, a int, b varchar) SERVER mongo_server
+  OPTIONS (database 'mongo_fdw_regress2', collection 'mongo_test2');
+-- Creating foreign table without specifying database.
+CREATE FOREIGN TABLE f_mongo_test3 (_id name, a int, b varchar) SERVER mongo_server
+  OPTIONS (collection 'mongo_test3');
+CREATE FOREIGN TABLE f_mongo_test6 (_id name, a int, b text[]) SERVER mongo_server
+  OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl6');
+
+-- Verify the INSERT/UPDATE/DELETE operations on a collection (mongo_test)
+-- that exists in a database (mongo_fdw_regress) in MongoDB.
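+-- Each DML statement below is followed by a SELECT so its effect on the
+-- collection shows up in the regression output.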
+SELECT a,b FROM f_mongo_test ORDER BY 1, 2;
+INSERT INTO f_mongo_test VALUES ('0', 10 , 'INSERT');
+SELECT a,b FROM f_mongo_test ORDER BY 1, 2;
+UPDATE f_mongo_test SET b = 'UPDATE' WHERE a = 10;
+SELECT a,b FROM f_mongo_test ORDER BY 1, 2;
+DELETE FROM f_mongo_test WHERE a = 10;
+SELECT a,b FROM f_mongo_test ORDER BY 1, 2;
+
+-- Verify the INSERT/UPDATE/DELETE operations on a collection (mongo_test1)
+-- that does not exist in a database (mongo_fdw_regress1) in MongoDB.
+SELECT a,b FROM f_mongo_test1 ORDER BY 1, 2;
+INSERT INTO f_mongo_test1 VALUES ('0', 10 , 'INSERT');
+SELECT a,b FROM f_mongo_test1 ORDER BY 1, 2;
+UPDATE f_mongo_test1 SET b = 'UPDATE' WHERE a = 10;
+SELECT a,b FROM f_mongo_test1 ORDER BY 1, 2;
+DELETE FROM f_mongo_test1 WHERE a = 10;
+SELECT a,b FROM f_mongo_test1 ORDER BY 1, 2;
+
+-- Verify the INSERT/UPDATE/DELETE operations on a collection (mongo_test2)
+-- that does not exist in a non-existent database (mongo_fdw_regress2) in MongoDB.
+SELECT a,b FROM f_mongo_test2 ORDER BY 1, 2;
+INSERT INTO f_mongo_test2 VALUES ('0', 10 , 'INSERT');
+SELECT a,b FROM f_mongo_test2 ORDER BY 1, 2;
+UPDATE f_mongo_test2 SET b = 'UPDATE' WHERE a = 10;
+SELECT a,b FROM f_mongo_test2 ORDER BY 1, 2;
+DELETE FROM f_mongo_test2 WHERE a = 10;
+SELECT a,b FROM f_mongo_test2 ORDER BY 1, 2;
+
+-- Verify the INSERT/UPDATE/DELETE operations on a collection (mongo_test)
+-- when the foreign table is created without the database option.
+SELECT a,b FROM f_mongo_test3 ORDER BY 1, 2;
+INSERT INTO f_mongo_test3 VALUES ('0', 10 , 'INSERT');
+SELECT a,b FROM f_mongo_test3 ORDER BY 1, 2;
+UPDATE f_mongo_test3 SET b = 'UPDATE' WHERE a = 10;
+SELECT a,b FROM f_mongo_test3 ORDER BY 1, 2;
+DELETE FROM f_mongo_test3 WHERE a = 10;
+SELECT a,b FROM f_mongo_test3 ORDER BY 1, 2;
+
+-- FDW-158: Fix server crash when analyzing a foreign table.
+ANALYZE f_mongo_test;
+-- Should give the correct number of rows now.
+SELECT reltuples FROM pg_class WHERE relname = 'f_mongo_test';
+-- Check the count using a SELECT query on the table.
+SELECT count(*) FROM f_mongo_test; + +-- Some more variants of vacuum and analyze +VACUUM f_mongo_test; +VACUUM FULL f_mongo_test; +VACUUM FREEZE f_mongo_test; +ANALYZE f_mongo_test; +ANALYZE f_mongo_test(a); +VACUUM ANALYZE f_mongo_test; + +-- FDW-226: Fix COPY FROM and foreign partition routing results in a +-- server crash + +-- Should fail as foreign table direct copy is not supported +COPY f_mongo_test TO '/tmp/data.txt' delimiter ','; +COPY f_mongo_test (a) TO '/tmp/data.txt' delimiter ','; +COPY f_mongo_test (b) TO '/tmp/data.txt' delimiter ','; + +-- Should pass +COPY (SELECT * FROM f_mongo_test) TO '/tmp/data.txt' delimiter ','; +COPY (SELECT a, b FROM f_mongo_test) TO '/tmp/data.txt' delimiter ','; +COPY (SELECT a FROM f_mongo_test) TO '/tmp/data.txt' delimiter ','; +COPY (SELECT b FROM f_mongo_test) TO '/tmp/data.txt' delimiter ','; + +-- Should throw an error as copy to foreign table is not supported +DO +$$ +BEGIN + COPY f_mongo_test FROM '/tmp/data.txt' delimiter ','; +EXCEPTION WHEN others THEN + IF SQLERRM = 'COPY and foreign partition routing not supported in mongo_fdw' OR + SQLERRM = 'cannot copy to foreign table "f_mongo_test"' THEN + RAISE NOTICE 'ERROR: COPY and foreign partition routing not supported in mongo_fdw'; + ELSE + RAISE NOTICE '%', SQLERRM; + END IF; +END; +$$ +LANGUAGE plpgsql; + +DO +$$ +BEGIN + COPY f_mongo_test(a, b) FROM '/tmp/data.txt' delimiter ','; +EXCEPTION WHEN others THEN + IF SQLERRM = 'COPY and foreign partition routing not supported in mongo_fdw' OR + SQLERRM = 'cannot copy to foreign table "f_mongo_test"' THEN + RAISE NOTICE 'ERROR: COPY and foreign partition routing not supported in mongo_fdw'; + ELSE + RAISE NOTICE '%', SQLERRM; + END IF; +END; +$$ +LANGUAGE plpgsql; + +DO +$$ +BEGIN + COPY f_mongo_test(a) FROM '/tmp/data.txt' delimiter ','; +EXCEPTION WHEN others THEN + IF SQLERRM = 'COPY and foreign partition routing not supported in mongo_fdw' OR + SQLERRM = 'cannot copy to foreign table "f_mongo_test"' THEN + RAISE NOTICE 'ERROR: COPY and foreign partition routing not supported in mongo_fdw'; + ELSE + RAISE NOTICE '%', SQLERRM; + END IF; +END; +$$ +LANGUAGE plpgsql; + +DO +$$ +BEGIN + COPY f_mongo_test(b) FROM '/tmp/data.txt' delimiter ','; +EXCEPTION WHEN others THEN + IF SQLERRM = 'COPY and foreign partition routing not supported in mongo_fdw' OR + SQLERRM = 'cannot copy to foreign table "f_mongo_test"' THEN + RAISE NOTICE 'ERROR: COPY and foreign partition routing not supported in mongo_fdw'; + ELSE + RAISE NOTICE '%', SQLERRM; + END IF; +END; +$$ +LANGUAGE plpgsql; + +--FDW-466: Document update for array elements shouldn't lead to the crash +INSERT INTO f_mongo_test6 VALUES (0, 1, ARRAY ['INSERT', 'DELETE']); +SELECT a, b FROM f_mongo_test6 ORDER BY a; +UPDATE f_mongo_test6 SET b[1] = 'UPDATE' WHERE a = 1; +SELECT a, b FROM f_mongo_test6 ORDER BY a; +DELETE FROM f_mongo_test6 WHERE b[2] = 'DELETE'; +SELECT a, b FROM f_mongo_test6 ORDER BY a; + +--FDW-481: UPDATE/DELETE shouldn't lead to crash when _id is NULL. +-- If first column type is not NAME then UPDATE/DELETE should result into an error. +CREATE FOREIGN TABLE f_mongo_test7 (_id text, a int, b text) SERVER mongo_server + OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl7'); +SELECT a, b FROM f_mongo_test7 ORDER BY 1; +UPDATE f_mongo_test7 SET b = 'UPDATED' WHERE a = 10; +DELETE FROM f_mongo_test7 WHERE a = 10; +DROP FOREIGN TABLE f_mongo_test7; + +-- If first column name is not _id then UPDATE/DELETE should result into an error. 
+CREATE FOREIGN TABLE f_mongo_test7 (id1 NAME, a int, b text) SERVER mongo_server + OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl7'); +SELECT a, b FROM f_mongo_test7 ORDER BY 1; +UPDATE f_mongo_test7 SET b = 'UPDATED' WHERE a = 10; +DELETE FROM f_mongo_test7 WHERE a = 10; +DROP FOREIGN TABLE f_mongo_test7; + +-- UPDATE/DELETE when _id is NULL. Shouldn't crash. +CREATE FOREIGN TABLE f_mongo_test7 (_id NAME, a int, b text) SERVER mongo_server + OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl7'); +SELECT a, b FROM f_mongo_test7 ORDER BY 1; +SELECT * FROM f_mongo_test7 WHERE a = 10 ORDER BY 1; +UPDATE f_mongo_test7 SET b = 'UPDATED' WHERE _id IS NULL; +SELECT a, b FROM f_mongo_test7 ORDER BY 1; +DELETE FROM f_mongo_test7 WHERE a = 20; +SELECT a, b FROM f_mongo_test7 ORDER BY 1; + +-- Retain original data of test_tbl7 +UPDATE f_mongo_test7 SET b = 'ROW1' WHERE a = 10; +INSERT INTO f_mongo_test7 VALUES(0, 20, 'ROW2'); + +-- When _id is non-objectId type on MongoDB. Should result into an error. +CREATE FOREIGN TABLE f_mongo_test8 (_id NAME, a int, b text) SERVER mongo_server + OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl8'); +SELECT * FROM f_mongo_test8 ORDER BY 1; +UPDATE f_mongo_test8 SET b = 'UPDATED' WHERE a = 2; +DELETE FROM f_mongo_test8 WHERE a = 2; +SELECT a, b FROM f_mongo_test8 ORDER BY 1; + +-- Cleanup +DROP FOREIGN TABLE f_mongo_test; +DROP FOREIGN TABLE f_mongo_test1; +DROP FOREIGN TABLE f_mongo_test2; +DROP FOREIGN TABLE f_mongo_test3; +DROP FOREIGN TABLE f_mongo_test6; +DROP FOREIGN TABLE f_mongo_test7; +DROP FOREIGN TABLE f_mongo_test8; +DROP USER MAPPING FOR public SERVER mongo_server; +DROP SERVER mongo_server; +DROP EXTENSION mongo_fdw; diff --git a/sql/join_pushdown.sql b/sql/join_pushdown.sql new file mode 100644 index 0000000..4cd4cdd --- /dev/null +++ b/sql/join_pushdown.sql @@ -0,0 +1,597 @@ +\set MONGO_HOST `echo \'"$MONGO_HOST"\'` +\set MONGO_PORT `echo \'"$MONGO_PORT"\'` +\set MONGO_USER_NAME `echo \'"$MONGO_USER_NAME"\'` +\set MONGO_PASS `echo \'"$MONGO_PWD"\'` + +-- Before running this file user must create database mongo_fdw_regress on +-- MongoDB with all permission for MONGO_USER_NAME user with MONGO_PASS +-- password and ran mongodb_init.sh file to load collections. + +\c contrib_regression +CREATE EXTENSION IF NOT EXISTS mongo_fdw; +CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw + OPTIONS (address :MONGO_HOST, port :MONGO_PORT); +CREATE USER MAPPING FOR public SERVER mongo_server; + +CREATE SERVER mongo_server1 FOREIGN DATA WRAPPER mongo_fdw + OPTIONS (address :MONGO_HOST, port :MONGO_PORT); +CREATE USER MAPPING FOR public SERVER mongo_server1; + +-- Create foreign tables. 
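+-- f_test_tbl3 and f_test_tbl4 are defined on mongo_server1 (a second server
+-- for the same MongoDB instance) so that joins across different foreign
+-- servers can be exercised later in this file.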
+CREATE FOREIGN TABLE f_test_tbl1 (_id NAME, c1 INTEGER, c2 TEXT, c3 CHAR(9), c4 INTEGER, c5 pg_catalog.Date, c6 DECIMAL, c7 INTEGER, c8 INTEGER) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl1'); +CREATE FOREIGN TABLE f_test_tbl2 (_id NAME, c1 INTEGER, c2 TEXT, c3 TEXT) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl2'); +CREATE FOREIGN TABLE f_test_tbl3 (_id NAME, c1 INTEGER, c2 TEXT, c3 TEXT) + SERVER mongo_server1 OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl2'); +CREATE FOREIGN TABLE test_text ( __doc text) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'warehouse'); +CREATE FOREIGN TABLE test_varchar ( __doc varchar) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'warehouse'); +CREATE FOREIGN TABLE f_test_tbl4 (_id NAME, c1 INTEGER, c2 TEXT, c3 CHAR(9), c4 INTEGER, c5 pg_catalog.Date, c6 DECIMAL, c7 INTEGER, c8 INTEGER) + SERVER mongo_server1 OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl1'); + +INSERT INTO f_test_tbl1 VALUES (0, 1500, 'EMP15', 'FINANCE', 1300, '2000-12-25', 950.0, 400, 60); +INSERT INTO f_test_tbl1 VALUES (0, 1600, 'EMP16', 'ADMIN', 600); +INSERT INTO f_test_tbl2 VALUES (0, 50, 'TESTING', 'NASHIK'); +INSERT INTO f_test_tbl2 VALUES (0); + + +-- Create local table. +CREATE TABLE l_test_tbl1 AS + SELECT c1, c2, c3, c4, c5, c6, c7, c8 FROM f_test_tbl1; + +-- Push down LEFT OUTER JOIN. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl1 e LEFT OUTER JOIN f_test_tbl2 d ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl1 e LEFT OUTER JOIN f_test_tbl2 d ON e.c8 = d.c1 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 OR e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST OFFSET 50; +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 OR e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST OFFSET 50; +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; +-- With ORDER BY pushdown disabled. +SET mongo_fdw.enable_order_by_pushdown TO OFF; +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; +SET mongo_fdw.enable_order_by_pushdown TO ON; + +-- Column comparing with 'Constant' pushed down. 
+EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = 20 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = 20 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + +-- Push down RIGHT OUTER JOIN. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl1 e RIGHT OUTER JOIN f_test_tbl2 d ON e.c8 = d.c1 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl1 e RIGHT OUTER JOIN f_test_tbl2 d ON e.c8 = d.c1 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c4 > d.c1 OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c4 > d.c1 OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON ((d.c1 = e.c8 OR e.c4 > d.c1) AND e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON ((d.c1 = e.c8 OR e.c4 > d.c1) OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; +-- Column comparing with 'Constant' pushed down. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON d.c1 = 20 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON (d.c1 = 20 AND e.c2 = 'EMP1') ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + +-- Push INNER JOIN. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON ((d.c1 = e.c8 OR e.c4 > d.c1) AND e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON ((d.c1 = e.c8 OR e.c4 > d.c1) OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c2 < d.c3) ORDER BY 1, 3 OFFSET 60; + +-- Column comparing with 'Constant' pushed down. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') ORDER BY 1, 3; +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') ORDER BY 1, 3; +-- INNER JOIN with WHERE clause. 
Should execute where condition separately +-- (NOT added into join clauses) on remote side. +EXPLAIN (COSTS OFF) +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (d.c8 = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (d.c8 = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; +-- INNER JOIN in which join clause is not pushable but WHERE condition is +-- pushable with join clause 'TRUE'. +EXPLAIN (COSTS OFF) +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (abs(d.c8) = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (abs(d.c8) = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; +-- With ORDER BY pushdown disabled. +SET mongo_fdw.enable_order_by_pushdown TO OFF; +EXPLAIN (COSTS OFF) +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (abs(d.c8) = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; +SELECT d.c1, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (abs(d.c8) = e.c1) WHERE d.c1 = 100 ORDER BY e.c3 DESC NULLS LAST, d.c1 DESC NULLS LAST; +SET mongo_fdw.enable_order_by_pushdown TO ON; + +SET enable_mergejoin TO OFF; +SET enable_nestloop TO OFF; +-- Local-Foreign table joins. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN l_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN l_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; +RESET enable_mergejoin; +RESET enable_nestloop; + +-- JOIN in sub-query, should be pushed down. +EXPLAIN (COSTS OFF) +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 IN (SELECT f1.c1 FROM f_test_tbl1 f1 LEFT JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1)) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 IN (SELECT f1.c1 FROM f_test_tbl1 f1 LEFT JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1)) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; +EXPLAIN (COSTS OFF) +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 = (SELECT f1.c1 FROM f_test_tbl1 f1 LEFT JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1) LIMIT 1) ORDER BY 1, 3; +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 = (SELECT f1.c1 FROM f_test_tbl1 f1 LEFT JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1) LIMIT 1) ORDER BY 1, 3; +EXPLAIN (COSTS OFF) +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 = (SELECT f1.c1 FROM f_test_tbl1 f1 INNER JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1) LIMIT 1) ORDER BY 1, 3; +SELECT l.c1, l.c6, l.c8 + FROM l_test_tbl1 l + WHERE l.c1 = (SELECT f1.c1 FROM f_test_tbl1 f1 INNER JOIN f_test_tbl2 f2 ON (f1.c8 = f2.c1) LIMIT 1) ORDER BY 1, 3; + +-- Execute JOIN through PREPARE statement. +PREPARE pre_stmt_left_join AS +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c4 > d.c1) ORDER BY 1, 3 OFFSET 70; +EXPLAIN (COSTS OFF) +EXECUTE pre_stmt_left_join; +EXECUTE pre_stmt_left_join; +PREPARE pre_stmt_inner_join AS +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 OR e.c4 > d.c1) ORDER BY 1, 3 OFFSET 70; +EXPLAIN (COSTS OFF) +EXECUTE pre_stmt_inner_join; +EXECUTE pre_stmt_inner_join; + +-- join + WHERE clause push-down. 
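+-- The WHERE conditions below are kept separate from the join clauses and are
+-- expected to be evaluated on the remote side where possible.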
+EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE d.c1 = 10 ORDER BY 1 DESC NULLS LAST, 3 DESC NULLS LAST; +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE d.c1 = 10 ORDER BY 1 DESC NULLS LAST, 3 DESC NULLS LAST; +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE e.c8 = 10 ORDER BY 1, 3; +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE e.c8 = 10 ORDER BY 1, 3; +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE d.c2 = 'SALES' ORDER BY 1, 3; +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE d.c2 = 'SALES' ORDER BY 1, 3; +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE e.c2 = 'EMP2' ORDER BY 1, 3; +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 WHERE e.c2 = 'EMP2' ORDER BY 1, 3; +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') WHERE d.c1 = 10 OR e.c8 = 30 ORDER BY 1, 3; +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') WHERE d.c1 = 10 OR e.c8 = 30 ORDER BY 1, 3; +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, d.c6, d.c8 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8 AND (e.c1 = 20 OR d.c2 = 'EMP1')) WHERE e.c1 = 20 AND d.c8 = 20 ORDER BY 1, 3; +SELECT d.c1, d.c2, e.c1, e.c2, d.c6, d.c8 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8 AND (e.c1 = 20 OR d.c2 = 'EMP1')) WHERE e.c1 = 20 AND d.c8 = 20 ORDER BY 1, 3; +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8 AND (d.c5 = '02-22-1981' OR d.c5 = '12-17-1980')) ORDER BY 1, 3; +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8 AND (d.c5 = '02-22-1981' OR d.c5 = '12-17-1980')) ORDER BY 1, 3; +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8) WHERE d.c5 = '02-22-1981' ORDER BY 1; +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8) WHERE d.c5 = '02-22-1981' ORDER BY 1; +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') WHERE d.c1 = 10 OR e.c8 = 30 ORDER BY 1 DESC NULLS LAST, 3 DESC NULLS LAST; +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND d.c1 = 20 OR e.c2 = 'EMP1') WHERE d.c1 = 10 OR e.c8 = 30 ORDER BY 1 DESC NULLS LAST, 3 DESC NULLS LAST; + +-- Natural join, should push-down. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d NATURAL JOIN f_test_tbl1 e WHERE e.c1 > d.c8 ORDER BY 1; +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d NATURAL JOIN f_test_tbl1 e WHERE e.c1 > d.c8 ORDER BY 1; +-- Self join, should push-down. 
+EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d INNER JOIN f_test_tbl1 e ON e.c8 = d.c8 ORDER BY 1 OFFSET 65; +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d INNER JOIN f_test_tbl1 e ON e.c8 = d.c8 ORDER BY 1 OFFSET 65; + +-- Join in CTE. +-- Explain plan difference between v11 (or pre) and later. +EXPLAIN (COSTS false, VERBOSE) +WITH t (c1_1, c1_3, c2_1) AS ( + SELECT d.c1, d.c3, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (d.c8 = e.c1) +) SELECT c1_1, c2_1 FROM t ORDER BY c1_3, c1_1; +WITH t (c1_1, c1_3, c2_1) AS ( + SELECT d.c1, d.c3, e.c1 + FROM f_test_tbl1 d JOIN f_test_tbl2 e ON (d.c8 = e.c1) +) SELECT c1_1, c2_1 FROM t ORDER BY c1_3, c1_1; + +-- WHERE with boolean expression. Should push-down. +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl2 e LEFT JOIN f_test_tbl1 d ON (e.c1 = d.c8) WHERE d.c5 = '02-22-1981' OR d.c5 = '12-17-1980' ORDER BY 1; +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl2 e LEFT JOIN f_test_tbl1 d ON (e.c1 = d.c8) WHERE d.c5 = '02-22-1981' OR d.c5 = '12-17-1980' ORDER BY 1; + +-- Nested joins(Don't push-down nested join) +SET enable_mergejoin TO OFF; +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8) LEFT JOIN f_test_tbl1 f ON (f.c8 = e.c1) ORDER BY d.c1 OFFSET 65 ; +SELECT d.c1, d.c2, d.c5, e.c1, e.c2 + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8) LEFT JOIN f_test_tbl1 f ON (f.c8 = e.c1) ORDER BY d.c1 OFFSET 65; +RESET enable_mergejoin; + +-- Not supported expressions won't push-down(e.g. function expression, etc.) +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (ABS(d.c1) = e.c8) ORDER BY 1, 3; +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (ABS(d.c1) = e.c8) ORDER BY 1, 3; + +-- Don't pushdown when whole row reference is involved. +EXPLAIN (COSTS OFF) +SELECT d, e + FROM f_test_tbl1 d LEFT JOIN f_test_tbl2 e ON (e.c1 = d.c8) LEFT JOIN f_test_tbl1 f ON (f.c8 = e.c1) ORDER BY e.c1 OFFSET 65; + +-- Don't pushdown when full document retrieval is involved. +EXPLAIN (COSTS OFF) +SELECT json_data.key AS key1, json_data.value AS value1 + FROM test_text, test_varchar, json_each_text(test_text.__doc::json) AS json_data WHERE key NOT IN ('_id') ORDER BY json_data.key COLLATE "C"; +SELECT json_data.key AS key1, json_data.value AS value1 + FROM test_text, test_varchar, json_each_text(test_text.__doc::json) AS json_data WHERE key NOT IN ('_id') ORDER BY json_data.key COLLATE "C"; + +-- Join two tables from two different foreign servers. +EXPLAIN (COSTS OFF) +SELECT d.c1, e.c1 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl3 e ON d.c1 = e.c1 ORDER BY 1; + +-- SEMI JOIN, not pushed down +EXPLAIN (COSTS OFF) +SELECT d.c2 + FROM f_test_tbl1 d WHERE EXISTS (SELECT 1 FROM f_test_tbl2 e WHERE d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; +SELECT d.c2 + FROM f_test_tbl1 d WHERE EXISTS (SELECT 1 FROM f_test_tbl2 e WHERE d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; + +-- ANTI JOIN, not pushed down +EXPLAIN (COSTS OFF) +SELECT d.c2 + FROM f_test_tbl1 d WHERE NOT EXISTS (SELECT 1 FROM f_test_tbl2 e WHERE d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; +SELECT d.c2 + FROM f_test_tbl1 d WHERE NOT EXISTS (SELECT 1 FROM f_test_tbl2 e WHERE d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; + +-- FULL OUTER JOIN, should not pushdown. 
+EXPLAIN (COSTS OFF) +SELECT d.c1, e.c1 + FROM f_test_tbl1 d FULL JOIN f_test_tbl2 e ON (d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; +SELECT d.c1, e.c1 + FROM f_test_tbl1 d FULL JOIN f_test_tbl2 e ON (d.c8 = e.c1) ORDER BY d.c2 LIMIT 10; + +-- CROSS JOIN can be pushed down +EXPLAIN (COSTS OFF) +SELECT e.c1, d.c2 + FROM f_test_tbl1 d CROSS JOIN f_test_tbl2 e ORDER BY e.c1, d.c2 LIMIT 10; +SELECT e.c1, d.c2 + FROM f_test_tbl1 d CROSS JOIN f_test_tbl2 e ORDER BY e.c1, d.c2 LIMIT 10; + +-- FDW-131: Limit and offset pushdown with join pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT round(2.2) OFFSET 2; +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT round(2.2) OFFSET 2; + +-- Limit as NULL, no LIMIT/OFFSET pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (t1.c8 = t2.c1) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT NULL OFFSET 1; +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (t1.c8 = t2.c1) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT NULL OFFSET 1; + +-- Limit as ALL, no LIMIT/OFFSET pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (t1.c8 = t2.c1) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT ALL OFFSET 1; +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (t1.c8 = t2.c1) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT ALL OFFSET 1; + +-- Offset as NULL, no LIMIT/OFFSET pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT 3 OFFSET NULL; +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT 3 OFFSET NULL; + +-- Limit with -ve value. Shouldn't pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT -2; +-- Should throw an error. +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT -2; + +-- Offset with -ve value. Shouldn't pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST OFFSET -1; +-- Should throw an error. +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST OFFSET -1; + +-- Limit/Offset with -ve value. Shouldn't pushdown. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT -3 OFFSET -1; +-- Should throw an error. +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT -3 OFFSET -1; + +-- Limit with expression evaluating to -ve value. +EXPLAIN (COSTS false, VERBOSE) +SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT (1 - (SELECT COUNT(*) FROM f_test_tbl1)); +-- Should throw an error. 
+SELECT t1.c1, t2.c1 + FROM f_test_tbl1 t1 JOIN f_test_tbl2 t2 ON (TRUE) ORDER BY t1.c1 ASC NULLS FIRST, t2.c1 ASC NULLS FIRST LIMIT (1 - (SELECT COUNT(*) FROM f_test_tbl1)); + +-- Test partition-wise join +SET enable_partitionwise_join TO on; + +-- Create the partition tables +CREATE TABLE fprt1 (_id NAME, c1 INTEGER, c2 INTEGER, c3 TEXT) PARTITION BY RANGE(c1); +CREATE FOREIGN TABLE ftprt1_p1 PARTITION OF fprt1 FOR VALUES FROM (1) TO (4) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test1'); +CREATE FOREIGN TABLE ftprt1_p2 PARTITION OF fprt1 FOR VALUES FROM (5) TO (8) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test2'); + +CREATE TABLE fprt2 (_id NAME, c1 INTEGER, c2 INTEGER, c3 TEXT) PARTITION BY RANGE(c2); +CREATE FOREIGN TABLE ftprt2_p1 PARTITION OF fprt2 FOR VALUES FROM (1) TO (4) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test3'); +CREATE FOREIGN TABLE ftprt2_p2 PARTITION OF fprt2 FOR VALUES FROM (5) TO (8) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test4'); + +-- Inner join two tables +-- Different explain plan on v10 as partition-wise join is not supported there. +SET enable_mergejoin TO OFF; +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.c1, t2.c2 + FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.c1 = t2.c2) ORDER BY 1,2; +SELECT t1.c1, t2.c2 + FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.c1 = t2.c2) ORDER BY 1,2; + +-- Inner join three tables +-- Different explain plan on v10 as partition-wise join is not supported there. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.c1, t2.c2, t3.c2 + FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.c1 = t2.c2) INNER JOIN fprt1 t3 ON (t3.c1 = t2.c2) ORDER BY 1,2; +SELECT t1.c1, t2.c2, t3.c2 + FROM fprt1 t1 INNER JOIN fprt2 t2 ON (t1.c1 = t2.c2) INNER JOIN fprt1 t3 ON (t3.c1 = t2.c2) ORDER BY 1,2; +RESET enable_mergejoin; + +-- Join with lateral reference +-- Different explain plan on v10 as partition-wise join is not supported there. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.c1, t1.c2 + FROM fprt1 t1, LATERAL (SELECT t2.c1, t2.c2 FROM fprt2 t2 + WHERE t1.c1 = t2.c2 AND t1.c2 = t2.c1) q WHERE t1.c1 % 2 = 0 ORDER BY 1,2; +SELECT t1.c1, t1.c2 + FROM fprt1 t1, LATERAL (SELECT t2.c1, t2.c2 FROM fprt2 t2 + WHERE t1.c1 = t2.c2 AND t1.c2 = t2.c1) q WHERE t1.c1 % 2 = 0 ORDER BY 1,2; + +-- With PHVs, partitionwise join selected but no join pushdown +-- Table alias in foreign scan is different for v12, v11 and v10. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.c1, t1.phv, t2.c2, t2.phv + FROM (SELECT 't1_phv' phv, * FROM fprt1 WHERE c1 % 2 = 0) t1 LEFT JOIN + (SELECT 't2_phv' phv, * FROM fprt2 WHERE c2 % 2 = 0) t2 ON (t1.c1 = t2.c2) + ORDER BY t1.c1, t2.c2; +SELECT t1.c1, t1.phv, t2.c2, t2.phv + FROM (SELECT 't1_phv' phv, * FROM fprt1 WHERE c1 % 2 = 0) t1 LEFT JOIN + (SELECT 't2_phv' phv, * FROM fprt2 WHERE c2 % 2 = 0) t2 ON (t1.c1 = t2.c2) + ORDER BY t1.c1, t2.c2; +RESET enable_partitionwise_join; + +-- FDW-445: Support enable_join_pushdown option at server level and table level. +-- Check only boolean values are accepted. +ALTER SERVER mongo_server OPTIONS (ADD enable_join_pushdown 'abc11'); + +-- Test the option at server level. 
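+-- With enable_join_pushdown 'false' on the server the join should stay
+-- local; setting it back to 'true' should push the join down again.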
+ALTER SERVER mongo_server OPTIONS (ADD enable_join_pushdown 'false'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; +ALTER SERVER mongo_server OPTIONS (SET enable_join_pushdown 'true'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + +-- Test the option with outer rel. +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (ADD enable_join_pushdown 'false'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'true'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + +-- Test the option with inner rel. +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (ADD enable_join_pushdown 'false'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'true'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + +-- Test that setting option at table level does not affect the setting at +-- server level. +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'false'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'false'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3; + +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT t1.c1, t2.c2 + FROM f_test_tbl3 t1 JOIN f_test_tbl4 t2 ON (t1.c1 = t2.c8) ORDER BY 1, 2; + +-- FDW-558: Test mongo_fdw.enable_join_pushdown GUC. +-- Negative testing for GUC value. +SET mongo_fdw.enable_join_pushdown to 'abc'; +-- Check default value. Should be ON. +SHOW mongo_fdw.enable_join_pushdown; +-- Join pushdown should happen as the GUC enable_join_pushdown is true. +ALTER SERVER mongo_server OPTIONS (SET enable_join_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'true'); +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON (d.c1 = e.c8) ORDER BY 1, 2; +--Disable the GUC enable_join_pushdown. +SET mongo_fdw.enable_join_pushdown to false; +-- Join pushdown shouldn't happen as the GUC enable_join_pushdown is false. +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON (d.c1 = e.c8) ORDER BY 1, 2; +-- Enable the GUC and table level option is set to false, should not pushdown. +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'false'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'false'); +SET mongo_fdw.enable_join_pushdown to true; +EXPLAIN (COSTS FALSE, VERBOSE) +SELECT d.c1, e.c8 + FROM f_test_tbl2 d JOIN f_test_tbl1 e ON (d.c1 = e.c8) ORDER BY 1, 2; + +-- FDW-589: Test enable_order_by_pushdown option at server and table level. 
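+-- Re-enable join pushdown and turn on ORDER BY pushdown at the GUC, server,
+-- and table level before checking the plans.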
+SET mongo_fdw.enable_join_pushdown to true; +SET mongo_fdw.enable_order_by_pushdown to true; +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'true'); +ALTER SERVER mongo_server OPTIONS (ADD enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (ADD enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (ADD enable_order_by_pushdown 'true'); +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; +-- One table level option is OFF. Shouldn't pushdown ORDER BY. +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_order_by_pushdown 'false'); +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; +-- Test that setting option at table level does not affect the setting at +-- server level. +ALTER SERVER mongo_server OPTIONS (SET enable_order_by_pushdown 'false'); +ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_order_by_pushdown 'true'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_order_by_pushdown 'true'); +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; +ALTER SERVER mongo_server OPTIONS (SET enable_order_by_pushdown 'true'); +-- When enable_join_pushdown option is disabled. Shouldn't pushdown join and +-- hence, ORDER BY too. 
+ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_join_pushdown 'false'); +ALTER FOREIGN TABLE f_test_tbl2 OPTIONS (SET enable_join_pushdown 'false'); +EXPLAIN (COSTS OFF) +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; +SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8 + FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON (d.c1 = e.c8 AND e.c4 > d.c1 AND e.c2 < d.c3) ORDER BY 1 ASC NULLS FIRST, 3 ASC NULLS FIRST; + +DELETE FROM f_test_tbl1 WHERE c8 IS NULL; +DELETE FROM f_test_tbl1 WHERE c8 = 60; +DELETE FROM f_test_tbl2 WHERE c1 IS NULL; +DELETE FROM f_test_tbl2 WHERE c1 = 50; +DROP FOREIGN TABLE f_test_tbl1; +DROP FOREIGN TABLE f_test_tbl2; +DROP FOREIGN TABLE f_test_tbl3; +DROP FOREIGN TABLE f_test_tbl4; +DROP FOREIGN TABLE test_text; +DROP FOREIGN TABLE test_varchar; +DROP TABLE l_test_tbl1; +DROP FOREIGN TABLE ftprt1_p1; +DROP FOREIGN TABLE ftprt1_p2; +DROP FOREIGN TABLE ftprt2_p1; +DROP FOREIGN TABLE ftprt2_p2; +DROP TABLE IF EXISTS fprt1; +DROP TABLE IF EXISTS fprt2; +DROP USER MAPPING FOR public SERVER mongo_server1; +DROP SERVER mongo_server1; +DROP USER MAPPING FOR public SERVER mongo_server; +DROP SERVER mongo_server; +DROP EXTENSION mongo_fdw; diff --git a/sql/limit_offset_pushdown.sql b/sql/limit_offset_pushdown.sql new file mode 100644 index 0000000..af357b7 --- /dev/null +++ b/sql/limit_offset_pushdown.sql @@ -0,0 +1,122 @@ +\set MONGO_HOST `echo \'"$MONGO_HOST"\'` +\set MONGO_PORT `echo \'"$MONGO_PORT"\'` +\set MONGO_USER_NAME `echo \'"$MONGO_USER_NAME"\'` +\set MONGO_PASS `echo \'"$MONGO_PWD"\'` + +-- Before running this file user must create database mongo_fdw_regress, +-- mongo_fdw_regress1 and mongo_fdw_regress2 databases on MongoDB with all +-- permission for MONGO_USER_NAME user with MONGO_PASS password and ran +-- mongodb_init.sh file to load collections. + +\c contrib_regression +CREATE EXTENSION IF NOT EXISTS mongo_fdw; +CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw + OPTIONS (address :MONGO_HOST, port :MONGO_PORT); +CREATE USER MAPPING FOR public SERVER mongo_server; + +CREATE FOREIGN TABLE fdw131_t1 (_id NAME, c1 INTEGER, c2 TEXT, c3 TEXT) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl2'); + +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1; + +-- LIMIT/OFFSET pushdown. +-- Limit with Offset should get pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT 2 OFFSET 2; +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT 2 OFFSET 2; + +-- If ORDER BY is not pushable then limit/Offset shouldn't get pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 LIMIT 2 OFFSET 2; +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 LIMIT 2 OFFSET 2; + +-- With ORDER BY pushdown disabled, limit shouldn't get pushdown. +SET mongo_fdw.enable_order_by_pushdown TO OFF; +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT 2 OFFSET 2; +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT 2 OFFSET 2; +SET mongo_fdw.enable_order_by_pushdown TO ON; + +-- Only limit should get pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 2 DESC NULLS LAST LIMIT 3; +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 2 DESC NULLS LAST LIMIT 3; + +-- Expression in limit clause. Should get pushdown. 
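+-- round(3.2) should be folded to a constant at planning time, which is
+-- presumably why this LIMIT expression remains pushable.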
+EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 3 DESC NULLS LAST LIMIT round(3.2) OFFSET 2; +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 3 DESC NULLS LAST LIMIT round(3.2) OFFSET 2; + +-- Only Offset without limit should get pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 3 ASC NULLS FIRST OFFSET 2; +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 3 ASC NULLS FIRST OFFSET 2; + +-- Limit ALL +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT ALL; +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT ALL; + +-- Limit ALL with OFFSET +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT ALL OFFSET 1; +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT ALL OFFSET 1; + +-- Limit NULL +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 3 ASC NULLS FIRST LIMIT NULL; +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 3 ASC NULLS FIRST LIMIT NULL; +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 3 ASC NULLS FIRST LIMIT NULL OFFSET 2; +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 3 ASC NULLS FIRST LIMIT NULL OFFSET 2; + +-- Limit 0 and Offset 0 +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT 0; +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT 0; +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT 0 OFFSET 0; +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT 0 OFFSET 0; + +-- Offset NULL. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 2 ASC NULLS FIRST LIMIT 5 OFFSET NULL; +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 2 ASC NULLS FIRST LIMIT 5 OFFSET NULL; + +-- Limit with placeholder. Shouldn't get pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 2 LIMIT (SELECT COUNT(*) FROM fdw131_t1); +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 2 LIMIT (SELECT COUNT(*) FROM fdw131_t1); + +-- Limit with expression, shouldn't pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT (10 - (SELECT COUNT(*) FROM fdw131_t1)); +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT (10 - (SELECT COUNT(*) FROM fdw131_t1)); + +-- Limit with -ve value. Shouldn't pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT -1; +-- Should throw an error. +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT -1; + +-- Offset with -ve value. Shouldn't pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST OFFSET -2; +-- Should throw an error. +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST OFFSET -2; + +-- Limit/Offset with -ve value. Shouldn't pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT -1 OFFSET -2; +-- Should throw an error. +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT -1 OFFSET -2; + +-- Limit with expression evaluating to -ve value. 
+EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT (1 - (SELECT COUNT(*) FROM fdw131_t1)); +SELECT c1, c2, c3 FROM fdw131_t1 ORDER BY 1 ASC NULLS FIRST LIMIT (1 - (SELECT COUNT(*) FROM fdw131_t1)); + +DROP FOREIGN TABLE fdw131_t1; +DROP USER MAPPING FOR public SERVER mongo_server; +DROP SERVER mongo_server; +DROP EXTENSION mongo_fdw; diff --git a/sql/pushdown.sql b/sql/pushdown.sql new file mode 100644 index 0000000..6bf665b --- /dev/null +++ b/sql/pushdown.sql @@ -0,0 +1,383 @@ +\set MONGO_HOST `echo \'"$MONGO_HOST"\'` +\set MONGO_PORT `echo \'"$MONGO_PORT"\'` +\set MONGO_USER_NAME `echo \'"$MONGO_USER_NAME"\'` +\set MONGO_PASS `echo \'"$MONGO_PWD"\'` + +-- Before running this file user must create database mongo_fdw_regress on +-- MongoDB with all permission for MONGO_USER_NAME user with MONGO_PASS +-- password and ran mongodb_init.sh file to load collections. + +\c contrib_regression +CREATE EXTENSION IF NOT EXISTS mongo_fdw; +CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw + OPTIONS (address :MONGO_HOST, port :MONGO_PORT); +CREATE USER MAPPING FOR public SERVER mongo_server; + +-- Create foreign tables +CREATE FOREIGN TABLE f_mongo_test (_id name, a int, b varchar) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'mongo_test'); +CREATE FOREIGN TABLE f_test_tbl1 (_id name, c1 INTEGER, c2 VARCHAR(10), c3 CHAR(9), c4 INTEGER, c5 pg_catalog.Date, c6 DECIMAL, c7 INTEGER, c8 INTEGER) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl1'); +CREATE FOREIGN TABLE f_test_tbl2 (_id name, c1 INTEGER, c2 VARCHAR(14), c3 VARCHAR(13)) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl2'); +CREATE FOREIGN TABLE f_test_tbl3 (_id name, name TEXT, marks FLOAT ARRAY, pass BOOLEAN) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl3'); + +-- Inserts some values in mongo_test collection. 
+INSERT INTO f_mongo_test VALUES ('0', 1, 'One'); +INSERT INTO f_mongo_test VALUES ('0', 2, 'Two'); +INSERT INTO f_mongo_test VALUES ('0', 3, 'Three'); + +SET datestyle TO ISO; + +-- Sample data +SELECT c1, c2, c3, c4, c5, c6, c7, c8 FROM f_test_tbl1 ORDER BY c1; + +-- WHERE clause pushdown +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c6 AS "salary", c8 FROM f_test_tbl1 e + WHERE c6 IN (1600, 2450) + ORDER BY c1 ASC NULLS FIRST; +SELECT c1, c2, c6 AS "salary", c8 FROM f_test_tbl1 e + WHERE c6 IN (1600, 2450) + ORDER BY c1; + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c6 FROM f_test_tbl1 e + WHERE c6 > 3000 + ORDER BY c1 ASC NULLS FIRST; +SELECT c1, c2, c6 FROM f_test_tbl1 e + WHERE c6 > 3000 + ORDER BY c1 ASC NULLS FIRST; + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c6, c8 FROM f_test_tbl1 e + WHERE c6 = 1500 + ORDER BY c1 DESC NULLS LAST; +SELECT c1, c2, c6, c8 FROM f_test_tbl1 e + WHERE c6 = 1500 + ORDER BY c1 DESC NULLS LAST; + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c6, c8 FROM f_test_tbl1 e + WHERE c6 BETWEEN 1000 AND 4000 + ORDER BY c1 ASC NULLS FIRST; +SELECT c1, c2, c6, c8 FROM f_test_tbl1 e + WHERE c6 BETWEEN 1000 AND 4000 + ORDER BY c1 ASC NULLS FIRST; + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c4, c6, c8 FROM f_test_tbl1 e + WHERE c4 IS NOT NULL + ORDER BY c1; +SELECT c1, c2, c4, c6, c8 FROM f_test_tbl1 e + WHERE c4 IS NOT NULL + ORDER BY c1; + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c5 FROM f_test_tbl1 e + WHERE c5 <= '1980-12-17' + ORDER BY c1 ASC NULLS FIRST; +SELECT c1, c2, c5 FROM f_test_tbl1 e + WHERE c5 <= '1980-12-17' + ORDER BY c1 ASC NULLS FIRST; + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c6, c8 FROM f_test_tbl1 e + WHERE c2 IN ('EMP6', 'EMP12', 'EMP5') + ORDER BY c1; +SELECT c1, c2, c6, c8 FROM f_test_tbl1 e + WHERE c2 IN ('EMP6', 'EMP12', 'EMP5') + ORDER BY c1; + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c6, c8 FROM f_test_tbl1 e + WHERE c3 LIKE 'SALESMAN' + ORDER BY c1; +SELECT c1, c2, c6, c8 FROM f_test_tbl1 e + WHERE c3 LIKE 'SALESMAN' + ORDER BY c1; + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c6, c8 FROM f_test_tbl1 e + WHERE c3 LIKE 'MANA%' + ORDER BY c1; +SELECT c1, c2, c6, c8 FROM f_test_tbl1 e + WHERE c3 LIKE 'MANA%' + ORDER BY c1; + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT a FROM f_mongo_test + WHERE a%2 = 1 + ORDER BY a ASC NULLS FIRST; +SELECT a FROM f_mongo_test + WHERE a%2 = 1 + ORDER BY a ASC NULLS FIRST; + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT a, b FROM f_mongo_test + WHERE a >= 1 AND b LIKE '%O%' + ORDER BY a; +SELECT a, b FROM f_mongo_test + WHERE a >= 1 AND b LIKE '%O%' + ORDER BY a; + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2, c5 FROM f_test_tbl1 e + WHERE c5 <= '1980-12-17' AND c2 IN ('EMP1', 'EMP5', 'EMP10') AND c1 = 100 + ORDER BY c1; +SELECT c1, c2, c5 FROM f_test_tbl1 e + WHERE c5 <= '1980-12-17' AND c2 IN ('EMP1', 'EMP5', 'EMP10') AND c1 = 100 + ORDER BY c1; + +-- The ORDER BY clause shouldn't push-down due to explicit COLLATE. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2 FROM f_test_tbl1 + WHERE c2 = 'EMP10' + ORDER BY c2 COLLATE "en_US" DESC NULLS LAST; +SELECT c1, c2 FROM f_test_tbl1 + WHERE c2 = 'EMP10' + ORDER BY c2 COLLATE "en_US" DESC NULLS LAST; + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2 FROM f_test_tbl1 + WHERE c2 < 'EMP10' + ORDER BY c2 DESC NULLS LAST; +SELECT c1, c2 FROM f_test_tbl1 + WHERE c2 < 'EMP10' + ORDER BY c2 DESC NULLS LAST; + +-- Should push down if two columns of same table are +-- involved in single WHERE clause operator expression. 
+EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c4 FROM f_test_tbl1 + WHERE c1 > c4 + ORDER BY c1 ASC NULLS FIRST; +SELECT c1, c4 FROM f_test_tbl1 + WHERE c1 > c4 + ORDER BY c1 ASC NULLS FIRST; + +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c4, c7, c8 FROM f_test_tbl1 + WHERE c1 < c4 AND c7 < c8 + ORDER BY c1; +SELECT c1, c4, c7, c8 FROM f_test_tbl1 + WHERE c1 < c4 AND c7 < c8 + ORDER BY c1; + +-- With ORDER BY pushdown disabled. +SET mongo_fdw.enable_order_by_pushdown TO OFF; +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c4 FROM f_test_tbl1 + WHERE c1 > c4 + ORDER BY c1 ASC NULLS FIRST; +SELECT c1, c4 FROM f_test_tbl1 + WHERE c1 > c4 + ORDER BY c1 ASC NULLS FIRST; +SET mongo_fdw.enable_order_by_pushdown TO ON; + +-- Nested operator expression in WHERE clause. Should pushdown. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2 FROM f_test_tbl1 + WHERE (c1 > 1000) > FALSE + ORDER BY c1 ASC NULLS FIRST; +SELECT c1, c2 FROM f_test_tbl1 + WHERE (c1 > 1000) > FALSE + ORDER BY c1 ASC NULLS FIRST; +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1, c2 FROM f_test_tbl1 + WHERE (c1 > 1000) > 0::BOOLEAN + ORDER BY c1 ASC NULLS FIRST; +SELECT c1, c2 FROM f_test_tbl1 + WHERE (c1 > 1000) > 0::BOOLEAN + ORDER BY c1 ASC NULLS FIRST; + +-- Shouldn't push down operators where the constant is an array. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT name, marks FROM f_test_tbl3 + WHERE marks = ARRAY[23::FLOAT, 24::FLOAT] + ORDER BY name; +SELECT name, marks FROM f_test_tbl3 + WHERE marks = ARRAY[23::FLOAT, 24::FLOAT] + ORDER BY name; + +-- Pushdown in prepared statement. +PREPARE pre_stmt_f_mongo_test(int) AS + SELECT b FROM f_mongo_test WHERE a = $1 ORDER BY b; +EXPLAIN (VERBOSE, COSTS FALSE) +EXECUTE pre_stmt_f_mongo_test(1); +EXECUTE pre_stmt_f_mongo_test(1); +EXPLAIN (VERBOSE, COSTS FALSE) +EXECUTE pre_stmt_f_mongo_test(2); +EXECUTE pre_stmt_f_mongo_test(2); + +-- FDW-297: Only operator expressions should be pushed down in WHERE clause. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT name, marks FROM f_test_tbl3 + WHERE pass = true + ORDER BY name DESC NULLS LAST; +SELECT name, marks FROM f_test_tbl3 + WHERE pass = true + ORDER BY name DESC NULLS LAST; + +-- INSERT NULL values and check behaviour. +INSERT INTO f_test_tbl2 VALUES ('0', NULL, NULL, NULL); + +-- Should pushdown and shouldn't result row with NULL VALUES. +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1 FROM f_test_tbl2 WHERE c1 < 1; +SELECT c1 FROM f_test_tbl2 WHERE c1 < 1; +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1 FROM f_test_tbl2 WHERE c2 = c3; +SELECT c1 FROM f_test_tbl2 WHERE c2 = c3; + +-- Test with IS NULL, shouldn't push down +EXPLAIN (VERBOSE, COSTS FALSE) +SELECT c1 FROM f_test_tbl2 WHERE c2 IS NULL; +SELECT c1 FROM f_test_tbl2 WHERE c2 IS NULL; + +-- FDW-134: Test with number of columns more than 32 +CREATE FOREIGN TABLE f_test_large (_id int, + a01 int, a02 int, a03 int, a04 int, a05 int, a06 int, a07 int, a08 int, a09 int, a10 int, + a11 int, a12 int, a13 int, a14 int, a15 int, a16 int, a17 int, a18 int, a19 int, a20 int, + a21 int, a22 int, a23 int, a24 int, a25 int, a26 int, a27 int, a28 int, a29 int, a30 int, + a31 int, a32 int, a33 int, a34 int, a35 int) + SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'mongo_test_large'); + +-- Shouldn't pushdown ORDERBY clause due to exceeded number of path keys limit. 
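+-- The first query below sorts on 35 keys; the second trims the list to 32,
+-- which appears to stay within the pushdown limit.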
+EXPLAIN (VERBOSE, COSTS FALSE)
+SELECT _id, a01, a31, a32, a33, a34, a35 FROM f_test_large ORDER BY
+ a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST,
+ a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST,
+ a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST,
+ a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST,
+ a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST,
+ a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 ASC NULLS FIRST,
+ a31 ASC NULLS FIRST, a32 ASC NULLS FIRST, a33 ASC NULLS FIRST, a34 DESC NULLS LAST, a35 ASC NULLS FIRST;
+SELECT _id, a01, a31, a32, a33, a34, a35 FROM f_test_large ORDER BY
+ a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST,
+ a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST,
+ a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST,
+ a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST,
+ a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST,
+ a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 ASC NULLS FIRST,
+ a31 ASC NULLS FIRST, a32 ASC NULLS FIRST, a33 ASC NULLS FIRST, a34 DESC NULLS LAST, a35 ASC NULLS FIRST;
+
+-- Should push down the ORDER BY clause because the number of pathkeys is within the limit.
+EXPLAIN (VERBOSE, COSTS FALSE)
+SELECT _id, a01, a31, a32, a33, a34, a35 FROM f_test_large ORDER BY
+ a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST,
+ a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST,
+ a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST,
+ a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST,
+ a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST,
+ a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 ASC NULLS FIRST,
+ a31 ASC NULLS FIRST, a32 ASC NULLS FIRST;
+SELECT _id, a01, a31, a32, a33, a34, a35 FROM f_test_large ORDER BY
+ a01 ASC NULLS FIRST, a02 ASC NULLS FIRST, a03 ASC NULLS FIRST, a04 ASC NULLS FIRST, a05 ASC NULLS FIRST,
+ a06 ASC NULLS FIRST, a07 ASC NULLS FIRST, a08 ASC NULLS FIRST, a09 ASC NULLS FIRST, a10 ASC NULLS FIRST,
+ a11 ASC NULLS FIRST, a12 ASC NULLS FIRST, a13 ASC NULLS FIRST, a14 ASC NULLS FIRST, a15 ASC NULLS FIRST,
+ a16 ASC NULLS FIRST, a17 ASC NULLS FIRST, a18 ASC NULLS FIRST, a19 ASC NULLS FIRST, a20 ASC NULLS FIRST,
+ a21 ASC NULLS FIRST, a22 ASC NULLS FIRST, a23 ASC NULLS FIRST, a24 ASC NULLS FIRST, a25 ASC NULLS FIRST,
+ a26 ASC NULLS FIRST, a27 ASC NULLS FIRST, a28 ASC NULLS FIRST, a29 ASC NULLS FIRST, a30 ASC NULLS FIRST,
+ a31 ASC NULLS FIRST, a32 ASC NULLS FIRST;
+
+-- FDW-564: Test ORDER BY with user-defined operators. Create the operator
+-- family required for the test.
+CREATE OPERATOR PUBLIC.<^ (
+ LEFTARG = INT4,
+ RIGHTARG = INT4,
+ PROCEDURE = INT4EQ
+);
+
+CREATE OPERATOR PUBLIC.=^ (
+ LEFTARG = INT4,
+ RIGHTARG = INT4,
+ PROCEDURE = INT4LT
+);
+
+CREATE OPERATOR PUBLIC.>^ (
+ LEFTARG = INT4,
+ RIGHTARG = INT4,
+ PROCEDURE = INT4GT
+);
+
+CREATE OPERATOR FAMILY my_op_family USING btree;
+
+CREATE FUNCTION MY_OP_CMP(A INT, B INT) RETURNS INT AS
+ $$ BEGIN RETURN BTINT4CMP(A, B); END $$ LANGUAGE PLPGSQL;
+
+CREATE OPERATOR CLASS my_op_class FOR TYPE INT USING btree FAMILY my_op_family AS
+ OPERATOR 1 PUBLIC.<^,
+ OPERATOR 3 PUBLIC.=^,
+ OPERATOR 5 PUBLIC.>^,
+ FUNCTION 1 my_op_cmp(INT, INT);
+
+-- FDW-564: User-defined operators are not pushed down.
+EXPLAIN (COSTS FALSE, VERBOSE)
+SELECT * FROM f_mongo_test ORDER BY a USING OPERATOR(public.<^);
+EXPLAIN (COSTS FALSE, VERBOSE)
+SELECT MIN(a) FROM f_mongo_test GROUP BY b ORDER BY 1 USING OPERATOR(public.<^);
+
+-- FDW-589: Test the enable_order_by_pushdown option at the server and table level.
+-- Test the option at the server level.
+-- Check that only boolean values are accepted.
+ALTER SERVER mongo_server OPTIONS (ADD enable_order_by_pushdown 'abc11');
+ALTER SERVER mongo_server OPTIONS (ADD enable_order_by_pushdown 'false');
+EXPLAIN (VERBOSE, COSTS FALSE)
+SELECT c1, c4 FROM f_test_tbl1
+ WHERE c1 > c4
+ ORDER BY c1 ASC NULLS FIRST;
+ALTER SERVER mongo_server OPTIONS (SET enable_order_by_pushdown 'true');
+EXPLAIN (VERBOSE, COSTS FALSE)
+SELECT c1, c4 FROM f_test_tbl1
+ WHERE c1 > c4
+ ORDER BY c1 ASC NULLS FIRST;
+-- Test that setting the option at the table level does not affect the
+-- setting at the server level.
+ALTER SERVER mongo_server OPTIONS (SET enable_order_by_pushdown 'false');
+-- Test the option at the table level.
+ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (ADD enable_order_by_pushdown 'true');
+EXPLAIN (VERBOSE, COSTS FALSE)
+SELECT c1, c4 FROM f_test_tbl1
+ WHERE c1 > c4
+ ORDER BY c1 ASC NULLS FIRST;
+SELECT c1, c4 FROM f_test_tbl1
+ WHERE c1 > c4
+ ORDER BY c1 ASC NULLS FIRST;
+ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_order_by_pushdown 'false');
+EXPLAIN (VERBOSE, COSTS FALSE)
+SELECT c1, c4 FROM f_test_tbl1
+ WHERE c1 > c4
+ ORDER BY c1 ASC NULLS FIRST;
+SELECT c1, c4 FROM f_test_tbl1
+ WHERE c1 > c4
+ ORDER BY c1 ASC NULLS FIRST;
+ALTER SERVER mongo_server OPTIONS (SET enable_order_by_pushdown 'true');
+ALTER FOREIGN TABLE f_test_tbl1 OPTIONS (SET enable_order_by_pushdown 'true');
+
+-- FDW-631: Test pushdown of a boolean expression.
+EXPLAIN (VERBOSE, COSTS FALSE)
+SELECT name, pass FROM f_test_tbl3 WHERE pass = false ORDER BY name;
+SELECT name, pass FROM f_test_tbl3 WHERE pass = false ORDER BY name;
+EXPLAIN (VERBOSE, COSTS FALSE)
+SELECT name, pass FROM f_test_tbl3 WHERE pass = true ORDER BY name;
+SELECT name, pass FROM f_test_tbl3 WHERE pass = true ORDER BY name;
+
+-- Cleanup
+DELETE FROM f_mongo_test WHERE a != 0;
+DELETE FROM f_test_tbl2 WHERE c1 IS NULL;
+DROP FOREIGN TABLE f_mongo_test;
+DROP FOREIGN TABLE f_test_tbl1;
+DROP FOREIGN TABLE f_test_tbl2;
+DROP FOREIGN TABLE f_test_tbl3;
+DROP FOREIGN TABLE f_test_large;
+DROP OPERATOR CLASS my_op_class USING btree;
+DROP FUNCTION my_op_cmp(a INT, b INT);
+DROP OPERATOR FAMILY my_op_family USING btree;
+DROP OPERATOR public.>^(INT, INT);
+DROP OPERATOR public.=^(INT, INT);
+DROP OPERATOR public.<^(INT, INT);
+DROP USER MAPPING FOR public SERVER mongo_server;
+DROP SERVER mongo_server;
+DROP EXTENSION mongo_fdw;
diff --git a/sql/select.sql b/sql/select.sql
new file mode 100644
index 0000000..13c2a7c
--- /dev/null
+++ b/sql/select.sql
@@ -0,0 +1,399 @@
+\set MONGO_HOST `echo \'"$MONGO_HOST"\'`
+\set MONGO_PORT `echo \'"$MONGO_PORT"\'`
+\set MONGO_USER_NAME `echo \'"$MONGO_USER_NAME"\'`
+\set MONGO_PASS `echo \'"$MONGO_PWD"\'`
+
+-- Before running this file, the user must create the database mongo_fdw_regress
+-- on MongoDB with all permissions for the MONGO_USER_NAME user with the
+-- MONGO_PASS password, and run the mongodb_init.sh file to load the collections.
+
+\c contrib_regression
+CREATE EXTENSION IF NOT EXISTS mongo_fdw;
+CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw
+ OPTIONS (address :MONGO_HOST, port :MONGO_PORT);
+CREATE USER MAPPING FOR public SERVER mongo_server;
+
+-- Check version
+SELECT mongo_fdw_version();
+
+-- Create foreign tables
+CREATE FOREIGN TABLE f_mongo_test (_id name, a int, b text)
+ SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'mongo_test');
+CREATE FOREIGN TABLE f_test_tbl1 (_id NAME, c1 INTEGER, c2 VARCHAR(10), c3 CHAR(9), c4 INTEGER, c5 pg_catalog.Date, c6 DECIMAL, c7 INTEGER, c8 INTEGER)
+ SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl1');
+CREATE FOREIGN TABLE f_test_tbl2 (_id NAME, c1 INTEGER, c2 VARCHAR(14), c3 VARCHAR(13))
+ SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl2');
+CREATE FOREIGN TABLE countries (_id NAME, name VARCHAR, population INTEGER, capital VARCHAR, hdi FLOAT)
+ SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'countries');
+CREATE FOREIGN TABLE country_elections (_id NAME, "lastElections.type" VARCHAR, "lastElections.date" pg_catalog.TIMESTAMP)
+ SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'countries');
+CREATE FOREIGN TABLE main_exports (_id NAME, "mainExports" TEXT[] )
+ SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'countries');
+CREATE FOREIGN TABLE test_json ( __doc json)
+ SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'warehouse');
+CREATE FOREIGN TABLE test_jsonb ( __doc jsonb)
+ SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'warehouse');
+CREATE FOREIGN TABLE test_text ( __doc text)
+ SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'warehouse');
+CREATE FOREIGN TABLE test_varchar ( __doc varchar)
+ SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'warehouse');
+CREATE FOREIGN TABLE f_test5 (_id NAME, c1 NUMERIC)
+ SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test5');
+CREATE FOREIGN TABLE f_test_tbl4 (_id NAME, a NUMERIC(12, 2))
+ SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl4');
+CREATE FOREIGN TABLE f_test_tbl5 (_id NAME, a BOOLEAN)
+ SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl4');
+CREATE FOREIGN TABLE f_test_tbl6 (_id NAME, a INTEGER)
+ SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl5');
+CREATE FOREIGN TABLE f_test_tbl7 (_id NAME, a INTEGER)
+ SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test_tbl4');
+CREATE FOREIGN TABLE testlog (_id NAME, log VARCHAR, "logMeta.logMac" VARCHAR, "logMeta.nestMore.level" INTEGER)
+ SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'testlog');
+CREATE FOREIGN TABLE testdevice (_id NAME, name VARCHAR, mac VARCHAR, level INTEGER)
+ SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'testdevice');
+
+SET datestyle TO ISO;
+
+-- Retrieve data from the foreign table using a SELECT statement.
+SELECT c1, c2, c3, c4, c5, c6, c7, c8 FROM f_test_tbl1
+ ORDER BY c1 DESC, c8;
+SELECT DISTINCT c8 FROM f_test_tbl1 ORDER BY 1;
+SELECT c2 AS "Employee Name" FROM f_test_tbl1 ORDER BY c2 COLLATE "C";
+SELECT c8, c6, c7 FROM f_test_tbl1 ORDER BY 1, 2, 3;
+SELECT c1, c2, c3, c4, c5, c6, c7, c8 FROM f_test_tbl1
+ WHERE c1 = 100 ORDER BY 1;
+SELECT c1, c2, c3, c4, c5, c6, c7, c8 FROM f_test_tbl1
+ WHERE c1 = 100 OR c1 = 700 ORDER BY 1;
+SELECT c1, c2, c3 FROM f_test_tbl1 WHERE c3 like 'SALESMAN' ORDER BY 1;
+SELECT c1, c2, c3 FROM f_test_tbl1 WHERE c1 IN (100, 700) ORDER BY 1;
+SELECT c1, c2, c3 FROM f_test_tbl1 WHERE c1 NOT IN (100, 700) ORDER BY 1 LIMIT 5;
+SELECT c1, c2, c8 FROM f_test_tbl1 WHERE c8 BETWEEN 10 AND 20 ORDER BY 1;
+SELECT c1, c2, c6 FROM f_test_tbl1 ORDER BY 1 OFFSET 5;
+
+-- Retrieve data from the foreign table using the GROUP BY clause.
+SELECT c8 "Department", COUNT(c1) "Total Employees" FROM f_test_tbl1
+ GROUP BY c8 ORDER BY c8;
+SELECT c8, SUM(c6) FROM f_test_tbl1
+ GROUP BY c8 HAVING c8 IN (10, 30) ORDER BY c8;
+SELECT c8, SUM(c6) FROM f_test_tbl1
+ GROUP BY c8 HAVING SUM(c6) > 9400 ORDER BY c8;
+
+-- Retrieve data from the foreign table using sub-queries.
+SELECT c1, c2, c6 FROM f_test_tbl1
+ WHERE c8 <> ALL (SELECT c1 FROM f_test_tbl2 WHERE c1 IN (10, 30, 40))
+ ORDER BY c1;
+SELECT c1, c2, c3 FROM f_test_tbl2
+ WHERE EXISTS (SELECT 1 FROM f_test_tbl1 WHERE f_test_tbl2.c1 = f_test_tbl1.c8)
+ ORDER BY 1, 2;
+SELECT c1, c2, c3, c4, c5, c6, c7, c8 FROM f_test_tbl1
+ WHERE c8 NOT IN (SELECT c1 FROM f_test_tbl2) ORDER BY c1;
+
+-- Retrieve data from the foreign table using the UNION operator.
+SELECT c1, c2 FROM f_test_tbl2 UNION
+SELECT c1, c2 FROM f_test_tbl1 ORDER BY c1;
+
+SELECT c1, c2 FROM f_test_tbl2 UNION ALL
+SELECT c1, c2 FROM f_test_tbl1 ORDER BY c1;
+
+-- Retrieve data from the foreign table using the INTERSECT operator.
+SELECT c1, c2 FROM f_test_tbl1 WHERE c1 >= 800 INTERSECT
+SELECT c1, c2 FROM f_test_tbl1 WHERE c1 >= 400 ORDER BY c1;
+
+SELECT c1, c2 FROM f_test_tbl1 WHERE c1 >= 800 INTERSECT ALL
+SELECT c1, c2 FROM f_test_tbl1 WHERE c1 >= 400 ORDER BY c1;
+
+-- Retrieve data from the foreign table using the EXCEPT operator.
+SELECT c1, c2 FROM f_test_tbl1 EXCEPT
+SELECT c1, c2 FROM f_test_tbl1 WHERE c1 > 900 ORDER BY c1;
+
+SELECT c1, c2 FROM f_test_tbl1 EXCEPT ALL
+SELECT c1, c2 FROM f_test_tbl1 WHERE c1 > 900 ORDER BY c1;
+
+-- Retrieve data from the foreign table using a CTE (WITH clause).
+WITH
+ with_qry AS (SELECT c1, c2, c3 FROM f_test_tbl2)
+SELECT e.c2, e.c6, w.c1, w.c2 FROM f_test_tbl1 e, with_qry w
+ WHERE e.c8 = w.c1 ORDER BY e.c8, e.c2 COLLATE "C";
+
+WITH
+ test_tbl2_costs AS (SELECT d.c2, SUM(c6) test_tbl2_total FROM f_test_tbl1 e, f_test_tbl2 d
+ WHERE e.c8 = d.c1 GROUP BY 1),
+ avg_cost AS (SELECT SUM(test_tbl2_total)/COUNT(*) avg FROM test_tbl2_costs)
+SELECT * FROM test_tbl2_costs
+ WHERE test_tbl2_total > (SELECT avg FROM avg_cost) ORDER BY c2 COLLATE "C";
+
+-- Retrieve data from the foreign table using window functions.
+SELECT c8, c1, c6, AVG(c6) OVER (PARTITION BY c8) FROM f_test_tbl1
+ ORDER BY c8, c1;
+SELECT c8, c1, c6, COUNT(c6) OVER (PARTITION BY c8) FROM f_test_tbl1
+ WHERE c8 IN (10, 30, 40, 50, 60, 70) ORDER BY c8, c1;
+SELECT c8, c1, c6, SUM(c6) OVER (PARTITION BY c8) FROM f_test_tbl1
+ ORDER BY c8, c1;
+
+-- Views
+CREATE VIEW smpl_vw AS
+ SELECT c1, c2, c3, c4, c5, c6, c7, c8 FROM f_test_tbl1 ORDER BY c1;
+SELECT * FROM smpl_vw ORDER BY 1;
+
+CREATE VIEW comp_vw (s1, s2, s3, s6, s7, s8, d2) AS
+ SELECT s.c1, s.c2, s.c3, s.c6, s.c7, s.c8, d.c2
+ FROM f_test_tbl2 d, f_test_tbl1 s WHERE d.c1 = s.c8 AND d.c1 = 10
+ ORDER BY s.c1;
+SELECT * FROM comp_vw ORDER BY 1;
+
+CREATE TEMPORARY VIEW temp_vw AS
+ SELECT c1, c2, c3 FROM f_test_tbl2;
+SELECT * FROM temp_vw ORDER BY 1, 2;
+
+CREATE VIEW mul_tbl_view AS
+ SELECT d.c1 dc1, d.c2 dc2, e.c1 ec1, e.c2 ec2, e.c6 ec6
+ FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY d.c1;
+SELECT * FROM mul_tbl_view ORDER BY 1, 2, 3;
+
+-- Foreign-Foreign table joins
+
+-- CROSS JOIN.
+SELECT f_test_tbl2.c2, f_test_tbl1.c2
+ FROM f_test_tbl2 CROSS JOIN f_test_tbl1 ORDER BY 1, 2;
+-- INNER JOIN.
+SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8
+ FROM f_test_tbl2 d, f_test_tbl1 e WHERE d.c1 = e.c8 ORDER BY 1, 3;
+SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8
+ FROM f_test_tbl2 d INNER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3;
+-- OUTER JOINS.
+SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8
+ FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3;
+SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8
+ FROM f_test_tbl2 d RIGHT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3;
+SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8
+ FROM f_test_tbl2 d FULL OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3;
+
+-- Local-Foreign table joins.
+CREATE TABLE l_test_tbl1 AS
+ SELECT c1, c2, c3, c4, c5, c6, c7, c8 FROM f_test_tbl1;
+CREATE TABLE l_test_tbl2 AS
+ SELECT c1, c2, c3 FROM f_test_tbl2;
+
+-- CROSS JOIN.
+SELECT f_test_tbl2.c2, l_test_tbl1.c2 FROM f_test_tbl2 CROSS JOIN l_test_tbl1 ORDER BY 1, 2;
+-- INNER JOIN.
+SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8
+ FROM l_test_tbl2 d, f_test_tbl1 e WHERE d.c1 = e.c8 ORDER BY 1, 3;
+SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8
+ FROM f_test_tbl2 d INNER JOIN l_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3;
+-- OUTER JOINS.
+SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8
+ FROM f_test_tbl2 d LEFT OUTER JOIN l_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3;
+SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8
+ FROM f_test_tbl2 d RIGHT OUTER JOIN l_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3;
+SELECT d.c1, d.c2, e.c1, e.c2, e.c6, e.c8
+ FROM f_test_tbl2 d FULL OUTER JOIN l_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3;
+
+-- Retrieve complex data containing sub-fields, dates, and arrays.
+SELECT * FROM countries ORDER BY _id;
+SELECT * FROM country_elections ORDER BY _id;
+SELECT * FROM main_exports ORDER BY _id;
+
+-- Retrieve complex data containing JSON objects (__doc tests).
+SELECT json_data.key AS key1, json_data.value AS value1
+ FROM test_json, json_each_text(test_json.__doc) AS json_data
+ WHERE key NOT IN ('_id') ORDER BY json_data.key COLLATE "C";
+SELECT json_data.key AS key1, json_data.value AS value1
+ FROM test_jsonb, jsonb_each_text(test_jsonb.__doc) AS json_data
+ WHERE key NOT IN ('_id') ORDER BY json_data.key COLLATE "C";
+SELECT json_data.key AS key1, json_data.value AS value1
+ FROM test_text, json_each_text(test_text.__doc::json) AS json_data
+ WHERE key NOT IN ('_id') ORDER BY json_data.key COLLATE "C";
+SELECT json_data.key AS key1, json_data.value AS value1
+ FROM test_varchar, json_each_text(test_varchar.__doc::json) AS json_data
+ WHERE key NOT IN ('_id') ORDER BY json_data.key COLLATE "C";
+
+-- Insert some values into the mongo_test collection.
+INSERT INTO f_mongo_test VALUES ('0', 1, 'One');
+INSERT INTO f_mongo_test VALUES ('0', 2, 'Two');
+INSERT INTO f_mongo_test VALUES ('0', 3, 'Three');
+INSERT INTO f_mongo_test VALUES ('0', 4, 'Four');
+INSERT INTO f_mongo_test VALUES ('0', 5, 'Five');
+INSERT INTO f_mongo_test VALUES ('0', 6, 'Six');
+INSERT INTO f_mongo_test VALUES ('0', 7, 'Seven');
+INSERT INTO f_mongo_test VALUES ('0', 8, 'Eight');
+INSERT INTO f_mongo_test VALUES ('0', 9, 'Nine');
+INSERT INTO f_mongo_test VALUES ('0', 10, 'Ten');
+
+-- Retrieve data from foreign tables in functions.
+CREATE OR REPLACE FUNCTION test_param_where() RETURNS void AS $$
+DECLARE
+ n varchar;
+BEGIN
+ FOR x IN 1..9 LOOP
+ SELECT b INTO n FROM f_mongo_test WHERE a = x;
+ RAISE NOTICE 'Found number %', n;
+ END LOOP;
+ return;
+END
+$$ LANGUAGE plpgsql;
+
+SELECT test_param_where();
+
+-- FDW-103: Parameter expressions should work correctly with the WHERE clause.
+SELECT a, b FROM f_mongo_test WHERE a = (SELECT 2) ORDER BY a;
+SELECT a, b FROM f_mongo_test WHERE b = (SELECT 'Seven'::text) ORDER BY a;
+-- Create a local table and load data into it.
+CREATE TABLE l_mongo_test AS SELECT a, b FROM f_mongo_test;
+-- Check correlated queries.
+SELECT a, b FROM l_mongo_test lt
+ WHERE lt.b = (SELECT b FROM f_mongo_test ft WHERE lt.b = ft.b)
+ ORDER BY a;
+SELECT a, b FROM l_mongo_test lt
+ WHERE lt.a = (SELECT a FROM f_mongo_test ft WHERE lt.a = ft.a)
+ ORDER BY a;
+SELECT c1, c8 FROM f_test_tbl1 ft1
+ WHERE ft1.c8 = (SELECT c1 FROM f_test_tbl2 ft2 WHERE ft1.c8 = ft2.c1)
+ ORDER BY c1 LIMIT 2;
+
+-- FDW-197: Casting the target list should give the correct result.
+SELECT a::float FROM f_mongo_test ORDER BY a LIMIT 2;
+SELECT a::boolean FROM f_mongo_test ORDER BY a LIMIT 2;
+SELECT a, b::varchar FROM f_mongo_test ORDER BY a LIMIT 3;
+SELECT a::float, b::varchar FROM f_mongo_test ORDER BY a LIMIT 2;
+SELECT a::real, b::char(20) FROM f_mongo_test ORDER BY a LIMIT 2;
+SELECT c1, c2::text FROM f_test_tbl1 ORDER BY c1 LIMIT 2;
+SELECT a, LENGTH(b) FROM f_mongo_test ORDER BY 1 LIMIT 2;
+SELECT t1.c6::float, t1.c6::int, t1.c5::timestamptz, t1.c3::text, t2.c1::numeric, t2.c3
+ FROM f_test_tbl1 t1, f_test_tbl2 t2 WHERE t1.c8 = t2.c1
+ ORDER BY t2.c1, t1.c6 LIMIT 5;
+SELECT SUM(a::float), SUM(a % 2), a % 2 AS "a % 2" FROM f_mongo_test
+ GROUP BY a % 2 ORDER BY 2;
+SELECT (c6::float + (c1 * length(c3::text))) AS "c1 + c6", c1, c6
+ FROM f_test_tbl1 ORDER BY c1 LIMIT 5;
+
+-- FDW-249: LEFT JOIN LATERAL should not crash.
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.a, t1.b, t3.a, t1_a FROM f_mongo_test t1 LEFT JOIN LATERAL (
+ SELECT t2.a, t1.a AS t1_a FROM f_mongo_test t2) t3 ON t1.a = t3.a ORDER BY 1 ASC NULLS FIRST;
+SELECT t1.a, t1.b, t3.a, t1_a FROM f_mongo_test t1 LEFT JOIN LATERAL (
+ SELECT t2.a, t1.a AS t1_a FROM f_mongo_test t2) t3 ON t1.a = t3.a ORDER BY 1 ASC NULLS FIRST;
+SELECT t1.c1, t3.c1, t3.t1_c8 FROM f_test_tbl1 t1 INNER JOIN LATERAL (
+ SELECT t2.c1, t1.c8 AS t1_c8 FROM f_test_tbl2 t2) t3 ON t3.c1 = t3.t1_c8
+ ORDER BY 1, 2, 3;
+SELECT t1.c1, t3.c1, t3.t1_c8 FROM l_test_tbl1 t1 LEFT JOIN LATERAL (
+ SELECT t2.c1, t1.c8 AS t1_c8 FROM f_test_tbl2 t2) t3 ON t3.c1 = t3.t1_c8
+ ORDER BY 1, 2, 3;
+SELECT c1, c2, (SELECT r FROM (SELECT c1 AS c1) x, LATERAL (SELECT c1 AS r) y)
+ FROM f_test_tbl1 ORDER BY 1, 2, 3;
+-- LATERAL JOIN with RIGHT should throw an error.
+SELECT t1.c1, t3.c1, t3.t1_c8 FROM f_test_tbl1 t1 RIGHT JOIN LATERAL (
+ SELECT t2.c1, t1.c8 AS t1_c8 FROM f_test_tbl2 t2) t3 ON t3.c1 = t3.t1_c8
+ ORDER BY 1, 2, 3;
+
+-- FDW-262: Should throw an error when we select a system attribute.
+SELECT xmin FROM f_test_tbl1;
+SELECT ctid, xmax, tableoid FROM f_test_tbl1;
+SELECT xmax, c1 FROM f_test_tbl1;
+SELECT count(tableoid) FROM f_test_tbl1;
+
+-- FDW-391: Support whole-row references.
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT t1.c2, t1 FROM f_test_tbl1 t1
+ WHERE c1 = 100 ORDER BY 1;
+
+-- Force hash join for a consistent result.
+SET enable_mergejoin TO off;
+SET enable_nestloop TO off;
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT d, d.c2, e.c1, e
+ FROM f_test_tbl2 d LEFT OUTER JOIN f_test_tbl1 e ON d.c1 = e.c8 ORDER BY 1, 3;
+RESET enable_mergejoin;
+RESET enable_nestloop;
+
+-- FDW-427: The numeric value should display correctly as per the defined
+-- precision and scale.
+SELECT c1 FROM f_test5 ORDER BY 1;
+-- Number with the required precision.
+DROP FOREIGN TABLE f_test5;
+CREATE FOREIGN TABLE f_test5 (_id NAME, c1 NUMERIC(8, 6))
+ SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test5');
+SELECT c1 FROM f_test5 ORDER BY 1;
+-- Number with a smaller scale. Should round off the scale.
+DROP FOREIGN TABLE f_test5;
+CREATE FOREIGN TABLE f_test5 (_id NAME, c1 NUMERIC(6, 2))
+ SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test5');
+SELECT c1 FROM f_test5 ORDER BY 1;
+-- Number with precision only.
+DROP FOREIGN TABLE f_test5;
+CREATE FOREIGN TABLE f_test5 (_id NAME, c1 NUMERIC(2))
+ SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test5');
+SELECT c1 FROM f_test5 ORDER BY 1;
+-- Number with improper precision and scale,
+-- resulting in the error "numeric field overflow".
+DROP FOREIGN TABLE f_test5;
+CREATE FOREIGN TABLE f_test5 (_id NAME, c1 NUMERIC(3, 2))
+ SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'test5');
+SELECT c1 FROM f_test5 ORDER BY 1;
+
+-- FDW-418: Resolve data compatibility.
+SELECT a FROM f_test_tbl4 ORDER BY 1;
+SELECT a FROM f_test_tbl5 ORDER BY 1;
+SELECT a FROM f_test_tbl6 ORDER BY 1;
+SELECT a FROM f_test_tbl7 ORDER BY 1;
+
+
+-- FDW-529: Fix server crash caused by missing handling of the Param node for
+-- comparison expressions while preparing the query filter.
+
+CREATE OR REPLACE FUNCTION fdw529_test_param_where() RETURNS int AS $$
+DECLARE
+ val1 INT := 5;
+ val2 INT := 10;
+ cnt INT;
+BEGIN
+ SELECT count(*) INTO cnt FROM f_mongo_test WHERE a > val1 AND a < val2;
+ RETURN cnt;
+END
+$$ LANGUAGE plpgsql;
+
+SELECT fdw529_test_param_where();
+SELECT fdw529_test_param_where();
+SELECT fdw529_test_param_where();
+SELECT fdw529_test_param_where();
+SELECT fdw529_test_param_where();
+-- This should not crash
+SELECT fdw529_test_param_where();
+
+-- FDW-669: Fix issue where join pushdown doesn't return a result for a join
+-- condition on a sub-column. Fixed by omitting the dot (".") from variables
+-- used (declared by the $let field) to form the MongoDB query pipeline.
+SELECT * FROM testlog t INNER JOIN testdevice d
+ ON d.level = t."logMeta.nestMore.level";
+
+-- Cleanup
+DELETE FROM f_mongo_test WHERE a != 0;
+DROP TABLE l_test_tbl1;
+DROP TABLE l_test_tbl2;
+DROP TABLE l_mongo_test;
+DROP VIEW smpl_vw;
+DROP VIEW comp_vw;
+DROP VIEW temp_vw;
+DROP VIEW mul_tbl_view;
+DROP FUNCTION test_param_where();
+DROP FUNCTION fdw529_test_param_where();
+DROP FOREIGN TABLE f_mongo_test;
+DROP FOREIGN TABLE f_test_tbl1;
+DROP FOREIGN TABLE f_test_tbl2;
+DROP FOREIGN TABLE countries;
+DROP FOREIGN TABLE country_elections;
+DROP FOREIGN TABLE main_exports;
+DROP FOREIGN TABLE test_json;
+DROP FOREIGN TABLE test_jsonb;
+DROP FOREIGN TABLE test_text;
+DROP FOREIGN TABLE test_varchar;
+DROP FOREIGN TABLE f_test5;
+DROP FOREIGN TABLE f_test_tbl4;
+DROP FOREIGN TABLE f_test_tbl5;
+DROP FOREIGN TABLE f_test_tbl6;
+DROP FOREIGN TABLE f_test_tbl7;
+DROP FOREIGN TABLE testlog;
+DROP FOREIGN TABLE testdevice;
+DROP USER MAPPING FOR public SERVER mongo_server;
+DROP SERVER mongo_server;
+DROP EXTENSION mongo_fdw;
diff --git a/sql/server_options.sql b/sql/server_options.sql
new file mode 100644
index 0000000..03bb200
--- /dev/null
+++ b/sql/server_options.sql
@@ -0,0 +1,116 @@
+\set MONGO_HOST `echo \'"$MONGO_HOST"\'`
+\set MONGO_PORT `echo \'"$MONGO_PORT"\'`
+\set MONGO_USER_NAME `echo \'"$MONGO_USER_NAME"\'`
+\set MONGO_PASS `echo \'"$MONGO_PWD"\'`
+
+-- Before running this file, the user must create the database mongo_fdw_regress
+-- on MongoDB with all permissions for the MONGO_USER_NAME user with the
+-- MONGO_PASS password, and run the mongodb_init.sh file to load the collections.
+
+\c contrib_regression
+CREATE EXTENSION IF NOT EXISTS mongo_fdw;
+CREATE SERVER mongo_server FOREIGN DATA WRAPPER mongo_fdw
+ OPTIONS (address :MONGO_HOST, port :MONGO_PORT);
+CREATE USER MAPPING FOR public SERVER mongo_server;
+
+-- Port outside the ushort range. Error.
+CREATE SERVER mongo_server1 FOREIGN DATA WRAPPER mongo_fdw
+ OPTIONS (address :MONGO_HOST, port '65537');
+ALTER SERVER mongo_server OPTIONS (SET port '65537');
+
+-- Validate extension, server, and mapping details.
+CREATE OR REPLACE FUNCTION show_details(host TEXT, port TEXT, uid TEXT, pwd TEXT) RETURNS int AS $$
+DECLARE
+ ext TEXT;
+ srv TEXT;
+ sopts TEXT;
+ uopts TEXT;
+BEGIN
+ SELECT e.fdwname, srvname, array_to_string(s.srvoptions, ','), array_to_string(u.umoptions, ',')
+ INTO ext, srv, sopts, uopts
+ FROM pg_foreign_data_wrapper e LEFT JOIN pg_foreign_server s ON e.oid = s.srvfdw LEFT JOIN pg_user_mapping u ON s.oid = u.umserver
+ WHERE e.fdwname = 'mongo_fdw'
+ ORDER BY 1, 2, 3, 4;
+
+ raise notice 'Extension : %', ext;
+ raise notice 'Server : %', srv;
+
+ IF strpos(sopts, host) <> 0 AND strpos(sopts, port) <> 0 THEN
+ raise notice 'Server_Options : matched';
+ END IF;
+
+ IF strpos(uopts, uid) <> 0 AND strpos(uopts, pwd) <> 0 THEN
+ raise notice 'User_Mapping_Options : matched';
+ END IF;
+
+ return 1;
+END;
+$$ language plpgsql;
+
+SELECT show_details(:MONGO_HOST, :MONGO_PORT, :MONGO_USER_NAME, :MONGO_PASS);
+
+-- Create foreign tables and perform basic SQL operations.
+CREATE FOREIGN TABLE f_mongo_test (_id name, a int, b varchar)
+ SERVER mongo_server OPTIONS (database 'mongo_fdw_regress', collection 'mongo_test');
+SELECT a, b FROM f_mongo_test ORDER BY 1, 2;
+INSERT INTO f_mongo_test VALUES ('0', 2, 'mongo_test insert');
+SELECT a, b FROM f_mongo_test ORDER BY 1, 2;
+UPDATE f_mongo_test SET b = 'mongo_test update' WHERE a = 2;
+SELECT a, b FROM f_mongo_test ORDER BY 1, 2;
+DELETE FROM f_mongo_test WHERE a = 2;
+SELECT a, b FROM f_mongo_test ORDER BY 1, 2;
+
+-- Test the SSL option when the MongoDB server is running in non-SSL mode.
+-- Set a non-boolean value; should throw an error.
+ALTER SERVER mongo_server OPTIONS (ssl '1');
+ALTER SERVER mongo_server OPTIONS (ssl 'x');
+-- Check the default value, i.e. false.
+SELECT a, b FROM f_mongo_test ORDER BY 1, 2;
+-- Set 'true'.
+ALTER SERVER mongo_server OPTIONS (ssl 'true');
+-- Results in an error as the MongoDB server is running in non-SSL mode.
+\set VERBOSITY terse
+SELECT a, b FROM f_mongo_test ORDER BY 1, 2;
+\set VERBOSITY default
+-- Switch back to 'false'.
+ALTER SERVER mongo_server OPTIONS (SET ssl 'false');
+-- Should now be successful.
+SELECT a, b FROM f_mongo_test ORDER BY 1, 2;
+
+-- Alter the server to add the authentication_database option.
+ALTER SERVER mongo_server OPTIONS (ADD authentication_database 'NOT_EXIST_DB');
+ALTER USER MAPPING FOR public SERVER mongo_server
+ OPTIONS (ADD username :MONGO_USER_NAME, password :MONGO_PASS);
+-- The query below will fail with an authentication error as the user cannot
+-- be authenticated against the given authentication_database.
+SELECT a, b FROM f_mongo_test ORDER BY 1, 2;
+-- Now change to a valid authentication_database so the SELECT query should work.
+ALTER SERVER mongo_server
+ OPTIONS (SET authentication_database 'mongo_fdw_regress');
+SELECT a, b FROM f_mongo_test ORDER BY 1, 2;
+
+ALTER SERVER mongo_server
+ OPTIONS (DROP authentication_database);
+ALTER USER MAPPING FOR public SERVER mongo_server
+ OPTIONS (DROP username, DROP password);
+
+-- FDW-464: Support the use_remote_estimate option at the server level.
+-- Check that only boolean values are accepted.
+ALTER SERVER mongo_server OPTIONS (ADD use_remote_estimate 'abc11');
+-- Check the default behaviour. Should be 'false'.
+EXPLAIN(COSTS OFF)
+SELECT a, b FROM f_mongo_test ORDER BY 1, 2;
+-- Enable remote estimation.
+ALTER SERVER mongo_server OPTIONS (ADD use_remote_estimate 'true');
+EXPLAIN(COSTS OFF)
+SELECT a, b FROM f_mongo_test ORDER BY 1, 2;
+-- Disable remote estimation.
+ALTER SERVER mongo_server OPTIONS (SET use_remote_estimate 'false');
+EXPLAIN(COSTS OFF)
+SELECT a, b FROM f_mongo_test ORDER BY 1, 2;
+
+-- Cleanup
+DROP FOREIGN TABLE f_mongo_test;
+DROP USER MAPPING FOR public SERVER mongo_server;
+DROP SERVER mongo_server;
+DROP EXTENSION mongo_fdw;