diff --git a/docs/reference/searchable-json.md b/docs/reference/searchable-json.md index dbc1e2b4..a3955419 100644 --- a/docs/reference/searchable-json.md +++ b/docs/reference/searchable-json.md @@ -43,6 +43,8 @@ SELECT eql_v2.add_search_config( ); ``` +> **Note:** JSONB literals in INSERT and UPDATE statements work directly without explicit `::jsonb` type casts. The proxy infers the JSONB type from the target column and handles encryption transparently. + ### JSON document structure Examples assume an encrypted JSON document with the following structure: @@ -591,6 +593,8 @@ SELECT jsonb_array_length(jsonb_path_query(encrypted_jsonb, '$.unknown')) FROM c ## Containment Operators +> **Note:** Containment operators work directly with JSONB literals without requiring explicit `::jsonb` type casts. The examples below use the simplified syntax intentionally. + ### `@>` (Contains Operator) Tests whether the left JSONB value contains the right JSONB value. diff --git a/mise.toml b/mise.toml index 84d4ae02..898e1516 100644 --- a/mise.toml +++ b/mise.toml @@ -34,7 +34,7 @@ CS_PROXY__HOST = "host.docker.internal" # Misc DOCKER_CLI_HINTS = "false" # Please don't show us What's Next. 
-CS_EQL_VERSION = "eql-2.1.8" +CS_EQL_VERSION = "eql-2.2.1" [tools] @@ -174,6 +174,18 @@ run = """ cargo nextest run --no-fail-fast --nocapture -p cipherstash-proxy-integration """ +[tasks."test:integration:without_multitenant"] +description = "Runs integration tests excluding multitenant" +run = """ +cargo nextest run --no-fail-fast --nocapture -E 'package(cipherstash-proxy-integration) and not test(multitenant)' +""" + +[tasks."test:integration:multitenant"] +description = "Runs multitenant integration tests only" +run = """ +cargo nextest run --no-fail-fast --nocapture -E 'package(cipherstash-proxy-integration) and test(multitenant)' +""" + [tasks."test:local:mapper"] alias = 'lm' description = "Runs test/s" @@ -311,8 +323,6 @@ echo mise --env tcp run postgres:setup mise --env tls run postgres:setup -mise run test:integration:showcase - echo echo '###############################################' echo '# Test: Prometheus' @@ -354,7 +364,7 @@ echo mise --env tls run proxy:up proxy-tls --extra-args "--detach --wait" mise --env tls run test:wait_for_postgres_to_quack --port 6432 --max-retries 20 --tls -cargo nextest run --no-fail-fast --nocapture -E 'package(cipherstash-proxy-integration) and not test(multitenant)' +mise --env tls run test:integration:without_multitenant mise --env tls run proxy:down echo @@ -369,7 +379,7 @@ unset CS_DEFAULT_KEYSET_ID mise --env tls run proxy:up proxy-tls --extra-args "--detach --wait" mise --env tls run test:wait_for_postgres_to_quack --port 6432 --max-retries 20 --tls -cargo nextest run --no-fail-fast --nocapture -E 'package(cipherstash-proxy-integration) and test(multitenant)' +mise --env tls run test:integration:multitenant echo "'set CS_DEFAULT_KEYSET_ID = {{default_keyset_id}}'" export CS_DEFAULT_KEYSET_ID="{{default_keyset_id}}" diff --git a/packages/cipherstash-proxy-integration/src/common.rs b/packages/cipherstash-proxy-integration/src/common.rs index 23f8761e..627c4302 100644 --- 
a/packages/cipherstash-proxy-integration/src/common.rs +++ b/packages/cipherstash-proxy-integration/src/common.rs @@ -8,11 +8,12 @@ use rustls::{ use serde_json::Value; use std::sync::{Arc, Once}; use tokio_postgres::{types::ToSql, Client, NoTls, Row, SimpleQueryMessage}; +use tracing::info; use tracing_subscriber::{filter::Directive, EnvFilter, FmtSubscriber}; pub const PROXY: u16 = 6432; -pub const PG_LATEST: u16 = 5532; -pub const PG_V17_TLS: u16 = 5617; +pub const PG_PORT: u16 = 5532; +pub const PG_TLS_PORT: u16 = 5617; pub const TEST_SCHEMA_SQL: &str = include_str!(concat!("../../../tests/sql/schema.sql")); @@ -52,7 +53,7 @@ pub async fn clear() { pub async fn reset_schema() { let port = std::env::var("CS_DATABASE__PORT") .map(|s| s.parse().unwrap()) - .unwrap_or(PG_LATEST); + .unwrap_or(PG_PORT); let client = connect_with_tls(port).await; client.simple_query(TEST_SCHEMA_SQL).await.unwrap(); @@ -61,7 +62,7 @@ pub async fn reset_schema() { pub async fn reset_schema_to(schema: &'static str) { let port = std::env::var("CS_DATABASE__PORT") .map(|s| s.parse().unwrap()) - .unwrap_or(PG_LATEST); + .unwrap_or(PG_PORT); let client = connect_with_tls(port).await; client.simple_query(schema).await.unwrap(); @@ -81,7 +82,7 @@ pub async fn table_exists(table: &str) -> bool { let port = std::env::var("CS_DATABASE__PORT") .map(|s| s.parse().unwrap()) - .unwrap_or(PG_LATEST); + .unwrap_or(PG_PORT); let client = connect_with_tls(port).await; let messages = client.simple_query(&query).await.unwrap(); @@ -209,6 +210,26 @@ where rows.iter().map(|row| row.get(0)).collect::>() } +/// Get database port from environment or use default. 
+fn get_database_port() -> u16 { + std::env::var("CS_DATABASE__PORT") + .ok() + .and_then(|s| s.parse().ok()) + .unwrap_or(PG_PORT) +} + +pub async fn query_direct_by(sql: &str, param: &(dyn ToSql + Sync)) -> Vec +where + T: for<'a> tokio_postgres::types::FromSql<'a>, +{ + let port = get_database_port(); + info!(port); + + let client = connect_with_tls(port).await; + let rows = client.query(sql, &[param]).await.unwrap(); + rows.iter().map(|row| row.get(0)).collect() +} + pub async fn simple_query(sql: &str) -> Vec where ::Err: std::fmt::Debug, diff --git a/packages/cipherstash-proxy-integration/src/insert/insert_with_literal.rs b/packages/cipherstash-proxy-integration/src/insert/insert_with_literal.rs index e681f714..4990a0dc 100644 --- a/packages/cipherstash-proxy-integration/src/insert/insert_with_literal.rs +++ b/packages/cipherstash-proxy-integration/src/insert/insert_with_literal.rs @@ -9,10 +9,6 @@ mod tests { macro_rules! test_insert_with_literal { ($name: ident, $type: ident, $pg_type: ident) => { - test_insert_with_literal!($name, $type, $pg_type, false); - }; - - ($name: ident, $type: ident, $pg_type: ident, $cast: expr) => { #[tokio::test] pub async fn $name() { trace(); @@ -26,14 +22,8 @@ mod tests { let expected = vec![encrypted_val.clone()]; - let cast_to_type: &str = if $cast { - &format!("::{}", stringify!($pg_type)) - } else { - "" - }; - - let insert_sql = format!("INSERT INTO encrypted (id, {encrypted_col}) VALUES ($1, '{encrypted_val}'{cast_to_type})"); - let select_sql = format!("SELECT {encrypted_col}{cast_to_type} FROM encrypted WHERE id = $1"); + let insert_sql = format!("INSERT INTO encrypted (id, {encrypted_col}) VALUES ($1, '{encrypted_val}')"); + let select_sql = format!("SELECT {encrypted_col} FROM encrypted WHERE id = $1"); execute_query(&insert_sql, &[&id]).await; let actual = query_by::<$type>(&select_sql, &id).await; @@ -46,10 +36,6 @@ mod tests { macro_rules! 
test_insert_simple_query_with_literal { ($name: ident, $type: ident, $pg_type: ident) => { - test_insert_simple_query_with_literal!($name, $type, $pg_type, false); - }; - - ($name: ident, $type: ident, $pg_type: ident, $cast: expr) => { #[tokio::test] pub async fn $name() { trace(); @@ -62,15 +48,8 @@ mod tests { let encrypted_col = format!("encrypted_{}", stringify!($pg_type)); let encrypted_val = crate::value_for_type!($type, random_limited()); - let cast_to_type: &str = if $cast { - &format!("::{}", stringify!($pg_type)) - } else { - "" - }; - - let insert_sql = format!("INSERT INTO encrypted (id, {encrypted_col}) VALUES ({id}, '{encrypted_val}'{cast_to_type})"); - let select_sql = format!("SELECT {encrypted_col}{cast_to_type} FROM encrypted WHERE id = {id}"); - + let insert_sql = format!("INSERT INTO encrypted (id, {encrypted_col}) VALUES ({id}, '{encrypted_val}')"); + let select_sql = format!("SELECT {encrypted_col} FROM encrypted WHERE id = {id}"); let expected = vec![encrypted_val]; @@ -89,7 +68,7 @@ mod tests { test_insert_with_literal!(insert_with_literal_bool, bool, bool); test_insert_with_literal!(insert_with_literal_text, String, text); test_insert_with_literal!(insert_with_literal_date, NaiveDate, date); - test_insert_with_literal!(insert_with_literal_jsonb, Value, jsonb, true); + test_insert_with_literal!(insert_with_literal_jsonb, Value, jsonb); test_insert_simple_query_with_literal!(insert_simple_query_with_literal_int2, i16, int2); test_insert_simple_query_with_literal!(insert_simple_query_with_literal_int4, i32, int4); @@ -98,12 +77,7 @@ mod tests { test_insert_simple_query_with_literal!(insert_simple_query_with_literal_bool, bool, bool); test_insert_simple_query_with_literal!(insert_simple_query_with_literal_text, String, text); test_insert_simple_query_with_literal!(insert_simple_query_with_literal_date, NaiveDate, date); - test_insert_simple_query_with_literal!( - insert_simple_query_with_literal_jsonb, - Value, - jsonb, - true - ); + 
test_insert_simple_query_with_literal!(insert_simple_query_with_literal_jsonb, Value, jsonb); // ----------------------------------------------------------------- diff --git a/packages/cipherstash-proxy-integration/src/map_literals.rs b/packages/cipherstash-proxy-integration/src/map_literals.rs index 24e49526..0d60ad5f 100644 --- a/packages/cipherstash-proxy-integration/src/map_literals.rs +++ b/packages/cipherstash-proxy-integration/src/map_literals.rs @@ -1,6 +1,6 @@ #[cfg(test)] mod tests { - use crate::common::{clear, connect_with_tls, random_id, PROXY}; + use crate::common::{clear, connect_with_tls, query_direct_by, random_id, trace, PROXY}; #[tokio::test] async fn map_literal() { @@ -45,8 +45,14 @@ mod tests { println!("encrypted: {:?}", rows[0]) } + /// Verify JSONB literal insertion and retrieval without explicit type casts. + /// + /// JSONB literals in INSERT and SELECT statements work directly with the proxy + /// without requiring `::jsonb` type annotations. The proxy infers the JSONB type + /// from the target column and handles encryption/decryption transparently. #[tokio::test] async fn map_jsonb() { + trace(); clear().await; let client = connect_with_tls(PROXY).await; @@ -55,12 +61,12 @@ mod tests { let encrypted_jsonb = serde_json::json!({"key": "value"}); let sql = format!( - "INSERT INTO encrypted (id, encrypted_jsonb) VALUES ($1, '{encrypted_jsonb}'::jsonb)", + "INSERT INTO encrypted (id, encrypted_jsonb) VALUES ($1, '{encrypted_jsonb}')", ); client.query(&sql, &[&id]).await.unwrap(); - let sql = "SELECT id, encrypted_jsonb::jsonb FROM encrypted WHERE id = $1"; + let sql = "SELECT id, encrypted_jsonb FROM encrypted WHERE id = $1"; let rows = client.query(sql, &[&id]).await.unwrap(); assert_eq!(rows.len(), 1); @@ -74,6 +80,58 @@ mod tests { } } + /// Sanity check: verify JSONB is actually encrypted in database + /// + /// This test catches silent encryption failures where plaintext is stored. 
+ /// Insert via proxy, query DIRECT from database to verify encryption, + /// then query via proxy to verify decryption round-trip. + #[tokio::test] + async fn jsonb_encryption_sanity_check() { + trace(); + clear().await; + + let id = random_id(); + let plaintext_json = serde_json::json!({"key": "value"}); + + // Insert through proxy (should encrypt) + let client = connect_with_tls(PROXY).await; + let sql = "INSERT INTO encrypted (id, encrypted_jsonb) VALUES ($1, $2)"; + client.query(sql, &[&id, &plaintext_json]).await.unwrap(); + + // Query DIRECT from database (bypassing proxy, no decryption) + // The stored value should NOT be readable as the original JSON + let sql = "SELECT encrypted_jsonb::text FROM encrypted WHERE id = $1"; + let stored: Vec = query_direct_by(sql, &id).await; + + assert_eq!(stored.len(), 1, "Expected exactly one row"); + let stored_text = &stored[0]; + + // Verify it's NOT the plaintext JSON (encryption actually happened) + let plaintext_str = plaintext_json.to_string(); + assert_ne!( + stored_text, &plaintext_str, + "ENCRYPTION FAILED: Stored value matches plaintext! Data was not encrypted." + ); + + // Additional verification: the encrypted format should be different structure + if let Ok(stored_json) = serde_json::from_str::(stored_text) { + assert_ne!( + stored_json, plaintext_json, + "ENCRYPTION FAILED: Stored JSON structure matches plaintext!" + ); + } + + // Round-trip: query through proxy should decrypt back to original + let sql = "SELECT encrypted_jsonb FROM encrypted WHERE id = $1"; + let rows = client.query(sql, &[&id]).await.unwrap(); + assert_eq!(rows.len(), 1, "Expected exactly one row for round-trip"); + let decrypted: serde_json::Value = rows[0].get(0); + assert_eq!( + decrypted, plaintext_json, + "DECRYPTION FAILED: Round-trip value doesn't match original!" 
+ ); + } + #[tokio::test] async fn map_repeated_literals_different_columns_regression() { clear().await; diff --git a/packages/cipherstash-proxy-integration/src/select/jsonb_containment_index.rs b/packages/cipherstash-proxy-integration/src/select/jsonb_containment_index.rs new file mode 100644 index 00000000..d9085c14 --- /dev/null +++ b/packages/cipherstash-proxy-integration/src/select/jsonb_containment_index.rs @@ -0,0 +1,331 @@ +//! Tests for JSONB containment operators +//! +//! Verifies that the containment operator transformation works correctly: +//! - @> operator is transformed to eql_v2.jsonb_contains() +//! - eql_v2.jsonb_contains() function works with encrypted data +//! - Both return correct results matching the expected data pattern +//! +//! ## Test Data +//! +//! Uses fixture data (500 rows with IDs 1000000-1000499), loaded via `mise run proxy:fixtures` or inserted on demand by `ensure_fixture_data`. +//! Pattern: `{"string": "value_N", "number": n}` where N = n % 10 +//! This gives ~50 rows per value (value_0 through value_9). 
+ +#[cfg(test)] +mod tests { + use crate::common::{connect_with_tls, trace, PROXY}; + use serde_json::json; + use tracing::info; + + /// ID range for fixture data (loaded via mise run proxy:fixtures) + const FIXTURE_ID_START: i64 = 1000000; + const FIXTURE_ID_END: i64 = 1000499; + /// Total number of fixture rows for containment tests + const FIXTURE_COUNT: i64 = 500; + + /// Operand type for containment operator tests + #[derive(Debug, Clone, Copy)] + enum OperandType { + /// Table column reference: `encrypted_jsonb` + EncryptedColumn, + /// Parameterized value: `$1` + Parameter, + /// JSON literal in SQL: `'{"key":"value"}'` + Literal, + } + + /// Query protocol - determines how the query is executed + #[derive(Debug, Clone, Copy)] + enum QueryProtocol { + /// Extended query protocol with parameters (client.query with params) + Extended, + /// Simple query protocol with SQL string only + Simple, + } + + /// Configuration for a single containment operator test + struct ContainmentTestCase { + /// Left-hand side operand type + lhs: OperandType, + /// Right-hand side operand type + rhs: OperandType, + /// Expected row count (approximately) + expected_count: i64, + /// Allowed variance for count assertion + variance: i64, + } + + impl ContainmentTestCase { + fn new(lhs: OperandType, rhs: OperandType) -> Self { + // Adjust expected count based on operand types and containment semantics. 
+ // + // Expected counts and variance tolerances: + // - Subset searches (column @> search): expect ~50 rows (FIXTURE_COUNT / 10 values) + // Variance ±10 accounts for modulo distribution not being perfectly uniform + // - Exact match searches (search @> column): expect 1 row, variance 0 (deterministic) + let (expected_count, variance) = match (&lhs, &rhs) { + // LHS is encrypted column: column @> RHS (column contains RHS) + // Uses subset search {"string": "value_1"} - matches ~50 rows + (OperandType::EncryptedColumn, OperandType::Parameter) => (50, 10), + (OperandType::EncryptedColumn, OperandType::Literal) => (50, 10), + (OperandType::EncryptedColumn, OperandType::EncryptedColumn) => (50, 10), + + // LHS is parameter: parameter @> RHS + (OperandType::Parameter, OperandType::Parameter) => (50, 10), + (OperandType::Parameter, OperandType::Literal) => (50, 10), + // Parameter @> encrypted column: uses exact match search - matches 1 row + (OperandType::Parameter, OperandType::EncryptedColumn) => (1, 0), + + // LHS is literal: literal @> RHS + (OperandType::Literal, OperandType::Parameter) => (50, 10), + (OperandType::Literal, OperandType::Literal) => (50, 10), + // Literal @> encrypted column: uses exact match search - matches 1 row + (OperandType::Literal, OperandType::EncryptedColumn) => (1, 0), + }; + + Self { + lhs, + rhs, + expected_count, + variance, + } + } + + /// Get the appropriate search value based on operand types + /// + /// For `column @> search`: use subset `{"string": "value_1"}` - matches ~50 rows + /// For `search @> column`: use exact match `{"string": "value_1", "number": 1}` - matches 1 row + fn search_value(&self) -> serde_json::Value { + match (&self.lhs, &self.rhs) { + // When searching if param/literal contains column, use exact match + (OperandType::Parameter, OperandType::EncryptedColumn) + | (OperandType::Literal, OperandType::EncryptedColumn) => { + json!({"string": "value_1", "number": 1}) + } + // Otherwise use subset search + _ => 
json!({"string": "value_1"}), + } + } + + /// Determine query protocol based on operand types + fn protocol(&self) -> QueryProtocol { + match (&self.lhs, &self.rhs) { + (OperandType::Parameter, _) | (_, OperandType::Parameter) => { + QueryProtocol::Extended + } + _ => QueryProtocol::Simple, + } + } + + /// Build SQL query string based on operand types + /// Filters by fixture ID range to isolate from other test data + fn build_sql(&self, search_json: &serde_json::Value) -> String { + let lhs = match self.lhs { + OperandType::EncryptedColumn => "encrypted_jsonb".to_string(), + OperandType::Parameter => "$1".to_string(), + OperandType::Literal => format!("'{}'", search_json), + }; + + let rhs = match self.rhs { + OperandType::EncryptedColumn => "encrypted_jsonb".to_string(), + OperandType::Parameter => { + // If LHS is also a parameter, this is $2 + if matches!(self.lhs, OperandType::Parameter) { + "$2".to_string() + } else { + "$1".to_string() + } + } + OperandType::Literal => format!("'{}'", search_json), + }; + + // Filter by fixture ID range to isolate from other test data + format!( + "SELECT COUNT(*) FROM encrypted WHERE {} @> {} AND id BETWEEN {} AND {}", + lhs, rhs, FIXTURE_ID_START, FIXTURE_ID_END + ) + } + + /// Execute the test case + async fn run(&self, client: &tokio_postgres::Client, search_json: &serde_json::Value) { + let sql = self.build_sql(search_json); + info!("Testing @> with LHS={:?}, RHS={:?}", self.lhs, self.rhs); + info!("SQL: {}", sql); + + let count: i64 = match self.protocol() { + QueryProtocol::Extended => { + let rows = client.query(&sql, &[search_json]).await.unwrap(); + rows[0].get(0) + } + QueryProtocol::Simple => { + let rows = client.simple_query(&sql).await.unwrap(); + // Find the first Row message (there may be RowDescription and CommandComplete messages) + rows.iter() + .find_map(|msg| { + if let tokio_postgres::SimpleQueryMessage::Row(row) = msg { + row.get(0).map(|v| v.parse::().unwrap()) + } else { + None + } + }) + 
.expect("No Row message found in simple_query response") + } + }; + + info!("Result count: {}", count); + + let min = self.expected_count - self.variance; + let max = self.expected_count + self.variance; + assert!( + count >= min && count <= max, + "@> with LHS={:?}, RHS={:?}: expected {}-{} rows, got {}", + self.lhs, + self.rhs, + min, + max, + count + ); + } + } + + /// Generate a containment operator test from operand types + /// + /// Tests use fixture data in ID range FIXTURE_ID_START to FIXTURE_ID_END. + /// Data is inserted once per test run if not already present. + /// + /// Search value varies by test type: + /// - `column @> search`: subset `{"string": "value_1"}` matches ~50 rows + /// - `search @> column`: exact `{"string": "value_1", "number": 1}` matches 1 row + macro_rules! containment_test { + ($name:ident, lhs = $lhs:ident, rhs = $rhs:ident) => { + #[tokio::test] + async fn $name() { + trace(); + ensure_fixture_data().await; + + let client = connect_with_tls(PROXY).await; + let test_case = ContainmentTestCase::new(OperandType::$lhs, OperandType::$rhs); + let search_value = test_case.search_value(); + test_case.run(&client, &search_value).await; + } + }; + } + + /// Ensure fixture data exists in the specific ID range. + /// + /// Uses IDs FIXTURE_ID_START to FIXTURE_ID_END to isolate from other tests. + /// Does NOT call clear() - preserves data from other tests. + /// Only inserts if the fixture data is missing. 
+ async fn ensure_fixture_data() { + let client = connect_with_tls(PROXY).await; + + // Check if fixture data already exists + let sql = format!( + "SELECT COUNT(*) FROM encrypted WHERE id BETWEEN {} AND {}", + FIXTURE_ID_START, FIXTURE_ID_END + ); + let rows = client.query(&sql, &[]).await.unwrap(); + let count: i64 = rows[0].get(0); + + info!( + "Fixture records in range {}-{}: {}", + FIXTURE_ID_START, FIXTURE_ID_END, count + ); + + if count >= FIXTURE_COUNT { + return; // Fixture data already exists + } + + info!("Inserting fixture data..."); + + // Insert fixture rows with specific IDs + let stmt = client + .prepare("INSERT INTO encrypted (id, encrypted_jsonb) VALUES ($1, $2) ON CONFLICT (id) DO NOTHING") + .await + .unwrap(); + + for n in 1..=FIXTURE_COUNT { + let id = FIXTURE_ID_START + n - 1; + let encrypted_jsonb = json!({ + "string": format!("value_{}", n % 10), + "number": n, + }); + client + .execute(&stmt, &[&id, &encrypted_jsonb]) + .await + .unwrap(); + } + + info!("Inserted {} fixture rows", FIXTURE_COUNT); + } + + // ============================================================================ + // @> Containment Operator Tests via Macro + // ============================================================================ + + // Encrypted column @> parameter (extended protocol) + containment_test!( + encrypted_contains_param, + lhs = EncryptedColumn, + rhs = Parameter + ); + + // Encrypted column @> literal (simple protocol) + containment_test!( + encrypted_contains_literal, + lhs = EncryptedColumn, + rhs = Literal + ); + + // Parameter @> encrypted column (extended protocol) + containment_test!( + param_contains_encrypted, + lhs = Parameter, + rhs = EncryptedColumn + ); + + // Literal @> encrypted column (simple protocol) + containment_test!( + literal_contains_encrypted, + lhs = Literal, + rhs = EncryptedColumn + ); + + /// Test: Verify eql_v2.jsonb_contains() function works through proxy + /// + /// Tests explicit eql_v2.jsonb_contains() function call 
works correctly. + /// Uses fixture data in ID range FIXTURE_ID_START to FIXTURE_ID_END. + /// + /// With 500 rows and "string": "value_N" where N = n % 10, + /// we expect ~50 rows to have "string": "value_1". + #[tokio::test] + async fn jsonb_contains_function_works() { + trace(); + ensure_fixture_data().await; + + let client = connect_with_tls(PROXY).await; + + // Use extended query protocol with parameterized query + // Filter by fixture ID range to isolate from other test data + let search_value = json!({"string": "value_1"}); + let sql = format!( + "SELECT COUNT(*) FROM encrypted WHERE eql_v2.jsonb_contains(encrypted_jsonb, $1) AND id BETWEEN {} AND {}", + FIXTURE_ID_START, FIXTURE_ID_END + ); + + info!("Testing eql_v2.jsonb_contains() function with SQL: {}", sql); + + let rows = client.query(&sql, &[&search_value]).await.unwrap(); + let count: i64 = rows[0].get(0); + + info!("jsonb_contains() query returned {} matching rows", count); + + // With 500 fixture rows and "string": "value_N" where N = n % 10, + // we expect ~50 rows to have "string": "value_1" + assert!( + (40..=60).contains(&count), // Allow some variance + "Expected approximately 50 rows with jsonb_contains(), got {}", + count + ); + } +} diff --git a/packages/cipherstash-proxy-integration/src/select/mod.rs b/packages/cipherstash-proxy-integration/src/select/mod.rs index c005eeed..ad2b02a4 100644 --- a/packages/cipherstash-proxy-integration/src/select/mod.rs +++ b/packages/cipherstash-proxy-integration/src/select/mod.rs @@ -2,6 +2,7 @@ mod group_by; mod jsonb_array_elements; mod jsonb_array_length; mod jsonb_contained_by; +mod jsonb_containment_index; mod jsonb_contains; mod jsonb_get_field; mod jsonb_get_field_as_ciphertext; diff --git a/packages/cipherstash-proxy-integration/src/update/update_with_literal.rs b/packages/cipherstash-proxy-integration/src/update/update_with_literal.rs index 40c43b9f..00eaddb1 100644 --- a/packages/cipherstash-proxy-integration/src/update/update_with_literal.rs 
+++ b/packages/cipherstash-proxy-integration/src/update/update_with_literal.rs @@ -9,10 +9,6 @@ mod tests { macro_rules! test_update_with_literal { ($name: ident, $type: ident, $pg_type: ident) => { - test_update_with_literal!($name, $type, $pg_type, false); - }; - - ($name: ident, $type: ident, $pg_type: ident, $cast: expr) => { #[tokio::test] pub async fn $name() { trace(); @@ -27,35 +23,27 @@ mod tests { let expected = vec![encrypted_val.clone()]; - let cast_to_type: &str = if $cast { - &format!("::{}", stringify!($pg_type)) - } else { - "" - }; - // First insert a record - let insert_sql = format!("INSERT INTO encrypted (id, {encrypted_col}) VALUES ($1, $2)"); + let insert_sql = + format!("INSERT INTO encrypted (id, {encrypted_col}) VALUES ($1, $2)"); execute_query(&insert_sql, &[&id, &initial_val]).await; // Then update it with literal value - let update_sql = format!("UPDATE encrypted SET {encrypted_col} = '{encrypted_val}'{cast_to_type} WHERE id = $1"); - let select_sql = format!("SELECT {encrypted_col}{cast_to_type} FROM encrypted WHERE id = $1"); + let update_sql = format!( + "UPDATE encrypted SET {encrypted_col} = '{encrypted_val}' WHERE id = $1" + ); + let select_sql = format!("SELECT {encrypted_col} FROM encrypted WHERE id = $1"); execute_query(&update_sql, &[&id]).await; let actual = query_by::<$type>(&select_sql, &id).await; assert_eq!(expected, actual); - } }; } macro_rules! 
test_update_simple_query_with_literal { ($name: ident, $type: ident, $pg_type: ident) => { - test_update_simple_query_with_literal!($name, $type, $pg_type, false); - }; - - ($name: ident, $type: ident, $pg_type: ident, $cast: expr) => { #[tokio::test] pub async fn $name() { trace(); @@ -69,20 +57,13 @@ mod tests { let initial_val = crate::value_for_type!($type, 1); let encrypted_val = crate::value_for_type!($type, random_limited()); - let cast_to_type: &str = if $cast { - &format!("::{}", stringify!($pg_type)) - } else { - "" - }; - // First insert a record - let insert_sql = format!("INSERT INTO encrypted (id, {encrypted_col}) VALUES ({id}, '{initial_val}'{cast_to_type})"); + let insert_sql = format!("INSERT INTO encrypted (id, {encrypted_col}) VALUES ({id}, '{initial_val}')"); execute_simple_query(&insert_sql).await; // Then update it with literal value - let update_sql = format!("UPDATE encrypted SET {encrypted_col} = '{encrypted_val}'{cast_to_type} WHERE id = {id}"); - let select_sql = format!("SELECT {encrypted_col}{cast_to_type} FROM encrypted WHERE id = {id}"); - + let update_sql = format!("UPDATE encrypted SET {encrypted_col} = '{encrypted_val}' WHERE id = {id}"); + let select_sql = format!("SELECT {encrypted_col} FROM encrypted WHERE id = {id}"); let expected = vec![encrypted_val]; @@ -101,7 +82,7 @@ mod tests { test_update_with_literal!(update_with_literal_bool, bool, bool); test_update_with_literal!(update_with_literal_text, String, text); test_update_with_literal!(update_with_literal_date, NaiveDate, date); - test_update_with_literal!(update_with_literal_jsonb, Value, jsonb, true); + test_update_with_literal!(update_with_literal_jsonb, Value, jsonb); test_update_simple_query_with_literal!(update_simple_query_with_literal_int2, i16, int2); test_update_simple_query_with_literal!(update_simple_query_with_literal_int4, i32, int4); @@ -110,12 +91,7 @@ mod tests { test_update_simple_query_with_literal!(update_simple_query_with_literal_bool, bool, bool); 
test_update_simple_query_with_literal!(update_simple_query_with_literal_text, String, text); test_update_simple_query_with_literal!(update_simple_query_with_literal_date, NaiveDate, date); - test_update_simple_query_with_literal!( - update_simple_query_with_literal_jsonb, - Value, - jsonb, - true - ); + test_update_simple_query_with_literal!(update_simple_query_with_literal_jsonb, Value, jsonb); // ----------------------------------------------------------------- diff --git a/packages/cipherstash-proxy-integration/src/update/update_with_null_literal.rs b/packages/cipherstash-proxy-integration/src/update/update_with_null_literal.rs index 5b046fd6..b05b908a 100644 --- a/packages/cipherstash-proxy-integration/src/update/update_with_null_literal.rs +++ b/packages/cipherstash-proxy-integration/src/update/update_with_null_literal.rs @@ -50,11 +50,7 @@ mod tests { test_update_with_null_literal!(update_with_null_literal_jsonb, Value, jsonb); macro_rules! test_update_simple_query_with_null_literal { - ($name: ident, $type: ident, $pg_type: ident) => { - test_update_simple_query_with_null_literal!($name, $type, $pg_type, false); - }; - ($name: ident, $type: ident, $pg_type: ident, $cast: expr) => { #[tokio::test] pub async fn $name() { trace(); @@ -67,15 +63,8 @@ mod tests { let initial_val = crate::value_for_type!($type, random_limited()); let encrypted_val: Option = None; - let cast_to_type: &str = if $cast { - &format!("::{}", stringify!($pg_type)) - } else { - "" - }; - - let insert_sql = - format!("INSERT INTO encrypted (id, {encrypted_col}) VALUES ({id}, '{initial_val}'{cast_to_type})"); + format!("INSERT INTO encrypted (id, {encrypted_col}) VALUES ({id}, '{initial_val}')"); let update_sql = format!("UPDATE encrypted SET {encrypted_col} = NULL WHERE id = {id}"); let select_sql = format!("SELECT {encrypted_col} FROM encrypted WHERE id = {id}"); @@ -128,8 +117,7 @@ mod tests { test_update_simple_query_with_null_literal!( update_simple_query_with_null_literal_jsonb, 
Value, - jsonb, - true + jsonb ); // ----------------------------------------------------------------- diff --git a/packages/cipherstash-proxy/src/postgresql/messages/bind.rs b/packages/cipherstash-proxy/src/postgresql/messages/bind.rs index ed528a87..c04e3c7f 100644 --- a/packages/cipherstash-proxy/src/postgresql/messages/bind.rs +++ b/packages/cipherstash-proxy/src/postgresql/messages/bind.rs @@ -104,7 +104,7 @@ fn get_param_type(idx: usize, param_types: &[i32], col: &Column) -> Type { param_types .get(idx) .and_then(|oid| Type::from_oid(*oid as u32)) - .map_or_else(|| col.postgres_type.clone(), |t| t) + .unwrap_or_else(|| col.postgres_type.clone()) } impl BindParam { diff --git a/packages/eql-mapper/src/eql_mapper.rs b/packages/eql-mapper/src/eql_mapper.rs index 1cf992fd..50bde65b 100644 --- a/packages/eql-mapper/src/eql_mapper.rs +++ b/packages/eql-mapper/src/eql_mapper.rs @@ -58,15 +58,16 @@ pub fn type_check<'ast>( /// /// In any case, support for those statements is coming soon! pub fn requires_type_check(statement: &Statement) -> bool { - match statement { + matches!( + statement, Statement::Query(_) - | Statement::Insert(_) - | Statement::Update { .. } - | Statement::Delete(_) - | Statement::Merge { .. } - | Statement::Prepare { .. } => true, // not - _ => false, - } + | Statement::Insert(_) + | Statement::Update { .. } + | Statement::Delete(_) + | Statement::Merge { .. } + | Statement::Prepare { .. } + | Statement::Explain { .. } + ) } /// The error type returned by various functions in the `eql_mapper` crate. 
diff --git a/packages/eql-mapper/src/inference/infer_type_impls/statement.rs b/packages/eql-mapper/src/inference/infer_type_impls/statement.rs index ed89c1a0..0d682517 100644 --- a/packages/eql-mapper/src/inference/infer_type_impls/statement.rs +++ b/packages/eql-mapper/src/inference/infer_type_impls/statement.rs @@ -79,6 +79,15 @@ impl<'ast> InferType<'ast, Statement> for TypeInferencer<'ast> { )) } + Statement::Explain { + // Note: inner statement's type inference happens through normal AST traversal + statement: _inner_statement, + .. + } => { + // The inner statement is type-checked via the normal AST traversal (not here), so transformations still apply + // EXPLAIN itself returns metadata, not the query results - give it empty projection + self.unify_node_with_type(statement, Type::empty_projection())?; + } + _ => {} }; diff --git a/packages/eql-mapper/src/inference/sql_types/sql_decls.rs b/packages/eql-mapper/src/inference/sql_types/sql_decls.rs index c66f3f31..abedfec6 100644 --- a/packages/eql-mapper/src/inference/sql_types/sql_decls.rs +++ b/packages/eql-mapper/src/inference/sql_types/sql_decls.rs @@ -73,6 +73,9 @@ static SQL_FUNCTION_TYPES: LazyLock, FunctionDecl> eql_v2.jsonb_array_length(T) -> Native where T: JsonLike; eql_v2.jsonb_array_elements(T) -> SetOf where T: JsonLike; eql_v2.jsonb_array_elements_text(T) -> SetOf where T: JsonLike; + eql_v2.jsonb_array(T) -> Native where T: Contain; + eql_v2.jsonb_contains(T, T) -> Native where T: Contain; + eql_v2.jsonb_contained_by(T, T) -> Native where T: Contain; }; HashMap::from_iter( diff --git a/packages/eql-mapper/src/lib.rs b/packages/eql-mapper/src/lib.rs index 92005d91..09afc9a1 100644 --- a/packages/eql-mapper/src/lib.rs +++ b/packages/eql-mapper/src/lib.rs @@ -1708,11 +1708,19 @@ mod test { match type_check(schema, &statement) { Ok(typed) => { - match typed.transform(test_helpers::dummy_encrypted_json_selector(&statement, vec![ast::Value::SingleQuotedString("medications".to_owned())])) { - Ok(statement) => assert_eq!( - 
statement.to_string(), - format!("SELECT id, notes {op} ''::JSONB::eql_v2_encrypted AS meds FROM patients") - ), + match typed.transform(test_helpers::dummy_encrypted_json_selector( + &statement, + vec![ast::Value::SingleQuotedString("medications".to_owned())], + )) { + Ok(statement) => { + let expected = match op { + "@>" => "SELECT id, eql_v2.jsonb_contains(notes, ''::JSONB::eql_v2_encrypted) AS meds FROM patients".to_string(), + "<@" => "SELECT id, eql_v2.jsonb_contained_by(notes, ''::JSONB::eql_v2_encrypted) AS meds FROM patients".to_string(), + // Other operators are not transformed + _ => format!("SELECT id, notes {op} ''::JSONB::eql_v2_encrypted AS meds FROM patients"), + }; + assert_eq!(statement.to_string(), expected) + } Err(err) => panic!("transformation failed: {err}"), } } @@ -1720,6 +1728,197 @@ mod test { } } + #[test] + fn jsonb_array_function() { + let schema = resolver(schema! { + tables: { + patients: { + id, + notes (EQL: JsonLike + Contain), + } + } + }); + + let statement = parse( + "SELECT id FROM patients WHERE eql_v2.jsonb_array(notes) @> eql_v2.jsonb_array(notes)", + ); + + match type_check(schema, &statement) { + Ok(_) => (), + Err(err) => panic!("type check failed for eql_v2.jsonb_array: {err}"), + } + } + + #[test] + fn jsonb_contains_function() { + let schema = resolver(schema! { + tables: { + patients: { + id, + notes (EQL: JsonLike + Contain), + } + } + }); + + let statement = parse("SELECT id FROM patients WHERE eql_v2.jsonb_contains(notes, notes)"); + + match type_check(schema, &statement) { + Ok(_) => (), + Err(err) => panic!("type check failed for eql_v2.jsonb_contains: {err}"), + } + } + + #[test] + fn jsonb_contained_by_function() { + let schema = resolver(schema! 
{ + tables: { + patients: { + id, + notes (EQL: JsonLike + Contain), + } + } + }); + + let statement = + parse("SELECT id FROM patients WHERE eql_v2.jsonb_contained_by(notes, notes)"); + + match type_check(schema, &statement) { + Ok(_) => (), + Err(err) => panic!("type check failed for eql_v2.jsonb_contained_by: {err}"), + } + } + + #[test] + fn eql_v2_jsonb_contains_with_param() { + let schema = resolver(schema! { + tables: { + patients: { + id, + notes (EQL: JsonLike + Contain), + } + } + }); + + let statement = parse("SELECT id FROM patients WHERE eql_v2.jsonb_contains(notes, $1)"); + + let typed = type_check(schema, &statement) + .map_err(|err| err.to_string()) + .unwrap(); + + // Verify param was inferred as EQL type + assert!(typed.params_contain_eql(), "param $1 should be EQL type"); + + // Verify transformation output - function passes through, param gets cast + match typed.transform(HashMap::new()) { + Ok(statement) => assert_eq!( + statement.to_string(), + "SELECT id FROM patients WHERE eql_v2.jsonb_contains(notes, $1::JSONB::eql_v2_encrypted)" + ), + Err(err) => panic!("transformation failed: {err}"), + } + } + + #[test] + fn containment_operator_transforms_to_function() { + let schema = resolver(schema! 
{ + tables: { + patients: { + id, + notes (EQL: JsonLike + Contain), + } + } + }); + + let statement = parse("SELECT id FROM patients WHERE notes @> $1"); + + let typed = + type_check(schema, &statement).expect("type check failed for containment operator"); + let transformed = typed + .transform(HashMap::new()) + .expect("transformation failed"); + let sql = transformed.to_string(); + + // Verify function call exists + assert!( + sql.contains("eql_v2.jsonb_contains"), + "Expected @> to be transformed to eql_v2.jsonb_contains, got: {sql}" + ); + + // CRITICAL: Verify the parameter is cast to enable GIN index usage + // The cast ::JSONB::eql_v2_encrypted is required for GIN indexes to work + assert!( + sql.contains("::JSONB::eql_v2_encrypted") || sql.contains("::jsonb::eql_v2_encrypted"), + "Expected parameter to be cast as ::JSONB::eql_v2_encrypted for GIN index support, got: {sql}" + ); + } + + #[test] + fn contained_by_operator_transforms_to_function() { + let schema = resolver(schema! { + tables: { + patients: { + id, + notes (EQL: JsonLike + Contain), + } + } + }); + + let statement = parse("SELECT id FROM patients WHERE $1 <@ notes"); + + let typed = + type_check(schema, &statement).expect("type check failed for contained_by operator"); + let transformed = typed + .transform(HashMap::new()) + .expect("transformation failed"); + let sql = transformed.to_string(); + + // Verify function call exists + assert!( + sql.contains("eql_v2.jsonb_contained_by"), + "Expected <@ to be transformed to eql_v2.jsonb_contained_by, got: {sql}" + ); + + // CRITICAL: Verify the parameter is cast to enable GIN index usage + assert!( + sql.contains("::JSONB::eql_v2_encrypted") || sql.contains("::jsonb::eql_v2_encrypted"), + "Expected parameter to be cast as ::JSONB::eql_v2_encrypted for GIN index support, got: {sql}" + ); + } + + #[test] + fn explain_statement_transforms_containment_operator() { + let schema = resolver(schema! 
{ + tables: { + patients: { + id, + notes (EQL: JsonLike + Contain), + } + } + }); + + // EXPLAIN wraps the inner SELECT - transformation should still apply + let statement = parse("EXPLAIN SELECT id FROM patients WHERE notes @> $1"); + + let typed = type_check(schema, &statement) + .expect("type check failed for EXPLAIN with containment operator"); + let transformed = typed + .transform(HashMap::new()) + .expect("transformation failed"); + let sql = transformed.to_string(); + + // Verify EXPLAIN is preserved + assert!( + sql.starts_with("EXPLAIN"), + "Expected EXPLAIN prefix preserved, got: {sql}" + ); + + // Verify function call exists inside the EXPLAIN + assert!( + sql.contains("eql_v2.jsonb_contains"), + "Expected @> inside EXPLAIN to be transformed to eql_v2.jsonb_contains, got: {sql}" + ); + } + #[test] fn eql_term_partial_is_unified_with_eql_term_whole() { // init_tracing(); diff --git a/packages/eql-mapper/src/transformation_rules/mod.rs b/packages/eql-mapper/src/transformation_rules/mod.rs index cf9f4346..2d440042 100644 --- a/packages/eql-mapper/src/transformation_rules/mod.rs +++ b/packages/eql-mapper/src/transformation_rules/mod.rs @@ -15,6 +15,7 @@ mod cast_literals_as_encrypted; mod cast_params_as_encrypted; mod fail_on_placeholder_change; mod preserve_effective_aliases; +mod rewrite_containment_ops; mod rewrite_standard_sql_fns_on_eql_types; use std::marker::PhantomData; @@ -23,6 +24,7 @@ pub(crate) use cast_literals_as_encrypted::*; pub(crate) use cast_params_as_encrypted::*; pub(crate) use fail_on_placeholder_change::*; pub(crate) use preserve_effective_aliases::*; +pub(crate) use rewrite_containment_ops::*; pub(crate) use rewrite_standard_sql_fns_on_eql_types::*; use crate::EqlMapperError; diff --git a/packages/eql-mapper/src/transformation_rules/rewrite_containment_ops.rs b/packages/eql-mapper/src/transformation_rules/rewrite_containment_ops.rs new file mode 100644 index 00000000..3142f8ca --- /dev/null +++ 
b/packages/eql-mapper/src/transformation_rules/rewrite_containment_ops.rs @@ -0,0 +1,116 @@ +use std::collections::HashMap; +use std::mem; +use std::sync::Arc; + +use sqltk::parser::ast::Value as SqltkValue; +use sqltk::parser::ast::{ + BinaryOperator, Expr, Function, FunctionArg, FunctionArgExpr, FunctionArgumentList, + FunctionArguments, Ident, ObjectName, ObjectNamePart, ValueWithSpan, +}; +use sqltk::parser::tokenizer::Span; +use sqltk::{NodeKey, NodePath, Visitable}; + +use crate::unifier::{Type, Value}; +use crate::EqlMapperError; + +use super::TransformationRule; + +/// Rewrites `@>` and `<@` operators on EQL types to function calls. +/// +/// - `col @> val` → `eql_v2.jsonb_contains(col, val)` +/// - `val <@ col` → `eql_v2.jsonb_contained_by(val, col)` +/// +/// This transformation enables GIN index usage when the index is created on +/// `eql_v2.jsonb_array(encrypted_col)`. +#[derive(Debug)] +pub struct RewriteContainmentOps<'ast> { + node_types: Arc, Type>>, +} + +impl<'ast> RewriteContainmentOps<'ast> { + pub fn new(node_types: Arc, Type>>) -> Self { + Self { node_types } + } + + /// Returns `true` if at least one operand of a binary expression is an EQL type. + /// + /// Note: We check the operands (left/right), not the BinaryOp result type, + /// because containment operators return Native (boolean), not EQL. + #[inline] + fn uses_eql_type(&self, left: &'ast Expr, right: &'ast Expr) -> bool { + self.is_eql_typed(left) || self.is_eql_typed(right) + } + + /// Checks if an expression has an EQL type in node_types. 
+ fn is_eql_typed(&self, expr: &'ast Expr) -> bool { + matches!( + self.node_types.get(&NodeKey::new(expr)), + Some(Type::Value(Value::Eql(_))) + ) + } + + fn make_function_call(fn_name: &str, left: Expr, right: Expr) -> Expr { + Expr::Function(Function { + name: ObjectName(vec![ + ObjectNamePart::Identifier(Ident::new("eql_v2")), + ObjectNamePart::Identifier(Ident::new(fn_name)), + ]), + uses_odbc_syntax: false, + args: FunctionArguments::List(FunctionArgumentList { + args: vec![ + FunctionArg::Unnamed(FunctionArgExpr::Expr(left)), + FunctionArg::Unnamed(FunctionArgExpr::Expr(right)), + ], + duplicate_treatment: None, + clauses: vec![], + }), + parameters: FunctionArguments::None, + filter: None, + null_treatment: None, + over: None, + within_group: vec![], + }) + } +} + +impl<'ast> TransformationRule<'ast> for RewriteContainmentOps<'ast> { + fn apply( + &mut self, + node_path: &NodePath<'ast>, + target_node: &mut N, + ) -> Result { + if self.would_edit(node_path, target_node) { + let expr = target_node.downcast_mut::().unwrap(); + if let Expr::BinaryOp { left, op, right } = expr { + let fn_name = match op { + BinaryOperator::AtArrow => "jsonb_contains", // @> + BinaryOperator::ArrowAt => "jsonb_contained_by", // <@ + _ => return Ok(false), + }; + + // Use mem::replace to move (not copy) the original nodes, + // preserving their NodeKey identity for downstream casting rules + let dummy = Expr::Value(ValueWithSpan { + value: SqltkValue::Null, + span: Span::empty(), + }); + let left_expr = mem::replace(&mut **left, dummy.clone()); + let right_expr = mem::replace(&mut **right, dummy); + *expr = Self::make_function_call(fn_name, left_expr, right_expr); + return Ok(true); + } + } + Ok(false) + } + + fn would_edit(&mut self, node_path: &NodePath<'ast>, _target_node: &N) -> bool { + // Use node_path to get the original AST node (with correct NodeKey identity) + if let Some((Expr::BinaryOp { left, op, right },)) = node_path.last_1_as::() { + if matches!(op, 
BinaryOperator::AtArrow | BinaryOperator::ArrowAt) { + // Only rewrite if at least one operand is EQL-typed + return self.uses_eql_type(left, right); + } + } + false + } +} diff --git a/packages/eql-mapper/src/type_checked_statement.rs b/packages/eql-mapper/src/type_checked_statement.rs index c41d93dc..772ea723 100644 --- a/packages/eql-mapper/src/type_checked_statement.rs +++ b/packages/eql-mapper/src/type_checked_statement.rs @@ -6,8 +6,8 @@ use sqltk::{AsNodeKey, NodeKey, Transformable}; use crate::unifier::EqlTerm; use crate::{ CastLiteralsAsEncrypted, CastParamsAsEncrypted, DryRunnable, EqlMapperError, - FailOnPlaceholderChange, Param, PreserveEffectiveAliases, RewriteStandardSqlFnsOnEqlTypes, - TransformationRule, + FailOnPlaceholderChange, Param, PreserveEffectiveAliases, RewriteContainmentOps, + RewriteStandardSqlFnsOnEqlTypes, TransformationRule, }; use crate::unifier::{Projection, Type, Value}; @@ -152,6 +152,7 @@ impl<'ast> TypeCheckedStatement<'ast> { ) -> DryRunnable<'_, impl TransformationRule<'_>> { DryRunnable::new(( RewriteStandardSqlFnsOnEqlTypes::new(Arc::clone(&self.node_types)), + RewriteContainmentOps::new(Arc::clone(&self.node_types)), PreserveEffectiveAliases, CastLiteralsAsEncrypted::new(encrypted_literals), FailOnPlaceholderChange::new(), diff --git a/packages/showcase/src/common.rs b/packages/showcase/src/common.rs index cc6c9cb2..7e363523 100644 --- a/packages/showcase/src/common.rs +++ b/packages/showcase/src/common.rs @@ -9,8 +9,8 @@ use tokio_postgres::{types::ToSql, Client, SimpleQueryMessage}; use tracing_subscriber::{filter::Directive, EnvFilter, FmtSubscriber}; pub const PROXY: u16 = 6432; -pub const PG_LATEST: u16 = 5532; -pub const PG_V17_TLS: u16 = 5617; +pub const PG_PORT: u16 = 5532; +pub const PG_TLS_PORT: u16 = 5617; static INIT: Once = Once::new(); @@ -33,7 +33,7 @@ pub async fn table_exists(table: &str) -> bool { let port = std::env::var("CS_DATABASE__PORT") .map(|s| s.parse().unwrap()) - .unwrap_or(PG_LATEST); + 
.unwrap_or(PG_PORT); let client = connect_with_tls(port).await; let messages = client.simple_query(&query).await.unwrap();