diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
new file mode 100644
index 0000000..61cf7a9
--- /dev/null
+++ b/.github/workflows/docs.yml
@@ -0,0 +1,57 @@
+name: Deploy Documentation
+
+on:
+ push:
+ branches:
+ - main
+ paths:
+ - 'docs/**'
+ - 'zensical.toml'
+ - '.github/workflows/docs.yml'
+ workflow_dispatch:
+
+permissions:
+ contents: read
+ pages: write
+ id-token: write
+
+concurrency:
+ group: "pages"
+ cancel-in-progress: false
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Setup Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.12'
+
+ - name: Install uv
+ uses: astral-sh/setup-uv@v4
+
+ - name: Build documentation
+ run: uvx zensical build
+
+ - name: Setup Pages
+ uses: actions/configure-pages@v4
+
+ - name: Upload artifact
+ uses: actions/upload-pages-artifact@v3
+ with:
+ path: './site'
+
+ deploy:
+ environment:
+ name: github-pages
+ url: ${{ steps.deployment.outputs.page_url }}
+ runs-on: ubuntu-latest
+ needs: build
+ steps:
+ - name: Deploy to GitHub Pages
+ id: deployment
+ uses: actions/deploy-pages@v4
diff --git a/.gitignore b/.gitignore
index c698ade..bc83c7b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,3 +29,7 @@ target
# macOS
.DS_Store
+
+# Documentation
+site/
+.playwright-mcp/
diff --git a/README.md b/README.md
index 49df246..7994335 100644
--- a/README.md
+++ b/README.md
@@ -1,127 +1,30 @@
-# Postgres Stream
+
+
+
-Reliably stream Postgres table changes to external systems (queues, webhooks, etc.) with automatic failover and zero event loss.
+Postgres Stream
-## How Does It Work?
+
+ Reliably stream Postgres table changes to external systems with automatic failover and zero event loss.
+
-1. **User manages subscriptions** directly in the database (`pgstream.subscriptions` table)
-2. **Subscription changes automatically create database triggers** on the target tables
-3. **Application events fire the managed triggers**, which insert into a partitioned `events` table
-4. **Postgres Stream streams events** via logical replication and delivers them to your sink
-
-```mermaid
-flowchart LR
- subgraph Postgres
-    A[subscriptions<br/>table] -->|creates<br/>triggers| B[trigger<br/>on users]
-    B -->|insert<br/>into| C[events<br/>partitioned<br/>20250112..]
-    C -->|logical<br/>replication| D[replication<br/>slot]
-  end
-  D -->|streams| E[Postgres<br/>Stream]
-  E -->|delivers| F[sink<br/>queue/http]
-
- style Postgres fill:#dbeafe,stroke:#2563eb,stroke-width:3px
- style A fill:#f3e8ff,stroke:#9333ea,stroke-width:2px,color:#581c87
- style B fill:#fed7aa,stroke:#ea580c,stroke-width:2px,color:#7c2d12
- style C fill:#bfdbfe,stroke:#2563eb,stroke-width:2px,color:#1e40af
- style D fill:#99f6e4,stroke:#14b8a6,stroke-width:2px,color:#115e59
- style E fill:#fecdd3,stroke:#e11d48,stroke-width:3px,color:#881337
- style F fill:#bbf7d0,stroke:#16a34a,stroke-width:3px,color:#14532d
-```
-
-
-Example of generated trigger function (simplified for readability)
-
-```sql
--- Auto-generated when you insert into subscriptions table
-create or replace function pgstream._publish_after_insert_on_users()
-returns trigger as $$
-declare
- v_jsonb_output jsonb := '[]'::jsonb;
- v_base_payload jsonb := jsonb_build_object(
- 'tg_op', tg_op,
- 'tg_table_name', tg_table_name,
- 'tg_table_schema', tg_table_schema,
- 'timestamp', (extract(epoch from now()) * 1000)::bigint
- );
-begin
- -- Check subscription "user-signup" condition
- if new.email_verified = true then
- v_jsonb_output := v_jsonb_output || (jsonb_build_object(
- 'tg_name', 'user-signup',
- 'new', jsonb_build_object('id', new.id, 'email', new.email),
- 'old', null
- ) || v_base_payload || '{}'::jsonb); -- payload_extensions would go here
- end if;
-
- -- Write to events table if any subscriptions matched
- if jsonb_array_length(v_jsonb_output) > 0 then
- insert into pgstream.events (payload, stream_id)
- select elem, 1
- from jsonb_array_elements(v_jsonb_output) as t(elem);
- end if;
-
- return new;
-end;
-$$ language plpgsql
-set search_path = ''
-security definer;
-
--- Trigger that calls the function
-create constraint trigger pgstream_insert
-after insert on public.users
-deferrable initially deferred
-for each row
-when ((new.email_verified = true))
-execute procedure pgstream._publish_after_insert_on_users();
-```
-
-> **Note:** The actual generated code handles multiple subscriptions per table, merges `when` clauses with OR logic, and includes all `payload_extensions`. This example shows the structure for a single subscription.
-
-
-
-## Benefits
+## Features
- **Single binary** - No complex infrastructure or high-availability destinations required
- **Postgres-native durability** - Events are stored in the database, WAL can be released immediately
-- **No data loss** - As long as downtime is less than partition retention (7 days by default)
-
-### Automatic Recovery
-
-Postgres Stream automatically handles two failure scenarios without operator intervention:
-
-**Sink failure** (queue unavailable, webhook fails, etc.)
-- Checkpoints the failed event and continues consuming the replication stream
-- Periodically retries delivering the checkpoint event
-- When sink recovers, replays all missed events via `COPY` from the events table
-
-**Slot invalidation** (WAL exceeded `max_slot_wal_keep_size`)
-- Detects the "can no longer get changes from replication slot" error
-- Queries `confirmed_flush_lsn` from the invalidated slot to find the last processed position
-- Sets a failover checkpoint and creates a new slot
-- When replication resumes, replays missed events from the checkpoint
-
-Both scenarios use the same replay mechanism: events are read directly from the `events` table (not WAL), guaranteeing no data loss as long as events are within partition retention.
-
-## Drawbacks
+- **Zero data loss** - As long as downtime is less than partition retention (7 days by default)
+- **Automatic recovery** - Handles both sink failures and slot invalidation without operator intervention
-- **Small overhead** - Additional INSERT into `events` table on every subscribed operation
-- **Partition management** - Need to monitor partition growth if event volume is very high
-- **Not for dynamic subscriptions** - Each subscription change recreates database triggers (expensive operation)
+## Supported Sinks
-## Using It
+Kafka, NATS, RabbitMQ, Redis Strings, Redis Streams, Webhook, AWS SQS, AWS SNS, AWS Kinesis, GCP Pub/Sub, Elasticsearch, Meilisearch
-### 1. Requirements
-
-- Postgres 15+ with `wal_level=logical`
-- User with `replication` privilege
-
-### 2. Configuration
-
-Create `config.yaml`:
+## Quick Start
```yaml
+# config.yaml
stream:
- id: 1 # Unique stream ID
+ id: 1
pg_connection:
host: localhost
port: 5432
@@ -131,501 +34,35 @@ stream:
tls:
enabled: false
batch:
- max_size: 1000 # Events per batch
- max_fill_secs: 5 # Max time to fill batch
+ max_size: 1000
+ max_fill_secs: 5
sink:
- type: memory # Built-in test sink
+ type: webhook
+ url: https://httpbin.org/post
```
-### 3. Run the Binary
-
```bash
-# Start Postgres Stream
-postgres-stream
+# Run with Docker
+docker run -v $(pwd)/config.yaml:/config.yaml \
+ ghcr.io/psteinroe/postgres-stream:webhook-latest
-# Or with Docker
-docker run -v $(pwd)/config.yaml:/config.yaml postgres-stream
+# Create a subscription
+psql -c "
+INSERT INTO pgstream.subscriptions (key, stream_id, operation, schema_name, table_name)
+VALUES ('user-created', 1, 'INSERT', 'public', 'users');
+"
```
-### 4. Create Subscriptions
-
-Subscriptions define which events to capture. Simply insert into the `subscriptions` table:
-
-```sql
--- Subscribe to verified user signups
-insert into pgstream.subscriptions (
- key,
- stream_id,
- operation,
- schema_name,
- table_name,
- when_clause,
- column_names,
- payload_extensions
-) values (
- 'user-signup', -- Unique identifier
- 1, -- Stream ID from config
- 'INSERT', -- Operation: INSERT, UPDATE, or DELETE
- 'public', -- Schema name
- 'users', -- Table name
- 'new.email_verified = true', -- Optional filter (SQL expression)
- array['id', 'email', 'created_at'], -- Columns to include in payload
- '[]'::jsonb -- Payload extensions (see below)
-);
-```
-
-This automatically creates the trigger on `public.users`. Now when you insert a verified user, the event is captured.
-
-**To reduce no-op trigger recreation** (important for production), use merge to only update when values actually change:
-
-
-Click to show merge-based helper function
-
-```sql
--- Helper function that only recreates triggers when subscription actually changes
-create or replace function set_subscriptions(
- p_stream_id bigint,
- p_subscriptions pgstream.subscriptions[]
-)
-returns void
-language plpgsql
-security definer
-set search_path to ''
-as $$
-begin
- create temporary table temp_subscriptions as
- select * from unnest(p_subscriptions);
-
- -- Only update if values actually changed (avoids trigger recreation)
- merge into pgstream.subscriptions as target
- using temp_subscriptions as source
- on (target.key = source.key and target.stream_id = p_stream_id)
- when matched and (
- target.operation is distinct from source.operation or
- target.schema_name is distinct from source.schema_name or
- target.table_name is distinct from source.table_name or
- target.when_clause is distinct from source.when_clause or
- target.column_names is distinct from source.column_names or
- target.metadata is distinct from source.metadata or
- target.payload_extensions is distinct from source.payload_extensions or
- target.metadata_extensions is distinct from source.metadata_extensions
- ) then update set
- operation = source.operation,
- schema_name = source.schema_name,
- table_name = source.table_name,
- when_clause = source.when_clause,
- column_names = source.column_names,
- metadata = source.metadata,
- payload_extensions = source.payload_extensions,
- metadata_extensions = source.metadata_extensions
- when not matched then insert (
- key, stream_id, operation, schema_name, table_name,
- when_clause, column_names, metadata, payload_extensions, metadata_extensions
- ) values (
- source.key, p_stream_id, source.operation, source.schema_name,
- source.table_name, source.when_clause, source.column_names,
- source.metadata, source.payload_extensions, source.metadata_extensions
- );
-
- -- Remove subscriptions not in input
- delete from pgstream.subscriptions
- where stream_id = p_stream_id
- and not exists (
- select 1 from temp_subscriptions
- where pgstream.subscriptions.key = temp_subscriptions.key
- );
-
- drop table temp_subscriptions;
-end;
-$$;
-```
-
-
-
-### 5. Testing Your Subscription
+## Documentation
-Now when a user signs up with verified email:
+Full documentation available at **[psteinroe.github.io/postgres-stream](https://psteinroe.github.io/postgres-stream/)**
-```sql
-insert into users (email, email_verified) values ('user@example.com', true);
-```
-
-The sink receives:
-
-```json
-{
- "tg_name": "user-signup",
- "tg_op": "INSERT",
- "tg_table_name": "users",
- "tg_table_schema": "public",
- "timestamp": 1703001234567,
- "new": {
- "id": 123,
- "email": "user@example.com",
- "created_at": "2024-12-12T10:30:00Z"
- },
- "old": null
-}
-```
-
-### 6. Manual Event Insertion
-
-You can also insert events directly into the `pgstream.events` table instead of using subscriptions. This is useful for custom events not tied to table changes, background jobs, or events from external sources.
-
-```sql
-insert into pgstream.events (payload, stream_id, metadata)
-values (
- '{"type": "job-completed", "job_id": 123, "result": "success"}'::jsonb,
- 1, -- Stream ID from config
- '{"topic": "background-jobs"}'::jsonb
-);
-```
-
-Required fields:
-- `payload` (jsonb) - The event data
-- `stream_id` (bigint) - Must match the stream ID from your config
-
-Optional:
-- `metadata` (jsonb) - Routing information (topic, partition key, etc.)
-
-The `id` and `created_at` fields are auto-generated.
-
-### 7. Payload Extensions
-
-Use `payload_extensions` to add computed fields to the event payload:
-
-```sql
-insert into pgstream.subscriptions (
- key, stream_id, operation, schema_name, table_name,
- when_clause, column_names, payload_extensions
-) values (
- 'order-notification',
- 1,
- 'INSERT',
- 'public',
- 'orders',
- null,
- array['id', 'user_id', 'total'],
- '[
- {"json_path": "order_date", "expression": "new.created_at::date::text"},
- {"json_path": "total_formatted", "expression": "''$'' || new.total::text"}
- ]'::jsonb
-);
-```
-
-Result:
-
-```json
-{
- "tg_name": "order-notification",
- "new": {"id": 456, "user_id": 123, "total": 99.99},
- "order_date": "2024-12-12",
- "total_formatted": "$99.99"
-}
-```
-
-Common use cases:
-- **Computed fields**: Add derived values like `new.created_at::date`
-- **Formatted values**: Add display-ready strings
-- **Context info**: Add `auth.uid()`, `current_setting('app.tenant_id')`
-
-> **Note:** For routing information (topic, queue, partition key, etc.), use `metadata` and `metadata_extensions` instead.
-
-### 8. Event Metadata
-
-Use metadata for routing and sink configuration (topic, queue, partition key, index, etc.). Metadata is stored separately from the payload and read by sinks to determine where and how to deliver events.
-
-#### Static Metadata
-
-Use the `metadata` column for routing values that are the same for every event:
-
-```sql
-insert into pgstream.subscriptions (
- key, stream_id, operation, schema_name, table_name,
- column_names, metadata
-) values (
- 'user-events',
- 1,
- 'INSERT',
- 'public',
- 'users',
- array['id', 'email'],
- '{"topic": "user-events", "priority": "high"}'::jsonb
-);
-```
-
-#### Dynamic Metadata (metadata_extensions)
-
-Use `metadata_extensions` to compute routing values from row data. The format is identical to `payload_extensions`. You can use any SQL expression, including Supabase auth functions:
-
-```sql
-insert into pgstream.subscriptions (
- key, stream_id, operation, schema_name, table_name,
- column_names, metadata_extensions
-) values (
- 'user-events',
- 1,
- 'INSERT',
- 'public',
- 'users',
- array['id', 'email'],
- '[
- {"json_path": "partition_key", "expression": "new.user_id::text"},
- {"json_path": "topic", "expression": "''users-'' || new.region"}
- ]'::jsonb
-);
-```
-
-#### Nested Paths
-
-Use dot notation in `json_path` to create nested objects:
-
-```sql
-'[
- {"json_path": "auth.user_id", "expression": "auth.uid()::text"},
- {"json_path": "auth.role", "expression": "auth.role()"}
-]'::jsonb
-```
-
-This produces:
-```json
-{
- "auth": {
- "user_id": "d0c12345-abcd-1234-efgh-567890abcdef",
- "role": "authenticated"
- }
-}
-```
-
-#### Combined Example
-
-You can use both static and dynamic metadata together:
-
-```sql
-insert into pgstream.subscriptions (
- key, stream_id, operation, schema_name, table_name,
- column_names, metadata, metadata_extensions
-) values (
- 'user-events',
- 1,
- 'INSERT',
- 'public',
- 'users',
- array['id', 'email'],
- '{"priority": "high"}'::jsonb,
- '[
- {"json_path": "topic", "expression": "''users-'' || new.region"},
- {"json_path": "partition_key", "expression": "new.user_id::text"}
- ]'::jsonb
-);
-```
-
-The resulting event metadata merges static and dynamic values:
-```json
-{
- "priority": "high",
- "topic": "users-eu-west-1",
- "partition_key": "123"
-}
-```
-
-## How Failover Works
-
-When the sink fails (e.g., queue goes down), Postgres Stream:
-
-1. Saves the failed event's ID as a checkpoint
-2. Continues consuming the replication stream (events still written to table)
-3. Periodically retries delivering the checkpoint event
-4. On success, uses `COPY` to stream all events between checkpoint and current position
-5. Replays missed events in order, then returns to normal streaming
-
-**Guarantees:**
-- No events lost (as long as downtime < partition retention)
-- Events delivered at least once
-- Order preserved within partitions
-- No WAL retention required (events stored in table)
-
-## How Slot Recovery Works
-
-When the replication slot is invalidated (WAL exceeded `max_slot_wal_keep_size`), Postgres Stream:
-
-1. Detects the "can no longer get changes from replication slot" error
-2. Queries `confirmed_flush_lsn` from the invalidated slot (Postgres preserves this)
-3. Finds the first event with `lsn > confirmed_flush_lsn`
-4. Sets a failover checkpoint at that event
-5. Drops the invalidated slot
-6. Restarts the pipeline (ETL creates a fresh slot)
-7. When replication events arrive, triggers failover replay from checkpoint
-
-**Guarantees:**
-- Automatic recovery without operator intervention
-- No events lost (events stored in table, not dependent on WAL)
-- Works as long as events are within partition retention
-
-## Automated Partition Management
-
-Postgres Stream automatically manages daily partitions in the background:
-
-**Retention policy:**
-- **Creates partitions** 7 days in advance
-- **Drops partitions** older than 7 days
-- **Runs on startup** and then daily
-
-## Implementing Custom Sinks
-
-Create custom sinks to deliver events to any destination (HTTP, Kafka, RabbitMQ, etc.).
-
-> **Important:** Sink dependencies and implementations should be behind feature flags to avoid bloating the binary. Users should only compile the sinks they actually use.
-
-### 1. Add Dependencies with Feature Flags
-
-Update `Cargo.toml`:
-```toml
-[dependencies]
-# Existing dependencies...
-
-# Optional sink dependencies
-reqwest = { version = "0.11", features = ["json"], optional = true }
-rdkafka = { version = "0.36", optional = true }
-
-[features]
-# Sink feature flags
-sink-http = ["dep:reqwest"]
-sink-kafka = ["dep:rdkafka"]
-```
-
-### 2. Create the Sink Implementation
-
-Create a new file `src/sink/http.rs`:
-
-```rust
-use etl::error::EtlResult;
-use reqwest::Client;
-use serde::Deserialize;
-use tracing::info;
-
-use crate::sink::Sink;
-use crate::types::TriggeredEvent;
-
-#[derive(Clone, Debug, Deserialize)]
-pub struct HttpSinkConfig {
- pub url: String,
- #[serde(default)]
- pub headers: std::collections::HashMap<String, String>,
-}
-
-#[derive(Clone)]
-pub struct HttpSink {
- config: HttpSinkConfig,
- client: Client,
-}
-
-impl HttpSink {
- pub fn new(config: HttpSinkConfig) -> Self {
- let client = Client::new();
- Self { config, client }
- }
-}
-
-impl Sink for HttpSink {
- fn name() -> &'static str {
- "http"
- }
-
- async fn publish_events(&self, events: Vec<TriggeredEvent>) -> EtlResult<()> {
- for event in events {
- let mut request = self.client
- .post(&self.config.url)
- .json(&event.payload);
-
- // Add custom headers
- for (key, value) in &self.config.headers {
- request = request.header(key, value);
- }
-
- let response = request.send().await?;
-
- if !response.status().is_success() {
- return Err(etl::etl_error!(
- etl::error::ErrorKind::Network,
- "HTTP request failed: {}",
- response.status()
- ));
- }
-
- info!("published event {} to {}", event.id.id, self.config.url);
- }
-
- Ok(())
- }
-}
-```
-
-### 3. Register in `src/sink/mod.rs`
-
-```rust
-mod base;
-pub mod memory;
-
-#[cfg(feature = "sink-http")]
-pub mod http;
-
-pub use base::Sink;
-```
-
-### 4. Add to SinkConfig Enum
-
-Update `src/config/sink.rs`:
-
-```rust
-use serde::Deserialize;
-
-#[cfg(feature = "sink-http")]
-use crate::sink::http::HttpSinkConfig;
-
-#[derive(Clone, Debug, Deserialize)]
-#[serde(tag = "type", rename_all = "lowercase")]
-pub enum SinkConfig {
- Memory,
-
- #[cfg(feature = "sink-http")]
- Http(HttpSinkConfig),
-}
-```
-
-### 5. Wire Up in Core
-
-Update `src/core.rs`:
-
-```rust
-#[cfg(feature = "sink-http")]
-use crate::sink::http::HttpSink;
-
-// In start_pipeline_with_config():
-let sink = match &config.sink {
- SinkConfig::Memory => MemorySink::new(),
-
- #[cfg(feature = "sink-http")]
- SinkConfig::Http(cfg) => HttpSink::new(cfg.clone()),
-};
-```
-
-### 6. Build and Use
-
-Build with the HTTP sink feature:
-```bash
-cargo build --release --features sink-http
-```
-
-Use in `config.yaml`:
-```yaml
-sink:
- type: http
- url: https://webhook.example.com/events
- headers:
- Authorization: Bearer token123
- X-Custom-Header: value
-```
+- [Getting Started](https://psteinroe.github.io/postgres-stream/getting-started/)
+- [How It Works](https://psteinroe.github.io/postgres-stream/concepts/how-it-works/)
+- [Sinks](https://psteinroe.github.io/postgres-stream/sinks/)
+- [Configuration Reference](https://psteinroe.github.io/postgres-stream/reference/configuration/)
+## License
+MIT
diff --git a/docs/concepts/event-structure.md b/docs/concepts/event-structure.md
new file mode 100644
index 0000000..fd9afda
--- /dev/null
+++ b/docs/concepts/event-structure.md
@@ -0,0 +1,67 @@
+# Event Structure
+
+Events have two parts: **payload** (the data) and **metadata** (routing info).
+
+## Payload
+
+The event body sent to your sink. Contains the row data and trigger context.
+
+```json
+{
+ "tg_name": "user-created",
+ "tg_op": "INSERT",
+ "tg_table_name": "users",
+ "tg_table_schema": "public",
+ "timestamp": 1703001234567,
+ "new": {
+ "id": 123,
+ "email": "user@example.com"
+ }
+}
+```
+
+| Field | Description |
+|-------|-------------|
+| `tg_name` | Subscription key |
+| `tg_op` | Operation: INSERT, UPDATE, DELETE |
+| `tg_table_name` | Source table |
+| `tg_table_schema` | Source schema |
+| `timestamp` | Event time (Unix ms) |
+| `new` | New row data (INSERT/UPDATE) |
+| `old` | Previous row data (UPDATE/DELETE) |
+
+### Selecting Columns
+
+By default, all columns are included. Use `column_names` to select specific columns:
+
+```sql
+INSERT INTO pgstream.subscriptions (key, stream_id, operation, schema_name, table_name, column_names)
+VALUES ('user-created', 1, 'INSERT', 'public', 'users', ARRAY['id', 'email']);
+```
+
+## Metadata
+
+Routing configuration read by sinks. Controls where and how events are delivered.
+
+```sql
+INSERT INTO pgstream.subscriptions (key, stream_id, operation, schema_name, table_name, metadata)
+VALUES ('user-created', 1, 'INSERT', 'public', 'users', '{"topic": "users", "priority": "high"}');
+```
+
+Each sink reads specific metadata fields:
+
+| Sink | Fields |
+|------|--------|
+| Kafka | `topic` |
+| NATS | `topic` |
+| RabbitMQ | `exchange`, `routing_key` |
+| Redis Strings | `key` |
+| Redis Streams | `stream` |
+| Webhook | `url`, `headers` |
+| SQS | `queue_url` |
+| SNS | `topic` |
+| Kinesis | `stream` |
+| Elasticsearch | `index` |
+| Meilisearch | `index` |
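+
+For example, a subscription targeting the RabbitMQ sink could set the exchange and routing key statically (the values here are illustrative):
+
+```sql
+INSERT INTO pgstream.subscriptions (key, stream_id, operation, schema_name, table_name, metadata)
+VALUES ('user-created', 1, 'INSERT', 'public', 'users', '{"exchange": "events", "routing_key": "users.created"}');
+```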
+
+See [Extensions](extensions.md) to compute payload and metadata values dynamically.
diff --git a/docs/concepts/extensions.md b/docs/concepts/extensions.md
new file mode 100644
index 0000000..20ba646
--- /dev/null
+++ b/docs/concepts/extensions.md
@@ -0,0 +1,101 @@
+# Extensions
+
+Add computed values to payload or metadata using SQL expressions.
+
+## Payload Extensions
+
+Add fields to the event body:
+
+```sql
+INSERT INTO pgstream.subscriptions (key, stream_id, operation, schema_name, table_name, payload_extensions)
+VALUES (
+ 'order-created', 1, 'INSERT', 'public', 'orders',
+ '[
+ {"json_path": "total_formatted", "expression": "''$'' || new.total::text"},
+ {"json_path": "order_date", "expression": "new.created_at::date::text"}
+ ]'
+);
+```
+
+Result:
+
+```json
+{
+ "new": {"id": 456, "total": 99.99},
+ "total_formatted": "$99.99",
+ "order_date": "2024-12-12"
+}
+```
+
+## Metadata Extensions
+
+Compute routing values from row data:
+
+```sql
+INSERT INTO pgstream.subscriptions (key, stream_id, operation, schema_name, table_name, metadata_extensions)
+VALUES (
+ 'order-created', 1, 'INSERT', 'public', 'orders',
+ '[
+ {"json_path": "topic", "expression": "''orders-'' || new.region"}
+ ]'
+);
+```
+
+Routes events to `orders-us-east-1`, `orders-eu-west-1`, etc. based on the row's region.
+
+## Extension Format
+
+| Field | Description |
+|-------|-------------|
+| `json_path` | Where to place the value (supports dot notation: `context.user_id`) |
+| `expression` | SQL expression evaluated in trigger context |
+
+## Available Variables
+
+| Variable | Available | Description |
+|----------|-----------|-------------|
+| `new` | INSERT, UPDATE | New row data |
+| `old` | UPDATE, DELETE | Previous row data |
+| `tg_op` | Always | Operation name |
+| `tg_table_name` | Always | Table name |
+| `tg_table_schema` | Always | Schema name |
+
+## Examples
+
+### Dynamic Topic Routing
+
+```sql
+-- Route by table name
+'[{"json_path": "topic", "expression": "''events-'' || tg_table_name"}]'
+
+-- Route by row field
+'[{"json_path": "topic", "expression": "''orders-'' || new.priority"}]'
+```
+
+### Add Auth Context
+
+```sql
+-- Supabase auth
+'[
+ {"json_path": "context.user_id", "expression": "auth.uid()::text"},
+ {"json_path": "context.role", "expression": "auth.role()"}
+]'
+
+-- Session variables
+'[{"json_path": "tenant_id", "expression": "current_setting(''app.tenant_id'', true)"}]'
+```
+
+### Computed Fields
+
+```sql
+'[
+ {"json_path": "year", "expression": "extract(year from new.created_at)::int"},
+ {"json_path": "full_name", "expression": "new.first_name || '' '' || new.last_name"}
+]'
+```
+
+## Notes
+
+- String literals need double single quotes: `''literal''`
+- Expressions must return JSONB-compatible values
+- Metadata extensions override static metadata values
diff --git a/docs/concepts/failover.md b/docs/concepts/failover.md
new file mode 100644
index 0000000..2b1f884
--- /dev/null
+++ b/docs/concepts/failover.md
@@ -0,0 +1,37 @@
+# Failover and Recovery
+
+Postgres Stream handles both sink failures and slot invalidation automatically.
+
+## How It Works
+
+Events are stored in a partitioned table, not just the WAL. This means:
+
+- WAL can be released immediately
+- Recovery reads from the table, not the WAL
+- 7-day partition retention = 7-day recovery window
+
+## Sink Failure
+
+When the sink is unavailable:
+
+1. Checkpoint saved at failed event
+2. Retries until sink recovers
+3. Replays missed events via `COPY`
+4. Resumes normal streaming
+
+## Slot Invalidation
+
+When the replication slot is invalidated:
+
+1. Queries last confirmed LSN from invalidated slot
+2. Sets checkpoint to first event after that LSN
+3. Drops slot and restarts pipeline
+4. Replays from checkpoint
+
+No operator intervention required.
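+
+To observe this from the database side, the standard `pg_replication_slots` view shows the slot state (a read-only check; Postgres Stream performs the recovery itself):
+
+```sql
+-- wal_status becomes 'lost' once the slot has been invalidated
+SELECT slot_name, active, wal_status, confirmed_flush_lsn
+FROM pg_replication_slots;
+```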
+
+## Guarantees
+
+- **Zero data loss** - As long as downtime < partition retention (7 days)
+- **At-least-once delivery** - Events may repeat during replay
+- **Order preserved** - Events replay in order
diff --git a/docs/concepts/how-it-works.md b/docs/concepts/how-it-works.md
new file mode 100644
index 0000000..f34bdba
--- /dev/null
+++ b/docs/concepts/how-it-works.md
@@ -0,0 +1,111 @@
+# How It Works
+
+## Overview
+
+Events are inserted into `pgstream.events` and streamed via logical replication to your sink.
+
+**Two ways to create events:**
+
+1. **Subscriptions** (optional) - Define triggers that automatically capture table changes
+2. **Manual inserts** - Insert directly into `pgstream.events` from your application
+
+See [Manual Events](manual-events.md) for the direct insert approach.
+
+```mermaid
+flowchart LR
+ subgraph Postgres
+    A[subscriptions<br/>table] -->|creates<br/>triggers| B[trigger<br/>on users]
+    B -->|insert<br/>into| C[events<br/>partitioned<br/>20250112..]
+    C -->|logical<br/>replication| D[replication<br/>slot]
+  end
+  D -->|streams| E[Postgres<br/>Stream]
+  E -->|delivers| F[sink<br/>queue/http]
+
+ style Postgres fill:#3b82f6,stroke:#60a5fa,stroke-width:3px,color:#fff
+ style A fill:#3b82f6,stroke:#60a5fa,stroke-width:2px,color:#fff
+ style B fill:#3b82f6,stroke:#60a5fa,stroke-width:2px,color:#fff
+ style C fill:#3b82f6,stroke:#60a5fa,stroke-width:2px,color:#fff
+ style D fill:#3b82f6,stroke:#60a5fa,stroke-width:2px,color:#fff
+ style E fill:#3b82f6,stroke:#60a5fa,stroke-width:3px,color:#fff
+ style F fill:#3b82f6,stroke:#60a5fa,stroke-width:3px,color:#fff
+```
+
+## Why This Approach?
+
+Postgres Stream uses logical replication, but subscribes only to the `events` table instead of your application tables.
+
+Traditional CDC reads application tables directly:
+
+- WAL retention grows if the sink is slow
+- Slot invalidation = data loss
+- Recovery depends on WAL availability
+- Destination must be highly available (e.g. Kafka cluster) to avoid data loss
+
+Postgres Stream only needs the events you subscribed to, not a replica of your entire database. By streaming from a single partitioned `events` table, you get:
+
+- WAL released immediately after event written
+- Events table provides durability (7-day retention)
+- Recovery reads from the table, not WAL
+- Slot invalidation triggers automatic recovery
+- Destination can be simple (single webhook, Redis instance) - no HA required
+
+## Subscriptions (Optional)
+
+When you insert a subscription, Postgres Stream creates a trigger on the target table:
+
+```sql
+-- Auto-generated when you insert into subscriptions table
+create or replace function pgstream._publish_after_insert_on_users()
+returns trigger as $$
+declare
+ v_jsonb_output jsonb := '[]'::jsonb;
+ v_base_payload jsonb := jsonb_build_object(
+ 'tg_op', tg_op,
+ 'tg_table_name', tg_table_name,
+ 'tg_table_schema', tg_table_schema,
+ 'timestamp', (extract(epoch from now()) * 1000)::bigint
+ );
+begin
+ -- Check subscription "user-signup" condition
+ if new.email_verified = true then
+ v_jsonb_output := v_jsonb_output || (jsonb_build_object(
+ 'tg_name', 'user-signup',
+ 'new', jsonb_build_object('id', new.id, 'email', new.email),
+ 'old', null
+ ) || v_base_payload);
+ end if;
+
+ -- Write to events table if any subscriptions matched
+ if jsonb_array_length(v_jsonb_output) > 0 then
+ insert into pgstream.events (payload, stream_id)
+ select elem, 1
+ from jsonb_array_elements(v_jsonb_output) as t(elem);
+ end if;
+
+ return new;
+end;
+$$ language plpgsql;
+```
+
+The actual generated code handles multiple subscriptions per table, merges `when` clauses with OR logic, and includes all payload extensions.
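+
+For reference, the subscription row that produces a trigger like the one above is a plain insert (this mirrors the `user-signup` example used throughout the docs):
+
+```sql
+insert into pgstream.subscriptions (
+  key, stream_id, operation, schema_name, table_name, when_clause, column_names
+) values (
+  'user-signup', 1, 'INSERT', 'public', 'users',
+  'new.email_verified = true',
+  array['id', 'email']
+);
+```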
+
+## Event Flow
+
+1. **Event written** - Via subscription trigger or direct insert into `pgstream.events`
+2. **Logical replication** - Postgres Stream receives the event
+3. **Sink delivery** - Event delivered to your configured sink
+
+## Partitioned Events Table
+
+Events are stored in a partitioned table with daily partitions:
+
+- **7-day retention** by default
+- **Automatic partition creation** 7 days ahead
+- **Automatic partition cleanup** for old data
+- Partitioning keeps the table fast and manageable
+
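+As a rough sketch (not the DDL Postgres Stream actually runs), a daily-partitioned events table looks something like this:
+
+```sql
+-- illustrative only: the column names match the documented fields,
+-- but the real schema is created and managed by Postgres Stream
+create table pgstream.events (
+  id bigint not null,                             -- auto-generated
+  stream_id bigint not null,
+  payload jsonb not null,
+  metadata jsonb,
+  created_at timestamptz not null default now()
+) partition by range (created_at);
+
+-- one partition per day, e.g.
+create table pgstream.events_20250112
+  partition of pgstream.events
+  for values from ('2025-01-12') to ('2025-01-13');
+```
+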
+## Next Steps
+
+- [Subscriptions](subscriptions.md) - Auto-capture table changes
+- [Manual Events](manual-events.md) - Direct event insertion
+- [Event Structure](event-structure.md) - Payload and metadata format
diff --git a/docs/concepts/manual-events.md b/docs/concepts/manual-events.md
new file mode 100644
index 0000000..0577ee0
--- /dev/null
+++ b/docs/concepts/manual-events.md
@@ -0,0 +1,78 @@
+# Manual Event Insertion
+
+Insert events directly without using subscriptions.
+
+## Overview
+
+While subscriptions are the primary way to capture events, you can also insert events directly into the `pgstream.events` table. This is useful for:
+
+- Custom events not tied to table changes
+- Background job notifications
+- Events from external sources
+- Testing and debugging
+
+## Basic Usage
+
+```sql
+insert into pgstream.events (payload, stream_id)
+values (
+ '{"type": "job-completed", "job_id": 123, "result": "success"}'::jsonb,
+ 1 -- Stream ID from config
+);
+```
+
+## Required Fields
+
+| Field | Type | Description |
+|-------|------|-------------|
+| `payload` | jsonb | The event data |
+| `stream_id` | bigint | Must match the `stream.id` in your config |
+
+## Optional Fields
+
+| Field | Type | Description |
+|-------|------|-------------|
+| `metadata` | jsonb | Routing information (topic, partition key, etc.) |
+
+The `id` and `created_at` fields are auto-generated.
+
+## With Metadata
+
+Use metadata for routing:
+
+```sql
+insert into pgstream.events (payload, stream_id, metadata)
+values (
+ '{"type": "background-job", "job_id": 123}'::jsonb,
+ 1,
+ '{"topic": "background-jobs", "priority": "high"}'::jsonb
+);
+```
+
+## Event Flow
+
+Manually inserted events follow the same flow as trigger-generated events:
+
+1. **Inserted** into `pgstream.events`
+2. **Captured** via logical replication
+3. **Delivered** to your configured sink
+
+The only difference is they don't have the trigger metadata (`tg_name`, `tg_op`, etc.).
+
+## Payload Structure
+
+You can use any JSON structure. The payload is delivered directly to the sink without modification.
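+
+For example, a database function can emit an event when a background job finishes (the `complete_job` function and `jobs` table are illustrative):
+
+```sql
+create or replace function complete_job(p_job_id bigint)
+returns void
+language plpgsql
+as $$
+begin
+  update jobs set status = 'done' where id = p_job_id;
+
+  -- emit an event in the same transaction as the status change
+  insert into pgstream.events (payload, stream_id)
+  values (
+    jsonb_build_object('type', 'job-completed', 'job_id', p_job_id),
+    1
+  );
+end;
+$$;
+```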
+
+## Combining with Subscriptions
+
+Manual events and subscription-triggered events can coexist in the same stream. They're processed in order based on insertion time.
+
+Use different metadata keys to route them to different destinations:
+
+```sql
+-- Manual event to specific topic
+metadata = '{"topic": "manual-events"}'
+
+-- Subscription routes to different topic
+metadata_extensions = '[{"json_path": "topic", "expression": "''table-events''"}]'
+```
diff --git a/docs/concepts/subscriptions.md b/docs/concepts/subscriptions.md
new file mode 100644
index 0000000..4649572
--- /dev/null
+++ b/docs/concepts/subscriptions.md
@@ -0,0 +1,177 @@
+# Subscriptions
+
+Define which table changes to capture and how they should be formatted.
+
+## Overview
+
+Subscriptions are stored in the `pgstream.subscriptions` table. When you insert, update, or delete a subscription, Postgres Stream automatically creates or updates the corresponding database triggers.
+
+## Creating a Subscription
+
+```sql
+insert into pgstream.subscriptions (
+ key,
+ stream_id,
+ operation,
+ schema_name,
+ table_name,
+ when_clause,
+ column_names,
+ payload_extensions
+) values (
+ 'user-signup', -- Unique identifier
+ 1, -- Stream ID from config
+ 'INSERT', -- Operation: INSERT, UPDATE, or DELETE
+ 'public', -- Schema name
+ 'users', -- Table name
+ 'new.email_verified = true', -- Optional filter (SQL expression)
+ array['id', 'email', 'created_at'], -- Columns to include in payload
+ '[]'::jsonb -- Payload extensions (see below)
+);
+```
+
+## Subscription Fields
+
+| Field | Type | Required | Description |
+|-------|------|----------|-------------|
+| `key` | text | Yes | Unique identifier for the subscription |
+| `stream_id` | bigint | Yes | Must match the `stream.id` in your config |
+| `operation` | text | Yes | `INSERT`, `UPDATE`, or `DELETE` |
+| `schema_name` | text | Yes | Database schema (usually `public`) |
+| `table_name` | text | Yes | Target table name |
+| `when_clause` | text | No | SQL expression to filter events |
+| `column_names` | text[] | No | Columns to include in payload (null = all) |
+| `payload_extensions` | jsonb | No | Computed fields to add to payload |
+| `metadata` | jsonb | No | Static routing metadata |
+| `metadata_extensions` | jsonb | No | Dynamic routing metadata |
+
+## Filtering Events
+
+Use `when_clause` to capture only specific events:
+
+```sql
+-- Only capture high-value orders
+insert into pgstream.subscriptions (key, stream_id, operation, schema_name, table_name, when_clause)
+values ('high-value-orders', 1, 'INSERT', 'public', 'orders', 'new.total > 1000');
+
+-- Only capture status changes
+insert into pgstream.subscriptions (key, stream_id, operation, schema_name, table_name, when_clause)
+values ('status-changed', 1, 'UPDATE', 'public', 'orders', 'old.status IS DISTINCT FROM new.status');
+```
+
+The `when_clause` is a SQL expression. Use `new` to reference the new row (INSERT/UPDATE) and `old` for the previous row (UPDATE/DELETE).
+
+## Selecting Columns
+
+By default, all columns are included. Use `column_names` to select specific columns:
+
+```sql
+-- Only include id, email, and created_at
+insert into pgstream.subscriptions (key, stream_id, operation, schema_name, table_name, column_names)
+values ('user-created', 1, 'INSERT', 'public', 'users', array['id', 'email', 'created_at']);
+```
+
+## Event Output
+
+When a subscription matches, the event looks like:
+
+```json
+{
+ "tg_name": "user-signup",
+ "tg_op": "INSERT",
+ "tg_table_name": "users",
+ "tg_table_schema": "public",
+ "timestamp": 1703001234567,
+ "new": {
+ "id": 123,
+ "email": "user@example.com",
+ "created_at": "2024-12-12T10:30:00Z"
+ },
+ "old": null
+}
+```
+
+For UPDATE operations, both `new` and `old` are populated. For DELETE, only `old` is populated.
+
+## Multiple Subscriptions per Table
+
+You can have multiple subscriptions on the same table:
+
+```sql
+-- Capture all user inserts
+insert into pgstream.subscriptions (key, stream_id, operation, schema_name, table_name)
+values ('all-users', 1, 'INSERT', 'public', 'users');
+
+-- Also capture verified users separately
+insert into pgstream.subscriptions (key, stream_id, operation, schema_name, table_name, when_clause)
+values ('verified-users', 1, 'INSERT', 'public', 'users', 'new.email_verified = true');
+```
+
+Both subscriptions will fire for a verified user, creating two events with different `tg_name` values.
+
+## Avoiding Unnecessary Trigger Recreation
+
+Each subscription change recreates the trigger, which can be expensive. Use MERGE to only update when values actually change:
+
+```sql
+create or replace function set_subscriptions(
+ p_stream_id bigint,
+ p_subscriptions pgstream.subscriptions[]
+)
+returns void
+language plpgsql
+security definer
+set search_path to ''
+as $$
+begin
+ create temporary table temp_subscriptions as
+ select * from unnest(p_subscriptions);
+
+ -- Only update if values actually changed (avoids trigger recreation)
+ merge into pgstream.subscriptions as target
+ using temp_subscriptions as source
+ on (target.key = source.key and target.stream_id = p_stream_id)
+ when matched and (
+ target.operation is distinct from source.operation or
+ target.schema_name is distinct from source.schema_name or
+ target.table_name is distinct from source.table_name or
+ target.when_clause is distinct from source.when_clause or
+ target.column_names is distinct from source.column_names or
+ target.metadata is distinct from source.metadata or
+ target.payload_extensions is distinct from source.payload_extensions or
+ target.metadata_extensions is distinct from source.metadata_extensions
+ ) then update set
+ operation = source.operation,
+ schema_name = source.schema_name,
+ table_name = source.table_name,
+ when_clause = source.when_clause,
+ column_names = source.column_names,
+ metadata = source.metadata,
+ payload_extensions = source.payload_extensions,
+ metadata_extensions = source.metadata_extensions
+ when not matched then insert (
+ key, stream_id, operation, schema_name, table_name,
+ when_clause, column_names, metadata, payload_extensions, metadata_extensions
+ ) values (
+ source.key, p_stream_id, source.operation, source.schema_name,
+ source.table_name, source.when_clause, source.column_names,
+ source.metadata, source.payload_extensions, source.metadata_extensions
+ );
+
+ -- Remove subscriptions not in input
+ delete from pgstream.subscriptions
+ where stream_id = p_stream_id
+ and not exists (
+ select 1 from temp_subscriptions
+ where pgstream.subscriptions.key = temp_subscriptions.key
+ );
+
+ drop table temp_subscriptions;
+end;
+$$;
+```
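+
+A call then passes the complete desired set for the stream. The row values follow the column order of `pgstream.subscriptions` (see the [Subscriptions Table Reference](../reference/subscriptions-table.md)); this usage is a sketch based on the function above:
+
+```sql
+select set_subscriptions(
+  1,
+  array[
+    ('user-created', 1, 'INSERT', 'public', 'users',
+     null, array['id', 'email'], null, null, null)::pgstream.subscriptions
+  ]
+);
+```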
+
+## Next Steps
+
+- [Event Structure](event-structure.md) - Payload and metadata format
+- [Extensions](extensions.md) - Add computed fields and dynamic routing
diff --git a/docs/getting-started.md b/docs/getting-started.md
new file mode 100644
index 0000000..57fb7df
--- /dev/null
+++ b/docs/getting-started.md
@@ -0,0 +1,64 @@
+# Getting Started
+
+## Requirements
+
+- **Postgres 15+** with `wal_level=logical`
+- User with `REPLICATION` privilege
+
+Check your setting:
+```sql
+SHOW wal_level;
+```
+
+Enable if needed (requires restart):
+```sql
+ALTER SYSTEM SET wal_level = logical;
+```
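+
+Grant the replication privilege to the role Postgres Stream connects as (the role name here is just an example):
+
+```sql
+ALTER ROLE pgstream_user WITH REPLICATION;
+```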
+
+## Configure
+
+Create `config.yaml`:
+
+```yaml
+stream:
+ id: 1
+ pg_connection:
+ host: localhost
+ port: 5432
+ name: mydb
+ username: postgres
+ password: postgres
+ tls:
+ enabled: false
+ batch:
+ max_size: 1000
+ max_fill_secs: 5
+
+sink:
+ type: webhook
+ url: https://httpbin.org/post
+```
+
+## Run
+
+```bash
+docker run -v $(pwd)/config.yaml:/config.yaml \
+ ghcr.io/psteinroe/postgres-stream:webhook-latest
+```
+
+Each sink has its own image: `kafka-latest`, `nats-latest`, `sqs-latest`, etc.
+
+## Create a Subscription
+
+```sql
+INSERT INTO pgstream.subscriptions (key, stream_id, operation, schema_name, table_name)
+VALUES ('user-created', 1, 'INSERT', 'public', 'users');
+```
+
+Now inserts into `users` are streamed to your webhook.
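+
+To verify, insert a row (this assumes a `users` table with an `email` column) and watch the webhook receive the event:
+
+```sql
+INSERT INTO users (email) VALUES ('user@example.com');
+```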
+
+## Next Steps
+
+- [Subscriptions](concepts/subscriptions.md) - Filter events, select columns
+- [Sinks](sinks/index.md) - Configure your destination
+- [Configuration Reference](reference/configuration.md) - All options
diff --git a/docs/index.md b/docs/index.md
new file mode 100644
index 0000000..9bd5dd5
--- /dev/null
+++ b/docs/index.md
@@ -0,0 +1,90 @@
+
+
+
+
+Postgres Stream
+
+
+ Reliably stream Postgres table changes to external systems with automatic failover and zero event loss.
+
+
+## What is Postgres Stream?
+
+Postgres Stream captures changes from your Postgres tables and delivers them to external systems like Kafka, RabbitMQ, Redis, webhooks, and cloud services. It uses Postgres-native logical replication and stores events durably in the database itself.
+
+## Key Features
+
+- **Single binary** - No complex infrastructure or high-availability destinations required
+- **Postgres-native durability** - Events are stored in the database, WAL can be released immediately
+- **Zero data loss** - As long as downtime is less than partition retention (7 days by default)
+- **Automatic recovery** - Handles both sink failures and slot invalidation without operator intervention
+
+## How It Works
+
+Events are inserted into the `pgstream.events` table and streamed via logical replication to your sink.
+
+**Two ways to create events:**
+
+1. **Subscriptions** (optional) - Define triggers that automatically capture table changes
+2. **Manual inserts** - Insert directly into `pgstream.events` from your application or database functions
+
+## Supported Sinks
+
+| Sink | Use Case |
+|------|----------|
+| [Kafka](sinks/kafka.md) | High-throughput event streaming |
+| [NATS](sinks/nats.md) | Lightweight pub/sub messaging |
+| [RabbitMQ](sinks/rabbitmq.md) | Enterprise message broker |
+| [Redis Strings](sinks/redis-strings.md) | Key-value caching |
+| [Redis Streams](sinks/redis-streams.md) | Append-only event log |
+| [Webhook](sinks/webhook.md) | HTTP POST delivery |
+| [AWS SQS](sinks/sqs.md) | Managed queue service |
+| [AWS SNS](sinks/sns.md) | Managed pub/sub service |
+| [AWS Kinesis](sinks/kinesis.md) | Real-time data streaming |
+| [GCP Pub/Sub](sinks/gcp-pubsub.md) | Google Cloud messaging |
+| [Elasticsearch](sinks/elasticsearch.md) | Search indexing |
+| [Meilisearch](sinks/meilisearch.md) | Search indexing |
+
+## Quick Start
+
+```yaml
+# config.yaml
+stream:
+ id: 1
+ pg_connection:
+ host: localhost
+ port: 5432
+ name: mydb
+ username: postgres
+ password: postgres
+ tls:
+ enabled: false
+ batch:
+ max_size: 1000
+ max_fill_secs: 5
+
+sink:
+ type: kafka
+ brokers: localhost:9092
+ topic: events
+```
+
+```bash
+# Run with Docker
+docker run -v $(pwd)/config.yaml:/config.yaml \
+ ghcr.io/psteinroe/postgres-stream:kafka-latest
+```
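+
+Then create a subscription so changes to `users` start flowing (the same subscription used in Getting Started):
+
+```sql
+INSERT INTO pgstream.subscriptions (key, stream_id, operation, schema_name, table_name)
+VALUES ('user-created', 1, 'INSERT', 'public', 'users');
+```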
+
+## Trade-offs
+
+While Postgres Stream provides strong durability guarantees, there are some considerations:
+
+- **Small overhead** - Additional INSERT into `events` table on every subscribed operation
+- **Partition management** - Monitor partition growth if event volume is very high
+- **Not for dynamic subscriptions** - Each subscription change recreates database triggers
+
+## Next Steps
+
+- [Getting Started](getting-started.md) - Set up your first stream
+- [How It Works](concepts/how-it-works.md) - Understand the architecture
+- [Sinks](sinks/index.md) - Choose your destination
diff --git a/docs/media/logo.png b/docs/media/logo.png
new file mode 100644
index 0000000..6ceea04
Binary files /dev/null and b/docs/media/logo.png differ
diff --git a/docs/reference/configuration.md b/docs/reference/configuration.md
new file mode 100644
index 0000000..16afd34
--- /dev/null
+++ b/docs/reference/configuration.md
@@ -0,0 +1,196 @@
+# Configuration Reference
+
+Complete reference for `config.yaml` options.
+
+## Full Example
+
+```yaml
+stream:
+ id: 1
+ pg_connection:
+ host: localhost
+ port: 5432
+ name: mydb
+ username: postgres
+ password: postgres
+ tls:
+ enabled: false
+ trusted_root_certs: ""
+ keepalive: 60
+ batch:
+ max_size: 1000
+ max_fill_secs: 5
+
+sink:
+ type: kafka
+ brokers: localhost:9092
+ topic: events
+```
+
+## Stream Configuration
+
+### `stream.id`
+
+| | |
+|--|--|
+| Type | integer |
+| Required | Yes |
+
+Unique identifier for this stream. Must match the `stream_id` in subscription records.
+
+### `stream.pg_connection`
+
+Database connection settings.
+
+#### `host`
+
+| | |
+|--|--|
+| Type | string |
+| Required | Yes |
+
+Database hostname or IP address.
+
+#### `port`
+
+| | |
+|--|--|
+| Type | integer |
+| Required | Yes |
+| Default | 5432 |
+
+Database port.
+
+#### `name`
+
+| | |
+|--|--|
+| Type | string |
+| Required | Yes |
+
+Database name.
+
+#### `username`
+
+| | |
+|--|--|
+| Type | string |
+| Required | Yes |
+
+Database username. Must have REPLICATION privilege.
+
+#### `password`
+
+| | |
+|--|--|
+| Type | string |
+| Required | Yes |
+
+Database password. Supports environment variable substitution: `${ENV_VAR}`.
+
+#### `tls.enabled`
+
+| | |
+|--|--|
+| Type | boolean |
+| Required | No |
+| Default | false |
+
+Enable TLS for database connection.
+
+#### `tls.trusted_root_certs`
+
+| | |
+|--|--|
+| Type | string |
+| Required | No |
+
+Path to CA certificate file for TLS verification.
+
+#### `keepalive`
+
+| | |
+|--|--|
+| Type | integer |
+| Required | No |
+| Default | 60 |
+
+TCP keepalive interval in seconds.
+
+### `stream.batch`
+
+Controls how events are grouped before delivery.
+
+#### `max_size`
+
+| | |
+|--|--|
+| Type | integer |
+| Required | No |
+| Default | 1000 |
+
+Maximum events per batch. Larger batches improve throughput but increase latency.
+
+#### `max_fill_secs`
+
+| | |
+|--|--|
+| Type | integer |
+| Required | No |
+| Default | 5 |
+
+Maximum time to fill a batch in seconds. Lower values reduce latency but may result in smaller batches.
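+
+For example, a latency-sensitive stream might shrink both limits (only the relevant excerpt shown):
+
+```yaml
+stream:
+  batch:
+    max_size: 100
+    max_fill_secs: 1
+```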
+
+## Sink Configuration
+
+The `sink` section defines the delivery destination. Each sink type has different options.
+
+### Common Pattern
+
+```yaml
+sink:
+ type: <sink-type>
+ # sink-specific options
+```
+
+### Available Types
+
+- `memory` - Built-in test sink
+- `kafka` - Apache Kafka
+- `nats` - NATS messaging
+- `rabbitmq` - RabbitMQ
+- `redis-strings` - Redis key-value
+- `redis-streams` - Redis Streams
+- `webhook` - HTTP POST
+- `sqs` - AWS SQS
+- `sns` - AWS SNS
+- `kinesis` - AWS Kinesis
+- `gcp-pubsub` - GCP Pub/Sub
+- `elasticsearch` - Elasticsearch
+- `meilisearch` - Meilisearch
+
+See the [Sinks](../sinks/index.md) section for sink-specific configuration.
+
+## Environment Variables
+
+Use `${VAR_NAME}` syntax to substitute environment variables:
+
+```yaml
+stream:
+ pg_connection:
+ password: ${POSTGRES_PASSWORD}
+
+sink:
+ type: kafka
+ sasl_password: ${KAFKA_PASSWORD}
+```
+
+## Configuration File Location
+
+By default, Postgres Stream looks for `config.yaml` in the current directory.
+
+Override with the `CONFIG_PATH` environment variable:
+
+```bash
+CONFIG_PATH=/etc/postgres-stream/config.yaml postgres-stream
+```
diff --git a/docs/reference/event-format.md b/docs/reference/event-format.md
new file mode 100644
index 0000000..f3116a3
--- /dev/null
+++ b/docs/reference/event-format.md
@@ -0,0 +1,244 @@
+# Event Format Reference
+
+JSON structure of events delivered to sinks.
+
+## Event Envelope
+
+Events from subscriptions have this structure:
+
+```json
+{
+ "tg_name": "user-signup",
+ "tg_op": "INSERT",
+ "tg_table_name": "users",
+ "tg_table_schema": "public",
+ "timestamp": 1703001234567,
+ "new": {
+ "id": 123,
+ "email": "user@example.com",
+ "created_at": "2024-12-12T10:30:00Z"
+ },
+ "old": null
+}
+```
+
+## Fields
+
+### `tg_name`
+
+| | |
+|--|--|
+| Type | string |
+
+The subscription `key` that matched this event.
+
+### `tg_op`
+
+| | |
+|--|--|
+| Type | string |
+| Values | `INSERT`, `UPDATE`, `DELETE` |
+
+The database operation that triggered the event.
+
+### `tg_table_name`
+
+| | |
+|--|--|
+| Type | string |
+
+Name of the table where the operation occurred.
+
+### `tg_table_schema`
+
+| | |
+|--|--|
+| Type | string |
+
+Schema of the table where the operation occurred.
+
+### `timestamp`
+
+| | |
+|--|--|
+| Type | integer |
+
+Unix timestamp in milliseconds when the event was created.
+
+### `new`
+
+| | |
+|--|--|
+| Type | object or null |
+
+The new row values. Present for `INSERT` and `UPDATE` operations.
+
+Contains only the columns specified in `column_names`, or all columns if not specified.
+
+### `old`
+
+| | |
+|--|--|
+| Type | object or null |
+
+The previous row values. Present for `UPDATE` and `DELETE` operations.
+
+Contains only the columns specified in `column_names`, or all columns if not specified.
+
+## Operation-Specific Structure
+
+### INSERT
+
+```json
+{
+ "tg_op": "INSERT",
+ "new": { /* new row */ },
+ "old": null
+}
+```
+
+### UPDATE
+
+```json
+{
+ "tg_op": "UPDATE",
+ "new": { /* new row */ },
+ "old": { /* previous row */ }
+}
+```
+
+### DELETE
+
+```json
+{
+ "tg_op": "DELETE",
+ "new": null,
+ "old": { /* deleted row */ }
+}
+```
+
+## Payload Extensions
+
+Fields from `payload_extensions` are added at the top level:
+
+```json
+{
+ "tg_name": "order-created",
+ "tg_op": "INSERT",
+ "new": { "id": 456, "total": 99.99 },
+ "date": "2024-12-12",
+ "total_formatted": "$99.99"
+}
+```
+
+Nested paths create nested objects:
+
+```json
+{
+ "tg_name": "user-action",
+ "new": { "id": 123 },
+ "context": {
+ "user_id": "abc-123",
+ "role": "admin"
+ }
+}
+```
+
+## Manual Events
+
+Events inserted directly into `pgstream.events` have whatever structure you provide in the `payload` column:
+
+```json
+{
+ "type": "custom-event",
+ "data": { "key": "value" }
+}
+```
+
+They don't have trigger metadata unless you include it.
+
+## Metadata vs Payload
+
+**Payload** is the event data delivered to the sink:
+- Stored in `pgstream.events.payload`
+- Sent to the destination as the message body
+
+**Metadata** controls routing and delivery:
+- Stored in `pgstream.events.metadata`
+- Read by sinks to determine topic, queue, index, etc.
+- Not typically included in the delivered message body
+
+## Data Type Mapping
+
+PostgreSQL types are serialized to JSON:
+
+| PostgreSQL | JSON |
+|------------|------|
+| integer, bigint | number |
+| numeric, decimal | number |
+| boolean | boolean |
+| text, varchar | string |
+| timestamp, date | string (ISO 8601) |
+| json, jsonb | object/array |
+| array | array |
+| uuid | string |
+
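+For example, a row mixing these types (values are illustrative) serializes inside the `new`/`old` objects as:
+
+```json
+{
+  "id": 42,
+  "order_id": "3f9c1d2e-8a7b-4c5d-9e0f-112233445566",
+  "total": 99.99,
+  "paid": true,
+  "tags": ["priority", "gift"],
+  "details": {"note": "leave at door"},
+  "created_at": "2024-12-12T10:30:00Z"
+}
+```
+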
+## Example Events
+
+### User Signup
+
+```json
+{
+ "tg_name": "user-signup",
+ "tg_op": "INSERT",
+ "tg_table_name": "users",
+ "tg_table_schema": "public",
+ "timestamp": 1703001234567,
+ "new": {
+ "id": 1,
+ "email": "user@example.com",
+ "email_verified": true,
+ "created_at": "2024-12-12T10:30:00Z"
+ },
+ "old": null
+}
+```
+
+### Order Status Change
+
+```json
+{
+ "tg_name": "order-status-changed",
+ "tg_op": "UPDATE",
+ "tg_table_name": "orders",
+ "tg_table_schema": "public",
+ "timestamp": 1703001234567,
+ "new": {
+ "id": 456,
+ "status": "shipped",
+ "updated_at": "2024-12-12T14:30:00Z"
+ },
+ "old": {
+ "id": 456,
+ "status": "pending",
+ "updated_at": "2024-12-12T10:30:00Z"
+ }
+}
+```
+
+### User Deleted
+
+```json
+{
+ "tg_name": "user-deleted",
+ "tg_op": "DELETE",
+ "tg_table_name": "users",
+ "tg_table_schema": "public",
+ "timestamp": 1703001234567,
+ "new": null,
+ "old": {
+ "id": 1,
+ "email": "user@example.com"
+ }
+}
+```
diff --git a/docs/reference/subscriptions-table.md b/docs/reference/subscriptions-table.md
new file mode 100644
index 0000000..a855178
--- /dev/null
+++ b/docs/reference/subscriptions-table.md
@@ -0,0 +1,194 @@
+# Subscriptions Table Reference
+
+Schema reference for `pgstream.subscriptions`.
+
+## Schema
+
+```sql
+create table pgstream.subscriptions (
+ key text not null,
+ stream_id bigint not null,
+ operation text not null,
+ schema_name text not null,
+ table_name text not null,
+ when_clause text,
+ column_names text[],
+ metadata jsonb,
+ payload_extensions jsonb,
+ metadata_extensions jsonb,
+ primary key (key, stream_id)
+);
+```
+
+## Columns
+
+### `key`
+
+| | |
+|--|--|
+| Type | text |
+| Required | Yes |
+| Primary Key | Yes (with stream_id) |
+
+Unique identifier for the subscription within a stream. Used as `tg_name` in generated events.
+
+### `stream_id`
+
+| | |
+|--|--|
+| Type | bigint |
+| Required | Yes |
+| Primary Key | Yes (with key) |
+
+Must match the `stream.id` in your config. Links the subscription to a specific Postgres Stream instance.
+
+### `operation`
+
+| | |
+|--|--|
+| Type | text |
+| Required | Yes |
+| Values | `INSERT`, `UPDATE`, `DELETE` |
+
+The database operation to capture.
+
+### `schema_name`
+
+| | |
+|--|--|
+| Type | text |
+| Required | Yes |
+
+Database schema containing the target table.
+
+### `table_name`
+
+| | |
+|--|--|
+| Type | text |
+| Required | Yes |
+
+Name of the table to monitor.
+
+### `when_clause`
+
+| | |
+|--|--|
+| Type | text |
+| Required | No |
+
+SQL expression to filter events. Use `new` for new values and `old` for previous values.
+
+Examples:
+```sql
+'new.email_verified = true'
+'old.status IS DISTINCT FROM new.status'
+'new.amount > 1000'
+```
+
+### `column_names`
+
+| | |
+|--|--|
+| Type | text[] |
+| Required | No |
+| Default | All columns |
+
+Array of column names to include in the event payload. If null, all columns are included.
+
+Example:
+```sql
+array['id', 'email', 'created_at']
+```
+
+### `metadata`
+
+| | |
+|--|--|
+| Type | jsonb |
+| Required | No |
+
+Static metadata for routing. Applied to every event from this subscription.
+
+Example:
+```sql
+'{"topic": "user-events", "priority": "high"}'::jsonb
+```
+
+### `payload_extensions`
+
+| | |
+|--|--|
+| Type | jsonb |
+| Required | No |
+
+Array of computed fields to add to the event payload.
+
+Format:
+```json
+[
+ {"json_path": "field_name", "expression": "SQL expression"}
+]
+```
+
+Example:
+```sql
+'[
+ {"json_path": "date", "expression": "new.created_at::date::text"},
+ {"json_path": "total_formatted", "expression": "''$'' || new.total::text"}
+]'::jsonb
+```
+
+### `metadata_extensions`
+
+| | |
+|--|--|
+| Type | jsonb |
+| Required | No |
+
+Array of computed metadata fields. Same format as `payload_extensions`.
+
+Example:
+```sql
+'[
+ {"json_path": "topic", "expression": "''events-'' || tg_table_name"},
+ {"json_path": "partition_key", "expression": "new.user_id::text"}
+]'::jsonb
+```
+
+## Extension Format
+
+Both `payload_extensions` and `metadata_extensions` use the same format:
+
+```json
+[
+ {
+ "json_path": "path.to.field",
+ "expression": "SQL expression"
+ }
+]
+```
+
+### `json_path`
+
+Dot notation path where the value will be placed. Supports nesting:
+- `"field"` → `{"field": value}`
+- `"nested.field"` → `{"nested": {"field": value}}`
+
+### `expression`
+
+SQL expression evaluated in trigger context:
+- Use `new` to reference new row values
+- Use `old` to reference old row values
+- String literals need double single quotes: `''literal''`
+- Any valid SQL expression that returns a JSONB-compatible value
+
+## Trigger Behavior
+
+When you insert, update, or delete a subscription:
+
+1. Postgres Stream's trigger functions recalculate the required trigger definitions
+2. New database triggers are created/modified on target tables
+3. Existing triggers are dropped if no subscriptions remain
+
+This happens automatically via internal triggers on the subscriptions table.
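+
+For example, removing a subscription drops its trigger on the target table once no other subscriptions remain for it:
+
+```sql
+delete from pgstream.subscriptions
+where key = 'user-created' and stream_id = 1;
+```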
diff --git a/docs/sinks/custom-sinks.md b/docs/sinks/custom-sinks.md
new file mode 100644
index 0000000..5109cc5
--- /dev/null
+++ b/docs/sinks/custom-sinks.md
@@ -0,0 +1,205 @@
+# Implementing Custom Sinks
+
+Build your own sink to deliver events to any destination.
+
+## Overview
+
+Sinks implement the `Sink` trait to define how events are delivered. Each sink should be behind a feature flag to keep the binary minimal.
+
+## Step 1: Add Dependencies
+
+Update `Cargo.toml` with your sink's dependencies behind a feature flag:
+
+```toml
+[dependencies]
+# Existing dependencies...
+
+# Optional sink dependencies
+reqwest = { version = "0.11", features = ["json"], optional = true }
+
+[features]
+# Sink feature flags
+sink-http = ["dep:reqwest"]
+```
+
+## Step 2: Create the Sink
+
+Create a new file `src/sink/http.rs`:
+
+```rust
+use etl::error::EtlResult;
+use reqwest::Client;
+use serde::Deserialize;
+use std::sync::Arc;
+
+use crate::sink::Sink;
+use crate::types::TriggeredEvent;
+
+#[derive(Clone, Debug, Deserialize)]
+pub struct HttpSinkConfig {
+ pub url: String,
+ #[serde(default)]
+    pub headers: std::collections::HashMap<String, String>,
+}
+
+#[derive(Clone)]
+pub struct HttpSink {
+ config: HttpSinkConfig,
+    client: Arc<Client>,
+}
+
+impl HttpSink {
+ pub fn new(config: HttpSinkConfig) -> Self {
+ let client = Client::new();
+ Self {
+ config,
+ client: Arc::new(client),
+ }
+ }
+}
+
+impl Sink for HttpSink {
+ fn name() -> &'static str {
+ "http"
+ }
+
+    async fn publish_events(&self, events: Vec<TriggeredEvent>) -> EtlResult<()> {
+ for event in events {
+ let mut request = self.client
+ .post(&self.config.url)
+ .json(&event.payload);
+
+ for (key, value) in &self.config.headers {
+ request = request.header(key, value);
+ }
+
+ let response = request.send().await.map_err(|e| {
+ etl::etl_error!(
+ etl::error::ErrorKind::DestinationError,
+ "HTTP request failed",
+ e.to_string()
+ )
+ })?;
+
+ if !response.status().is_success() {
+ return Err(etl::etl_error!(
+ etl::error::ErrorKind::DestinationError,
+ "HTTP request failed",
+ format!("status: {}", response.status())
+ ));
+ }
+ }
+
+ Ok(())
+ }
+}
+```
+
+## Step 3: Register the Module
+
+Update `src/sink/mod.rs`:
+
+```rust
+mod base;
+pub mod memory;
+
+#[cfg(feature = "sink-http")]
+pub mod http;
+
+pub use base::Sink;
+```
+
+## Step 4: Add to Config Enum
+
+Update `src/config/sink.rs`:
+
+```rust
+use serde::Deserialize;
+
+#[cfg(feature = "sink-http")]
+use crate::sink::http::HttpSinkConfig;
+
+#[derive(Clone, Debug, Deserialize)]
+#[serde(tag = "type", rename_all = "lowercase")]
+pub enum SinkConfig {
+ Memory,
+
+ #[cfg(feature = "sink-http")]
+ Http(HttpSinkConfig),
+}
+```
+
+## Step 5: Wire Up in Core
+
+Update `src/core.rs`:
+
+```rust
+#[cfg(feature = "sink-http")]
+use crate::sink::http::HttpSink;
+
+// In start_pipeline_with_config():
+let sink = match &config.sink {
+ SinkConfig::Memory => MemorySink::new(),
+
+ #[cfg(feature = "sink-http")]
+ SinkConfig::Http(cfg) => HttpSink::new(cfg.clone()),
+};
+```
+
+## Step 6: Build and Use
+
+Build with your sink feature:
+
+```bash
+cargo build --release --features sink-http
+```
+
+Use in `config.yaml`:
+
+```yaml
+sink:
+ type: http
+ url: https://api.example.com/events
+ headers:
+ Authorization: Bearer token123
+```
+
+## Dynamic Routing
+
+To support per-event routing, read from event metadata:
+
+```rust
+fn resolve_url<'a>(&'a self, event: &'a TriggeredEvent) -> &'a str {
+ if let Some(ref metadata) = event.metadata {
+ if let Some(url) = metadata.get("url").and_then(|v| v.as_str()) {
+ return url;
+ }
+ }
+ &self.config.url
+}
+```
+
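+Building on this, here is a minimal sketch of how `publish_events` could combine routing with the batching best practice: group events by their resolved URL and send one request per group. It assumes `resolve_url` is defined on `HttpSink` as above and that `TriggeredEvent.payload` is serializable, as in the earlier example; configured headers are omitted for brevity:
+
+```rust
+impl HttpSink {
+    // Sketch: group events by resolved URL, then send one batched request per URL.
+    async fn publish_grouped(&self, events: Vec<TriggeredEvent>) -> EtlResult<()> {
+        let mut groups: std::collections::HashMap<String, Vec<TriggeredEvent>> =
+            std::collections::HashMap::new();
+        for event in events {
+            let url = self.resolve_url(&event).to_string();
+            groups.entry(url).or_default().push(event);
+        }
+
+        for (url, group) in groups {
+            // Send all payloads for this URL as a single JSON array.
+            let payloads: Vec<_> = group.iter().map(|e| &e.payload).collect();
+            let response = self
+                .client
+                .post(&url)
+                .json(&payloads)
+                .send()
+                .await
+                .map_err(|e| {
+                    etl::etl_error!(
+                        etl::error::ErrorKind::DestinationError,
+                        "HTTP request failed",
+                        e.to_string()
+                    )
+                })?;
+
+            if !response.status().is_success() {
+                return Err(etl::etl_error!(
+                    etl::error::ErrorKind::DestinationError,
+                    "HTTP request failed",
+                    format!("status: {}", response.status())
+                ));
+            }
+        }
+
+        Ok(())
+    }
+}
+```
+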
+## Error Handling
+
+Use the ETL error types for consistent error reporting:
+
+```rust
+use etl::error::{EtlError, ErrorKind};
+
+// Configuration errors
+etl::etl_error!(ErrorKind::ConfigError, "No URL configured")
+
+// Destination errors
+etl::etl_error!(ErrorKind::DestinationError, "Failed to send", details)
+
+// Invalid data errors
+etl::etl_error!(ErrorKind::InvalidData, "Failed to serialize", details)
+```
+
+## Best Practices
+
+1. **Use feature flags** - Don't add dependencies to the default build
+2. **Support batching** - Process multiple events efficiently
+3. **Handle errors gracefully** - Return appropriate error types
+4. **Support dynamic routing** - Read from event metadata when possible
+5. **Document configuration** - Add doc comments to config structs
diff --git a/docs/sinks/elasticsearch.md b/docs/sinks/elasticsearch.md
new file mode 100644
index 0000000..4755e31
--- /dev/null
+++ b/docs/sinks/elasticsearch.md
@@ -0,0 +1,104 @@
+# Elasticsearch
+
+Index events as searchable documents in Elasticsearch.
+
+## Docker Image
+
+```bash
+docker pull ghcr.io/psteinroe/postgres-stream:elasticsearch-latest
+```
+
+## Configuration
+
+```yaml
+sink:
+ type: elasticsearch
+ url: http://localhost:9200
+ index: events
+```
+
+### With Authentication
+
+```yaml
+sink:
+ type: elasticsearch
+ url: http://elastic:password@localhost:9200
+ index: events
+```
+
+## Options
+
+| Option | Type | Required | Default | Metadata Override | Description |
+|--------|------|----------|---------|-------------------|-------------|
+| `url` | string | Yes | - | No | Elasticsearch URL (may include credentials) |
+| `index` | string | No | - | Yes | Default index (can be overridden per-event) |
+
+## Dynamic Routing
+
+Route events to different indexes using metadata:
+
+```sql
+-- Route by table name
+metadata_extensions = '[
+ {"json_path": "index", "expression": "tg_table_name || ''-events''"}
+]'
+
+-- Or use static metadata
+metadata = '{"index": "users"}'
+```
+
+The sink reads `index` from event metadata.
+
+## Document Format
+
+Events are indexed with:
+- Document ID: Event ID
+- Body: Event payload (not the full envelope)
+
+The payload is indexed directly, so your Elasticsearch mapping should match your payload structure.
+
+## Bulk Operations
+
+Events are indexed using the Bulk API for efficiency. All events in a batch are sent in a single bulk request.
+
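+For illustration, a hypothetical two-event batch translates to a bulk body roughly like the following NDJSON (IDs and fields are made up):
+
+```json
+{ "index": { "_index": "postgres-events", "_id": "evt-1" } }
+{ "id": 1, "email": "a@example.com", "created_at": "2025-01-12T10:00:00Z" }
+{ "index": { "_index": "postgres-events", "_id": "evt-2" } }
+{ "id": 2, "email": "b@example.com", "created_at": "2025-01-12T10:01:00Z" }
+```
+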
+## Example
+
+Complete configuration:
+
+```yaml
+stream:
+ id: 1
+ pg_connection:
+ host: localhost
+ port: 5432
+ name: mydb
+ username: postgres
+ password: postgres
+ tls:
+ enabled: false
+ batch:
+ max_size: 1000
+ max_fill_secs: 5
+
+sink:
+ type: elasticsearch
+ url: http://localhost:9200
+ index: postgres-events
+```
+
+## Index Mapping
+
+Create an index mapping that matches your event payload structure:
+
+```bash
+curl -X PUT 'http://localhost:9200/postgres-events' \
+  -H 'Content-Type: application/json' \
+  -d '{
+    "mappings": {
+      "properties": {
+        "id": { "type": "keyword" },
+        "email": { "type": "keyword" },
+        "created_at": { "type": "date" }
+      }
+    }
+  }'
+```
diff --git a/docs/sinks/gcp-pubsub.md b/docs/sinks/gcp-pubsub.md
new file mode 100644
index 0000000..c6fc07b
--- /dev/null
+++ b/docs/sinks/gcp-pubsub.md
@@ -0,0 +1,89 @@
+# GCP Pub/Sub
+
+Publish events to Google Cloud Pub/Sub.
+
+## Docker Image
+
+```bash
+docker pull ghcr.io/psteinroe/postgres-stream:gcp-pubsub-latest
+```
+
+## Configuration
+
+```yaml
+sink:
+ type: gcp-pubsub
+ project_id: my-project
+ topic: my-topic
+```
+
+### For Local Testing
+
+```yaml
+sink:
+ type: gcp-pubsub
+ project_id: my-project
+ topic: my-topic
+ emulator_host: localhost:8085
+```
+
+## Options
+
+| Option | Type | Required | Default | Metadata Override | Description |
+|--------|------|----------|---------|-------------------|-------------|
+| `project_id` | string | Yes | - | No | GCP project ID |
+| `topic` | string | Yes | - | No | Pub/Sub topic name |
+| `emulator_host` | string | No | - | No | Emulator host for testing |
+
+## Authentication
+
+The sink uses the default GCP credentials chain:
+
+1. `GOOGLE_APPLICATION_CREDENTIALS` environment variable
+2. Application Default Credentials (ADC)
+3. GCE metadata service (when running on GCP)
+
+## Emulator Support
+
+For local testing with the Pub/Sub emulator:
+
+1. Set `PUBSUB_EMULATOR_HOST` environment variable:
+ ```bash
+ export PUBSUB_EMULATOR_HOST=localhost:8085
+ ```
+
+2. Set `emulator_host` in config to enable auto-topic creation:
+ ```yaml
+ emulator_host: localhost:8085
+ ```
+
+## Dynamic Routing
+
+Unlike other sinks, GCP Pub/Sub does not support dynamic topic routing via metadata. The topic must be specified in the sink configuration.
+
+## Example
+
+Complete configuration:
+
+```yaml
+stream:
+ id: 1
+ pg_connection:
+ host: localhost
+ port: 5432
+ name: mydb
+ username: postgres
+ password: postgres
+ tls:
+ enabled: false
+ batch:
+ max_size: 1000
+ max_fill_secs: 5
+
+sink:
+ type: gcp-pubsub
+ project_id: my-gcp-project
+ topic: postgres-events
+```
+
+Messages are published as JSON-serialized payloads. The Pub/Sub client handles batching internally (default: 10ms, 100 messages, or 1MiB).
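+
+For a quick end-to-end check, you can pull messages with the gcloud CLI; the subscription name below is just an example:
+
+```bash
+gcloud pubsub subscriptions pull postgres-events-sub --project=my-gcp-project --auto-ack --limit=5
+```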
diff --git a/docs/sinks/index.md b/docs/sinks/index.md
new file mode 100644
index 0000000..9743477
--- /dev/null
+++ b/docs/sinks/index.md
@@ -0,0 +1,64 @@
+# Sinks
+
+Choose where to deliver your events.
+
+## Overview
+
+Sinks define where Postgres Stream delivers events. Each sink type ships as its own Docker image to keep image size minimal.
+
+## Available Sinks
+
+| Sink | Docker Image | Use Case |
+|------|--------------|----------|
+| [Kafka](kafka.md) | `kafka-latest` | High-throughput event streaming |
+| [NATS](nats.md) | `nats-latest` | Lightweight pub/sub messaging |
+| [RabbitMQ](rabbitmq.md) | `rabbitmq-latest` | Enterprise message broker |
+| [Redis Strings](redis-strings.md) | `redis-strings-latest` | Key-value caching |
+| [Redis Streams](redis-streams.md) | `redis-streams-latest` | Append-only event log |
+| [Webhook](webhook.md) | `webhook-latest` | HTTP POST delivery |
+| [AWS SQS](sqs.md) | `sqs-latest` | Managed queue service |
+| [AWS SNS](sns.md) | `sns-latest` | Managed pub/sub service |
+| [AWS Kinesis](kinesis.md) | `kinesis-latest` | Real-time data streaming |
+| [GCP Pub/Sub](gcp-pubsub.md) | `gcp-pubsub-latest` | Google Cloud messaging |
+| [Elasticsearch](elasticsearch.md) | `elasticsearch-latest` | Search indexing |
+| [Meilisearch](meilisearch.md) | `meilisearch-latest` | Search indexing |
+
+## Dynamic Routing
+
+Most sinks support dynamic routing via event metadata. This lets you route events to different destinations based on row data:
+
+```sql
+-- Route to different topics based on table name
+insert into pgstream.subscriptions (
+ key, stream_id, operation, schema_name, table_name,
+ column_names, metadata_extensions
+) values (
+ 'all-events',
+ 1,
+ 'INSERT',
+ 'public',
+ 'orders',
+ array['id', 'user_id'],
+ '[{"json_path": "topic", "expression": "''events-'' || tg_table_name"}]'::jsonb
+);
+```
+
+See each sink's documentation for supported metadata fields.
+
+## Choosing a Sink
+
+**For high throughput**: Kafka, Kinesis, or GCP Pub/Sub
+
+**For simple integration**: Webhook
+
+**For AWS infrastructure**: SQS, SNS, or Kinesis
+
+**For GCP infrastructure**: GCP Pub/Sub
+
+**For real-time messaging**: NATS or RabbitMQ
+
+**For caching**: Redis Strings
+
+**For an event log**: Redis Streams
+
+**For search indexing**: Elasticsearch or Meilisearch
diff --git a/docs/sinks/kafka.md b/docs/sinks/kafka.md
new file mode 100644
index 0000000..557bb39
--- /dev/null
+++ b/docs/sinks/kafka.md
@@ -0,0 +1,85 @@
+# Kafka
+
+High-throughput event streaming to Apache Kafka.
+
+## Docker Image
+
+```bash
+docker pull ghcr.io/psteinroe/postgres-stream:kafka-latest
+```
+
+## Configuration
+
+```yaml
+sink:
+ type: kafka
+ brokers: localhost:9092
+ topic: events
+```
+
+### With Authentication
+
+```yaml
+sink:
+ type: kafka
+ brokers: broker.example.com:9092
+ topic: events
+ sasl_mechanism: PLAIN
+ sasl_username: ${KAFKA_USERNAME}
+ sasl_password: ${KAFKA_PASSWORD}
+ security_protocol: SASL_SSL
+```
+
+## Options
+
+| Option | Type | Required | Default | Metadata Override | Description |
+|--------|------|----------|---------|-------------------|-------------|
+| `brokers` | string | Yes | - | No | Comma-separated list of Kafka brokers |
+| `topic` | string | No | - | Yes | Default topic (can be overridden per-event) |
+| `sasl_username` | string | No | - | No | SASL username for authentication |
+| `sasl_password` | string | No | - | No | SASL password for authentication |
+| `sasl_mechanism` | string | No | - | No | SASL mechanism (PLAIN, SCRAM-SHA-256) |
+| `security_protocol` | string | No | - | No | Security protocol (SASL_SSL, SASL_PLAINTEXT) |
+| `delivery_timeout_ms` | integer | No | 5000 | No | Message delivery timeout |
+
+## Dynamic Routing
+
+Route events to different topics using metadata:
+
+```sql
+-- Route by table name
+metadata_extensions = '[
+ {"json_path": "topic", "expression": "''events-'' || tg_table_name"}
+]'
+```
+
+The sink reads the `topic` key from event metadata.
+
+## Example
+
+Complete configuration:
+
+```yaml
+stream:
+ id: 1
+ pg_connection:
+ host: localhost
+ port: 5432
+ name: mydb
+ username: postgres
+ password: postgres
+ tls:
+ enabled: false
+ batch:
+ max_size: 1000
+ max_fill_secs: 5
+
+sink:
+ type: kafka
+ brokers: localhost:9092
+ topic: postgres-events
+```
+
+Events are produced with:
+- **Key**: Event ID
+- **Value**: JSON-serialized payload
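+
+To verify delivery, you can consume the topic with any standard client, for example with `kcat` (assuming it is installed):
+
+```bash
+kcat -b localhost:9092 -t postgres-events -C -o beginning -f 'key=%k value=%s\n'
+```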
diff --git a/docs/sinks/kinesis.md b/docs/sinks/kinesis.md
new file mode 100644
index 0000000..50c7e4f
--- /dev/null
+++ b/docs/sinks/kinesis.md
@@ -0,0 +1,101 @@
+# AWS Kinesis
+
+Stream events to Amazon Kinesis Data Streams.
+
+## Docker Image
+
+```bash
+docker pull ghcr.io/psteinroe/postgres-stream:kinesis-latest
+```
+
+## Configuration
+
+```yaml
+sink:
+ type: kinesis
+ stream_name: my-stream
+ region: us-east-1
+```
+
+### With Explicit Credentials
+
+```yaml
+sink:
+ type: kinesis
+ stream_name: my-stream
+ region: us-east-1
+ access_key_id: ${AWS_ACCESS_KEY_ID}
+ secret_access_key: ${AWS_SECRET_ACCESS_KEY}
+```
+
+### For Local Testing
+
+```yaml
+sink:
+ type: kinesis
+ stream_name: my-stream
+ region: us-east-1
+ endpoint_url: http://localhost:4566
+ access_key_id: local
+ secret_access_key: local
+```
+
+## Options
+
+| Option | Type | Required | Default | Metadata Override | Description |
+|--------|------|----------|---------|-------------------|-------------|
+| `stream_name` | string | No | - | Yes | Kinesis stream name (can be overridden per-event) |
+| `region` | string | Yes | - | No | AWS region |
+| `endpoint_url` | string | No | - | No | Custom endpoint for LocalStack |
+| `access_key_id` | string | No | - | No | AWS access key (uses default chain if not set) |
+| `secret_access_key` | string | No | - | No | AWS secret key (uses default chain if not set) |
+
+## Dynamic Routing
+
+Route events to different streams using metadata:
+
+```sql
+-- Route by region
+metadata_extensions = '[
+ {"json_path": "stream", "expression": "''events-'' || new.region"}
+]'
+```
+
+The sink reads `stream` from event metadata.
+
+## Partition Key
+
+The event ID is used as the partition key for shard distribution.
+
+## Batching
+
+Records are sent using `PutRecords` with up to 500 records per request (Kinesis limit). Multiple batches are sent concurrently.
+
+## Example
+
+Complete configuration:
+
+```yaml
+stream:
+ id: 1
+ pg_connection:
+ host: localhost
+ port: 5432
+ name: mydb
+ username: postgres
+ password: postgres
+ tls:
+ enabled: false
+ batch:
+ max_size: 1000
+ max_fill_secs: 5
+
+sink:
+ type: kinesis
+ stream_name: postgres-events
+ region: us-east-1
+```
+
+Records are published with:
+- Partition Key: Event ID
+- Data: JSON-serialized payload
diff --git a/docs/sinks/meilisearch.md b/docs/sinks/meilisearch.md
new file mode 100644
index 0000000..cb9da7f
--- /dev/null
+++ b/docs/sinks/meilisearch.md
@@ -0,0 +1,114 @@
+# Meilisearch
+
+Index events as searchable documents in Meilisearch.
+
+## Docker Image
+
+```bash
+docker pull ghcr.io/psteinroe/postgres-stream:meilisearch-latest
+```
+
+## Configuration
+
+```yaml
+sink:
+ type: meilisearch
+ url: http://localhost:7700
+ index: events
+```
+
+### With Authentication
+
+```yaml
+sink:
+ type: meilisearch
+ url: http://localhost:7700
+ index: events
+ api_key: ${MEILISEARCH_API_KEY}
+```
+
+## Options
+
+| Option | Type | Required | Default | Metadata Override | Description |
+|--------|------|----------|---------|-------------------|-------------|
+| `url` | string | Yes | - | No | Meilisearch URL |
+| `index` | string | No | - | Yes | Default index (can be overridden per-event) |
+| `api_key` | string | No | - | No | API key for authentication |
+
+## Dynamic Routing
+
+Route events to different indexes using metadata:
+
+```sql
+-- Route by table name
+metadata_extensions = '[
+ {"json_path": "index", "expression": "tg_table_name"}
+]'
+
+-- Or use static metadata
+metadata = '{"index": "products"}'
+```
+
+The sink reads `index` from event metadata.
+
+## Primary Key
+
+Meilisearch requires each document to have a primary key. Options:
+
+1. **Configure in Meilisearch**: Set the primary key field when creating the index
+2. **Add via payload_extensions**: Transform the event ID into the expected field
+
+```sql
+-- Add id field from event
+payload_extensions = '[
+ {"json_path": "id", "expression": "new.id::text"}
+]'
+```
+
+## Document Format
+
+The event payload is indexed directly (not the full envelope). Your payload structure should match your Meilisearch schema.
+
+## Task Handling
+
+The sink waits for indexing tasks to complete before returning success. This ensures data consistency but may impact throughput for large batches.
+
+## Example
+
+Complete configuration:
+
+```yaml
+stream:
+ id: 1
+ pg_connection:
+ host: localhost
+ port: 5432
+ name: mydb
+ username: postgres
+ password: postgres
+ tls:
+ enabled: false
+ batch:
+ max_size: 1000
+ max_fill_secs: 5
+
+sink:
+ type: meilisearch
+ url: http://localhost:7700
+ index: products
+ api_key: masterKey
+```
+
+## Index Setup
+
+Create an index with appropriate settings:
+
+```bash
+curl -X POST 'http://localhost:7700/indexes' \
+ -H 'Content-Type: application/json' \
+ -H 'Authorization: Bearer masterKey' \
+ --data-binary '{
+ "uid": "products",
+ "primaryKey": "id"
+ }'
+```
diff --git a/docs/sinks/nats.md b/docs/sinks/nats.md
new file mode 100644
index 0000000..0be642a
--- /dev/null
+++ b/docs/sinks/nats.md
@@ -0,0 +1,74 @@
+# NATS
+
+Lightweight pub/sub messaging with NATS.
+
+## Docker Image
+
+```bash
+docker pull ghcr.io/psteinroe/postgres-stream:nats-latest
+```
+
+## Configuration
+
+```yaml
+sink:
+ type: nats
+ url: nats://localhost:4222
+ subject: events
+```
+
+### With Authentication
+
+```yaml
+sink:
+ type: nats
+ url: nats://user:password@nats.example.com:4222
+ subject: events
+```
+
+## Options
+
+| Option | Type | Required | Default | Metadata Override | Description |
+|--------|------|----------|---------|-------------------|-------------|
+| `url` | string | Yes | - | No | NATS server URL |
+| `subject` | string | No | - | Yes | Default subject (can be overridden per-event) |
+
+## Dynamic Routing
+
+Route events to different subjects using metadata:
+
+```sql
+-- Route by table name
+metadata_extensions = '[
+ {"json_path": "topic", "expression": "''events.'' || tg_table_name"}
+]'
+```
+
+The sink reads the `topic` key from event metadata.
+
+## Example
+
+Complete configuration:
+
+```yaml
+stream:
+ id: 1
+ pg_connection:
+ host: localhost
+ port: 5432
+ name: mydb
+ username: postgres
+ password: postgres
+ tls:
+ enabled: false
+ batch:
+ max_size: 1000
+ max_fill_secs: 5
+
+sink:
+ type: nats
+ url: nats://localhost:4222
+ subject: postgres.events
+```
+
+Messages are published as JSON-serialized payloads.
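+
+To verify delivery locally, you can subscribe with the NATS CLI (assuming it is installed):
+
+```bash
+nats sub postgres.events --server nats://localhost:4222
+```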
diff --git a/docs/sinks/rabbitmq.md b/docs/sinks/rabbitmq.md
new file mode 100644
index 0000000..beef78c
--- /dev/null
+++ b/docs/sinks/rabbitmq.md
@@ -0,0 +1,92 @@
+# RabbitMQ
+
+Enterprise message broker with RabbitMQ.
+
+## Docker Image
+
+```bash
+docker pull ghcr.io/psteinroe/postgres-stream:rabbitmq-latest
+```
+
+## Configuration
+
+```yaml
+sink:
+ type: rabbitmq
+ url: amqp://guest:guest@localhost:5672
+ exchange: events
+ routing_key: postgres.events
+```
+
+### With Queue Binding
+
+```yaml
+sink:
+ type: rabbitmq
+ url: amqp://guest:guest@localhost:5672
+ exchange: events
+ routing_key: postgres.events
+ queue: events-queue
+```
+
+## Options
+
+| Option | Type | Required | Default | Metadata Override | Description |
+|--------|------|----------|---------|-------------------|-------------|
+| `url` | string | Yes | - | No | RabbitMQ connection URL |
+| `exchange` | string | No | - | Yes | Exchange name (can be overridden per-event) |
+| `routing_key` | string | No | - | Yes | Routing key (can be overridden per-event) |
+| `queue` | string | No | - | No | Optional queue to declare and bind |
+
+## Dynamic Routing
+
+Route events using metadata:
+
+```sql
+-- Dynamic exchange and routing key
+metadata_extensions = '[
+ {"json_path": "exchange", "expression": "''events''"},
+ {"json_path": "routing_key", "expression": "tg_table_name || ''.'' || tg_op"}
+]'
+```
+
+The sink reads `exchange` and `routing_key` from event metadata.
+
+## Exchange Setup
+
+The sink declares the exchange as a **topic exchange** with:
+- Durable: true
+- Auto-delete: false
+
+If a queue is specified, it's declared and bound to the exchange with the routing key.
+
+## Example
+
+Complete configuration:
+
+```yaml
+stream:
+ id: 1
+ pg_connection:
+ host: localhost
+ port: 5432
+ name: mydb
+ username: postgres
+ password: postgres
+ tls:
+ enabled: false
+ batch:
+ max_size: 1000
+ max_fill_secs: 5
+
+sink:
+ type: rabbitmq
+ url: amqp://guest:guest@localhost:5672
+ exchange: postgres-events
+ routing_key: events.#
+ queue: events-consumer
+```
+
+Messages are published with:
+- Content-Type: `application/json`
+- Delivery mode: Persistent (2)
diff --git a/docs/sinks/redis-streams.md b/docs/sinks/redis-streams.md
new file mode 100644
index 0000000..ca2bde5
--- /dev/null
+++ b/docs/sinks/redis-streams.md
@@ -0,0 +1,88 @@
+# Redis Streams
+
+Append events to a Redis Stream for an ordered event log.
+
+## Docker Image
+
+```bash
+docker pull ghcr.io/psteinroe/postgres-stream:redis-streams-latest
+```
+
+## Configuration
+
+```yaml
+sink:
+ type: redis-streams
+ url: redis://localhost:6379
+ stream_name: events
+```
+
+### With Length Limit
+
+```yaml
+sink:
+ type: redis-streams
+ url: redis://localhost:6379
+ stream_name: events
+ max_len: 100000
+```
+
+## Options
+
+| Option | Type | Required | Default | Metadata Override | Description |
+|--------|------|----------|---------|-------------------|-------------|
+| `url` | string | Yes | - | No | Redis connection URL |
+| `stream_name` | string | No | - | Yes | Default stream (can be overridden per-event) |
+| `max_len` | integer | No | - | No | Maximum stream length (uses MAXLEN ~) |
+
+## Dynamic Routing
+
+Route events to different streams using metadata:
+
+```sql
+-- Route by table name
+metadata_extensions = '[
+ {"json_path": "stream", "expression": "''events:'' || tg_table_name"}
+]'
+```
+
+The sink reads the `stream` key from event metadata.
+
+## Message Format
+
+Events are added using `XADD` with:
+- Auto-generated stream entry ID (`*`)
+- Single field: `payload` containing JSON
+
+Example when read with `XREAD`:
+```text
+1704067200000-0 payload {"id": 1, "name": "test"}
+```
+
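+The output above corresponds to a read like the following (stream name as configured; the entry ID is illustrative):
+
+```bash
+redis-cli XREAD COUNT 10 STREAMS postgres-events 0
+```
+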
+## Example
+
+Complete configuration:
+
+```yaml
+stream:
+ id: 1
+ pg_connection:
+ host: localhost
+ port: 5432
+ name: mydb
+ username: postgres
+ password: postgres
+ tls:
+ enabled: false
+ batch:
+ max_size: 1000
+ max_fill_secs: 5
+
+sink:
+ type: redis-streams
+ url: redis://localhost:6379
+ stream_name: postgres-events
+ max_len: 100000
+```
+
+The `max_len` option uses approximate trimming (`MAXLEN ~`) for efficiency.
diff --git a/docs/sinks/redis-strings.md b/docs/sinks/redis-strings.md
new file mode 100644
index 0000000..58ca694
--- /dev/null
+++ b/docs/sinks/redis-strings.md
@@ -0,0 +1,85 @@
+# Redis Strings
+
+Store events as key-value pairs in Redis.
+
+## Docker Image
+
+```bash
+docker pull ghcr.io/psteinroe/postgres-stream:redis-strings-latest
+```
+
+## Configuration
+
+```yaml
+sink:
+ type: redis-strings
+ url: redis://localhost:6379
+```
+
+### With Key Prefix
+
+```yaml
+sink:
+ type: redis-strings
+ url: redis://localhost:6379
+ key_prefix: events
+```
+
+## Options
+
+| Option | Type | Required | Default | Metadata Override | Description |
+|--------|------|----------|---------|-------------------|-------------|
+| `url` | string | Yes | - | No | Redis connection URL |
+| `key_prefix` | string | No | - | No | Prefix for all keys |
+| `key` | - | - | - | Yes | Full key (via metadata only) |
+
+## Key Resolution
+
+Keys are determined in this order:
+
+1. `key` in event metadata
+2. Event ID with optional prefix
+
+Without a key in metadata:
+- No prefix: key = event ID
+- With prefix: key = `{prefix}:{event_id}`
+
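+For example, with `key_prefix: postgres` and a hypothetical event ID of `evt-123`, the stored value can be read back with:
+
+```bash
+redis-cli GET postgres:evt-123
+```
+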
+## Dynamic Routing
+
+Set custom keys per-event using metadata:
+
+```sql
+-- Use user_id as the key
+metadata_extensions = '[
+ {"json_path": "key", "expression": "''user:'' || new.user_id::text"}
+]'
+```
+
+The sink reads the `key` from event metadata.
+
+## Example
+
+Complete configuration:
+
+```yaml
+stream:
+ id: 1
+ pg_connection:
+ host: localhost
+ port: 5432
+ name: mydb
+ username: postgres
+ password: postgres
+ tls:
+ enabled: false
+ batch:
+ max_size: 1000
+ max_fill_secs: 5
+
+sink:
+ type: redis-strings
+ url: redis://localhost:6379
+ key_prefix: postgres
+```
+
+Values are stored as JSON-serialized payloads using the Redis `SET` command.
diff --git a/docs/sinks/sns.md b/docs/sinks/sns.md
new file mode 100644
index 0000000..7644cfd
--- /dev/null
+++ b/docs/sinks/sns.md
@@ -0,0 +1,95 @@
+# AWS SNS
+
+Publish events to Amazon SNS topics.
+
+## Docker Image
+
+```bash
+docker pull ghcr.io/psteinroe/postgres-stream:sns-latest
+```
+
+## Configuration
+
+```yaml
+sink:
+ type: sns
+ topic_arn: arn:aws:sns:us-east-1:123456789:my-topic
+ region: us-east-1
+```
+
+### With Explicit Credentials
+
+```yaml
+sink:
+ type: sns
+ topic_arn: arn:aws:sns:us-east-1:123456789:my-topic
+ region: us-east-1
+ access_key_id: ${AWS_ACCESS_KEY_ID}
+ secret_access_key: ${AWS_SECRET_ACCESS_KEY}
+```
+
+### For Local Testing
+
+```yaml
+sink:
+ type: sns
+ topic_arn: arn:aws:sns:us-east-1:000000000000:my-topic
+ region: us-east-1
+ endpoint_url: http://localhost:4566
+ access_key_id: local
+ secret_access_key: local
+```
+
+## Options
+
+| Option | Type | Required | Default | Metadata Override | Description |
+|--------|------|----------|---------|-------------------|-------------|
+| `topic_arn` | string | No | - | Yes | SNS topic ARN (can be overridden per-event) |
+| `region` | string | Yes | - | No | AWS region |
+| `endpoint_url` | string | No | - | No | Custom endpoint for LocalStack |
+| `access_key_id` | string | No | - | No | AWS access key (uses default chain if not set) |
+| `secret_access_key` | string | No | - | No | AWS secret key (uses default chain if not set) |
+
+## Dynamic Routing
+
+Route events to different topics using metadata:
+
+```sql
+-- Route by event type
+metadata_extensions = '[
+ {"json_path": "topic", "expression": "''arn:aws:sns:us-east-1:123456789:'' || new.event_type"}
+]'
+```
+
+The sink reads `topic` from event metadata.
+
+## Batching
+
+Messages are published using `PublishBatch` with up to 10 messages per request (SNS limit). Multiple batches are sent concurrently.
+
+## Example
+
+Complete configuration:
+
+```yaml
+stream:
+ id: 1
+ pg_connection:
+ host: localhost
+ port: 5432
+ name: mydb
+ username: postgres
+ password: postgres
+ tls:
+ enabled: false
+ batch:
+ max_size: 1000
+ max_fill_secs: 5
+
+sink:
+ type: sns
+ topic_arn: arn:aws:sns:us-east-1:123456789:postgres-events
+ region: us-east-1
+```
+
+Messages are published as JSON-serialized payloads.
diff --git a/docs/sinks/sqs.md b/docs/sinks/sqs.md
new file mode 100644
index 0000000..920041e
--- /dev/null
+++ b/docs/sinks/sqs.md
@@ -0,0 +1,95 @@
+# AWS SQS
+
+Send events to Amazon SQS queues.
+
+## Docker Image
+
+```bash
+docker pull ghcr.io/psteinroe/postgres-stream:sqs-latest
+```
+
+## Configuration
+
+```yaml
+sink:
+ type: sqs
+ queue_url: https://sqs.us-east-1.amazonaws.com/123456789/my-queue
+ region: us-east-1
+```
+
+### With Explicit Credentials
+
+```yaml
+sink:
+ type: sqs
+ queue_url: https://sqs.us-east-1.amazonaws.com/123456789/my-queue
+ region: us-east-1
+ access_key_id: ${AWS_ACCESS_KEY_ID}
+ secret_access_key: ${AWS_SECRET_ACCESS_KEY}
+```
+
+### For Local Testing
+
+```yaml
+sink:
+ type: sqs
+ queue_url: http://localhost:9324/queue/my-queue
+ region: us-east-1
+ endpoint_url: http://localhost:9324
+ access_key_id: local
+ secret_access_key: local
+```
+
+## Options
+
+| Option | Type | Required | Default | Metadata Override | Description |
+|--------|------|----------|---------|-------------------|-------------|
+| `queue_url` | string | No | - | Yes | SQS queue URL (can be overridden per-event) |
+| `region` | string | Yes | - | No | AWS region |
+| `endpoint_url` | string | No | - | No | Custom endpoint for LocalStack/ElasticMQ |
+| `access_key_id` | string | No | - | No | AWS access key (uses default chain if not set) |
+| `secret_access_key` | string | No | - | No | AWS secret key (uses default chain if not set) |
+
+## Dynamic Routing
+
+Route events to different queues using metadata:
+
+```sql
+-- Route by priority
+metadata_extensions = '[
+ {"json_path": "queue_url", "expression": "''https://sqs.us-east-1.amazonaws.com/123456789/'' || new.priority || ''-queue''"}
+]'
+```
+
+The sink reads `queue_url` from event metadata.
+
+## Batching
+
+Messages are sent using `SendMessageBatch` with up to 10 messages per request (SQS limit). Multiple batches are sent concurrently.
+
+## Example
+
+Complete configuration:
+
+```yaml
+stream:
+ id: 1
+ pg_connection:
+ host: localhost
+ port: 5432
+ name: mydb
+ username: postgres
+ password: postgres
+ tls:
+ enabled: false
+ batch:
+ max_size: 1000
+ max_fill_secs: 5
+
+sink:
+ type: sqs
+ queue_url: https://sqs.us-east-1.amazonaws.com/123456789/postgres-events
+ region: us-east-1
+```
+
+Messages are sent as JSON-serialized payloads.
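+
+To check delivery during local testing, you can read messages back with the AWS CLI; add `--endpoint-url` when targeting LocalStack or ElasticMQ:
+
+```bash
+aws sqs receive-message \
+  --queue-url https://sqs.us-east-1.amazonaws.com/123456789/postgres-events \
+  --max-number-of-messages 10
+```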
diff --git a/docs/sinks/webhook.md b/docs/sinks/webhook.md
new file mode 100644
index 0000000..26d9c50
--- /dev/null
+++ b/docs/sinks/webhook.md
@@ -0,0 +1,102 @@
+# Webhook
+
+Deliver events via HTTP POST requests.
+
+## Docker Image
+
+```bash
+docker pull ghcr.io/psteinroe/postgres-stream:webhook-latest
+```
+
+## Configuration
+
+```yaml
+sink:
+ type: webhook
+ url: https://api.example.com/events
+```
+
+### With Headers
+
+```yaml
+sink:
+ type: webhook
+ url: https://api.example.com/events
+ headers:
+ Authorization: Bearer ${API_TOKEN}
+ X-Custom-Header: value
+ timeout_ms: 30000
+```
+
+## Options
+
+| Option | Type | Required | Default | Metadata Override | Description |
+|--------|------|----------|---------|-------------------|-------------|
+| `url` | string | No | - | Yes | Default URL (can be overridden per-event) |
+| `headers` | object | No | {} | Yes | Custom headers to include |
+| `timeout_ms` | integer | No | 30000 | No | Request timeout in milliseconds |
+
+## Dynamic Routing
+
+Route events to different URLs using metadata:
+
+```sql
+-- Route by tenant
+metadata_extensions = '[
+ {"json_path": "url", "expression": "''https://api.example.com/'' || new.tenant_id"}
+]'
+```
+
+Add headers dynamically:
+
+```sql
+-- Add tenant header
+metadata = '{"headers": {"X-Tenant-Id": "tenant-123"}}'
+```
+
+The sink reads `url` and `headers` from event metadata.
+
+## Request Format
+
+Events are sent as JSON with:
+- Method: POST
+- Content-Type: `application/json`
+- Body: Array of event payloads
+
+When multiple events target the same URL, they're batched into a single request:
+
+```json
+[
+ {"id": 1, "name": "event1"},
+ {"id": 2, "name": "event2"}
+]
+```
+
+## Example
+
+Complete configuration:
+
+```yaml
+stream:
+ id: 1
+ pg_connection:
+ host: localhost
+ port: 5432
+ name: mydb
+ username: postgres
+ password: postgres
+ tls:
+ enabled: false
+ batch:
+ max_size: 1000
+ max_fill_secs: 5
+
+sink:
+ type: webhook
+ url: https://httpbin.org/post
+ headers:
+ Authorization: Bearer secret-token
+ timeout_ms: 30000
+```
+
+The sink expects a 2xx response. Non-success status codes cause the batch to fail and retry.
diff --git a/justfile b/justfile
index e9ffa21..fefd919 100644
--- a/justfile
+++ b/justfile
@@ -41,3 +41,11 @@ reset-git:
git pull
just clear-branches
+# Serve documentation locally
+docs:
+ uvx zensical serve
+
+# Build documentation
+docs-build:
+ uvx zensical build
+
diff --git a/uv.lock b/uv.lock
new file mode 100644
index 0000000..8792d62
--- /dev/null
+++ b/uv.lock
@@ -0,0 +1,158 @@
+version = 1
+revision = 2
+requires-python = ">=3.11"
+
+[[package]]
+name = "click"
+version = "8.3.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "colorama", marker = "sys_platform == 'win32'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/3d/fa/656b739db8587d7b5dfa22e22ed02566950fbfbcdc20311993483657a5c0/click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", size = 295065, upload-time = "2025-11-15T20:45:42.706Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6", size = 108274, upload-time = "2025-11-15T20:45:41.139Z" },
+]
+
+[[package]]
+name = "colorama"
+version = "0.4.6"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" },
+]
+
+[[package]]
+name = "deepmerge"
+version = "2.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/a8/3a/b0ba594708f1ad0bc735884b3ad854d3ca3bdc1d741e56e40bbda6263499/deepmerge-2.0.tar.gz", hash = "sha256:5c3d86081fbebd04dd5de03626a0607b809a98fb6ccba5770b62466fe940ff20", size = 19890, upload-time = "2024-08-30T05:31:50.308Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/2d/82/e5d2c1c67d19841e9edc74954c827444ae826978499bde3dfc1d007c8c11/deepmerge-2.0-py3-none-any.whl", hash = "sha256:6de9ce507115cff0bed95ff0ce9ecc31088ef50cbdf09bc90a09349a318b3d00", size = 13475, upload-time = "2024-08-30T05:31:48.659Z" },
+]
+
+[[package]]
+name = "markdown"
+version = "3.10"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/7d/ab/7dd27d9d863b3376fcf23a5a13cb5d024aed1db46f963f1b5735ae43b3be/markdown-3.10.tar.gz", hash = "sha256:37062d4f2aa4b2b6b32aefb80faa300f82cc790cb949a35b8caede34f2b68c0e", size = 364931, upload-time = "2025-11-03T19:51:15.007Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/70/81/54e3ce63502cd085a0c556652a4e1b919c45a446bd1e5300e10c44c8c521/markdown-3.10-py3-none-any.whl", hash = "sha256:b5b99d6951e2e4948d939255596523444c0e677c669700b1d17aa4a8a464cb7c", size = 107678, upload-time = "2025-11-03T19:51:13.887Z" },
+]
+
+[[package]]
+name = "postgres-stream-docs"
+version = "0.1.0"
+source = { editable = "." }
+dependencies = [
+ { name = "zensical" },
+]
+
+[package.metadata]
+requires-dist = [{ name = "zensical" }]
+
+[[package]]
+name = "pygments"
+version = "2.19.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" },
+]
+
+[[package]]
+name = "pymdown-extensions"
+version = "10.20"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "markdown" },
+ { name = "pyyaml" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/3e/35/e3814a5b7df295df69d035cfb8aab78b2967cdf11fcfae7faed726b66664/pymdown_extensions-10.20.tar.gz", hash = "sha256:5c73566ab0cf38c6ba084cb7c5ea64a119ae0500cce754ccb682761dfea13a52", size = 852774, upload-time = "2025-12-31T19:59:42.211Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/ea/10/47caf89cbb52e5bb764696fd52a8c591a2f0e851a93270c05a17f36000b5/pymdown_extensions-10.20-py3-none-any.whl", hash = "sha256:ea9e62add865da80a271d00bfa1c0fa085b20d133fb3fc97afdc88e682f60b2f", size = 268733, upload-time = "2025-12-31T19:59:40.652Z" },
+]
+
+[[package]]
+name = "pyyaml"
+version = "6.0.3"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/05/8e/961c0007c59b8dd7729d542c61a4d537767a59645b82a0b521206e1e25c2/pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f", size = 130960, upload-time = "2025-09-25T21:33:16.546Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/6d/16/a95b6757765b7b031c9374925bb718d55e0a9ba8a1b6a12d25962ea44347/pyyaml-6.0.3-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:44edc647873928551a01e7a563d7452ccdebee747728c1080d881d68af7b997e", size = 185826, upload-time = "2025-09-25T21:31:58.655Z" },
+ { url = "https://files.pythonhosted.org/packages/16/19/13de8e4377ed53079ee996e1ab0a9c33ec2faf808a4647b7b4c0d46dd239/pyyaml-6.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:652cb6edd41e718550aad172851962662ff2681490a8a711af6a4d288dd96824", size = 175577, upload-time = "2025-09-25T21:32:00.088Z" },
+ { url = "https://files.pythonhosted.org/packages/0c/62/d2eb46264d4b157dae1275b573017abec435397aa59cbcdab6fc978a8af4/pyyaml-6.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:10892704fc220243f5305762e276552a0395f7beb4dbf9b14ec8fd43b57f126c", size = 775556, upload-time = "2025-09-25T21:32:01.31Z" },
+ { url = "https://files.pythonhosted.org/packages/10/cb/16c3f2cf3266edd25aaa00d6c4350381c8b012ed6f5276675b9eba8d9ff4/pyyaml-6.0.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:850774a7879607d3a6f50d36d04f00ee69e7fc816450e5f7e58d7f17f1ae5c00", size = 882114, upload-time = "2025-09-25T21:32:03.376Z" },
+ { url = "https://files.pythonhosted.org/packages/71/60/917329f640924b18ff085ab889a11c763e0b573da888e8404ff486657602/pyyaml-6.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b8bb0864c5a28024fac8a632c443c87c5aa6f215c0b126c449ae1a150412f31d", size = 806638, upload-time = "2025-09-25T21:32:04.553Z" },
+ { url = "https://files.pythonhosted.org/packages/dd/6f/529b0f316a9fd167281a6c3826b5583e6192dba792dd55e3203d3f8e655a/pyyaml-6.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1d37d57ad971609cf3c53ba6a7e365e40660e3be0e5175fa9f2365a379d6095a", size = 767463, upload-time = "2025-09-25T21:32:06.152Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/6a/b627b4e0c1dd03718543519ffb2f1deea4a1e6d42fbab8021936a4d22589/pyyaml-6.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:37503bfbfc9d2c40b344d06b2199cf0e96e97957ab1c1b546fd4f87e53e5d3e4", size = 794986, upload-time = "2025-09-25T21:32:07.367Z" },
+ { url = "https://files.pythonhosted.org/packages/45/91/47a6e1c42d9ee337c4839208f30d9f09caa9f720ec7582917b264defc875/pyyaml-6.0.3-cp311-cp311-win32.whl", hash = "sha256:8098f252adfa6c80ab48096053f512f2321f0b998f98150cea9bd23d83e1467b", size = 142543, upload-time = "2025-09-25T21:32:08.95Z" },
+ { url = "https://files.pythonhosted.org/packages/da/e3/ea007450a105ae919a72393cb06f122f288ef60bba2dc64b26e2646fa315/pyyaml-6.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:9f3bfb4965eb874431221a3ff3fdcddc7e74e3b07799e0e84ca4a0f867d449bf", size = 158763, upload-time = "2025-09-25T21:32:09.96Z" },
+ { url = "https://files.pythonhosted.org/packages/d1/33/422b98d2195232ca1826284a76852ad5a86fe23e31b009c9886b2d0fb8b2/pyyaml-6.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7f047e29dcae44602496db43be01ad42fc6f1cc0d8cd6c83d342306c32270196", size = 182063, upload-time = "2025-09-25T21:32:11.445Z" },
+ { url = "https://files.pythonhosted.org/packages/89/a0/6cf41a19a1f2f3feab0e9c0b74134aa2ce6849093d5517a0c550fe37a648/pyyaml-6.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fc09d0aa354569bc501d4e787133afc08552722d3ab34836a80547331bb5d4a0", size = 173973, upload-time = "2025-09-25T21:32:12.492Z" },
+ { url = "https://files.pythonhosted.org/packages/ed/23/7a778b6bd0b9a8039df8b1b1d80e2e2ad78aa04171592c8a5c43a56a6af4/pyyaml-6.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9149cad251584d5fb4981be1ecde53a1ca46c891a79788c0df828d2f166bda28", size = 775116, upload-time = "2025-09-25T21:32:13.652Z" },
+ { url = "https://files.pythonhosted.org/packages/65/30/d7353c338e12baef4ecc1b09e877c1970bd3382789c159b4f89d6a70dc09/pyyaml-6.0.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5fdec68f91a0c6739b380c83b951e2c72ac0197ace422360e6d5a959d8d97b2c", size = 844011, upload-time = "2025-09-25T21:32:15.21Z" },
+ { url = "https://files.pythonhosted.org/packages/8b/9d/b3589d3877982d4f2329302ef98a8026e7f4443c765c46cfecc8858c6b4b/pyyaml-6.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ba1cc08a7ccde2d2ec775841541641e4548226580ab850948cbfda66a1befcdc", size = 807870, upload-time = "2025-09-25T21:32:16.431Z" },
+ { url = "https://files.pythonhosted.org/packages/05/c0/b3be26a015601b822b97d9149ff8cb5ead58c66f981e04fedf4e762f4bd4/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8dc52c23056b9ddd46818a57b78404882310fb473d63f17b07d5c40421e47f8e", size = 761089, upload-time = "2025-09-25T21:32:17.56Z" },
+ { url = "https://files.pythonhosted.org/packages/be/8e/98435a21d1d4b46590d5459a22d88128103f8da4c2d4cb8f14f2a96504e1/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:41715c910c881bc081f1e8872880d3c650acf13dfa8214bad49ed4cede7c34ea", size = 790181, upload-time = "2025-09-25T21:32:18.834Z" },
+ { url = "https://files.pythonhosted.org/packages/74/93/7baea19427dcfbe1e5a372d81473250b379f04b1bd3c4c5ff825e2327202/pyyaml-6.0.3-cp312-cp312-win32.whl", hash = "sha256:96b533f0e99f6579b3d4d4995707cf36df9100d67e0c8303a0c55b27b5f99bc5", size = 137658, upload-time = "2025-09-25T21:32:20.209Z" },
+ { url = "https://files.pythonhosted.org/packages/86/bf/899e81e4cce32febab4fb42bb97dcdf66bc135272882d1987881a4b519e9/pyyaml-6.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:5fcd34e47f6e0b794d17de1b4ff496c00986e1c83f7ab2fb8fcfe9616ff7477b", size = 154003, upload-time = "2025-09-25T21:32:21.167Z" },
+ { url = "https://files.pythonhosted.org/packages/1a/08/67bd04656199bbb51dbed1439b7f27601dfb576fb864099c7ef0c3e55531/pyyaml-6.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:64386e5e707d03a7e172c0701abfb7e10f0fb753ee1d773128192742712a98fd", size = 140344, upload-time = "2025-09-25T21:32:22.617Z" },
+ { url = "https://files.pythonhosted.org/packages/d1/11/0fd08f8192109f7169db964b5707a2f1e8b745d4e239b784a5a1dd80d1db/pyyaml-6.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8", size = 181669, upload-time = "2025-09-25T21:32:23.673Z" },
+ { url = "https://files.pythonhosted.org/packages/b1/16/95309993f1d3748cd644e02e38b75d50cbc0d9561d21f390a76242ce073f/pyyaml-6.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1", size = 173252, upload-time = "2025-09-25T21:32:25.149Z" },
+ { url = "https://files.pythonhosted.org/packages/50/31/b20f376d3f810b9b2371e72ef5adb33879b25edb7a6d072cb7ca0c486398/pyyaml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c", size = 767081, upload-time = "2025-09-25T21:32:26.575Z" },
+ { url = "https://files.pythonhosted.org/packages/49/1e/a55ca81e949270d5d4432fbbd19dfea5321eda7c41a849d443dc92fd1ff7/pyyaml-6.0.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5", size = 841159, upload-time = "2025-09-25T21:32:27.727Z" },
+ { url = "https://files.pythonhosted.org/packages/74/27/e5b8f34d02d9995b80abcef563ea1f8b56d20134d8f4e5e81733b1feceb2/pyyaml-6.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6", size = 801626, upload-time = "2025-09-25T21:32:28.878Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/11/ba845c23988798f40e52ba45f34849aa8a1f2d4af4b798588010792ebad6/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6", size = 753613, upload-time = "2025-09-25T21:32:30.178Z" },
+ { url = "https://files.pythonhosted.org/packages/3d/e0/7966e1a7bfc0a45bf0a7fb6b98ea03fc9b8d84fa7f2229e9659680b69ee3/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be", size = 794115, upload-time = "2025-09-25T21:32:31.353Z" },
+ { url = "https://files.pythonhosted.org/packages/de/94/980b50a6531b3019e45ddeada0626d45fa85cbe22300844a7983285bed3b/pyyaml-6.0.3-cp313-cp313-win32.whl", hash = "sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26", size = 137427, upload-time = "2025-09-25T21:32:32.58Z" },
+ { url = "https://files.pythonhosted.org/packages/97/c9/39d5b874e8b28845e4ec2202b5da735d0199dbe5b8fb85f91398814a9a46/pyyaml-6.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c", size = 154090, upload-time = "2025-09-25T21:32:33.659Z" },
+ { url = "https://files.pythonhosted.org/packages/73/e8/2bdf3ca2090f68bb3d75b44da7bbc71843b19c9f2b9cb9b0f4ab7a5a4329/pyyaml-6.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb", size = 140246, upload-time = "2025-09-25T21:32:34.663Z" },
+ { url = "https://files.pythonhosted.org/packages/9d/8c/f4bd7f6465179953d3ac9bc44ac1a8a3e6122cf8ada906b4f96c60172d43/pyyaml-6.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac", size = 181814, upload-time = "2025-09-25T21:32:35.712Z" },
+ { url = "https://files.pythonhosted.org/packages/bd/9c/4d95bb87eb2063d20db7b60faa3840c1b18025517ae857371c4dd55a6b3a/pyyaml-6.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310", size = 173809, upload-time = "2025-09-25T21:32:36.789Z" },
+ { url = "https://files.pythonhosted.org/packages/92/b5/47e807c2623074914e29dabd16cbbdd4bf5e9b2db9f8090fa64411fc5382/pyyaml-6.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7", size = 766454, upload-time = "2025-09-25T21:32:37.966Z" },
+ { url = "https://files.pythonhosted.org/packages/02/9e/e5e9b168be58564121efb3de6859c452fccde0ab093d8438905899a3a483/pyyaml-6.0.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788", size = 836355, upload-time = "2025-09-25T21:32:39.178Z" },
+ { url = "https://files.pythonhosted.org/packages/88/f9/16491d7ed2a919954993e48aa941b200f38040928474c9e85ea9e64222c3/pyyaml-6.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5", size = 794175, upload-time = "2025-09-25T21:32:40.865Z" },
+ { url = "https://files.pythonhosted.org/packages/dd/3f/5989debef34dc6397317802b527dbbafb2b4760878a53d4166579111411e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764", size = 755228, upload-time = "2025-09-25T21:32:42.084Z" },
+ { url = "https://files.pythonhosted.org/packages/d7/ce/af88a49043cd2e265be63d083fc75b27b6ed062f5f9fd6cdc223ad62f03e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35", size = 789194, upload-time = "2025-09-25T21:32:43.362Z" },
+ { url = "https://files.pythonhosted.org/packages/23/20/bb6982b26a40bb43951265ba29d4c246ef0ff59c9fdcdf0ed04e0687de4d/pyyaml-6.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac", size = 156429, upload-time = "2025-09-25T21:32:57.844Z" },
+ { url = "https://files.pythonhosted.org/packages/f4/f4/a4541072bb9422c8a883ab55255f918fa378ecf083f5b85e87fc2b4eda1b/pyyaml-6.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3", size = 143912, upload-time = "2025-09-25T21:32:59.247Z" },
+ { url = "https://files.pythonhosted.org/packages/7c/f9/07dd09ae774e4616edf6cda684ee78f97777bdd15847253637a6f052a62f/pyyaml-6.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3", size = 189108, upload-time = "2025-09-25T21:32:44.377Z" },
+ { url = "https://files.pythonhosted.org/packages/4e/78/8d08c9fb7ce09ad8c38ad533c1191cf27f7ae1effe5bb9400a46d9437fcf/pyyaml-6.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba", size = 183641, upload-time = "2025-09-25T21:32:45.407Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/5b/3babb19104a46945cf816d047db2788bcaf8c94527a805610b0289a01c6b/pyyaml-6.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c", size = 831901, upload-time = "2025-09-25T21:32:48.83Z" },
+ { url = "https://files.pythonhosted.org/packages/8b/cc/dff0684d8dc44da4d22a13f35f073d558c268780ce3c6ba1b87055bb0b87/pyyaml-6.0.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702", size = 861132, upload-time = "2025-09-25T21:32:50.149Z" },
+ { url = "https://files.pythonhosted.org/packages/b1/5e/f77dc6b9036943e285ba76b49e118d9ea929885becb0a29ba8a7c75e29fe/pyyaml-6.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c", size = 839261, upload-time = "2025-09-25T21:32:51.808Z" },
+ { url = "https://files.pythonhosted.org/packages/ce/88/a9db1376aa2a228197c58b37302f284b5617f56a5d959fd1763fb1675ce6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065", size = 805272, upload-time = "2025-09-25T21:32:52.941Z" },
+ { url = "https://files.pythonhosted.org/packages/da/92/1446574745d74df0c92e6aa4a7b0b3130706a4142b2d1a5869f2eaa423c6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65", size = 829923, upload-time = "2025-09-25T21:32:54.537Z" },
+ { url = "https://files.pythonhosted.org/packages/f0/7a/1c7270340330e575b92f397352af856a8c06f230aa3e76f86b39d01b416a/pyyaml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9", size = 174062, upload-time = "2025-09-25T21:32:55.767Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/12/de94a39c2ef588c7e6455cfbe7343d3b2dc9d6b6b2f40c4c6565744c873d/pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b", size = 149341, upload-time = "2025-09-25T21:32:56.828Z" },
+]
+
+[[package]]
+name = "zensical"
+version = "0.0.15"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "click" },
+ { name = "deepmerge" },
+ { name = "markdown" },
+ { name = "pygments" },
+ { name = "pymdown-extensions" },
+ { name = "pyyaml" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/fd/ad/87b7b591551c74de67b08dcf172532fbb6df6f0e626dce9f220aae293052/zensical-0.0.15.tar.gz", hash = "sha256:b3200c91b30370671c50b8b4aa41c20e55ff2814b9003ee23c9b6f923a0c19be", size = 3816831, upload-time = "2025-12-24T11:15:49.058Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e4/07/ede00d39ff6cff7ab4971d15caa04a6710b126cbf0c8342add0337f4db89/zensical-0.0.15-cp310-abi3-macosx_10_12_x86_64.whl", hash = "sha256:13f205d24baeb4a77d096c3d385a8496850567b45c397cd54dde7c22dcbf0da8", size = 11934661, upload-time = "2025-12-24T11:15:09.019Z" },
+ { url = "https://files.pythonhosted.org/packages/d6/32/24449c59f90a6a17dd0d9740ee44f245887d0c177c9c1dc452b9eb812024/zensical-0.0.15-cp310-abi3-macosx_11_0_arm64.whl", hash = "sha256:99702504d38d8f7da4bf9a69513d2f65a0371dcb8f0e9f180c861cb285adb61f", size = 11820384, upload-time = "2025-12-24T11:15:12.265Z" },
+ { url = "https://files.pythonhosted.org/packages/ac/db/64fed914788f2f5a8880dcc9a08ce25bcf1a7f2586a09e5d7d61003fd652/zensical-0.0.15-cp310-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:051ea368d268ffebf7ed7b6422f211a57680b2810fcde7f251816eb5c8d2f488", size = 12128961, upload-time = "2025-12-24T11:15:16.178Z" },
+ { url = "https://files.pythonhosted.org/packages/59/ca/04fd676880acad9571736e470e1f7bd8e6a2391f09952e69800f1b77fc75/zensical-0.0.15-cp310-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:31a333afaf54d042bb51ac7dc623bd1b3bbe173c0ce4c7b72764cdef143ff4e2", size = 12098586, upload-time = "2025-12-24T11:15:19.452Z" },
+ { url = "https://files.pythonhosted.org/packages/8b/02/6a6ecad3b4cb07e11c8d160882cc937f8e3e96868f598c371892f1e594d7/zensical-0.0.15-cp310-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:072f80b7346ad4657a4c23008ae5cd3b3511e3bfac8d6bb277cbb6b3c11b6387", size = 12418552, upload-time = "2025-12-24T11:15:23.096Z" },
+ { url = "https://files.pythonhosted.org/packages/bb/b7/2a7205dfbeeb8612fbb1e22ad9f770804a07e440b276dcf23cbce3592da4/zensical-0.0.15-cp310-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec1b146adb3ee4146a3f800e0a4e6c8e681092d87ac51c60e35e99b68a724b31", size = 12193618, upload-time = "2025-12-24T11:15:26.359Z" },
+ { url = "https://files.pythonhosted.org/packages/dc/f4/5f38022d09e668622e49e8b5606bbfb5177d42ab1b4e91706ef0d38e2422/zensical-0.0.15-cp310-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:07a24e11a0e00d14f37d209cc37df446f91908109ca86ab3fe41c0b981e32668", size = 12308805, upload-time = "2025-12-24T11:15:29.602Z" },
+ { url = "https://files.pythonhosted.org/packages/48/f2/6dd7657d6e1c1cd4b01ad531cea6a95eb660a2f960b03415d2184720a283/zensical-0.0.15-cp310-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:cc2bc9b4f89863d8b3443b0b9446ccaf5181085c3fc7a7e1bbc4708afe864f7d", size = 12367060, upload-time = "2025-12-24T11:15:32.621Z" },
+ { url = "https://files.pythonhosted.org/packages/d3/83/781bdfbf459ec84b085085b142f433541de70d642d665227442a4deb9360/zensical-0.0.15-cp310-abi3-musllinux_1_2_i686.whl", hash = "sha256:a3809be6d5f25bbd69dbe43715a60bf9d0186c51a1ec821e02f106e660e632b4", size = 12493255, upload-time = "2025-12-24T11:15:36.126Z" },
+ { url = "https://files.pythonhosted.org/packages/54/10/0440e848658b97467c15c356f9038906fa90ac52b7f969ed1cce5deaf017/zensical-0.0.15-cp310-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:4fd9382db77eff79eb347d4d0b6194c1c285e4151ad104bc9c8f65a27cb71d7f", size = 12430315, upload-time = "2025-12-24T11:15:39.697Z" },
+ { url = "https://files.pythonhosted.org/packages/19/d9/50f3e833075e678047cd6e302f731b4b992762d366949124c2eeed6bd96c/zensical-0.0.15-cp310-abi3-win32.whl", hash = "sha256:1d29712dd4659e26b351417534e2ad6364f506514e168ac4c0ed42093b3a9469", size = 11559368, upload-time = "2025-12-24T11:15:42.893Z" },
+ { url = "https://files.pythonhosted.org/packages/18/07/cf06fc620dd94b2ffaa3e9df47b174603e0234096fd857b25dcda6c4a538/zensical-0.0.15-cp310-abi3-win_amd64.whl", hash = "sha256:0d0b303ec8a7aec2e733239f21d3602ce29e0eaabe4e4d60c9bcccb5c2d162dc", size = 11740088, upload-time = "2025-12-24T11:15:45.631Z" },
+]
diff --git a/zensical.toml b/zensical.toml
new file mode 100644
index 0000000..49bb143
--- /dev/null
+++ b/zensical.toml
@@ -0,0 +1,63 @@
+[project]
+site_name = "Postgres Stream"
+logo = "media/logo.png"
+site_url = "https://psteinroe.github.io/postgres-stream/"
+site_description = "Reliably stream Postgres table changes to external systems with automatic failover and zero event loss"
+
+repo_url = "https://github.com/psteinroe/postgres-stream"
+repo_name = "psteinroe/postgres-stream"
+
+nav = [
+ { "Introduction" = "index.md" },
+ { "Getting Started" = "getting-started.md" },
+ { "Concepts" = [
+ "concepts/how-it-works.md",
+ "concepts/subscriptions.md",
+ "concepts/event-structure.md",
+ "concepts/extensions.md",
+ "concepts/failover.md",
+ "concepts/manual-events.md"
+ ]},
+ { "Sinks" = [
+ "sinks/index.md",
+ "sinks/kafka.md",
+ "sinks/nats.md",
+ "sinks/rabbitmq.md",
+ "sinks/redis-strings.md",
+ "sinks/redis-streams.md",
+ "sinks/webhook.md",
+ "sinks/sqs.md",
+ "sinks/sns.md",
+ "sinks/kinesis.md",
+ "sinks/gcp-pubsub.md",
+ "sinks/elasticsearch.md",
+ "sinks/meilisearch.md",
+ "sinks/custom-sinks.md"
+ ]},
+ { "Reference" = [
+ "reference/configuration.md",
+ "reference/subscriptions-table.md",
+ "reference/event-format.md"
+ ]}
+]
+
+[project.theme]
+features = [
+ "navigation.sections",
+ "navigation.expand",
+ "navigation.path",
+ "navigation.indexes",
+ "content.code.copy"
+]
+
+[[project.theme.palette]]
+media = "(prefers-color-scheme: light)"
+scheme = "default"
+toggle.icon = "lucide/sun"
+toggle.name = "Switch to dark mode"
+
+[[project.theme.palette]]
+media = "(prefers-color-scheme: dark)"
+scheme = "slate"
+toggle.icon = "lucide/moon"
+toggle.name = "Switch to light mode"