Skip to content

Commit 9adf1cf

Browse files
Mitchell Shiell
Mitchell Shiell
authored and
Mitchell Shiell
committed
added envs to conductor, reference by scripts
1 parent 947ad94 commit 9adf1cf

14 files changed

+119
-68
lines changed

Makefile

+9-9
Original file line numberDiff line numberDiff line change
@@ -1,16 +1,16 @@
11
# Define all phony targets (targets that don't create files)
2-
.PHONY: phaseOne phaseTwo down clean cleanVolumes mockData
2+
.PHONY: phase-one stage-dev shutdown reset load-data clean-data
33

44
# Start Phase One development environment
5-
phaseOne:
5+
phase-one:
66
PROFILE=phaseOne docker compose -f ./docker-compose.phaseOne.yml --profile phaseOne up --attach conductor
77

8-
# Start Phase Two development environment
9-
stageDev:
8+
# Start Stage development environment
9+
stage-dev:
1010
PROFILE=stageDev docker compose -f ./docker-compose.phaseOne.yml --profile stageDev up --attach conductor
1111

1212
# Gracefully shutdown all containers while preserving volumes
13-
down:
13+
shutdown:
1414
@{ \
1515
printf "\033[1;36mConductor:\033[0m Checking for containers...\n"; \
1616
if docker compose -f ./docker-compose.phaseOne.yml ps -a -q 2>/dev/null | grep -q .; then \
@@ -25,7 +25,7 @@ down:
2525
}
2626

2727
# Shutdown all containers and remove all volumes (WARNING: Deletes all data)
28-
downVolumes:
28+
reset:
2929
@{ \
3030
printf "\033[1;33mWarning:\033[0m This will remove all containers AND their volumes. Data will be lost.\n"; \
3131
read -p "Are you sure you want to continue? [y/N] " confirm; \
@@ -46,11 +46,11 @@ downVolumes:
4646
}
4747

4848
# Load sample data into Elasticsearch
49-
mockData:
50-
PROFILE=mockData docker compose -f ./docker-compose.phaseOne.yml --profile mockData up --attach conductor
49+
load-data:
50+
PROFILE=data docker compose -f ./docker-compose.phaseOne.yml --profile data up --attach conductor
5151

5252
# Remove all documents from Elasticsearch (preserves index structure)
53-
clean:
53+
clean-data:
5454
@echo "\033[1;33mWarning:\033[0m This will delete ALL data from the Elasticsearch index."
5555
@echo "This action cannot be undone."
5656
@/bin/bash -c 'read -p "Are you sure you want to continue? [y/N] " confirm; \

README.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@ previous phase's foundation while introducing new capabilities.
5050
1. **Clone the repo branch**
5151

5252
```
53-
git clone -b prelude https://github.com/overture-stack/conductor.git
53+
git clone -b preludeV2.1 https://github.com/overture-stack/conductor.git
5454
```
5555

5656
2. **Build the Stage image using the dockerfile** For phaseOne run:

apps/readme.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
# Apps folder
22

3-
This folder contains the following
3+
This folder contains a
44

55
## CSV-Processor
66

docker-compose.phaseOne.yml

+19-4
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
services:
22
conductor:
3-
profiles: ['phaseOne', 'phaseTwo', 'phaseThree', 'platform', 'stageDev', 'mockData', 'clean']
3+
profiles: ['phaseOne', 'phaseTwo', 'phaseThree', 'platform', 'stageDev', 'data', 'clean']
44
image: node:18-alpine
55
container_name: conductor
66
ports:
@@ -15,8 +15,22 @@ services:
1515
- ./scripts:/scripts
1616
- ./volumes/health:/health
1717
environment:
18-
- PROFILE=${PROFILE:-platform}
19-
18+
PROFILE: ${PROFILE:-platform}
19+
ES_URL: http://elasticsearch:9200
20+
ES_USER: elastic
21+
ES_PASS: myelasticpassword
22+
ARRANGER_FILE_URL: http://arranger-file:5050/graphql
23+
ARRANGER_TABULAR_URL: http://arranger-tabular:5051/graphql
24+
FILE_INDEX_NAME: file-index
25+
FILE_ES_TEMPLATE_FILE: /usr/share/elasticsearch/config/file_data_index_template.json
26+
FILE_ES_TEMPLATE_NAME: file_template
27+
FILE_ES_ALIAS_NAME: file_centric
28+
TABULAR_INDEX_NAME: tabular-index
29+
TABULAR_ES_TEMPLATE_FILE: /usr/share/elasticsearch/config/tabular_data_index_template.json
30+
TABULAR_ES_TEMPLATE_NAME: tabular_template
31+
TABULAR_ES_ALIAS_NAME: tabular_centric
32+
STAGE_URL: http://stage:3000
33+
TABULAR_DATA_FILE: /data/tabularData.csv
2034
command: >
2135
sh -c '
2236
apk add --no-cache --quiet curl >/dev/null 2>&1
@@ -48,7 +62,7 @@ services:
4862
chmod +x scripts/deployments/stageDev.sh
4963
scripts/deployments/stageDev.sh
5064
;;
51-
mockData)
65+
data)
5266
echo "Running mock data submission..."
5367
chmod +x scripts/services/phaseOne/submitMockData.sh
5468
scripts/services/phaseOne/submitMockData.sh
@@ -90,6 +104,7 @@ services:
90104
discovery.type: single-node
91105
cluster.name: workflow.elasticsearch
92106
ES_JAVA_OPTS: -Xms512m -Xmx2048m
107+
ES_USER: elastic
93108
ELASTIC_PASSWORD: myelasticpassword
94109
xpack.security.enabled: 'true'
95110
logging:

scripts/deployments/phaseThree.sh

Whitespace-only changes.

scripts/deployments/phaseTwo.sh

Whitespace-only changes.

scripts/readme.md

+52
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,52 @@
1+
# Scripts
2+
3+
## Deployments
4+
5+
Deployment scripts are an ordered collection of service scripts made to
6+
systematically deploy platform infrastructure.
7+
8+
## Services
9+
10+
Service scripts are modular and designed to perform specific tasks (like health
11+
checks, data setup, and service validation) that can be easily referenced and
12+
executed by deployment scripts. The plug-and-play setup allows for flexible
13+
deployment configurations where scripts can be added, removed, or reordered as
14+
needed.
15+
16+
To reduce cognitive overload we have organized setup scripts by the phase they
17+
are introduced in. All configurable variables are located at the top of the
18+
script and are configurable from the environment variables of the conductor
19+
service within the docker compose file.
20+
21+
### PhaseOne.sh
22+
23+
Configurations:
24+
25+
- `DEBUG`: Boolean flag to enable/disable debug logging
26+
- `SCRIPT_DIR`: Base directory path for service scripts
27+
28+
Key Functions:
29+
30+
- `debug()`: Handles conditional debug output when DEBUG=true
31+
- `rs()`: (Run Script) Utility function that:
32+
- Verifies script existence
33+
- Sets execute permissions
34+
- Executes scripts with explicit shell command
35+
- Includes debug logging of permissions
36+
37+
Deployment Sequence:
38+
39+
1. Removes any existing health check files (These files, when present, flag
40+
downstream services to start)
41+
2. Initializes Elasticsearch
42+
3. Sets up File Data in Elasticsearch
43+
4. Sets up Tabular Data in Elasticsearch
44+
5. Updates Conductor health status (by creating a `healthcheck` file)
45+
6. Performs Stage verification
46+
7. Validates Arranger setup
47+
48+
The script includes progress tracking with colored output and completes by
49+
displaying the portal's access URL (default: http://localhost:3000). Each step
50+
includes error handling and debug logging capabilities when enabled.
51+
52+
## Services

scripts/services/phaseOne/arrangerCheck.sh

+2-4
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,7 @@
11
#!/bin/sh
22

3-
# Define configurations
4-
RETRY_DELAY=20
5-
ARRANGER_FILE_URL="http://arranger-file:5050/graphql"
6-
ARRANGER_TABULAR_URL="http://arranger-tabular:5051/graphql"
3+
# Define configurations with environment variables and defaults
4+
RETRY_DELAY="${RETRY_DELAY:-20}"
75

86
# Check Arrangers
97
echo -e "Checking Arranger services"

scripts/services/phaseOne/clearElasticsearchData.sh

+3-3
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,9 @@
11
#!/bin/sh
22

33
# Configuration
4-
ES_URL="http://elasticsearch:9200"
5-
INDEX_NAME="tabular-index"
6-
ES_AUTH="elastic:myelasticpassword"
4+
ES_URL="${ES_URL:-http://elasticsearch:9200}"
5+
INDEX_NAME="${INDEX_NAME:-tabular-index}"
6+
ES_AUTH="${ES_USER}:${ES_PASS}"
77

88
echo -e "\033[1;36mElasticsearch:\033[0m Clearing data from index $INDEX_NAME"
99

scripts/services/phaseOne/elasticsearchCheck.sh

+1-2
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,7 @@
11
# Basic health check configuration
22
MAX_RETRIES=10
33
RETRY_COUNT=0
4-
ES_URL="http://elasticsearch:9200"
5-
ES_AUTH="elastic:myelasticpassword" # Matches your ELASTIC_PASSWORD env var
4+
ES_AUTH="${ES_USER}:${ES_PASS}"
65

76
# Check both authentication and cluster health
87
until curl -s -f -u $ES_AUTH "$ES_URL/_cluster/health?wait_for_status=yellow" > /dev/null; do

scripts/services/phaseOne/elasticsearchSetupFileData.sh

+8-13
Original file line numberDiff line numberDiff line change
@@ -1,32 +1,27 @@
11
#!/bin/sh
22

3-
TEMPLATE_FILE="/usr/share/elasticsearch/config/file_data_index_template.json"
4-
ES_URL="http://elasticsearch:9200"
5-
AUTH="-u elastic:myelasticpassword"
6-
INDEX_NAME="file-index"
7-
TEMPLATE_NAME="file_template"
8-
ALIAS_NAME="file_centric"
3+
ES_AUTH="${ES_USER}:${ES_PASS}"
94

105
# Check template file
11-
[ ! -f "$TEMPLATE_FILE" ] && printf "\033[1;31mError:\033[0m Template file not found at $TEMPLATE_FILE\n" && exit 1
6+
[ ! -f "$FILE_ES_TEMPLATE_FILE" ] && printf "\033[1;31mError:\033[0m Template file not found at $FILE_ES_TEMPLATE_FILE\n" && exit 1
127

138
# Set up template if it doesn't exist
149
printf "\033[1;36mConductor:\033[0m Setting up the Elasticsearch file index template\n"
15-
if ! curl -s $AUTH "$ES_URL/_template/$TEMPLATE_NAME" | grep -q "\"index_patterns\""; then
16-
curl -s $AUTH -X PUT "$ES_URL/_template/$TEMPLATE_NAME" \
17-
-H "Content-Type: application/json" -d @"$TEMPLATE_FILE" > /dev/null && \
10+
if ! curl -s -u "$ES_AUTH" "$ES_URL/_template/$FILE_ES_TEMPLATE_NAME" | grep -q "\"index_patterns\""; then
11+
curl -s -u "$ES_AUTH" -X PUT "$ES_URL/_template/$FILE_ES_TEMPLATE_NAME" \
12+
-H "Content-Type: application/json" -d @"$FILE_ES_TEMPLATE_FILE" > /dev/null && \
1813
printf "\033[1;32mSuccess:\033[0m Elasticsearch file index template created successfully\n"
1914
else
2015
printf "\033[1;36mElasticsearch (File):\033[0m File Index template already exists, skipping creation\n"
2116
fi
2217

2318
# Create index with alias if it doesn't exist
2419
printf "\033[1;36mConductor:\033[0m Setting up the Elasticsearch file index and alias\n"
25-
if ! curl -s -f $AUTH -X GET "$ES_URL/$INDEX_NAME" > /dev/null 2>&1; then
20+
if ! curl -s -f -u "$ES_AUTH" -X GET "$ES_URL/$FILE_INDEX_NAME" > /dev/null 2>&1; then
2621
printf "\033[1;36mElasticsearch (File):\033[0m Index does not exist, creating file index\n"
27-
response=$(curl -s -w "\n%{http_code}" $AUTH -X PUT "$ES_URL/$INDEX_NAME" \
22+
response=$(curl -s -w "\n%{http_code}" -u "$ES_AUTH" -X PUT "$ES_URL/$FILE_INDEX_NAME" \
2823
-H "Content-Type: application/json" \
29-
-d "{\"aliases\": {\"$ALIAS_NAME\": {}}}")
24+
-d "{\"aliases\": {\"$FILE_ES_ALIAS_NAME\": {}}}")
3025

3126
http_code=$(echo "$response" | tail -n1)
3227
if [ "$http_code" != "200" ] && [ "$http_code" != "201" ]; then
Original file line numberDiff line numberDiff line change
@@ -1,41 +1,35 @@
11
#!/bin/sh
2-
3-
TEMPLATE_FILE="/usr/share/elasticsearch/config/tabular_data_index_template.json"
4-
ES_URL="http://elasticsearch:9200"
5-
AUTH="-u elastic:myelasticpassword"
6-
INDEX_NAME="tabular-index"
7-
TEMPLATE_NAME="tabular_template"
8-
ALIAS_NAME="tabular_centric"
2+
ES_AUTH="${ES_USER}:${ES_PASS}"
93

104
# Check template file
11-
[ ! -f "$TEMPLATE_FILE" ] && printf "\033[1;31mError:\033[0m Template file not found at $TEMPLATE_FILE\n" && exit 1
5+
[ ! -f "$TABULAR_ES_TEMPLATE_FILE" ] && printf "\033[1;31mError:\033[0m Template file not found at $TABULAR_ES_TEMPLATE_FILE\n" && exit 1
126

137
# Set up template if it doesn't exist
148
printf "\033[1;36mConductor:\033[0m Setting up the Elasticsearch tabular index template\n"
15-
if ! curl -s $AUTH "$ES_URL/_template/$TEMPLATE_NAME" | grep -q "\"index_patterns\""; then
16-
curl -s $AUTH -X PUT "$ES_URL/_template/$TEMPLATE_NAME" \
17-
-H "Content-Type: application/json" -d @"$TEMPLATE_FILE" > /dev/null && \
18-
printf "\033[1;32mSuccess:\033[0m Elasticsearch tabular index template created successfully\n"
9+
if ! curl -s -u "$ES_AUTH" "$ES_URL/_template/$TABULAR_ES_TEMPLATE_NAME" | grep -q "\"index_patterns\""; then
10+
curl -s -u "$ES_AUTH" -X PUT "$ES_URL/_template/$TABULAR_ES_TEMPLATE_NAME" \
11+
-H "Content-Type: application/json" -d @"$TABULAR_ES_TEMPLATE_FILE" > /dev/null && \
12+
printf "\033[1;32mSuccess:\033[0m Elasticsearch tabular index template created successfully\n"
1913
else
20-
printf "\033[1;36mElasticsearch (Tabular):\033[0m Tabular Index template already exists, skipping creation\n"
14+
printf "\033[1;36mElasticsearch (Tabular):\033[0m Tabular Index template already exists, skipping creation\n"
2115
fi
2216

2317
# Create index with alias if it doesn't exist
2418
printf "\033[1;36mConductor:\033[0m Setting up the Elasticsearch tabular index and alias\n"
25-
if ! curl -s -f $AUTH -X GET "$ES_URL/$INDEX_NAME" > /dev/null 2>&1; then
26-
printf "\033[1;36mElasticsearch (Tabular):\033[0m Index does not exist, creating tabular index\n"
27-
response=$(curl -s -w "\n%{http_code}" $AUTH -X PUT "$ES_URL/$INDEX_NAME" \
28-
-H "Content-Type: application/json" \
29-
-d "{\"aliases\": {\"$ALIAS_NAME\": {}}}")
30-
31-
http_code=$(echo "$response" | tail -n1)
32-
if [ "$http_code" != "200" ] && [ "$http_code" != "201" ]; then
33-
printf "\033[1;31mError:\033[0m Failed to create tabular index. HTTP Code: $http_code\n"
34-
exit 1
35-
fi
36-
printf "\033[1;32mSuccess:\033[0m Tabular index and alias created\n"
19+
if ! curl -s -f -u "$ES_AUTH" -X GET "$ES_URL/$TABULAR_INDEX_NAME" > /dev/null 2>&1; then
20+
printf "\033[1;36mElasticsearch (Tabular):\033[0m Index does not exist, creating tabular index\n"
21+
response=$(curl -s -w "\n%{http_code}" -u "$ES_AUTH" -X PUT "$ES_URL/$TABULAR_INDEX_NAME" \
22+
-H "Content-Type: application/json" \
23+
-d "{\"aliases\": {\"$TABULAR_ES_ALIAS_NAME\": {}}}")
24+
25+
http_code=$(echo "$response" | tail -n1)
26+
if [ "$http_code" != "200" ] && [ "$http_code" != "201" ]; then
27+
printf "\033[1;31mError:\033[0m Failed to create tabular index. HTTP Code: $http_code\n"
28+
exit 1
29+
fi
30+
printf "\033[1;32mSuccess:\033[0m Tabular index and alias created\n"
3731
else
38-
printf "\033[1;36mElasticsearch (Tabular):\033[0m Tabular index already exists\n"
32+
printf "\033[1;36mElasticsearch (Tabular):\033[0m Tabular index already exists\n"
3933
fi
4034

4135
printf "\033[1;32mSuccess:\033[0m Elasticsearch tabular setup complete\n"

scripts/services/phaseOne/stageCheck.sh

-1
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,6 @@ RETRY_COUNT=0
55
MAX_RETRIES=10
66
RETRY_DELAY=5
77
TIMEOUT=10
8-
STAGE_URL="http://stage:3000"
98

109
printf "\033[1;36mConductor:\033[0m Checking if Stage is reachable\n"
1110

scripts/services/phaseOne/submitMockData.sh

+3-4
Original file line numberDiff line numberDiff line change
@@ -3,10 +3,9 @@
33
# Configuration
44
MAX_RETRIES=10
55
RETRY_COUNT=0
6-
CSV_PROCESSOR_PATH="/csv-processor"
7-
ES_URL="http://elasticsearch:9200"
8-
DATA_FILE="/data/tabularData.csv"
9-
INDEX_NAME="tabular-index"
6+
CSV_PROCESSOR_PATH="${CSV_PROCESSOR_PATH:-/csv-processor}"
7+
DATA_FILE="${TABULAR_DATA_FILE}"
8+
INDEX_NAME="${TABULAR_INDEX_NAME}"
109

1110
# Check if data file exists
1211
if [ ! -f "$DATA_FILE" ]; then

0 commit comments

Comments
 (0)