 #!/usr/bin/env bash
+set -e
 export PGPASSWORD=$POSTGRES_PASSWORD
-export VOLUME_DIR=/mnt/data
+# Upload a backup file and its state.txt pointer to the configured cloud provider
+cloudStorageOps() {
+  local LOCAL_STATE_FILE=state.txt
+  local filepath=$1
+  local cloudpath=$2
 
-date=$(date '+%y%m%d_%H%M')
-local_backupFile=$VOLUME_DIR/osmseed-db-${date}.sql.gz
-cloud_backupFile=database/osmseed-db-${date}.sql.gz
-stateFile=$VOLUME_DIR/state.txt
-restoreFile=$VOLUME_DIR/backup.sql.gz
-
-echo "Start...$DB_ACTION action"
-# Backing up DataBase
-if [ "$DB_ACTION" == "backup" ]; then
-  # Backup database and make maximum compression at the slowest speed
-  pg_dump -h $POSTGRES_HOST -U $POSTGRES_USER $POSTGRES_DB | gzip -9 >$local_backupFile
-
-  # AWS
-  if [ "$CLOUDPROVIDER" == "aws" ]; then
-    echo "$AWS_S3_BUCKET/$cloud_backupFile" > $stateFile
-    # Upload db backup file
-    aws s3 cp $local_backupFile $AWS_S3_BUCKET/$cloud_backupFile
-    # Upload state.txt file
-    aws s3 cp $stateFile $AWS_S3_BUCKET/database/state.txt
-  fi
-
-  # GCP
-  if [ "$CLOUDPROVIDER" == "gcp" ]; then
-    echo "$GCP_STORAGE_BUCKET/$cloud_backupFile" > $stateFile
-    # Upload db backup file
-    gsutil cp $local_backupFile $GCP_STORAGE_BUCKET/$cloud_backupFile
-    # Upload state.txt file
-    gsutil cp $stateFile $GCP_STORAGE_BUCKET/database/state.txt
-  fi
-
-  # Azure
-  if [ "$CLOUDPROVIDER" == "azure" ]; then
-    # Save the path file
-    echo "blob://$AZURE_STORAGE_ACCOUNT/$AZURE_CONTAINER_NAME/$cloud_backupFile" > $stateFile
-    # Upload db backup file
+  case "${CLOUDPROVIDER}" in
+  aws)
+    aws s3 cp ${filepath} s3://${AWS_S3_BUCKET}/${cloudpath}
+    echo "s3://${AWS_S3_BUCKET}/${cloudpath}" >${LOCAL_STATE_FILE}
+    aws s3 cp ${LOCAL_STATE_FILE} s3://${AWS_S3_BUCKET}/${BACKUP_CLOUD_FOLDER}/state.txt
+    ;;
+  gcp)
+    gsutil cp ${filepath} gs://${GCP_STORAGE_BUCKET}/${cloudpath}
+    echo "gs://${GCP_STORAGE_BUCKET}/${cloudpath}" >${LOCAL_STATE_FILE}
+    gsutil cp ${LOCAL_STATE_FILE} gs://${GCP_STORAGE_BUCKET}/${BACKUP_CLOUD_FOLDER}/state.txt
+    ;;
+  azure)
     az storage blob upload \
-      --container-name $AZURE_CONTAINER_NAME \
-      --file $local_backupFile \
-      --name $cloud_backupFile \
+      --container-name ${AZURE_CONTAINER_NAME} \
+      --file ${filepath} \
+      --name ${cloudpath} \
       --output table
-    # Upload state.txt file
+    echo "blob://${AZURE_STORAGE_ACCOUNT}/${AZURE_CONTAINER_NAME}/${cloudpath}" >${LOCAL_STATE_FILE}
     az storage blob upload \
-      --container-name $AZURE_CONTAINER_NAME \
-      --file $stateFile \
-      --name database/state.txt \
+      --container-name ${AZURE_CONTAINER_NAME} \
+      --file ${LOCAL_STATE_FILE} \
+      --name ${BACKUP_CLOUD_FOLDER}/state.txt \
       --output table
+    ;;
+  esac
+}
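
A sketch of the intended call pattern (file names illustrative, not from this commit): with CLOUDPROVIDER=aws and BACKUP_CLOUD_FOLDER=database, `cloudStorageOps osmseed-db.sql.gz database/osmseed-db.sql.gz` copies the dump to s3://$AWS_S3_BUCKET/database/osmseed-db.sql.gz, writes that URL into a local state.txt, and uploads the pointer as database/state.txt so the newest backup is always discoverable.
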
+
+backupDB() {
+  local LOCAL_BACKUP_FILE=${BACKUP_CLOUD_FILE}.sql.gz
+  local CLOUD_BACKUP_FILE="${BACKUP_CLOUD_FOLDER}/${BACKUP_CLOUD_FILE}.sql.gz"
+  if [ "$SET_DATE_AT_NAME" == "true" ]; then
+    local CURRENT_DATE=$(date '+%Y%m%d-%H%M')
+    LOCAL_BACKUP_FILE="${BACKUP_CLOUD_FILE}-${CURRENT_DATE}.sql.gz"
+    CLOUD_BACKUP_FILE="${BACKUP_CLOUD_FOLDER}/${BACKUP_CLOUD_FILE}-${CURRENT_DATE}.sql.gz"
   fi
-fi
 
-# Restoring DataBase
-if [ "$DB_ACTION" == "restore" ]; then
-  # AWS
-  flag=true
+  # Back up the database with maximum compression
+  echo "Backing up DB ${POSTGRES_DB} into ${LOCAL_BACKUP_FILE}"
+  pg_dump -h ${POSTGRES_HOST} -U ${POSTGRES_USER} ${POSTGRES_DB} | gzip -9 >${LOCAL_BACKUP_FILE}
+
+  # Handle cloud storage based on the provider
+  cloudStorageOps "${LOCAL_BACKUP_FILE}" "${CLOUD_BACKUP_FILE}"
+}
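
For example, assuming BACKUP_CLOUD_FILE=osmseed-db, BACKUP_CLOUD_FOLDER=database and SET_DATE_AT_NAME=true (values illustrative), a run at 2024-01-15 03:00 would produce osmseed-db-20240115-0300.sql.gz locally and upload it as database/osmseed-db-20240115-0300.sql.gz.
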
+
+restoreDB() {
+  local CURRENT_DATE=$(date '+%Y%m%d-%H%M')
+  local RESTORE_FILE="backup.sql.gz"
+  local LOG_RESULT_FILE="restore_results-${CURRENT_DATE}.log"
+  local flag=true
+
   while [ "$flag" = true ]; do
-    pg_isready -h $POSTGRES_HOST -p 5432 >/dev/null 2>&2 || continue
-    flag=false
-    wget -O $restoreFile $RESTORE_URL_FILE
-    gunzip <$restoreFile | psql -h $POSTGRES_HOST -U $POSTGRES_USER -d $POSTGRES_DB
-    echo " Import data to $POSTGRES_DB has finished ..."
+    # Wait until the database is ready to accept connections
+    pg_isready -h ${POSTGRES_HOST} -p 5432 >/dev/null 2>&1 || continue
+    flag=false
+    wget -O ${RESTORE_FILE} ${RESTORE_URL_FILE}
+    echo "Restoring ${RESTORE_URL_FILE} in ${POSTGRES_DB}"
+    gunzip -c <${RESTORE_FILE} | psql -h ${POSTGRES_HOST} -U ${POSTGRES_USER} -d ${POSTGRES_DB} | tee ${LOG_RESULT_FILE}
+    # The restore log is uploaded to S3 regardless of CLOUDPROVIDER
+    aws s3 cp ${LOG_RESULT_FILE} s3://${AWS_S3_BUCKET}/${LOG_RESULT_FILE}
+    echo "Import data to ${POSTGRES_DB} has finished ..."
+  done
+}
+
+delete_old_s3_files() {
+  # Use RETENTION_DAYS from the environment, or default to 30 days
+  if [ -z "${RETENTION_DAYS}" ]; then
+    DAYS_AGO=30
+  else
+    DAYS_AGO="${RETENTION_DAYS}"
+  fi
+
+  echo "Files older than $DAYS_AGO days will be deleted."
+  echo "Processing s3://${AWS_S3_BUCKET}/${BACKUP_CLOUD_FOLDER}/"
+  TARGET_DATE=$(date -d "${DAYS_AGO} days ago" +%Y-%m-%d)
+  aws s3 ls "s3://${AWS_S3_BUCKET}/${BACKUP_CLOUD_FOLDER}/" --recursive | while read -r line; do
+    FILE_DATE=$(echo "$line" | awk '{print $1}')
+    FILE_PATH=$(echo "$line" | awk '{print $4}')
+    # ISO dates (YYYY-MM-DD) compare correctly as plain strings
+    if [[ "$FILE_DATE" < "$TARGET_DATE" && -n "$FILE_PATH" ]]; then
+      echo "Deleting ${FILE_PATH} which was modified on ${FILE_DATE}"
+      aws s3 rm "s3://${AWS_S3_BUCKET}/${FILE_PATH}"
+    fi
   done
+}
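
The cleanup makes two assumptions worth noting: the first and fourth columns of `aws s3 ls --recursive` output are the last-modified date (YYYY-MM-DD) and the object key, which is why a plain string comparison against TARGET_DATE orders dates correctly; and `date -d "${DAYS_AGO} days ago"` is GNU date syntax, so the image running this script needs GNU coreutils rather than BusyBox date.
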
+
+# Main logic
+case "${DB_ACTION}" in
+backup)
+  backupDB
+  ;;
+restore)
+  restoreDB
+  ;;
+*)
+  echo "Unknown action: ${DB_ACTION}"
+  exit 1
+  ;;
+esac
+
+# Check the CLEANUP_BACKUPS var
+if [ "$CLEANUP_BACKUPS" == "true" ]; then
+  delete_old_s3_files
+else
+  echo "CLEANUP_BACKUPS is not set to true. Skipping deletion."
 fi
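
As a usage sketch (environment values and the script name are illustrative, not part of the commit), a nightly AWS backup could be driven like this:

  export POSTGRES_HOST=db POSTGRES_USER=postgres POSTGRES_PASSWORD=secret POSTGRES_DB=osm
  export CLOUDPROVIDER=aws AWS_S3_BUCKET=my-osmseed-backups
  export BACKUP_CLOUD_FOLDER=database BACKUP_CLOUD_FILE=osmseed-db SET_DATE_AT_NAME=true
  export DB_ACTION=backup CLEANUP_BACKUPS=true RETENTION_DAYS=30
  ./backup-restore.sh

A restore run would instead set DB_ACTION=restore and point RESTORE_URL_FILE at a reachable .sql.gz dump.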