diff --git a/.dockerignore b/.dockerignore
index e0503085d..0c1ba2f12 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -12,6 +12,7 @@
 !requirements.txt
 !requirements_test.txt
 !requirements-3.9.txt
+!docker/azure-agent-start.sh
 !docker/opencv-python-headless-setup.py
 !docker/jetson-nano/opencv-tuple.patch
 !rootfs/
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 22ec850fd..39d1a43be 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -54,7 +54,7 @@ jobs:
        uses: ./.github/templates/run_in_venv
        with:
          command: |
-            pip3 install --extra-index-url https://download.pytorch.org/whl/cpu -r requirements.txt -r requirements_test.txt -r requirements_ci.txt && \
+            pip3 install --extra-index-url https://download.pytorch.org/whl/cpu --extra-index-url https://pkgs.dev.azure.com/viseron/Viseron%20Pipelines/_packaging/viseron-wheels/pypi/simple -r requirements.txt -r requirements_test.txt -r requirements_ci.txt && \
            pre-commit install

  prepare-pre-commit:
@@ -488,6 +488,21 @@ jobs:
          sudo add-apt-repository "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main"
          sudo apt-get update
          sudo apt-get install -y --no-install-recommends libedgetpu1-std python3-gi python3-gst-1.0
+      - name: Install libhailort for hailo docs
+        env:
+          AZURE_DEVOPS_EXT_PAT: ${{ secrets.AZURE_DEVOPS_EXT_PAT }}
+        run: |
+          set -e
+          HAILO_VERSION=$(grep '^HAILO_VERSION=' azure-pipelines/.env | cut -d'"' -f2)
+          echo "Logging in to Azure DevOps"
+          echo "$AZURE_DEVOPS_EXT_PAT" | az devops login --organization https://dev.azure.com/viseron || true
+          echo "Downloading libhailort-amd64 version $HAILO_VERSION"
+          az artifacts universal download --organization https://dev.azure.com/viseron/ --project="Viseron Pipelines" --scope project --feed viseron-binaries --name libhailort-amd64 --version "$HAILO_VERSION" --path libhailort_pkg
+          echo "Installing libhailort to /usr/local/lib"
+          sudo cp libhailort_pkg/libhailort.so.* /usr/local/lib/
+          sudo ldconfig
+          ls -l /usr/local/lib/libhailort.so.*
+          az devops logout
      - name: Run script to check generated docs
        uses: ./.github/templates/run_in_venv
        with:
diff --git a/.gitignore b/.gitignore
index 9d5ebf2d3..4822a345c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -37,9 +37,12 @@
 !/docker/jetson-nano/tflite_runtime-2.5.0.post1-cp38-cp38-linux_aarch64.whl
 !/docker/rpi3
 !/docker/rpi3/Dockerfile*
+!/docker/rpi5
+!/docker/rpi5/Dockerfile*
 !/docker/ffprobe_wrapper
 !/docker/ffmpeg_wrapper
 !/docker/yolov4*.cfg
+!/docker/azure-agent-start.sh
 !/docker/opencv-python-headless-setup.py
 !/docker/jetson-nano/opencv-tuple.patch
 !/rootfs/
diff --git a/.mypy.ini b/.mypy.ini
index 18052b636..aa6b6be73 100644
--- a/.mypy.ini
+++ b/.mypy.ini
@@ -46,9 +46,15 @@ ignore_missing_imports = true
 [mypy-gi.repository.*]
 ignore_missing_imports = true

+[mypy-hailo_platform.*]
+ignore_missing_imports = true
+
 [mypy-imutils.*]
 ignore_missing_imports = true

+[mypy-jinja2.*]
+ignore_missing_imports = true
+
 [mypy-path.*]
 ignore_missing_imports = true

diff --git a/azure-pipelines/.env b/azure-pipelines/.env
index 7fe7ecfd9..1663a9252 100644
--- a/azure-pipelines/.env
+++ b/azure-pipelines/.env
@@ -1,12 +1,13 @@
 UBUNTU_VERSION="jammy"
 UBUNTU_VERSION_NUMBER="22.04"
-BASE_VERSION="1.8.0"
+BASE_VERSION="1.9.0"
 OPENCV_VERSION="4.10.0"
 OPENVINO_VERSION="2024.0.0"
 FFMPEG_VERSION="5.1.2"
 DLIB_VERSION="19.24.4"
+HAILO_VERSION="4.22.0"
 SKLEARN_VERSION="1.2.2"
-WHEELS_VERSION="1.7"
+WHEELS_VERSION="1.8"
 S6_OVERLAY_VERSION="2.1.0.2"
 CMAKE_VERSION=3.20.0
 MAKEFLAGS="-j 5"
@@ -23,7 +24,7 @@ JETPACK_VERSION_MAJOR=32
 JETPACK_VERSION_MINOR=6
 JETPACK_VERSION_PATCH=1
 JETSON_NANO_FFMPEG_VERSION="4.2.2"
-JETSON_NANO_FFMPEG_APT_VERSION="7:4.2.2-nvidia"
+JETSON_NANO_FFMPEG_APT_VERSION="9:3.4.11-2ubuntu0.1l4t"
 CUDA_VERSION=12.3.2
 DARKNET_COMMIT=27b37bf
 GPAC_VERSION=2.4.0
diff --git a/azure-pipelines/azure-pipelines-tools.yml b/azure-pipelines/azure-pipelines-tools.yml
index 0f7270528..e54e086f4 100644
--- a/azure-pipelines/azure-pipelines-tools.yml
+++ b/azure-pipelines/azure-pipelines-tools.yml
@@ -30,3 +30,86 @@ jobs:
      architectures:
        - amd64-cuda
        - jetson-nano
+  - template: templates/build.yaml
+    parameters:
+      image: hailo
+      noCache: ${{ parameters.noCache }}
+      architectures:
+        - amd64
+        - aarch64
+
+  # Publish hailo wheels and libhailort packages
+  - job: publish_hailo_artifacts
+    displayName: Publish Hailo artifacts
+    dependsOn:
+      - build_hailo_amd64
+      - build_hailo_aarch64
+    condition: succeeded()
+    strategy:
+      maxParallel: 1
+      matrix:
+        amd64:
+          ARCH: amd64
+        aarch64:
+          ARCH: aarch64
+    pool:
+      vmImage: ubuntu-latest
+    steps:
+      - task: Docker@2
+        displayName: Login to Docker Hub
+        inputs:
+          command: login
+          containerRegistry: "Docker Hub"
+      - script: |
+          set -e
+          ARCH=$(ARCH)
+          HAILO_VERSION=$(grep '^HAILO_VERSION=' azure-pipelines/.env | cut -d'"' -f2)
+          if [ -z "$HAILO_VERSION" ]; then
+            echo "Failed to determine HAILO_VERSION" >&2
+            exit 1
+          fi
+          IMAGE_NAME="roflcoopter/${ARCH}-hailo:${HAILO_VERSION}"
+          echo "Pulling $IMAGE_NAME"
+          docker pull $IMAGE_NAME
+          CID=$(docker create "$IMAGE_NAME" bash)
+
+          mkdir -p hailo-wheels/${ARCH}
+          docker cp $CID:/wheels/. hailo-wheels/${ARCH}/
+
+          # Prepare libhailort universal package directory per architecture
+          mkdir -p libhailort-dist-${ARCH}
+          docker cp $CID:/usr/local/lib/libhailort.so.${HAILO_VERSION} libhailort-dist-${ARCH}/
+
+          docker rm $CID
+          echo "Extracted files:"; ls -1 hailo-wheels/${ARCH}
+          echo "libhailort files:"; ls -1 libhailort-dist-${ARCH}
+          echo "##vso[task.setvariable variable=HAILO_VERSION]$HAILO_VERSION"
+        displayName: Extract wheels
+      - task: TwineAuthenticate@1
+        displayName: Authenticate to Azure Artifacts (Python feed)
+        inputs:
+          artifactFeed: "Viseron Pipelines/viseron-wheels"
+      - script: |
+          set -e
+          ARCH=$(ARCH)
+          python3 -m pip install --upgrade pip
+          python3 -m pip install --no-cache-dir twine==6.1.0
+          echo "Uploading wheels to Azure Artifacts feed 'viseron-wheels'"
+          python3 -m twine upload --skip-existing --config-file $(PYPIRC_PATH) -r viseron-wheels hailo-wheels/${ARCH}/*.whl
+        displayName: Upload wheels
+      - task: UniversalPackages@0
+        displayName: Publish libhailort universal package
+        inputs:
+          command: publish
+          publishDirectory: libhailort-dist-$(ARCH)
+          vstsFeedPublish: "Viseron Pipelines/viseron-binaries"
+          vstsFeedPackagePublish: libhailort-$(ARCH)
+          versionOption: custom
+          versionPublish: $(HAILO_VERSION)
+          packagePublishDescription: "libhailort shared library ($(ARCH)) for Hailo version $(HAILO_VERSION)"
+      - task: Docker@2
+        displayName: Logoff Docker Hub
+        inputs:
+          command: logout
+          containerRegistry: "Docker Hub"
+        condition: always()
diff --git a/azure-pipelines/docker-compose-build.yaml b/azure-pipelines/docker-compose-build.yaml
index c3c1da518..987a18b0b 100644
--- a/azure-pipelines/docker-compose-build.yaml
+++ b/azure-pipelines/docker-compose-build.yaml
@@ -78,6 +78,18 @@ services:
        - roflcoopter/amd64-dlib:$DLIB_VERSION
    image: roflcoopter/amd64-dlib:$DLIB_VERSION

+  amd64-hailo:
+    build:
+      context: ..
+      dockerfile: ./docker/Dockerfile.hailo
+      args:
+        BUILD_FROM: ubuntu:$UBUNTU_VERSION
+        MAKEFLAGS: "$MAKEFLAGS"
+        HAILO_VERSION: "$HAILO_VERSION"
+      cache_from:
+        - roflcoopter/amd64-hailo:$HAILO_VERSION
+    image: roflcoopter/amd64-hailo:$HAILO_VERSION
+
  amd64-wheels:
    build:
      context: ..
@@ -86,6 +98,7 @@
        ARCH: amd64
        BUILD_FROM: ubuntu:$UBUNTU_VERSION
        DLIB_VERSION: "$DLIB_VERSION"
+        EXTRA_PIP_ARGS: --extra-index-url https://download.pytorch.org/whl/cpu
        OPENCV_VERSION: "$OPENCV_VERSION"
      cache_from:
        - roflcoopter/amd64-wheels:$WHEELS_VERSION
@@ -102,6 +115,7 @@
        IGC_VERSION: "$IGC_VERSION"
        LEVEL_ZERO_GPU: "$LEVEL_ZERO_GPU"
        OPENCL_SHA256_FILENAME: "$OPENCL_SHA256_FILENAME"
+        HAILO_VERSION: "$HAILO_VERSION"
      context: ..
      dockerfile: ./docker/amd64/Dockerfile.base
      cache_from:
        - roflcoopter/amd64-base:$BASE_VERSION
    image: roflcoopter/amd64-base:$BASE_VERSION
@@ -245,6 +259,7 @@
        IGC_VERSION: "$IGC_VERSION"
        LEVEL_ZERO_GPU: "$LEVEL_ZERO_GPU"
        OPENCL_SHA256_FILENAME: "$OPENCL_SHA256_FILENAME"
+        HAILO_VERSION: "$HAILO_VERSION"
      cache_from:
        - roflcoopter/amd64-cuda-base:$BASE_VERSION
    image: roflcoopter/amd64-cuda-base:$BASE_VERSION
@@ -416,6 +431,17 @@
        - roflcoopter/aarch64-dlib:$DLIB_VERSION
    image: roflcoopter/aarch64-dlib:$DLIB_VERSION

+  aarch64-hailo:
+    build:
+      context: ..
+      dockerfile: ./docker/Dockerfile.hailo
+      args:
+        BUILD_FROM: ubuntu:$UBUNTU_VERSION
+        MAKEFLAGS: "$MAKEFLAGS"
+        HAILO_VERSION: "$HAILO_VERSION"
+      cache_from:
+        - roflcoopter/aarch64-hailo:$HAILO_VERSION
+    image: roflcoopter/aarch64-hailo:$HAILO_VERSION
  aarch64-wheels:
    build:
      context: ..
@@ -436,6 +462,7 @@
      args:
        UBUNTU_VERSION: "$UBUNTU_VERSION"
        FFMPEG_VERSION: "$FFMPEG_VERSION"
+        HAILO_VERSION: "$HAILO_VERSION"
      cache_from:
        - roflcoopter/aarch64-base:$BASE_VERSION
    image: roflcoopter/aarch64-base:$BASE_VERSION
@@ -615,6 +642,19 @@
        - roflcoopter/jetson-nano-viseron:dev
    image: roflcoopter/jetson-nano-viseron:dev

+  ################ RaspberryPi 5 #####################################
+  rpi5-azure-agent:
+    build:
+      context: ..
+      dockerfile: ./docker/rpi5/Dockerfile.azure
+      args:
+        TARGETARCH: "linux-arm64"
+        UBUNTU_VERSION: "$UBUNTU_VERSION"
+      cache_from:
+        - roflcoopter/rpi5-azure-agent:latest
+    image: roflcoopter/rpi5-azure-agent:latest
+
+  ################### Other ##########################################
  models:
    build:
      context: ..
diff --git a/azure-pipelines/templates/build.yaml b/azure-pipelines/templates/build.yaml
index 6245ba9fe..f0cbd4639 100644
--- a/azure-pipelines/templates/build.yaml
+++ b/azure-pipelines/templates/build.yaml
@@ -30,160 +30,159 @@ parameters:
    default: false

jobs:
-  - job: "build_${{ parameters.image }}"
-    variables:
-      ${{ if eq(parameters.noCache, true) }}:
-        noCacheOption: "--no-cache"
-      ${{ if ne(parameters.noCache, true) }}:
-        noCacheOption: ""
-    timeoutInMinutes: ${{ parameters.timeoutJob }}
-    strategy:
-      matrix:
-        ${{ each architecture in parameters.architectures }}:
-          ${{ architecture }}:
-            arch: ${{ architecture }}
-    pool:
-      vmImage: "ubuntu-latest"
-    steps:
-      - template: release_version.yaml
-        parameters:
-          release: ${{ parameters.release }}
-      - task: Docker@2
-        displayName: Login to Docker Hub
-        inputs:
-          command: login
-          containerRegistry: "Docker Hub"
-
-      - script: |
-          df -h
-        displayName: List free space before cleaning
-      - script: |
-          docker rmi -f $(docker images -aq) || true
-        displayName: Clean up Docker images
-      - script: |
-          docker system prune --force --all --volumes
-        displayName: Docker prune
-      - script: |
-          sudo rm -rf /usr/local/lib/android
-          sudo rm -rf /usr/local/.ghcup
-          sudo rm -rf /opt/hostedtoolcache/CodeQL
-        displayName: Remove unused files
-      - script: |
-          df -h
-        displayName: List free space after cleaning
-
-      - script: docker run --rm --privileged tonistiigi/binfmt --install all
-        displayName: Register QEMU for cross-builds
-        condition: and(succeeded(), eq('${{ parameters.crossBuild }}', true))
-
-      # - script: |
-      #     cd $(Agent.BuildDirectory)/s/docker
-      #     docker compose --file ../azure-pipelines/docker-compose-build.yaml --env-file ../azure-pipelines/.env pull $(arch)-${{ parameters.image }}
-      #   displayName: Pull image for Docker layer caching
-      #   continueOnError: true
-      #   condition: and(succeeded(), eq('${{ parameters.imageNameOnly }}', false))
-      # - script: |
-      #     cd $(Agent.BuildDirectory)/s/docker
-      #     docker compose --file ../azure-pipelines/docker-compose-build.yaml --env-file ../azure-pipelines/.env pull ${{ parameters.image }}
-      #   displayName: Pull image for Docker layer caching
-      #   continueOnError: true
-      #   condition: and(succeeded(), eq('${{ parameters.imageNameOnly }}', true))
-
-    - script: >
-        cd $(Agent.BuildDirectory)/s/docker &&
-        docker compose --file ../azure-pipelines/docker-compose-build.yaml
-        --env-file ../azure-pipelines/.env
-        build $(noCacheOption)
-        --build-arg BUILDKIT_INLINE_CACHE=1
-        --build-arg VISERON_VERSION=$(viseronVersion)
-        --build-arg VISERON_GIT_COMMIT=$(Build.SourceVersion)
-        $(arch)-${{ parameters.image }}
-      displayName: Build $(arch)-${{ parameters.image }}
-      condition: and(succeeded(), eq('${{ parameters.imageNameOnly }}', false))
-      env:
-        DOCKER_BUILDKIT: 1
-        COMPOSE_DOCKER_CLI_BUILD: 1
-        BUILDKIT_PROGRESS: plain
-    - script: >
-        cd $(Agent.BuildDirectory)/s/docker &&
-        docker compose --file ../azure-pipelines/docker-compose-build.yaml
-        --env-file ../azure-pipelines/.env
-        build $(noCacheOption)
-        --build-arg BUILDKIT_INLINE_CACHE=1
-        --build-arg VISERON_VERSION=$(viseronVersion)
-        --build-arg VISERON_GIT_COMMIT=$(Build.SourceVersion)
-        ${{ parameters.image }}
-      displayName: Build ${{ parameters.image }}
-      condition: and(succeeded(), eq('${{ parameters.imageNameOnly }}', true))
-      env:
-        DOCKER_BUILDKIT: 1
-        COMPOSE_DOCKER_CLI_BUILD: 1
-        BUILDKIT_PROGRESS: plain
-
-    # Runs only for non-release triggers
-    - script: |
-        cd $(Agent.BuildDirectory)/s/docker
-        docker compose --file ../azure-pipelines/docker-compose-build.yaml --env-file ../azure-pipelines/.env push $(arch)-${{ parameters.image }}
-
-        # Extract the tag and store it in an environment variable
-        IMAGE_TAG=$(docker compose --file ../azure-pipelines/docker-compose-build.yaml --env-file ../azure-pipelines/.env config | grep "$(arch)-${{ parameters.image }}" -A2 | grep "image:" | sed 's/.*://g' | tr -d ' ' | cut -d':' -f2)
-        echo "Extracted IMAGE_TAG: $IMAGE_TAG"
-
-        # Push tag with commit hash
-        docker image tag roflcoopter/$(arch)-${{ parameters.image }}:${IMAGE_TAG} roflcoopter/$(arch)-${{ parameters.image }}:$(Build.SourceVersion)
-        docker image push roflcoopter/$(arch)-${{ parameters.image }}:$(Build.SourceVersion)
-
-        # Push tag with timestamp
-        now=$(date -u +"%Y%m%d%H%M%S")
-        docker image tag roflcoopter/$(arch)-${{ parameters.image }}:${IMAGE_TAG} roflcoopter/$(arch)-${{ parameters.image }}:$now
-        docker image push roflcoopter/$(arch)-${{ parameters.image }}:$now
-      displayName: Push $(arch)-${{ parameters.image }} (non-release)
-      condition: and(succeeded(), eq('${{ parameters.imageNameOnly }}', false), eq('${{ parameters.release }}', false))
-    - script: |
-        cd $(Agent.BuildDirectory)/s/docker
-        docker compose --file ../azure-pipelines/docker-compose-build.yaml --env-file ../azure-pipelines/.env push ${{ parameters.image }}
-
-        # Extract the tag and store it in an environment variable
-        IMAGE_TAG=$(docker compose --file ../azure-pipelines/docker-compose-build.yaml --env-file ../azure-pipelines/.env config | grep "$(arch)-${{ parameters.image }}" -A2 | grep "image:" | sed 's/.*://g' | tr -d ' ' | cut -d':' -f2)
-        echo "Extracted IMAGE_TAG: $IMAGE_TAG"
-
-        # Push tag with commit hash
-        docker image tag roflcoopter/${{ parameters.image }}:${IMAGE_TAG} roflcoopter/${{ parameters.image }}:$(Build.SourceVersion)
-        docker image push roflcoopter/${{ parameters.image }}:$(Build.SourceVersion)
-
-        # Push tag with timestamp
-        now=$(date -u +"%Y%m%d%H%M%S")
-        docker image tag roflcoopter/${{ parameters.image }}:${IMAGE_TAG} roflcoopter/${{ parameters.image }}:$now
-        docker image push roflcoopter/${{ parameters.image }}:$now
-      displayName: Push ${{ parameters.image }} (non-release)
-      condition: and(succeeded(), eq('${{ parameters.imageNameOnly }}', true), eq('${{ parameters.release }}', false))
-
-    # Runs only for release triggers
-    - script: |
-        echo tagging roflcoopter/$(arch)-${{ parameters.image }}:$(viseronVersion)
-        docker image tag roflcoopter/$(arch)-${{ parameters.image }}:dev roflcoopter/$(arch)-${{ parameters.image }}:$(viseronVersion)
-        docker image push roflcoopter/$(arch)-${{ parameters.image }}:$(viseronVersion)
-
-        # Push tag with commit hash
-        docker image tag roflcoopter/$(arch)-${{ parameters.image }}:dev roflcoopter/$(arch)-${{ parameters.image }}:$(viseronVersion)-$(Build.SourceVersion)
-        docker image push roflcoopter/$(arch)-${{ parameters.image }}:$(viseronVersion)-$(Build.SourceVersion)
-
-        # Push tag with timestamp
-        now=$(date -u +"%Y%m%d%H%M%S")
-        docker image tag roflcoopter/$(arch)-${{ parameters.image }}:dev roflcoopter/$(arch)-${{ parameters.image }}:$(viseronVersion)-$now
-        docker image push roflcoopter/$(arch)-${{ parameters.image }}:$(viseronVersion)-$now
-
-        if [ $(latestRelease) = true ] ; then
-          echo tagging roflcoopter/$(arch)-${{ parameters.image }}:latest
-          docker image tag roflcoopter/$(arch)-${{ parameters.image }}:dev roflcoopter/$(arch)-${{ parameters.image }}:latest
-          docker image push roflcoopter/$(arch)-${{ parameters.image }}:latest
-        fi
-      displayName: Push $(arch)-${{ parameters.image }} (release)
-      condition: and(succeeded(), eq('${{ parameters.release }}', true))
-
-    - task: Docker@2
-      displayName: Logoff Docker Hub
-      inputs:
-        command: logout
-        containerRegistry: "Docker Hub"
-      condition: always()
+  - ${{ each architecture in parameters.architectures }}:
+      - job: build_${{ replace(parameters.image, '-', '_') }}_${{ replace(architecture, '-', '_') }}
+        timeoutInMinutes: ${{ parameters.timeoutJob }}
+        variables:
+          ${{ if eq(parameters.noCache, true) }}:
+            noCacheOption: "--no-cache"
+          ${{ if ne(parameters.noCache, true) }}:
+            noCacheOption: ""
+        pool:
+          ${{ if or(eq(architecture, 'aarch64'), eq(architecture, 'jetson-nano'), eq(architecture, 'rpi5')) }}:
+            name: rpi5
+          ${{ else }}:
+            vmImage: ubuntu-latest
+        steps:
+          - template: release_version.yaml
+            parameters:
+              release: ${{ parameters.release }}
+          - task: Docker@2
+            displayName: Login to Docker Hub
+            inputs:
+              command: login
+              containerRegistry: "Docker Hub"
+
+          - script: |
+              df -h
+            displayName: List free space before cleaning
+          - script: |
+              docker rmi -f $(docker images -aq) || true
+            displayName: Clean up Docker images
+          - script: |
+              docker system prune --force --all --volumes
+            displayName: Docker prune
+          - script: |
+              sudo rm -rf /usr/local/lib/android || true
+              sudo rm -rf /usr/local/.ghcup || true
+              sudo rm -rf /opt/hostedtoolcache/CodeQL || true
+            displayName: Remove unused files
+          - script: |
+              df -h
+            displayName: List free space after cleaning
+
+          - script: docker run --rm --privileged tonistiigi/binfmt --install all
+            displayName: Register QEMU for cross-builds
+            condition: and(succeeded(), eq('${{ parameters.crossBuild }}', true))
+
+          # - script: |
+          #     cd $(Agent.BuildDirectory)/s/docker
+          #     docker compose --file ../azure-pipelines/docker-compose-build.yaml --env-file ../azure-pipelines/.env pull ${{ architecture }}-${{ parameters.image }}
+          #   displayName: Pull image for Docker layer caching
+          #   continueOnError: true
+          #   condition: and(succeeded(), eq('${{ parameters.imageNameOnly }}', false))
+          # - script: |
+          #     cd $(Agent.BuildDirectory)/s/docker
+          #     docker compose --file ../azure-pipelines/docker-compose-build.yaml --env-file ../azure-pipelines/.env pull ${{ parameters.image }}
+          #   displayName: Pull image for Docker layer caching
+          #   continueOnError: true
+          #   condition: and(succeeded(), eq('${{ parameters.imageNameOnly }}', true))
+
+          - script: >
+              cd $(Agent.BuildDirectory)/s/docker &&
+              docker compose --file ../azure-pipelines/docker-compose-build.yaml
+              --env-file ../azure-pipelines/.env
+              build $(noCacheOption)
+              --build-arg BUILDKIT_INLINE_CACHE=1
+              --build-arg VISERON_VERSION=$(viseronVersion)
+              --build-arg VISERON_GIT_COMMIT=$(Build.SourceVersion)
+              ${{ architecture }}-${{ parameters.image }}
+            displayName: Build ${{ architecture }}-${{ parameters.image }}
+            condition: and(succeeded(), eq('${{ parameters.imageNameOnly }}', false))
+            env:
+              DOCKER_BUILDKIT: 1
+              COMPOSE_DOCKER_CLI_BUILD: 1
+              BUILDKIT_PROGRESS: plain
+          - script: >
+              cd $(Agent.BuildDirectory)/s/docker &&
+              docker compose --file ../azure-pipelines/docker-compose-build.yaml
+              --env-file ../azure-pipelines/.env
+              build $(noCacheOption)
+              --build-arg BUILDKIT_INLINE_CACHE=1
+              --build-arg VISERON_VERSION=$(viseronVersion)
+              --build-arg VISERON_GIT_COMMIT=$(Build.SourceVersion)
+              ${{ parameters.image }}
+            displayName: Build ${{ parameters.image }}
+            condition: and(succeeded(), eq('${{ parameters.imageNameOnly }}', true))
+            env:
+              DOCKER_BUILDKIT: 1
+              COMPOSE_DOCKER_CLI_BUILD: 1
+              BUILDKIT_PROGRESS: plain
+
+          # Runs only for non-release triggers
+          - script: |
+              cd $(Agent.BuildDirectory)/s/docker
+              docker compose --file ../azure-pipelines/docker-compose-build.yaml --env-file ../azure-pipelines/.env push ${{ architecture }}-${{ parameters.image }}
+
+              # Extract the tag and store it in an environment variable
+              IMAGE_TAG=$(docker compose --file ../azure-pipelines/docker-compose-build.yaml --env-file ../azure-pipelines/.env config | grep "${{ architecture }}-${{ parameters.image }}" -A2 | grep "image:" | sed 's/.*://g' | tr -d ' ' | cut -d':' -f2)
+              echo "Extracted IMAGE_TAG: $IMAGE_TAG"
+
+              # Push tag with commit hash
+              docker image tag roflcoopter/${{ architecture }}-${{ parameters.image }}:${IMAGE_TAG} roflcoopter/${{ architecture }}-${{ parameters.image }}:$(Build.SourceVersion)
+              docker image push roflcoopter/${{ architecture }}-${{ parameters.image }}:$(Build.SourceVersion)
+
+              # Push tag with timestamp
+              now=$(date -u +"%Y%m%d%H%M%S")
+              docker image tag roflcoopter/${{ architecture }}-${{ parameters.image }}:${IMAGE_TAG} roflcoopter/${{ architecture }}-${{ parameters.image }}:$now
+              docker image push roflcoopter/${{ architecture }}-${{ parameters.image }}:$now
+            displayName: Push ${{ architecture }}-${{ parameters.image }} (non-release)
+            condition: and(succeeded(), eq('${{ parameters.imageNameOnly }}', false), eq('${{ parameters.release }}', false))
+          - script: |
+              cd $(Agent.BuildDirectory)/s/docker
+              docker compose --file ../azure-pipelines/docker-compose-build.yaml --env-file ../azure-pipelines/.env push ${{ parameters.image }}
+
+              # Extract the tag and store it in an environment variable
+              IMAGE_TAG=$(docker compose --file ../azure-pipelines/docker-compose-build.yaml --env-file ../azure-pipelines/.env config | grep "${{ architecture }}-${{ parameters.image }}" -A2 | grep "image:" | sed 's/.*://g' | tr -d ' ' | cut -d':' -f2)
+              echo "Extracted IMAGE_TAG: $IMAGE_TAG"
+
+              # Push tag with commit hash
+              docker image tag roflcoopter/${{ parameters.image }}:${IMAGE_TAG} roflcoopter/${{ parameters.image }}:$(Build.SourceVersion)
+              docker image push roflcoopter/${{ parameters.image }}:$(Build.SourceVersion)
+
+              # Push tag with timestamp
+              now=$(date -u +"%Y%m%d%H%M%S")
+              docker image tag roflcoopter/${{ parameters.image }}:${IMAGE_TAG} roflcoopter/${{ parameters.image }}:$now
+              docker image push roflcoopter/${{ parameters.image }}:$now
+            displayName: Push ${{ parameters.image }} (non-release)
+            condition: and(succeeded(), eq('${{ parameters.imageNameOnly }}', true), eq('${{ parameters.release }}', false))
+
+          # Runs only for release triggers
+          - script: |
+              echo tagging roflcoopter/${{ architecture }}-${{ parameters.image }}:$(viseronVersion)
+              docker image tag roflcoopter/${{ architecture }}-${{ parameters.image }}:dev roflcoopter/${{ architecture }}-${{ parameters.image }}:$(viseronVersion)
+              docker image push roflcoopter/${{ architecture }}-${{ parameters.image }}:$(viseronVersion)
+
+              # Push tag with commit hash
+              docker image tag roflcoopter/${{ architecture }}-${{ parameters.image }}:dev roflcoopter/${{ architecture }}-${{ parameters.image }}:$(viseronVersion)-$(Build.SourceVersion)
+              docker image push roflcoopter/${{ architecture }}-${{ parameters.image }}:$(viseronVersion)-$(Build.SourceVersion)
+
+              # Push tag with timestamp
+              now=$(date -u +"%Y%m%d%H%M%S")
+              docker image tag roflcoopter/${{ architecture }}-${{ parameters.image }}:dev roflcoopter/${{ architecture }}-${{ parameters.image }}:$(viseronVersion)-$now
+              docker image push roflcoopter/${{ architecture }}-${{ parameters.image }}:$(viseronVersion)-$now
+
+              if [ $(latestRelease) = true ] ; then
+                echo tagging roflcoopter/${{ architecture }}-${{ parameters.image }}:latest
+                docker image tag roflcoopter/${{ architecture }}-${{ parameters.image }}:dev roflcoopter/${{ architecture }}-${{ parameters.image }}:latest
+                docker image push roflcoopter/${{ architecture }}-${{ parameters.image }}:latest
+              fi
+            displayName: Push ${{ architecture }}-${{ parameters.image }} (release)
+            condition: and(succeeded(), eq('${{ parameters.release }}', true))
+
+          - task: Docker@2
+            displayName: Logoff Docker Hub
+            inputs:
+              command: logout
+              containerRegistry: "Docker Hub"
+            condition: always()
diff --git a/docker/Dockerfile b/docker/Dockerfile
index ba7287036..975976220 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -16,11 +16,15 @@ ARG GPAC_VERSION
 ENV \
    DEBIAN_FRONTEND=noninteractive

+ARG BUILDPLATFORM
 RUN \
-    if [ "$ARCH" = "armhf" ] || \
-    [ "$ARCH" = "rpi3" ] || \
-    [ "$ARCH" = "aarch64" ] || \
-    [ "$ARCH" = "jetson-nano" ]; then echo "Crossbuilding!" && cross-build-start; fi
+    echo "Build platform: $BUILDPLATFORM" && \
+    echo "Target architecture: $ARCH" && \
+    if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \
+    case "$ARCH" in \
+    aarch64|jetson-nano|rpi3) echo "Crossbuilding!" && cross-build-start;; \
+    esac \
+    fi

 RUN \
    apt-get update && apt-get install -y --no-install-recommends \
@@ -102,7 +106,7 @@ ENV \
    S6_KILL_GRACETIME=30000 \
    S6_KILL_FINISH_MAXTIME=30000 \
    PATH=$PATH:/home/abc/bin \
-    LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib \
+    LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib:/ffmpeg/lib \
    PYTHONPATH=$PYTHONPATH:/usr/local/lib/python3.10/site-packages \
    OPENCV_OPENCL_CACHE_ENABLE=false \
    PG_COLOR="always"
diff --git a/docker/Dockerfile.hailo b/docker/Dockerfile.hailo
new file mode 100644
index 000000000..b75e15ac0
--- /dev/null
+++ b/docker/Dockerfile.hailo
@@ -0,0 +1,59 @@
+ARG BUILD_FROM
+FROM ${BUILD_FROM} as build
+
+ARG HAILO_VERSION
+ARG MAKEFLAGS="-j2"
+
+ENV \
+    DEBIAN_FRONTEND=noninteractive \
+    PIP_IGNORE_INSTALLED=0 \
+    HAILORT_LOGGER_PATH=NONE
+
+RUN \
+    buildDeps="autoconf \
+    automake \
+    ca-certificates \
+    cmake \
+    g++ \
+    gcc \
+    git \
+    make \
+    zip \
+    unzip \
+    python3-dev \
+    python3-pip \
+    python3-setuptools \
+    python3-wheel" && \
+    apt-get -yqq update && \
+    apt-get install -yq --no-install-recommends ${buildDeps}
+
+# Compile hailort
+RUN \
+    DIR=/tmp && mkdir -p ${DIR} && cd ${DIR} && \
+    git clone --branch v${HAILO_VERSION} --depth 1 https://github.com/hailo-ai/hailort.git && \
+    cd hailort && \
+    cmake -S. -Bbuild -DCMAKE_BUILD_TYPE=Release && cmake --build build --config release --target install
+
+# Build and create wheel
+RUN \
+    cd /tmp/hailort/hailort/libhailort/bindings/python/platform && \
+    python3 setup.py bdist_wheel --dist-dir=/wheels && \
+    pip3 wheel . -w /wheels && \
+    # Sanitize produced wheels to remove any lingering 'license-file' metadata lines
+    for whl in /wheels/*.whl; do \
+    tmpdir=$(mktemp -d); \
+    unzip -q "$whl" -d "$tmpdir"; \
+    find "$tmpdir" -maxdepth 2 -type f -path '*/METADATA' -exec sed -i '/^license-file:/Id' {} +; \
+    (cd "$tmpdir" && zip -qr "$whl.fixed" .); \
+    mv "$whl.fixed" "$whl"; \
+    rm -rf "$tmpdir"; \
+    done && \
+    ls -al /wheels/
+
+FROM scratch as scratch
+
+ARG HAILO_VERSION
+
+COPY --from=build /usr/local/bin/hailortcli /usr/local/bin/hailortcli
+COPY --from=build /usr/local/lib/libhailort.so.${HAILO_VERSION} /usr/local/lib/libhailort.so.${HAILO_VERSION}
+COPY --from=build /wheels /wheels/
diff --git a/docker/Dockerfile.wheels b/docker/Dockerfile.wheels
index a2f80fac1..7e0f53c24 100644
--- a/docker/Dockerfile.wheels
+++ b/docker/Dockerfile.wheels
@@ -14,10 +14,15 @@ ENV \
    DEBIAN_FRONTEND=noninteractive \
    PIP_IGNORE_INSTALLED=0

+ARG BUILDPLATFORM
 RUN \
-    if [ "$ARCH" = "armhf" ] || \
-    [ "$ARCH" = "rpi3" ] || \
-    [ "$ARCH" = "aarch64" ] ; then echo "Crossbuilding!" && cross-build-start; fi
+    echo "Build platform: $BUILDPLATFORM" && \
+    echo "Target architecture: $ARCH" && \
+    if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \
+    case "$ARCH" in \
+    aarch64|rpi3) echo "Crossbuilding!" && cross-build-start;; \
+    esac \
+    fi

 RUN apt-get -yqq update && apt-get install -yq --no-install-recommends \
    software-properties-common \
diff --git a/docker/aarch64/Dockerfile.base b/docker/aarch64/Dockerfile.base
index fd9c35b6f..b1316fe1c 100644
--- a/docker/aarch64/Dockerfile.base
+++ b/docker/aarch64/Dockerfile.base
@@ -1,6 +1,8 @@
 ARG FFMPEG_VERSION
 ARG UBUNTU_VERSION
+ARG HAILO_VERSION
 FROM roflcoopter/viseron-models:latest as models
+FROM roflcoopter/aarch64-hailo:${HAILO_VERSION} as hailo
 FROM balenalib/aarch64-ubuntu:${UBUNTU_VERSION}-run

 COPY --from=models /detectors/models/darknet /detectors/models/darknet
@@ -25,3 +27,8 @@ RUN \
    libatomic1 && \
    ln -s /detectors/models/darknet/yolov7-tiny.weights /detectors/models/darknet/default.weights && \
    ln -s /detectors/models/darknet/yolov7-tiny.cfg /detectors/models/darknet/default.cfg
+
+ARG HAILO_VERSION
+COPY --from=hailo /usr/local/bin/hailortcli /usr/local/bin/hailortcli
+COPY --from=hailo /usr/local/lib/libhailort.so.${HAILO_VERSION} /usr/local/lib/libhailort.so.${HAILO_VERSION}
+COPY --from=hailo /wheels /wheels/
diff --git a/docker/aarch64/Dockerfile.dlib b/docker/aarch64/Dockerfile.dlib
index be9c35fa2..9bc8ba592 100644
--- a/docker/aarch64/Dockerfile.dlib
+++ b/docker/aarch64/Dockerfile.dlib
@@ -1,6 +1,5 @@
 ARG UBUNTU_VERSION
 FROM balenalib/aarch64-ubuntu:${UBUNTU_VERSION}-build as build
-RUN [ "cross-build-start" ]

 ARG DLIB_VERSION
 ARG MAKEFLAGS="-j2"
@@ -9,6 +8,13 @@ ENV \
    DEBIAN_FRONTEND=noninteractive \
    PIP_IGNORE_INSTALLED=0

+ARG BUILDPLATFORM
+RUN \
+    echo "Build platform: $BUILDPLATFORM" && \
+    if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \
+    echo "Crossbuilding!" && cross-build-start; \
+    fi
+
 RUN buildDeps="autoconf \
    automake \
    ca-certificates \
@@ -34,7 +40,5 @@ RUN \
    \
    && ls -al /wheels/

-RUN [ "cross-build-end" ]
-
 FROM scratch as scratch
 COPY --from=build /wheels /wheels/
diff --git a/docker/aarch64/Dockerfile.ffmpeg b/docker/aarch64/Dockerfile.ffmpeg
index 6f49600e4..72c49e1eb 100644
--- a/docker/aarch64/Dockerfile.ffmpeg
+++ b/docker/aarch64/Dockerfile.ffmpeg
@@ -1,7 +1,6 @@
 # Shameless copy of https://github.com/jrottenberg/ffmpeg/
 ARG UBUNTU_VERSION
 FROM balenalib/aarch64-ubuntu:${UBUNTU_VERSION}-build as build
-RUN [ "cross-build-start" ]

 WORKDIR /tmp/workdir

@@ -15,6 +14,13 @@ ENV \
    PREFIX=/opt/ffmpeg \
    LD_LIBRARY_PATH="/opt/ffmpeg/lib:/opt/ffmpeg/lib64:/usr/lib64:/usr/lib:/lib64:/lib"

+ARG BUILDPLATFORM
+RUN \
+    echo "Build platform: $BUILDPLATFORM" && \
+    if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \
+    echo "Crossbuilding!" && cross-build-start; \
+    fi
+
 RUN buildDeps="autoconf \
    automake \
    ca-certificates \
@@ -36,7 +42,9 @@ RUN buildDeps="autoconf \
    yasm \
    libgomp1 \
    zlib1g-dev \
-    libomxil-bellagio-dev" && \
+    libomxil-bellagio-dev \
+    libdrm-dev \
+    libv4l-dev" && \
    apt-get -yqq update && \
    apt-get install -yq --no-install-recommends ${buildDeps}
@@ -105,12 +113,22 @@ RUN \
    curl -sLO https://archive.mozilla.org/pub/opus/opus-${OPUS_VERSION}.tar.gz && \
    echo ${OPUS_SHA256SUM} | sha256sum --check && \
    tar -zx --strip-components=1 -f opus-${OPUS_VERSION}.tar.gz
-RUN ["cross-build-end"]
+ARG BUILDPLATFORM
+RUN \
+    echo "Build platform: $BUILDPLATFORM" && \
+    if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \
+    echo "Stopping Crossbuilding!" && cross-build-end; \
+    fi
 RUN \
    DIR=/tmp/opus && \
    cd ${DIR} && \
    autoreconf -fiv
-RUN ["cross-build-start"]
+ARG BUILDPLATFORM
+RUN \
+    echo "Build platform: $BUILDPLATFORM" && \
+    if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \
+    echo "Crossbuilding!" && cross-build-start; \
+    fi
 RUN \
    DIR=/tmp/opus && \
    cd ${DIR} && \
@@ -215,12 +233,22 @@ RUN \
    cd ${DIR} && \
    curl -sL https://github.com/mstorsjo/fdk-aac/archive/v${FDKAAC_VERSION}.tar.gz | \
    tar -zx --strip-components=1
-RUN ["cross-build-end"]
+ARG BUILDPLATFORM
+RUN \
+    echo "Build platform: $BUILDPLATFORM" && \
+    if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \
+    echo "Stopping Crossbuilding!" && cross-build-end; \
+    fi
 RUN \
    DIR=/tmp/fdk-aac && \
    cd ${DIR} && \
    autoreconf -fiv
-RUN ["cross-build-start"]
+ARG BUILDPLATFORM
+RUN \
+    echo "Build platform: $BUILDPLATFORM" && \
+    if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \
+    echo "Crossbuilding!" && cross-build-start; \
+    fi
 RUN \
    DIR=/tmp/fdk-aac && \
    cd ${DIR} && \
@@ -279,13 +307,23 @@ RUN \
    curl -sLO https://github.com/fribidi/fribidi/archive/${FRIBIDI_VERSION}.tar.gz && \
    echo ${FRIBIDI_SHA256SUM} | sha256sum --check && \
    tar -zx --strip-components=1 -f ${FRIBIDI_VERSION}.tar.gz
-RUN ["cross-build-end"]
+ARG BUILDPLATFORM
+RUN \
+    echo "Build platform: $BUILDPLATFORM" && \
+    if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \
+    echo "Stopping Crossbuilding!" && cross-build-end; \
+    fi
 RUN \
    export NOCONFIGURE=1 && \
    DIR=/tmp/fribidi && \
    cd ${DIR} && \
    ./autogen.sh
-RUN ["cross-build-start"]
+ARG BUILDPLATFORM
+RUN \
+    echo "Build platform: $BUILDPLATFORM" && \
+    if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \
+    echo "Crossbuilding!" && cross-build-start; \
+    fi
 RUN \
    DIR=/tmp/fribidi && \
    cd ${DIR} && \
@@ -316,12 +354,22 @@ RUN \
    curl -sLO https://github.com/libass/libass/archive/${LIBASS_VERSION}.tar.gz && \
    echo ${LIBASS_SHA256SUM} | sha256sum --check && \
    tar -zx --strip-components=1 -f ${LIBASS_VERSION}.tar.gz
-RUN ["cross-build-end"]
+ARG BUILDPLATFORM
+RUN \
+    echo "Build platform: $BUILDPLATFORM" && \
+    if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \
+    echo "Stopping Crossbuilding!" && cross-build-end; \
+    fi
 RUN \
    DIR=/tmp/libass && \
    cd ${DIR} && \
    ./autogen.sh
-RUN ["cross-build-start"]
+ARG BUILDPLATFORM
+RUN \
+    echo "Build platform: $BUILDPLATFORM" && \
+    if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \
+    echo "Crossbuilding!" && cross-build-start; \
+    fi
 RUN \
    DIR=/tmp/libass && \
    cd ${DIR} && \
@@ -337,12 +385,22 @@ RUN \
    cd ${DIR} && \
    curl -sLO https://github.com/ultravideo/kvazaar/archive/v${KVAZAAR_VERSION}.tar.gz && \
    tar -zx --strip-components=1 -f v${KVAZAAR_VERSION}.tar.gz
-RUN ["cross-build-end"]
+ARG BUILDPLATFORM
+RUN \
+    echo "Build platform: $BUILDPLATFORM" && \
+    if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \
+    echo "Stopping Crossbuilding!" && cross-build-end; \
+    fi
 RUN \
    DIR=/tmp/kvazaar && \
    cd ${DIR} && \
    ./autogen.sh
-RUN ["cross-build-start"]
+ARG BUILDPLATFORM
+RUN \
+    echo "Build platform: $BUILDPLATFORM" && \
+    if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \
+    echo "Crossbuilding!" && cross-build-start; \
+    fi
 RUN \
    DIR=/tmp/kvazaar && \
    cd ${DIR} && \
@@ -426,12 +484,22 @@ RUN \
    curl -sLO https://gitlab.gnome.org/GNOME/libxml2/-/archive/v${LIBXML2_VERSION}/libxml2-v${LIBXML2_VERSION}.tar.gz && \
    echo ${LIBXML2_SHA256SUM} | sha256sum --check && \
    tar -xz --strip-components=1 -f libxml2-v${LIBXML2_VERSION}.tar.gz
-RUN ["cross-build-end"]
+ARG BUILDPLATFORM
+RUN \
+    echo "Build platform: $BUILDPLATFORM" && \
+    if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \
+    echo "Stopping Crossbuilding!" && cross-build-end; \
+    fi
 RUN \
    DIR=/tmp/libxml2 && \
    cd ${DIR} && \
    ./autogen.sh --prefix="${PREFIX}" --with-ftp=no --with-http=no --with-python=no
-RUN ["cross-build-start"]
+ARG BUILDPLATFORM
+RUN \
+    echo "Build platform: $BUILDPLATFORM" && \
+    if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \
+    echo "Crossbuilding!" && cross-build-start; \
+    fi
 RUN \
    DIR=/tmp/libxml2 && \
    cd ${DIR} && \
@@ -464,12 +532,22 @@ RUN \
    curl -sLO https://github.com/zeromq/libzmq/archive/v${LIBZMQ_VERSION}.tar.gz && \
    echo ${LIBZMQ_SHA256SUM} | sha256sum --check && \
    tar -xz --strip-components=1 -f v${LIBZMQ_VERSION}.tar.gz
-RUN ["cross-build-end"]
+ARG BUILDPLATFORM
+RUN \
+    echo "Build platform: $BUILDPLATFORM" && \
+    if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \
+    echo "Stopping Crossbuilding!" && cross-build-end; \
+    fi
 RUN \
    DIR=/tmp/libzmq && \
    cd ${DIR} && \
    ./autogen.sh
-RUN ["cross-build-start"]
+ARG BUILDPLATFORM
+RUN \
+    echo "Build platform: $BUILDPLATFORM" && \
+    if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \
+    echo "Crossbuilding!" && cross-build-start; \
+    fi
 RUN \
    DIR=/tmp/libzmq && \
    cd ${DIR} && \
@@ -529,6 +607,8 @@ RUN \
    --extra-libs=-lpthread \
    --enable-neon \
    --enable-v4l2_m2m \
+    --enable-libv4l2 \
+    --enable-libdrm \
    --extra-cflags="-I${PREFIX}/include" \
    --extra-ldflags="-L${PREFIX}/lib" && \
    make && \
@@ -552,7 +632,5 @@ RUN \
    sed "s:${PREFIX}:/usr/local:g" <"$pc" >/usr/local/lib/pkgconfig/"${pc##*/}"; \
    done

-RUN [ "cross-build-end" ]
-
 FROM scratch
 COPY --from=ffmpeg /usr/local /usr/local/
diff --git a/docker/aarch64/Dockerfile.opencv b/docker/aarch64/Dockerfile.opencv
index 41a7cd916..100101a58 100644
--- a/docker/aarch64/Dockerfile.opencv
+++ b/docker/aarch64/Dockerfile.opencv
@@ -2,7 +2,6 @@ ARG FFMPEG_VERSION
 ARG UBUNTU_VERSION
 FROM roflcoopter/aarch64-ffmpeg:${FFMPEG_VERSION} as ffmpeg
 FROM balenalib/aarch64-ubuntu:${UBUNTU_VERSION}-build as build
-RUN [ "cross-build-start" ]

 COPY --from=ffmpeg /usr/local /usr/local/

@@ -18,6 +17,13 @@ ENV \
    PREFIX=/opt/opencv \
    LD_LIBRARY_PATH="/opt/opencv/lib:/opt/opencv/lib64:/usr/lib64:/usr/lib:/lib64:/lib:/usr/local/lib"

+ARG BUILDPLATFORM
+RUN \
+    echo "Build platform: $BUILDPLATFORM" && \
+    if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \
+    echo "Crossbuilding!" && cross-build-start; \
+    fi
+
 RUN buildDeps="autoconf \
    automake \
    ca-certificates \
@@ -91,8 +97,6 @@ RUN \
    make && \
    make install

-RUN [ "cross-build-end" ]
-
 FROM scratch
 COPY --from=build /opt/opencv /opt/opencv/
diff --git a/docker/amd64-cuda/Dockerfile.base b/docker/amd64-cuda/Dockerfile.base
index 28cf5eae1..54ca5bf82 100644
--- a/docker/amd64-cuda/Dockerfile.base
+++ b/docker/amd64-cuda/Dockerfile.base
@@ -1,8 +1,10 @@
 ARG DARKNET_COMMIT
 ARG CUDA_VERSION
 ARG UBUNTU_VERSION_NUMBER
+ARG HAILO_VERSION
 FROM roflcoopter/viseron-models:latest as models
 FROM roflcoopter/amd64-cuda-darknet:${DARKNET_COMMIT} as darknet
+FROM roflcoopter/amd64-hailo:${HAILO_VERSION} as hailo
 FROM nvidia/cuda:${CUDA_VERSION}-cudnn9-runtime-ubuntu${UBUNTU_VERSION_NUMBER}

 COPY --from=models /detectors/models/darknet /detectors/models/darknet
@@ -79,3 +81,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
    ln -s /detectors/models/darknet/yolov7.cfg /detectors/models/darknet/default.cfg

 COPY --from=darknet /darknet/lib /usr/local/lib
+
+ARG HAILO_VERSION
+COPY --from=hailo /usr/local/bin/hailortcli /usr/local/bin/hailortcli
+COPY --from=hailo /usr/local/lib/libhailort.so.${HAILO_VERSION} /usr/local/lib/libhailort.so.${HAILO_VERSION}
+COPY --from=hailo /wheels /wheels/
diff --git a/docker/amd64/Dockerfile.base b/docker/amd64/Dockerfile.base
index bbf1b2278..640767ade 100644
--- a/docker/amd64/Dockerfile.base
+++ b/docker/amd64/Dockerfile.base
@@ -1,5 +1,7 @@
 ARG UBUNTU_VERSION
+ARG HAILO_VERSION
 FROM roflcoopter/viseron-models:latest as models
+FROM roflcoopter/amd64-hailo:${HAILO_VERSION} as hailo
 FROM ubuntu:${UBUNTU_VERSION} as base

 COPY --from=models /detectors/models/darknet /detectors/models/darknet
@@ -68,3 +70,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
    rm -R /opencl && \
    ln -s /detectors/models/darknet/yolov7.weights /detectors/models/darknet/default.weights && \
    ln -s /detectors/models/darknet/yolov7.cfg /detectors/models/darknet/default.cfg
+
+ARG HAILO_VERSION
+COPY --from=hailo /usr/local/bin/hailortcli /usr/local/bin/hailortcli
+COPY --from=hailo /usr/local/lib/libhailort.so.${HAILO_VERSION} /usr/local/lib/libhailort.so.${HAILO_VERSION}
+COPY --from=hailo /wheels /wheels/
diff --git a/docker/azure-agent-start.sh b/docker/azure-agent-start.sh
new file mode 100644
index 000000000..19a8a6ae8
--- /dev/null
+++ b/docker/azure-agent-start.sh
@@ -0,0 +1,114 @@
+#!/bin/bash
+set -e
+
+if [ -z "${AZP_URL}" ]; then
+  echo 1>&2 "error: missing AZP_URL environment variable"
+  exit 1
+fi
+
+# If an agent installation already exists (persisted via a volume) and no force update is requested,
+# skip download + configuration and just run the existing agent to speed up container startup.
+if [ -f ./run.sh ] && [ -f .agent ] && [ -z "${AZP_FORCE_UPDATE}" ]; then
+  echo
+  echo "Existing Azure Pipelines agent detected. Skipping download and configuration (set AZP_FORCE_UPDATE=1 to override)."
+  chmod +x ./run.sh
+  ./run.sh "$@" & wait $!
+  exit 0
+fi
+
+if [ -n "$AZP_CLIENTID" ]; then
+  echo "Using service principal credentials to get token"
+  az login --allow-no-subscriptions --service-principal --username "$AZP_CLIENTID" --password "$AZP_CLIENTSECRET" --tenant "$AZP_TENANTID"
+  # adapted from https://learn.microsoft.com/en-us/azure/databricks/dev-tools/user-aad-token
+  AZP_TOKEN=$(az account get-access-token --query accessToken --output tsv)
+  echo "Token retrieved"
+fi
+
+if [ -z "${AZP_TOKEN_FILE}" ]; then
+  if [ -z "${AZP_TOKEN}" ]; then
+    echo 1>&2 "error: missing AZP_TOKEN environment variable"
+    exit 1
+  fi
+
+  AZP_TOKEN_FILE="/azp/.token"
+  echo -n "${AZP_TOKEN}" > "${AZP_TOKEN_FILE}"
+fi
+
+unset AZP_CLIENTSECRET
+unset AZP_TOKEN
+
+if [ -n "${AZP_WORK}" ]; then
+  mkdir -p "${AZP_WORK}"
+fi
+
+cleanup() {
+  trap "" EXIT
+
+  if [ -e ./config.sh ]; then
+    print_header "Cleanup. Removing Azure Pipelines agent..."
+
+    # If the agent has some running jobs, the configuration removal process will fail.
+    # So, give it some time to finish the job.
+    while true; do
+      ./config.sh remove --unattended --auth "PAT" --token $(cat "${AZP_TOKEN_FILE}") && break
+
+      echo "Retrying in 30 seconds..."
+      sleep 30
+    done
+  fi
+}
+
+print_header() {
+  lightcyan="\033[1;36m"
+  nocolor="\033[0m"
+  echo -e "\n${lightcyan}$1${nocolor}\n"
+}
+
+# Let the agent ignore the token env variables
+export VSO_AGENT_IGNORE="AZP_TOKEN,AZP_TOKEN_FILE"
+
+print_header "1. Determining matching Azure Pipelines agent..."
+
+AZP_AGENT_PACKAGES=$(curl -LsS \
+  -u user:$(cat "${AZP_TOKEN_FILE}") \
+  -H "Accept:application/json" \
+  "${AZP_URL}/_apis/distributedtask/packages/agent?platform=${TARGETARCH}&top=1")
+
+AZP_AGENT_PACKAGE_LATEST_URL=$(echo "${AZP_AGENT_PACKAGES}" | jq -r ".value[0].downloadUrl")
+
+if [ -z "${AZP_AGENT_PACKAGE_LATEST_URL}" -o "${AZP_AGENT_PACKAGE_LATEST_URL}" == "null" ]; then
+  echo 1>&2 "error: could not determine a matching Azure Pipelines agent"
+  echo 1>&2 "check that account "${AZP_URL}" is correct and the token is valid for that account"
+  exit 1
+fi
+
+print_header "2. Downloading and extracting Azure Pipelines agent..."
+
+curl -LsS "${AZP_AGENT_PACKAGE_LATEST_URL}" | tar -xz & wait $!
+
+source ./env.sh
+
+trap "cleanup; exit 0" EXIT
+trap "cleanup; exit 130" INT
+trap "cleanup; exit 143" TERM
+
+print_header "3. Configuring Azure Pipelines agent..."
+
+# Despite it saying "PAT", it can be the token through the service principal
+./config.sh --unattended \
+  --agent "${AZP_AGENT_NAME:-$(hostname)}" \
+  --url "${AZP_URL}" \
+  --auth "PAT" \
+  --token $(cat "${AZP_TOKEN_FILE}") \
+  --pool "${AZP_POOL:-Default}" \
+  --work "${AZP_WORK:-_work}" \
+  --replace \
+  --acceptTeeEula & wait $!
+
+print_header "4. Running Azure Pipelines agent..."
+
+chmod +x ./run.sh
+
+# To be aware of TERM and INT signals call ./run.sh
+# Running it with the --once flag at the end will shut down the agent after the build is executed
+./run.sh "$@" & wait $!
\ No newline at end of file
diff --git a/docker/jetson-nano/Dockerfile.base b/docker/jetson-nano/Dockerfile.base
index 133de5823..58914b193 100644
--- a/docker/jetson-nano/Dockerfile.base
+++ b/docker/jetson-nano/Dockerfile.base
@@ -1,11 +1,11 @@
 ARG PYTHON_VERSION
 ARG DARKNET_COMMIT
 ARG JETPACK_VERSION
-FROM roflcoopter/viseron-models:latest as models
-FROM roflcoopter/jetson-nano-python:${PYTHON_VERSION} as python
-FROM roflcoopter/jetson-nano-python:3.9.19 as python-3.9.19
-FROM roflcoopter/jetson-nano-darknet:${DARKNET_COMMIT} as darknet
-FROM nvcr.io/nvidia/l4t-base:r${JETPACK_VERSION} as build
+FROM roflcoopter/viseron-models:latest AS models
+FROM roflcoopter/jetson-nano-python:${PYTHON_VERSION} AS python
+FROM roflcoopter/jetson-nano-python:3.9.19 AS python-3.9.19
+FROM roflcoopter/jetson-nano-darknet:${DARKNET_COMMIT} AS darknet
+FROM nvcr.io/nvidia/l4t-base:r${JETPACK_VERSION} AS build

 COPY --from=models /detectors/models/darknet /detectors/models/darknet
 COPY --from=models /detectors/models/edgetpu /detectors/models/edgetpu
@@ -30,10 +30,11 @@ RUN \
    apt-get update && apt-get install -y --no-install-recommends \
    ca-certificates \
    gnupg && \
+    echo "deb https://newrepo.switchroot.org/ switchroot unstable" >> /etc/apt/sources.list.d/switchroot.list && \
+    apt-key del 92813F6A23DB6DFC && \
+    wget -O - https://newrepo.switchroot.org/pubkey | apt-key add - && \
    echo "deb https://repo.download.nvidia.com/jetson/common r${JETPACK_VERSION_MAJOR}.${JETPACK_VERSION_MINOR} main" >> /etc/apt/sources.list.d/nvidia.list && \
    echo "deb https://repo.download.nvidia.com/jetson/${SOC} r${JETPACK_VERSION_MAJOR}.${JETPACK_VERSION_MINOR} main" >> /etc/apt/sources.list.d/nvidia.list && \
-    echo "deb https://repo.download.nvidia.com/jetson/ffmpeg main main" >> /etc/apt/sources.list.d/nvidia.list && \
-    echo "deb-src https://repo.download.nvidia.com/jetson/ffmpeg main main" >> /etc/apt/sources.list.d/nvidia.list && \
    apt-key adv --fetch-key http://repo.download.nvidia.com/jetson/jetson-ota-public.asc && \
    mkdir -p /opt/nvidia/l4t-packages/ && touch /opt/nvidia/l4t-packages/.nv-l4t-disable-boot-fw-update-in-preinstall && \
    rm -r /etc/ld.so.conf.d/nvidia-tegra.conf && \
diff --git a/docker/jetson-nano/Dockerfile.darknet b/docker/jetson-nano/Dockerfile.darknet
index e0df00057..9075236d0 100644
--- a/docker/jetson-nano/Dockerfile.darknet
+++ b/docker/jetson-nano/Dockerfile.darknet
@@ -21,8 +21,6 @@ RUN \

 COPY --from=opencv /opt/opencv /opt/opencv/

-RUN [ "cross-build-start" ]
-
 ARG MAKEFLAGS="-j2"
 ARG SOC
 ARG JETPACK_VERSION_MAJOR
@@ -36,6 +34,13 @@ ENV \
    PKG_CONFIG_PATH=$PKG_CONFIG_PATH:/opt/opencv/lib/pkgconfig \
    LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/opencv/lib:/usr/lib/aarch64-linux-gnu/tegra:/usr/lib/aarch64-linux-gnu/tegra-egl

+ARG BUILDPLATFORM
+RUN \
+    echo "Build platform: $BUILDPLATFORM" && \
+    if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \
+    echo "Crossbuilding!" && cross-build-start; \
+    fi
+
 RUN \
    apt-get update && apt-get install -y --no-install-recommends \
    ca-certificates \
@@ -65,7 +70,7 @@ RUN buildDeps="autoconf \
    nvidia-l4t-cuda \
    cuda-libraries-dev-10-2" && \
    apt-get -yqq update && \
-    apt-get install -yq -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confold' --no-install-recommends ${buildDeps}
+    apt-get install -yq -o Dpkg::Options::='--force-confdef' -o Dpkg::Options::='--force-confold' --no-install-recommends ${buildDeps}

 # Minimal cuda install does not create symlink so we do it manually
 RUN ln -s /usr/local/cuda-10.2 /usr/local/cuda
diff --git a/docker/jetson-nano/Dockerfile.dlib b/docker/jetson-nano/Dockerfile.dlib
index e4d3650de..601dfe25e 100644
--- a/docker/jetson-nano/Dockerfile.dlib
+++ b/docker/jetson-nano/Dockerfile.dlib
@@ -17,8 +17,6 @@ RUN \
    ln -s pydoc3 pydoc && \
    ln -s python3 python

-RUN [ "cross-build-start" ]
-
 ARG DLIB_VERSION
 ARG MAKEFLAGS="-j2"
 ARG SOC
@@ -29,6 +27,13 @@ ENV \
    DEBIAN_FRONTEND=noninteractive \
    PIP_IGNORE_INSTALLED=0

+ARG BUILDPLATFORM
+RUN \
+    echo "Build platform: $BUILDPLATFORM" && \
+    if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \
+    echo "Crossbuilding!" && cross-build-start; \
+    fi
+
 # NVIDIA repos use HTTPS so we need some additional libraries first
 RUN \
    apt-get update && apt-get install -y --no-install-recommends \
@@ -73,7 +78,5 @@ RUN \
    \
    && ls -al /wheels/

-RUN [ "cross-build-end" ]
-
 FROM scratch as scratch
 COPY --from=build /wheels /wheels/
diff --git a/docker/jetson-nano/Dockerfile.ffmpeg b/docker/jetson-nano/Dockerfile.ffmpeg
index 15f170142..e046b547f 100644
--- a/docker/jetson-nano/Dockerfile.ffmpeg
+++ b/docker/jetson-nano/Dockerfile.ffmpeg
@@ -131,7 +131,12 @@ COPY --from=qemu /usr/bin/cross-build-start /bin/cross-build-start
 COPY --from=qemu /usr/bin/cross-build-end /bin/cross-build-end
 COPY --from=qemu /usr/bin/resin-xbuild /usr/bin/resin-xbuild

-RUN [ "cross-build-start" ]
+ARG BUILDPLATFORM
+RUN \
+    echo "Build platform: $BUILDPLATFORM" && \
+    if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \
+    echo "Crossbuilding!" && cross-build-start; \
+    fi

 WORKDIR /tmp/workdir

@@ -600,8 +605,6 @@ RUN \
    sed "s:${PREFIX}:/usr/local:g" <"$pc" >/usr/local/lib/pkgconfig/"${pc##*/}"; \
    done

-RUN [ "cross-build-end" ]
-
 FROM scratch
 COPY --from=build /usr/local /usr/local/
diff --git a/docker/jetson-nano/Dockerfile.opencv b/docker/jetson-nano/Dockerfile.opencv
index 1327b8cba..a9f56e67c 100644
--- a/docker/jetson-nano/Dockerfile.opencv
+++ b/docker/jetson-nano/Dockerfile.opencv
@@ -9,7 +9,12 @@ COPY --from=qemu /usr/bin/cross-build-start /bin/cross-build-start
 COPY --from=qemu /usr/bin/cross-build-end /bin/cross-build-end
 COPY --from=qemu /usr/bin/resin-xbuild /usr/bin/resin-xbuild

-RUN [ "cross-build-start" ]
+ARG BUILDPLATFORM
+RUN \
+    echo "Build platform: $BUILDPLATFORM" && \
+    if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \
+    echo "Crossbuilding!" && cross-build-start; \
+    fi

 WORKDIR /tmp/workdir

diff --git a/docker/jetson-nano/Dockerfile.opencv.part1 b/docker/jetson-nano/Dockerfile.opencv.part1
index c229ac6a1..a78fe6d45 100644
--- a/docker/jetson-nano/Dockerfile.opencv.part1
+++ b/docker/jetson-nano/Dockerfile.opencv.part1
@@ -17,8 +17,6 @@ RUN \
    ln -s pydoc3 pydoc && \
    ln -s python3 python

-RUN [ "cross-build-start" ]
-
 WORKDIR /tmp/workdir

 ARG OPENCV_VERSION
@@ -34,6 +32,13 @@ ENV \
    CUDA_ARCH_BIN="53" \
    CUDA_ARCH_PTX="70"

+ARG BUILDPLATFORM
+RUN \
+    echo "Build platform: $BUILDPLATFORM" && \
+    if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \
+    echo "Crossbuilding!" && cross-build-start; \
+    fi
+
 RUN \
    apt-get update && apt-get install -y --no-install-recommends \
    ca-certificates \
diff --git a/docker/jetson-nano/Dockerfile.python b/docker/jetson-nano/Dockerfile.python
index 522b6f884..f939aef9f 100644
--- a/docker/jetson-nano/Dockerfile.python
+++ b/docker/jetson-nano/Dockerfile.python
@@ -9,8 +9,6 @@ COPY --from=qemu /usr/bin/cross-build-start /bin/cross-build-start
 COPY --from=qemu /usr/bin/cross-build-end /bin/cross-build-end
 COPY --from=qemu /usr/bin/resin-xbuild /usr/bin/resin-xbuild

-RUN [ "cross-build-start" ]
-
 ARG PYTHON_VERSION
 ARG PYTHON_PIP_VERSION

@@ -18,6 +16,13 @@ ENV LANG=C.UTF-8
 ENV DEBIAN_FRONTEND=noninteractive
 ENV PIP_URL=https://bootstrap.pypa.io/get-pip.py

+ARG BUILDPLATFORM
+RUN \
+    echo "Build platform: $BUILDPLATFORM" && \
+    if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \
+    echo "Crossbuilding!" && cross-build-start; \
+    fi
+
 RUN \
    apt-get -yqq update && apt-get install -yq --no-install-recommends \
    ca-certificates \
@@ -91,8 +96,6 @@ RUN \
    -name pydoc_data -o \
    -name tkinter \) -exec rm -rf {} +

-RUN [ "cross-build-end" ]
-
 FROM scratch
 COPY --from=build /usr/local /usr/local
diff --git a/docker/jetson-nano/Dockerfile.wheels b/docker/jetson-nano/Dockerfile.wheels
index 5d8f0affa..7d8fc54f9 100644
--- a/docker/jetson-nano/Dockerfile.wheels
+++ b/docker/jetson-nano/Dockerfile.wheels
@@ -27,7 +27,12 @@ ENV \
    DEBIAN_FRONTEND=noninteractive \
    PIP_IGNORE_INSTALLED=0

-RUN [ "cross-build-start" ]
+ARG BUILDPLATFORM
+RUN \
+    echo "Build platform: $BUILDPLATFORM" && \
+    if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \
+    echo "Crossbuilding!" && cross-build-start; \
+    fi

 RUN apt-get -yqq update && apt-get install -yq --no-install-recommends \
    wget \
diff --git a/docker/rpi5/Dockerfile.azure b/docker/rpi5/Dockerfile.azure
new file mode 100644
index 000000000..39beb04fb
--- /dev/null
+++ b/docker/rpi5/Dockerfile.azure
@@ -0,0 +1,42 @@
+ARG UBUNTU_VERSION
+FROM ubuntu:${UBUNTU_VERSION}
+ENV TARGETARCH="linux-arm64" \
+    DOCKER_BUILDKIT=1
+
+ARG DOCKER_GID
+
+RUN apt-get update && \
+    apt-get upgrade -y && \
+    apt-get install -y curl git jq libicu70 ca-certificates
+
+# Install Docker
+RUN \
+    install -m 0755 -d /etc/apt/keyrings && \
+    curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc && \
+    chmod a+r /etc/apt/keyrings/docker.asc && \
+    echo \
+    "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
+    $(. /etc/os-release && echo "${UBUNTU_CODENAME:-$VERSION_CODENAME}") stable" | \
+    tee /etc/apt/sources.list.d/docker.list > /dev/null && \
+    apt-get update && \
+    apt-get install -y \
+    docker-ce-cli \
+    docker-buildx-plugin \
+    docker-compose-plugin
+
+# Install Azure CLI
+RUN curl -sL https://aka.ms/InstallAzureCLIDeb | bash
+
+WORKDIR /azp/
+
+COPY ./docker/azure-agent-start.sh ./
+RUN chmod +x ./azure-agent-start.sh
+
+# Create agent user and set up home directory
+RUN useradd -m -d /home/agent agent
+RUN chown -R agent:agent /azp /home/agent
+RUN groupadd -g ${DOCKER_GID} docker && usermod -aG docker agent
+
+USER agent
+
+ENTRYPOINT [ "./azure-agent-start.sh" ]
\ No newline at end of file
diff --git a/docs/docs/documentation/configuration/system_events.md b/docs/docs/documentation/configuration/system_events.md
new file mode 100644
index 000000000..e99f39bdb
--- /dev/null
+++ b/docs/docs/documentation/configuration/system_events.md
@@ -0,0 +1,23 @@
+# System Events
+
+System events are dispatched by the backend for communication between the many components of Viseron, for example when a camera detects motion or an object is detected.
+
+These events can be used to trigger actions in other components, such as the [webhook component](/components-explorer/components/webhook).
+
+## System event viewer
+
+The system event viewer allows you to listen to and view system events in real time, along with their event data. Seeing the event data is useful when you want to reference it in a [template](/docs/documentation/configuration/templating).
+
+The event viewer can be accessed by admins from the Settings > System Events page in the Viseron web interface.
+
+:::info
+
+The event data is normally in JSON format, but the event viewer will format it to YAML for easier readability.
+
+:::
diff --git a/docs/docs/documentation/configuration/templating.md b/docs/docs/documentation/configuration/templating.md
new file mode 100644
index 000000000..0cc96fec9
--- /dev/null
+++ b/docs/docs/documentation/configuration/templating.md
@@ -0,0 +1,97 @@
+# Templating
+
+Templating in Viseron is backed by [Jinja2](https://jinja.palletsprojects.com/), a powerful templating engine for Python.
+It allows you to create dynamic templates that can be used in the config. Currently, only the [webhook component](/components-explorer/components/webhook) leverages this functionality.
+
+## Templating in Viseron
+
+To know if a config option supports templating, check for the `Jinja2 template` tag in the component documentation.
+
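+For example, the webhook component's `payload` option carries this tag. A minimal config sketch, mirroring the examples further down (the event name, URL, and camera identifier are placeholders):
+
+```yaml
+webhook:
+  my_webhook:
+    trigger:
+      event: camera_one/motion_detected
+    url: http://example.com/webhook
+    payload: "Motion detected on {{ event.camera_identifier }}"
+```
+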
+  Jinja2 template tag screenshot
+
+The syntax for Jinja2 is described in [their documentation](https://jinja.palletsprojects.com/en/latest/templates/) and is not covered here.
+
+Viseron provides some additional context variables that can be used in templates:
+
+- `states`: A dictionary of all the current states of all Entities in Viseron.
+- `event`: The event data that triggered the component. This is only available for components that are triggered by events, such as the [webhook component](/components-explorer/components/webhook).
+
+## Template editor
+
+A template editor (heavily inspired by Home Assistant) lets you test your templates before using them in your configuration.
+You can access it from the Settings > Template Editor page in the web interface.
+
+  Template editor screenshot
+
+## Examples
+
+### Using the `event` context variable
+
+When using the `webhook` component, you can access the event data that triggered the webhook. For example, if you want to include the camera identifier in the payload, you can use:
+
+```yaml
+webhook:
+  my_webhook:
+    trigger:
+      event: camera_one/motion_detected
+    url: http://example.com/webhook
+    payload: >
+      {%- if event.motion_detected -%}
+      "Motion detected on {{ event.camera_identifier }}!"
+      {%- else -%}
+      "No motion detected on {{ event.camera_identifier }}."
+      {%- endif -%}
+```
+
+### Using the `states` context variable
+
+You can also use the `states` context variable to access the current state of all Entities. For example, if you want to include a camera's recording state in the payload, you can use:
+
+```yaml
+webhook:
+  my_webhook:
+    trigger:
+      event: camera_one/motion_detected
+    url: http://example.com/webhook
+    payload: "Recording state: {{ states.camera_one_recorder.state }}"
+```
+
+### Conditions
+
+Some components allow you to use template conditions to determine whether an action should be taken based on the template.
+The condition checks whether the template produces a value that evaluates to true.
+
+Values that evaluate to true include:
+
+- Boolean true
+- Non-zero numbers (e.g., 1, 2, 3)
+- The strings `true`, `yes`, `on`, `enable` (case-insensitive)
+
+Any other value results in a false evaluation.
+
+This example checks if the `motion_detected` attribute of the event is true before triggering the webhook:
+
+```yaml
+webhook:
+  my_webhook:
+    trigger:
+      event: camera_one/motion_detected
+    condition: >
+      {{ event.motion_detected }}
+    url: http://example.com/webhook
+    payload: "Motion detected on {{ event.camera_identifier }}"
+```
diff --git a/docs/docs/documentation/installation.mdx b/docs/docs/documentation/installation.mdx
index a71293cdf..b7e4ce731 100644
--- a/docs/docs/documentation/installation.mdx
+++ b/docs/docs/documentation/installation.mdx
@@ -55,6 +55,7 @@ docker run --rm \
  -v {snapshots path}:/snapshots \
  -v {thumbnails path}:/thumbnails \
  -v {event clips path}:/event_clips \
+  -v {timelapse path}:/timelapse \
  -v {config path}:/config \
  -v /etc/localtime:/etc/localtime:ro \
  -p 8888:8888 \
@@ -67,8 +68,6 @@ docker run --rm \

```yaml
-version: "2.4"
-
services:
  viseron:
    image: roflcoopter/viseron:latest
@@ -79,6 +78,7 @@ services:
      - {snapshots path}:/snapshots
      - {thumbnails path}:/thumbnails
      - {event clips path}:/event_clips
+      - {timelapse path}:/timelapse
      - {config path}:/config
      - /etc/localtime:/etc/localtime:ro
    ports:
@@ -101,6 +101,7 @@ docker run --rm \
  -v {snapshots path}:/snapshots \
  -v {thumbnails path}:/thumbnails \
  -v {event clips path}:/event_clips \
+  -v {timelapse path}:/timelapse \
  -v {config path}:/config \
  -v /etc/localtime:/etc/localtime:ro \
  -p 8888:8888 \
@@ -114,8 +115,6 @@ docker run --rm \

```yaml
-version: "2.4"
-
services:
  viseron:
    image: roflcoopter/viseron:latest
@@ -126,6 +125,7 @@ services:
      - {snapshots path}:/snapshots
      - {thumbnails path}:/thumbnails
      - {event clips path}:/event_clips
+      - {timelapse path}:/timelapse
      - {config path}:/config
      - /etc/localtime:/etc/localtime:ro
    ports:
@@ -150,6 +150,7 @@ docker run --rm \
  -v {snapshots path}:/snapshots \
  -v {thumbnails path}:/thumbnails \
  -v {event clips path}:/event_clips \
+  -v {timelapse path}:/timelapse \
  -v {config path}:/config \
  -v /etc/localtime:/etc/localtime:ro \
  -p 8888:8888 \
@@ -163,8 +164,6 @@ docker run --rm \

```yaml
-version: "2.4"
-
services:
  viseron:
    image: roflcoopter/amd64-cuda-viseron:latest
roflcoopter/amd64-cuda-viseron:latest @@ -175,6 +174,7 @@ services: - {snapshots path}:/snapshots - {thumbnails path}:/thumbnails - {event clips path}:/event_clips + - {timelapse path}:/timelapse - {config path}:/config - /etc/localtime:/etc/localtime:ro ports: @@ -205,6 +205,7 @@ docker run --rm \ -v {snapshots path}:/snapshots \ -v {thumbnails path}:/thumbnails \ -v {event clips path}:/event_clips \ + -v {timelapse path}:/timelapse \ -v {config path}:/config \ -v /etc/localtime:/etc/localtime:ro \ -p 8888:8888 \ @@ -226,8 +227,6 @@ You can probably get around this by manually mounting all the needed devices but ```yaml -version: "2.4" - services: viseron: image: roflcoopter/jetson-nano-viseron:latest @@ -238,6 +237,7 @@ services: - {snapshots path}:/snapshots - {thumbnails path}:/thumbnails - {event clips path}:/event_clips + - {timelapse path}:/timelapse - {config path}:/config - /etc/localtime:/etc/localtime:ro ports: @@ -270,6 +270,7 @@ docker run --rm \ -v {snapshots path}:/snapshots \ -v {thumbnails path}:/thumbnails \ -v {event clips path}:/event_clips \ + -v {timelapse path}:/timelapse \ -v {config path}:/config \ -v /etc/localtime:/etc/localtime:ro \ -v /dev/bus/usb:/dev/bus/usb \ @@ -288,7 +289,6 @@ docker run --rm \ ```yaml -version: "2.4" services: viseron: image: roflcoopter/viseron:latest @@ -299,6 +299,7 @@ services: - {snapshots path}:/snapshots - {thumbnails path}:/thumbnails - {event clips path}:/event_clips + - {timelapse path}:/timelapse - {config path}:/config - /etc/localtime:/etc/localtime:ro devices: @@ -342,6 +343,7 @@ docker run --rm \ -v {snapshots path}:/snapshots \ -v {thumbnails path}:/thumbnails \ -v {event clips path}:/event_clips \ + -v {timelapse path}:/timelapse \ -v {config path}:/config \ -v /etc/localtime:/etc/localtime:ro \ -v /opt/vc/lib:/opt/vc/lib \ @@ -358,7 +360,6 @@ docker run --rm \ ```yaml -version: "2.4" services: viseron: image: roflcoopter/viseron:latest @@ -369,6 +370,7 @@ services: - {snapshots path}:/snapshots - {thumbnails path}:/thumbnails - {event clips path}:/event_clips + - {timelapse path}:/timelapse - {config path}:/config - /etc/localtime:/etc/localtime:ro - /opt/vc/lib:/opt/vc/lib @@ -447,6 +449,7 @@ docker run --rm \ -v {snapshots path}:/snapshots \ -v {thumbnails path}:/thumbnails \ -v {event clips path}:/event_clips \ + -v {timelapse path}:/timelapse \ -v {config path}:/config \ -v /etc/localtime:/etc/localtime:ro \ -p 8888:8888 \ @@ -465,8 +468,6 @@ docker run --rm \ Example docker-compose ```yaml -version: "2.4" - services: viseron: image: roflcoopter/viseron:latest @@ -477,6 +478,7 @@ services: - {snapshots path}:/snapshots - {thumbnails path}:/thumbnails - {event clips path}:/event_clips + - {timelapse path}:/timelapse - {config path}:/config - /etc/localtime:/etc/localtime:ro ports: diff --git a/docs/package-lock.json b/docs/package-lock.json index 7b66b8d10..10628fc67 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -8248,9 +8248,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001684", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001684.tgz", - "integrity": "sha512-G1LRwLIQjBQoyq0ZJGqGIJUXzJ8irpbjHLpVRXDvBEScFJ9b17sgK6vlx0GAJFE21okD7zXl08rRRUfq6HdoEQ==", + "version": "1.0.30001735", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001735.tgz", + "integrity": "sha512-EV/laoX7Wq2J9TQlyIXRxTJqIw4sxfXS4OYgudGxBYRuTv0q7AM6yMEpU/Vo1I94thg9U6EZ2NfZx9GJq83u7w==", "funding": [ { "type": "opencollective", diff --git a/docs/sidebars.ts 
b/docs/sidebars.ts index 71b51ec19..8562a3632 100644 --- a/docs/sidebars.ts +++ b/docs/sidebars.ts @@ -110,6 +110,8 @@ const sidebars: SidebarsConfig = { { type: "doc", id: "documentation/configuration/authentication" }, { type: "doc", id: "documentation/configuration/logging" }, { type: "doc", id: "documentation/configuration/secrets" }, + { type: "doc", id: "documentation/configuration/templating" }, + { type: "doc", id: "documentation/configuration/system_events" }, ], }, ], diff --git a/docs/src/pages/components-explorer/_components/ComponentConfiguration/index.tsx b/docs/src/pages/components-explorer/_components/ComponentConfiguration/index.tsx index 51b53925c..73186c468 100644 --- a/docs/src/pages/components-explorer/_components/ComponentConfiguration/index.tsx +++ b/docs/src/pages/components-explorer/_components/ComponentConfiguration/index.tsx @@ -4,6 +4,7 @@ /* eslint-disable @typescript-eslint/no-explicit-any */ import React from "react"; +import Link from "@docusaurus/Link"; import CodeBlock from "@theme/CodeBlock"; import Tippy from "@tippyjs/react"; import clsx from "clsx"; @@ -35,20 +36,45 @@ function getValidValues(options) { // Return div that contains valid values for the config option function buildValidValues(item: any) { if (item.options) { + const options = item.options.slice(); + const hasFormat = options.some((x) => x.format !== undefined); + const hasValue = options.some((x) => x.value !== undefined); return ( -
- Valid values: -
    - {getValidValues(item.options).map((option, index) => ( -
  • - - {option.value === undefined ? `<${option.type}>` : option.value} - - {option.description ? `: ${option.description}` : null} -
  • - ))} -
-
+ <> + {hasFormat && ( +
+ Valid formats: +
    + {getValidValues(item.options).map( + (option, index) => + option.format && ( +
  • + {option.format} + {option.description ? `: ${option.description}` : null} +
  • + ), + )} +
+
+ )} + {hasValue && ( +
+ Valid values: +
    + {getValidValues(item.options).map((option, index) => ( +
  • + + {option.value === undefined + ? `<${option.type}>` + : option.value} + + {option.description ? `: ${option.description}` : null} +
  • + ))} +
+
+ )} + ); } return null; @@ -124,7 +150,7 @@ function getName(item: any) { return `<${item.name.type}>`; } -function getDefault(item: any) { +function getDefault(item: any, optional: boolean) { function getCodeBlock() { return ( @@ -153,7 +179,7 @@ function getDefault(item: any) { // Handle object defaults if ( - item.optional && + optional && item.default !== null && typeof item.default === "object" && !Array.isArray(item.default) && @@ -165,7 +191,7 @@ function getDefault(item: any) { // Handle array defaults if ( - item.optional && + optional && item.default !== null && // Only display default values for arrays if the length is greater than zero Array.isArray(item.default) && @@ -177,7 +203,7 @@ function getDefault(item: any) { // Handle other defaults if ( - item.optional && + optional && item.default !== null && !Array.isArray(item.default) && !(typeof item.default === "object") @@ -189,7 +215,7 @@ function getDefault(item: any) { ); } - if (item.optional) { + if (optional) { return ")"; } return null; @@ -206,7 +232,15 @@ function buildHeader(item: any) { {/* Zero width space to prevent selecting type when double clicking the name */} ​ - {item.format ? item.format : item.type} + {item.format ? ( + item.format + ) : item.type === "jinja2_template" ? ( + + Jinja2 template + + ) : ( + item.type + )} {optional ? " (" : null} @@ -221,7 +255,7 @@ function buildHeader(item: any) { ? " deprecated" : " required"} - {getDefault(item)} + {getDefault(item, optional)} ); diff --git a/docs/src/pages/components-explorer/_domains/object_detector/index.mdx b/docs/src/pages/components-explorer/_domains/object_detector/index.mdx index 7b6961612..0fd60ef73 100644 --- a/docs/src/pages/components-explorer/_domains/object_detector/index.mdx +++ b/docs/src/pages/components-explorer/_domains/object_detector/index.mdx @@ -22,7 +22,7 @@ Object detectors can be taxing on the system, so it is wise to combine it with a ### Zones \{#object-detector-zones} - + ### Mask \{#object-detector-mask} diff --git a/docs/src/pages/components-explorer/_domains/object_detector/zones.mdx b/docs/src/pages/components-explorer/_domains/object_detector/zones.mdx index a42931e2a..80ceae15d 100644 --- a/docs/src/pages/components-explorer/_domains/object_detector/zones.mdx +++ b/docs/src/pages/components-explorer/_domains/object_detector/zones.mdx @@ -15,27 +15,27 @@ actually interested in, excluding the sidewalk. ? props.meta.name : "" }: - object_detector: - cameras: - camera_one: - ... - // highlight-start - zones: - - name: sidewalk - coordinates: - - x: 522 - y: 11 - - x: 729 - y: 275 - - x: 333 - y: 603 - - x: 171 - y: 97 - labels: - - label: person - confidence: 0.8 - trigger_event_recording: true - // highlight-end`} + object_detector: + cameras: + camera_one: + ... 
+ // highlight-start + zones: + - name: sidewalk + coordinates: + - x: 522 + y: 11 + - x: 729 + y: 275 + - x: 333 + y: 603 + - x: 171 + y: 97 + labels: + - label: person + confidence: 0.8 + trigger_event_recording: true + // highlight-end`} diff --git a/docs/src/pages/components-explorer/components/ffmpeg/index.mdx b/docs/src/pages/components-explorer/components/ffmpeg/index.mdx index 803355298..dcb1c5239 100644 --- a/docs/src/pages/components-explorer/components/ffmpeg/index.mdx +++ b/docs/src/pages/components-explorer/components/ffmpeg/index.mdx @@ -211,10 +211,11 @@ docker run --rm \ -v {snapshots path}:/snapshots \ -v {thumbnails path}:/thumbnails \ -v {event clips path}:/event_clips \ + -v {timelapse path}:/timelapse \ -v {config path}:/config \ -v /etc/localtime:/etc/localtime:ro \ -p 8888:8888 \ - --tmpfs /tmp \ + --tmpfs /tmp/tier1 \ --name viseron \ --shm-size=1024mb \ roflcoopter/viseron:latest @@ -223,8 +224,6 @@ docker run --rm \ Example docker-compose ```yaml -version: "2.4" - services: viseron: image: roflcoopter/viseron:latest @@ -235,12 +234,13 @@ services: - {snapshots path}:/snapshots - {thumbnails path}:/thumbnails - {event clips path}:/event_clips + - {timelapse path}:/timelapse - {config path}:/config - /etc/localtime:/etc/localtime:ro ports: - 8888:8888 tmpfs: - - /tmp + - /tmp/tier1 ``` ```yaml title="/config/config.yaml" diff --git a/docs/src/pages/components-explorer/components/go2rtc/config.json b/docs/src/pages/components-explorer/components/go2rtc/config.json index 94a5774e9..3ff0df5e1 100644 --- a/docs/src/pages/components-explorer/components/go2rtc/config.json +++ b/docs/src/pages/components-explorer/components/go2rtc/config.json @@ -3,7 +3,7 @@ "type": "map", "value": [], "name": "go2rtc", - "description": "go2rtc configuration.", + "description": "go2rtc configuration. See the go2rtc documentation for more information on available options.", "required": true, "default": null } diff --git a/docs/src/pages/components-explorer/components/go2rtc/index.mdx b/docs/src/pages/components-explorer/components/go2rtc/index.mdx index c758d53f8..8d273235e 100644 --- a/docs/src/pages/components-explorer/components/go2rtc/index.mdx +++ b/docs/src/pages/components-explorer/components/go2rtc/index.mdx @@ -60,4 +60,36 @@ go2rtc: +## Restreaming + +Using go2rtc, you can restream your cameras to Viseron or other services, reducing the number of connections per camera. + +
+ Restreaming configuration example + +```yaml title="/config/config.yaml" +ffmpeg: + camera: + camera_one: + name: Camera 1 + host: localhost + port: 8554 + path: /camera_one + + camera_two: + name: Camera 2 + host: localhost + port: 8554 + path: /camera_two + +go2rtc: + streams: + camera_one: + - rtsp://user:pass@192.168.XX.X:554/Streaming/Channels/101/ + camera_two: + - rtsp://user:pass@192.168.XX.X:554/Streaming/Channels/101/ +``` + +
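+In this example, Viseron's two camera entries read from the local go2rtc RTSP server on port 8554; only go2rtc connects to the actual camera, which reduces the number of direct connections each camera has to serve.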
+ diff --git a/docs/src/pages/components-explorer/components/hailo/_meta.tsx b/docs/src/pages/components-explorer/components/hailo/_meta.tsx new file mode 100644 index 000000000..4d5aca5f0 --- /dev/null +++ b/docs/src/pages/components-explorer/components/hailo/_meta.tsx @@ -0,0 +1,12 @@ +import { Component } from "@site/src/types"; + +const ComponentMetadata: Component = { + title: "Hailo-8", + name: "hailo", + description: + "Hailo offers breakthrough AI processors uniquely designed to enable high performance deep learning applications on edge devices.", + image: "https://hailo.ai/wp-content/uploads/2023/08/Hailo.png", + tags: ["object_detector"], +}; + +export default ComponentMetadata; diff --git a/docs/src/pages/components-explorer/components/hailo/config.json b/docs/src/pages/components-explorer/components/hailo/config.json new file mode 100644 index 000000000..a2979095a --- /dev/null +++ b/docs/src/pages/components-explorer/components/hailo/config.json @@ -0,0 +1,389 @@ +[ + { + "type": "map", + "value": [ + { + "type": "map", + "value": [ + { + "type": "map", + "value": [ + { + "type": "map", + "value": [ + { + "type": "float", + "valueMin": 0.0, + "name": "fps", + "description": "The FPS at which the object detector runs.
Higher values will result in more scanning, which uses more resources.", + "optional": true, + "default": 1 + }, + { + "type": "boolean", + "name": "scan_on_motion_only", + "description": "When set to true and a motion_detector is configured, the object detector will only scan while motion is detected.", + "optional": true, + "default": true + }, + { + "type": "list", + "values": [ + [ + { + "type": "string", + "name": "label", + "description": "The label to track.", + "required": true, + "default": null + }, + { + "type": "float", + "valueMin": 0.0, + "valueMax": 1.0, + "name": "confidence", + "description": "Lowest confidence allowed for detected objects. The lower the value, the more sensitive the detector will be, and the risk of false positives will increase.", + "optional": true, + "default": 0.8 + }, + { + "type": "float", + "valueMin": 0.0, + "valueMax": 1.0, + "name": "height_min", + "description": "Minimum height allowed for detected objects, relative to stream height.", + "optional": true, + "default": 0 + }, + { + "type": "float", + "valueMin": 0.0, + "valueMax": 1.0, + "name": "height_max", + "description": "Maximum height allowed for detected objects, relative to stream height.", + "optional": true, + "default": 1 + }, + { + "type": "float", + "valueMin": 0.0, + "valueMax": 1.0, + "name": "width_min", + "description": "Minimum width allowed for detected objects, relative to stream width.", + "optional": true, + "default": 0 + }, + { + "type": "float", + "valueMin": 0.0, + "valueMax": 1.0, + "name": "width_max", + "description": "Maximum width allowed for detected objects, relative to stream width.", + "optional": true, + "default": 1 + }, + { + "type": "boolean", + "name": { + "type": "deprecated", + "name": "trigger_recorder", + "value": "Use trigger_event_recording instead." + }, + "description": "If set to true, objects matching this filter will start the recorder.", + "deprecated": true, + "default": null + }, + { + "type": "boolean", + "name": "trigger_event_recording", + "description": "If set to true, objects matching this filter will trigger an event recording.", + "optional": true, + "default": true + }, + { + "type": "boolean", + "name": "store", + "description": "If set to true, objects matching this filter will be stored in the database, as well as having a snapshot saved. Labels with trigger_event_recording set to true will always be stored when a recording starts, regardless of this setting.", + "optional": true, + "default": true + }, + { + "type": "integer", + "name": "store_interval", + "description": "The interval at which the label should be stored in the database, in seconds. If set to 0, the label will be stored every time it is detected.", + "optional": true, + "default": 60 + }, + { + "type": "boolean", + "name": "require_motion", + "description": "If set to true, the recorder will stop as soon as motion is no longer detected, even if the object still is. This is useful to avoid never ending recordings of stationary objects, such as a car on a driveway", + "optional": true, + "default": false + } + ] + ], + "name": "labels", + "description": "A list of labels (objects) to track.", + "optional": true, + "default": [] + }, + { + "type": "float", + "valueMin": 0.0, + "name": "max_frame_age", + "description": "Drop frames that are older than the given number. 
Specified in seconds.", + "optional": true, + "default": 2 + }, + { + "type": "boolean", + "name": "log_all_objects", + "description": "When set to true and loglevel is DEBUG, all found objects will be logged, including the ones not tracked by labels.", + "optional": true, + "default": false + }, + { + "type": "list", + "values": [ + [ + { + "type": "list", + "values": [ + [ + { + "type": "integer", + "name": "x", + "description": "X-coordinate (horizontal axis).", + "required": true, + "default": null + }, + { + "type": "integer", + "name": "y", + "description": "Y-coordinate (vertical axis).", + "required": true, + "default": null + } + ] + ], + "lengthMin": 3, + "name": "coordinates", + "description": "List of X and Y coordinates to form a polygon", + "required": true, + "default": null + } + ] + ], + "name": "mask", + "description": "A mask is used to exclude certain areas in the image from object detection. ", + "optional": true, + "default": [] + }, + { + "type": "list", + "values": [ + [ + { + "type": "string", + "name": "name", + "description": "Name of the zone. Has to be unique per camera.", + "required": true, + "default": null + }, + { + "type": "list", + "values": [ + [ + { + "type": "integer", + "name": "x", + "description": "X-coordinate (horizontal axis).", + "required": true, + "default": null + }, + { + "type": "integer", + "name": "y", + "description": "Y-coordinate (vertical axis).", + "required": true, + "default": null + } + ] + ], + "lengthMin": 3, + "name": "coordinates", + "description": "List of X and Y coordinates to form a polygon", + "required": true, + "default": null + }, + { + "type": "list", + "values": [ + [ + { + "type": "string", + "name": "label", + "description": "The label to track.", + "required": true, + "default": null + }, + { + "type": "float", + "valueMin": 0.0, + "valueMax": 1.0, + "name": "confidence", + "description": "Lowest confidence allowed for detected objects. The lower the value, the more sensitive the detector will be, and the risk of false positives will increase.", + "optional": true, + "default": 0.8 + }, + { + "type": "float", + "valueMin": 0.0, + "valueMax": 1.0, + "name": "height_min", + "description": "Minimum height allowed for detected objects, relative to stream height.", + "optional": true, + "default": 0 + }, + { + "type": "float", + "valueMin": 0.0, + "valueMax": 1.0, + "name": "height_max", + "description": "Maximum height allowed for detected objects, relative to stream height.", + "optional": true, + "default": 1 + }, + { + "type": "float", + "valueMin": 0.0, + "valueMax": 1.0, + "name": "width_min", + "description": "Minimum width allowed for detected objects, relative to stream width.", + "optional": true, + "default": 0 + }, + { + "type": "float", + "valueMin": 0.0, + "valueMax": 1.0, + "name": "width_max", + "description": "Maximum width allowed for detected objects, relative to stream width.", + "optional": true, + "default": 1 + }, + { + "type": "boolean", + "name": { + "type": "deprecated", + "name": "trigger_recorder", + "value": "Use trigger_event_recording instead." 
+ }, + "description": "If set to true, objects matching this filter will start the recorder.", + "deprecated": true, + "default": null + }, + { + "type": "boolean", + "name": "trigger_event_recording", + "description": "If set to true, objects matching this filter will trigger an event recording.", + "optional": true, + "default": true + }, + { + "type": "boolean", + "name": "store", + "description": "If set to true, objects matching this filter will be stored in the database, as well as having a snapshot saved. Labels with trigger_event_recording set to true will always be stored when a recording starts, regardless of this setting.", + "optional": true, + "default": true + }, + { + "type": "integer", + "name": "store_interval", + "description": "The interval at which the label should be stored in the database, in seconds. If set to 0, the label will be stored every time it is detected.", + "optional": true, + "default": 60 + }, + { + "type": "boolean", + "name": "require_motion", + "description": "If set to true, the recorder will stop as soon as motion is no longer detected, even if the object still is. This is useful to avoid never ending recordings of stationary objects, such as a car on a driveway", + "optional": true, + "default": false + } + ] + ], + "name": "labels", + "description": "A list of labels (objects) to track.", + "optional": true, + "default": [] + } + ] + ], + "name": "zones", + "description": "Zones are used to define areas in the cameras field of view where you want to look for certain objects (labels).", + "optional": true, + "default": [] + } + ], + "name": { + "type": "CAMERA_IDENTIFIER" + }, + "description": "Camera identifier. Valid characters are lowercase a-z, numbers and underscores.", + "cameraidentifier": true, + "default": null + } + ], + "name": "cameras", + "description": "Camera-specific configuration. All subordinate keys corresponds to the camera_identifier of a configured camera.", + "required": true, + "default": null + }, + { + "type": "select", + "options": [ + { + "type": "string" + }, + { + "type": "string", + "format": "file path" + }, + { + "type": "string", + "format": "url" + } + ], + "name": "model_path", + "description": "Path or URL to a Hailo-8 model in HEF format. If a URL is provided, the model will be downloaded on startup. If not provided, a default model from Hailo's model zoo will be used.
Downloaded models are cached and won't be re-downloaded.", + "optional": true, + "default": null + }, + { + "type": "string", + "name": "label_path", + "description": "Path to file containing trained labels. If not provided, the COCO labels file from the darknet component will be used.", + "optional": true, + "default": "/detectors/models/darknet/coco.names" + }, + { + "type": "integer", + "name": "max_detections", + "description": "Maximum number of detections to return.", + "optional": true, + "default": 50 + } + ], + "name": "object_detector", + "description": "Object detector domain config.", + "required": true, + "default": null + } + ], + "name": "hailo", + "description": "Hailo configuration.", + "required": true, + "default": null + } +] \ No newline at end of file diff --git a/docs/src/pages/components-explorer/components/hailo/index.mdx b/docs/src/pages/components-explorer/components/hailo/index.mdx new file mode 100644 index 000000000..c5bc3a852 --- /dev/null +++ b/docs/src/pages/components-explorer/components/hailo/index.mdx @@ -0,0 +1,131 @@ +import TabItem from "@theme/TabItem"; +import Tabs from "@theme/Tabs"; + +import ComponentConfiguration from "@site/src/pages/components-explorer/_components/ComponentConfiguration"; +import ComponentHeader from "@site/src/pages/components-explorer/_components/ComponentHeader"; +import ComponentTroubleshooting from "@site/src/pages/components-explorer/_components/ComponentTroubleshooting/index.mdx"; + +import ObjectDetector from "@site/src/pages/components-explorer/_domains/object_detector/index.mdx"; + +import ComponentMetadata from "./_meta"; +import config from "./config.json"; + + + +The `hailo` component in Viseron allows you to integrate [Hailo-8 AI Accelerators](https://hailo.ai/products/ai-accelerators/hailo-8-ai-accelerator/) for object detection. + +:::info + +The `hailo` component is only available for `aarch64` and `amd64` architectures. + +::: + +## Configuration + +
+ Configuration example + +```yaml title="/config/config.yaml" +hailo: + object_detector: + cameras: + camera_one: + fps: 1 + labels: + - label: person + confidence: 0.8 + - label: cat + confidence: 0.8 + camera_two: + fps: 1 + scan_on_motion_only: false + labels: + - label: dog + confidence: 0.8 + trigger_event_recording: false +``` + +
+ + + +## Installation + +For a Raspberry Pi 5 with the AI kit, follow the instructions in the [Raspberry Pi 5 documentation](https://www.raspberrypi.com/documentation/accessories/ai-kit.html#ai-kit-installation). + +If you have installed Ubuntu on your Raspberry Pi 5 instead of Raspberry Pi OS, you need to perform some additional steps. These steps are outlined well in [this guide](https://github.com/opensensor/rpi5-resources/?tab=readme-ov-file#hailo-8l-installation). + +For other platforms, please refer to the official [Hailo documentation](https://hailo.ai/developer-zone/documentation/hailort-v4-22-0/?sp_referrer=install/install.html#installation-on-ubuntu). + +:::note + +It is only required to install the `HailoRT PCIe driver` and `HailoRT`. `PyHailoRT` is not necessary since it is installed inside the container. + +::: + +:::warning + +Viseron is currently using the version 4.22.0 of the Hailo runtime, and you may encounter compatibility issues if you install a different version of the drivers on your host. + +::: + +## Mounting the device + +To allow the Viseron container to access the Hailo device, you need to mount it when starting the container. + + + + +```shell +docker run --rm \ + -v {segments path}:/segments \ + -v {snapshots path}:/snapshots \ + -v {thumbnails path}:/thumbnails \ + -v {event clips path}:/event_clips \ + -v {timelapse path}:/timelapse \ + -v {config path}:/config \ + -v /etc/localtime:/etc/localtime:ro \ + -p 8888:8888 \ + --name viseron \ + --shm-size=1024mb \ +// highlight-start + --device /dev/hailo0 \ +// highlight-end + roflcoopter/viseron:latest +``` + + + + +```yaml +services: + viseron: + image: roflcoopter/viseron:latest + container_name: viseron + shm_size: "1024mb" + volumes: + - {segments path}:/segments + - {snapshots path}:/snapshots + - {thumbnails path}:/thumbnails + - {event clips path}:/event_clips + - {timelapse path}:/timelapse + - {config path}:/config + - /etc/localtime:/etc/localtime:ro + ports: + - 8888:8888 + // highlight-start + devices: + - /dev/hailo0 + // highlight-end + +``` + + + + + + + diff --git a/docs/src/pages/components-explorer/components/logger/index.mdx b/docs/src/pages/components-explorer/components/logger/index.mdx index da249a9b8..f22df6ab6 100644 --- a/docs/src/pages/components-explorer/components/logger/index.mdx +++ b/docs/src/pages/components-explorer/components/logger/index.mdx @@ -68,6 +68,7 @@ docker run --rm \ -v {snapshots path}:/snapshots \ -v {thumbnails path}:/thumbnails \ -v {event clips path}:/event_clips \ + -v {timelapse path}:/timelapse \ -v {config path}:/config \ -v /etc/localtime:/etc/localtime:ro \ -p 8888:8888 \ @@ -84,8 +85,6 @@ docker run --rm \ ```yaml -version: "2.4" - services: viseron: image: roflcoopter/viseron:latest @@ -96,6 +95,7 @@ services: - {snapshots path}:/snapshots - {thumbnails path}:/thumbnails - {event clips path}:/event_clips + - {timelapse path}:/timelapse - {config path}:/config - /etc/localtime:/etc/localtime:ro ports: diff --git a/docs/src/pages/components-explorer/components/storage/config.json b/docs/src/pages/components-explorer/components/storage/config.json index 9095965d8..43c509d09 100644 --- a/docs/src/pages/components-explorer/components/storage/config.json +++ b/docs/src/pages/components-explorer/components/storage/config.json @@ -11,6 +11,7 @@ }, { "type": "integer", + "valueMin": 1, "name": "tier_check_workers", "description": "The number of worker threads to use for checking tiers. 
This can be used to speed up the tier check process by using multiple threads to check for files to move or delete.", "optional": true, @@ -1315,6 +1316,234 @@ "description": "Snapshots are images taken when events are triggered or post processors finds anything. Snapshots will be taken for object detection, motion detection, and any post processor that scans the image, for example face and license plate recognition.", "optional": true, "default": {} + }, + { + "type": "map", + "value": [ + { + "type": "list", + "values": [ + [ + { + "type": "map", + "value": [ + { + "type": "float", + "name": "gb", + "description": "Min size in GB. Added together with min_mb.", + "optional": true, + "default": null + }, + { + "type": "float", + "name": "mb", + "description": "Min size in MB. Added together with min_gb.", + "optional": true, + "default": null + } + ], + "name": "min_size", + "description": "Minimum size of files to keep in this tier.", + "optional": true, + "default": {} + }, + { + "type": "map", + "value": [ + { + "type": "float", + "name": "gb", + "description": "Max size in GB. Added together with max_mb.", + "optional": true, + "default": null + }, + { + "type": "float", + "name": "mb", + "description": "Max size in MB. Added together with max_gb.", + "optional": true, + "default": null + } + ], + "name": "max_size", + "description": "Maximum size of files to keep in this tier.", + "optional": true, + "default": {} + }, + { + "type": "map", + "value": [ + { + "type": "integer", + "name": "days", + "description": "Max age in days.", + "optional": true, + "default": null + }, + { + "type": "integer", + "name": "hours", + "description": "Max age in hours.", + "optional": true, + "default": null + }, + { + "type": "integer", + "name": "minutes", + "description": "Max age in minutes.", + "optional": true, + "default": null + } + ], + "name": "max_age", + "description": "Maximum age of files to keep in this tier.", + "optional": true, + "default": {} + }, + { + "type": "map", + "value": [ + { + "type": "integer", + "name": "days", + "description": "Min age in days.", + "optional": true, + "default": null + }, + { + "type": "integer", + "name": "hours", + "description": "Min age in hours.", + "optional": true, + "default": null + }, + { + "type": "integer", + "name": "minutes", + "description": "Min age in minutes.", + "optional": true, + "default": null + } + ], + "name": "min_age", + "description": "Minimum age of files to keep in this tier.", + "optional": true, + "default": {} + }, + { + "type": "string", + "name": "path", + "description": "Path to store files in. Cannot be /tmp or /tmp/viseron.", + "required": true, + "default": null + }, + { + "type": "boolean", + "name": "poll", + "description": "Poll the file system for new files. Much slower than non-polling but required for some file systems like NTFS mounts.", + "optional": true, + "default": false + }, + { + "type": "boolean", + "name": "move_on_shutdown", + "description": "Move/delete files to the next tier when Viseron shuts down. 
Useful to not lose files when shutting down Viseron if using a RAM disk.",
+                "optional": true,
+                "default": false
+              },
+              {
+                "type": "map",
+                "value": [
+                  {
+                    "type": "integer",
+                    "valueMin": 0,
+                    "name": "days",
+                    "description": "Days between checks for files to move/delete.",
+                    "optional": true,
+                    "default": 0
+                  },
+                  {
+                    "type": "integer",
+                    "valueMin": 0,
+                    "name": "hours",
+                    "description": "Hours between checks for files to move/delete.",
+                    "optional": true,
+                    "default": 0
+                  },
+                  {
+                    "type": "integer",
+                    "valueMin": 0,
+                    "name": "minutes",
+                    "description": "Minutes between checks for files to move/delete.",
+                    "optional": true,
+                    "default": 0
+                  },
+                  {
+                    "type": "integer",
+                    "valueMin": 0,
+                    "name": "seconds",
+                    "description": "Seconds between checks for files to move/delete.",
+                    "optional": true,
+                    "default": 0
+                  }
+                ],
+                "name": "check_interval",
+                "description": "How often to check for files to move to the next tier.",
+                "optional": true,
+                "default": {
+                  "minutes": 1
+                }
+              },
+              {
+                "type": "map",
+                "value": [
+                  {
+                    "type": "integer",
+                    "name": "days",
+                    "description": "Interval in days.",
+                    "optional": true,
+                    "default": null
+                  },
+                  {
+                    "type": "integer",
+                    "name": "hours",
+                    "description": "Interval in hours.",
+                    "optional": true,
+                    "default": null
+                  },
+                  {
+                    "type": "integer",
+                    "name": "minutes",
+                    "description": "Interval in minutes.",
+                    "optional": true,
+                    "default": null
+                  },
+                  {
+                    "type": "integer",
+                    "name": "seconds",
+                    "description": "Interval in seconds.",
+                    "optional": true,
+                    "default": null
+                  }
+                ],
+                "name": "interval",
+                "description": "Time interval between timelapse frame extractions.",
+                "optional": true,
+                "default": {}
+              }
+            ]
+          ],
+          "lengthMin": 1,
+          "name": "tiers",
+          "description": "Tiers for timelapse videos. Tiers are used to move files between different storage locations. When a file reaches the max age or max size of a tier, it will be moved to the next tier. If the file is already in the last tier, it will be deleted. ",
+          "required": true,
+          "default": null
+        }
+      ],
+      "name": "timelapse",
+      "description": "Configuration for timelapse videos. 
Timelapse videos are created by combining images or video segments over time to show changes in a compressed time format.", + "optional": true, + "default": null } ], "name": "storage", diff --git a/docs/src/pages/components-explorer/components/webhook/_meta.tsx b/docs/src/pages/components-explorer/components/webhook/_meta.tsx new file mode 100644 index 000000000..2bec9c4e8 --- /dev/null +++ b/docs/src/pages/components-explorer/components/webhook/_meta.tsx @@ -0,0 +1,12 @@ +import { Component } from "@site/src/types"; + +const ComponentMetadata: Component = { + title: "Webhook", + name: "webhook", + description: "A component to send webhooks on specific events.", + image: + "https://github.com/logo/webhooks/raw/refs/heads/master/images/logo.svg", + tags: ["notification"], +}; + +export default ComponentMetadata; diff --git a/docs/src/pages/components-explorer/components/webhook/config.json b/docs/src/pages/components-explorer/components/webhook/config.json new file mode 100644 index 000000000..41d8c2368 --- /dev/null +++ b/docs/src/pages/components-explorer/components/webhook/config.json @@ -0,0 +1,144 @@ +[ + { + "type": "map", + "value": [ + { + "type": "map", + "value": [ + { + "type": "map", + "value": [ + { + "type": "string", + "name": "event", + "description": "The event type that triggers the webhook.", + "required": true, + "default": null + }, + { + "type": "jinja2_template", + "value": "jinja2_template", + "name": "condition", + "description": "Template condition to check before sending the webhook. If set, the webhook will only be sent if the template evaluates to a truthy value (True, true, 1, yes, on).", + "optional": true, + "default": null + } + ], + "name": "trigger", + "description": "The trigger configuration for the webhook.", + "required": true, + "default": null + }, + { + "type": "jinja2_template", + "value": "jinja2_template", + "name": "url", + "description": "The URL to send the webhook request to.", + "required": true, + "default": null + }, + { + "lower": true, + "type": "select", + "options": [ + { + "type": "constant", + "value": "get" + }, + { + "type": "constant", + "value": "patch" + }, + { + "type": "constant", + "value": "post" + }, + { + "type": "constant", + "value": "put" + }, + { + "type": "constant", + "value": "delete" + } + ], + "name": "method", + "description": "The HTTP method to use for the webhook request.", + "optional": true, + "default": "get" + }, + { + "type": "map", + "value": [ + { + "type": "jinja2_template", + "value": "jinja2_template", + "name": { + "type": "string" + }, + "description": "Header key for the webhook request." 
+ } + ], + "name": "headers", + "description": "Headers to include in the webhook request.", + "optional": true, + "default": null + }, + { + "type": "jinja2_template", + "value": "jinja2_template", + "name": "username", + "description": "Username for basic authentication.", + "inclusive": true, + "default": null + }, + { + "type": "jinja2_template", + "value": "jinja2_template", + "name": "password", + "description": "Password for basic authentication.", + "inclusive": true, + "default": null + }, + { + "type": "jinja2_template", + "value": "jinja2_template", + "name": "payload", + "description": "Payload to send with the webhook request.", + "optional": true, + "default": null + }, + { + "type": "integer", + "name": "timeout", + "description": "The timeout for the webhook request in seconds.", + "optional": true, + "default": 10 + }, + { + "type": "string", + "name": "content_type", + "description": "The content type of the webhook request.", + "optional": true, + "default": "application/json" + }, + { + "type": "boolean", + "name": "verify_ssl", + "description": "Whether to verify SSL certificates for the webhook request.", + "optional": true, + "default": true + } + ], + "name": { + "type": "string" + }, + "description": "Hook configuration." + } + ], + "name": "webhook", + "description": "Webhook component configuration.", + "required": true, + "default": null + } +] \ No newline at end of file diff --git a/docs/src/pages/components-explorer/components/webhook/index.mdx b/docs/src/pages/components-explorer/components/webhook/index.mdx new file mode 100644 index 000000000..4aedec670 --- /dev/null +++ b/docs/src/pages/components-explorer/components/webhook/index.mdx @@ -0,0 +1,40 @@ +import ComponentConfiguration from "@site/src/pages/components-explorer/_components/ComponentConfiguration"; +import ComponentHeader from "@site/src/pages/components-explorer/_components/ComponentHeader"; +import ComponentTroubleshooting from "@site/src/pages/components-explorer/_components/ComponentTroubleshooting/index.mdx"; + +import ComponentMetadata from "./_meta"; +import config from "./config.json"; + + + +This component allows you to send HTTP requests to a specified URL when an event is dispatched on the server. +It can be used to integrate with external services in a flexible way. + +## Configuration + +
+  Configuration example
+
+```yaml title="/config/config.yaml"
+webhook:
+  cool_hook:
+    trigger:
+      event: camera_one/motion_detected # The event to listen for
+      # Optional condition to filter events, e.g., only trigger if motion is detected
+      condition: >
+        {{ event.motion_detected }}
+    url: http://example.com/webhook
+    payload: "Motion detected on {{ event.camera_identifier }}!"
+```
+
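+With this configuration, the webhook is only sent when the `condition` template renders a truthy value, i.e. when `event.motion_detected` is true.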
+ + + +## Templates + +The component supports Jinja2 templates for dynamic content in many of its configuration options, such as `url`, `payload`, and `condition`. + +See the [templating documentation](/docs/documentation/configuration/templating) for more details on how to use templates. + + diff --git a/docs/static/img/screenshots/Viseron-Docs-jinja-template.png b/docs/static/img/screenshots/Viseron-Docs-jinja-template.png new file mode 100644 index 000000000..889283cf9 Binary files /dev/null and b/docs/static/img/screenshots/Viseron-Docs-jinja-template.png differ diff --git a/docs/static/img/screenshots/Viseron-Settings-system-event-viewer.png b/docs/static/img/screenshots/Viseron-Settings-system-event-viewer.png new file mode 100644 index 000000000..a174e1237 Binary files /dev/null and b/docs/static/img/screenshots/Viseron-Settings-system-event-viewer.png differ diff --git a/docs/static/img/screenshots/Viseron-Settings-template-editor.png b/docs/static/img/screenshots/Viseron-Settings-template-editor.png new file mode 100644 index 000000000..752f56ffe Binary files /dev/null and b/docs/static/img/screenshots/Viseron-Settings-template-editor.png differ diff --git a/frontend/package-lock.json b/frontend/package-lock.json index 681ec791e..96bac9669 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -8,6 +8,13 @@ "name": "frontend", "version": "0.1.0", "dependencies": { + "@codemirror/autocomplete": "6.18.6", + "@codemirror/commands": "6.8.1", + "@codemirror/language": "6.11.2", + "@codemirror/legacy-modes": "6.5.1", + "@codemirror/search": "6.5.11", + "@codemirror/state": "6.5.2", + "@codemirror/view": "6.38.1", "@emotion/react": "^11.13.3", "@emotion/styled": "^11.13.0", "@jy95/material-ui-image": "^4.0.9", @@ -20,9 +27,11 @@ "@tanstack/react-query": "^5.55.4", "@tanstack/react-query-devtools": "^5.55.4", "@tanstack/react-virtual": "^3.10.7", + "@types/js-yaml": "^4.0.9", "@types/node": "^22.5.4", "@types/react": "^18.3.5", "@types/react-dom": "^18.3.0", + "@uiw/react-codemirror": "^4.25.1", "ajv": "^8.17.1", "axios": "^1.7.7", "buffer": "^6.0.3", @@ -31,6 +40,7 @@ "hls.js": "^1.6.5", "http-proxy-middleware": "^3.0.2", "js-cookie": "^3.0.5", + "js-yaml": "^4.1.0", "material-ui-popup-state": "^5.3.3", "monaco-editor": "^0.51.0", "monaco-yaml": "^5.2.2", @@ -762,6 +772,108 @@ "tough-cookie": "^4.1.4" } }, + "node_modules/@codemirror/autocomplete": { + "version": "6.18.6", + "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.18.6.tgz", + "integrity": "sha512-PHHBXFomUs5DF+9tCOM/UoW6XQ4R44lLNNhRaW9PKPTU0D7lIjRg3ElxaJnTwsl/oHiR93WSXDBrekhoUGCPtg==", + "license": "MIT", + "dependencies": { + "@codemirror/language": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.17.0", + "@lezer/common": "^1.0.0" + } + }, + "node_modules/@codemirror/commands": { + "version": "6.8.1", + "resolved": "https://registry.npmjs.org/@codemirror/commands/-/commands-6.8.1.tgz", + "integrity": "sha512-KlGVYufHMQzxbdQONiLyGQDUW0itrLZwq3CcY7xpv9ZLRHqzkBSoteocBHtMCoY7/Ci4xhzSrToIeLg7FxHuaw==", + "license": "MIT", + "dependencies": { + "@codemirror/language": "^6.0.0", + "@codemirror/state": "^6.4.0", + "@codemirror/view": "^6.27.0", + "@lezer/common": "^1.1.0" + } + }, + "node_modules/@codemirror/language": { + "version": "6.11.2", + "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.11.2.tgz", + "integrity": "sha512-p44TsNArL4IVXDTbapUmEkAlvWs2CFQbcfc0ymDsis1kH2wh0gcY96AS29c/vp2d0y2Tquk1EDSaawpzilUiAw==", + "license": "MIT", 
+ "dependencies": { + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.23.0", + "@lezer/common": "^1.1.0", + "@lezer/highlight": "^1.0.0", + "@lezer/lr": "^1.0.0", + "style-mod": "^4.0.0" + } + }, + "node_modules/@codemirror/legacy-modes": { + "version": "6.5.1", + "resolved": "https://registry.npmjs.org/@codemirror/legacy-modes/-/legacy-modes-6.5.1.tgz", + "integrity": "sha512-DJYQQ00N1/KdESpZV7jg9hafof/iBNp9h7TYo1SLMk86TWl9uDsVdho2dzd81K+v4retmK6mdC7WpuOQDytQqw==", + "license": "MIT", + "dependencies": { + "@codemirror/language": "^6.0.0" + } + }, + "node_modules/@codemirror/lint": { + "version": "6.8.5", + "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.8.5.tgz", + "integrity": "sha512-s3n3KisH7dx3vsoeGMxsbRAgKe4O1vbrnKBClm99PU0fWxmxsx5rR2PfqQgIt+2MMJBHbiJ5rfIdLYfB9NNvsA==", + "license": "MIT", + "dependencies": { + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.35.0", + "crelt": "^1.0.5" + } + }, + "node_modules/@codemirror/search": { + "version": "6.5.11", + "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-6.5.11.tgz", + "integrity": "sha512-KmWepDE6jUdL6n8cAAqIpRmLPBZ5ZKnicE8oGU/s3QrAVID+0VhLFrzUucVKHG5035/BSykhExDL/Xm7dHthiA==", + "license": "MIT", + "dependencies": { + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.0.0", + "crelt": "^1.0.5" + } + }, + "node_modules/@codemirror/state": { + "version": "6.5.2", + "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.5.2.tgz", + "integrity": "sha512-FVqsPqtPWKVVL3dPSxy8wEF/ymIEuVzF1PK3VbUgrxXpJUSHQWWZz4JMToquRxnkw+36LTamCZG2iua2Ptq0fA==", + "license": "MIT", + "dependencies": { + "@marijn/find-cluster-break": "^1.0.0" + } + }, + "node_modules/@codemirror/theme-one-dark": { + "version": "6.1.3", + "resolved": "https://registry.npmjs.org/@codemirror/theme-one-dark/-/theme-one-dark-6.1.3.tgz", + "integrity": "sha512-NzBdIvEJmx6fjeremiGp3t/okrLPYT0d9orIc7AFun8oZcRk58aejkqhv6spnz4MLAevrKNPMQYXEWMg4s+sKA==", + "license": "MIT", + "dependencies": { + "@codemirror/language": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.0.0", + "@lezer/highlight": "^1.0.0" + } + }, + "node_modules/@codemirror/view": { + "version": "6.38.1", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.38.1.tgz", + "integrity": "sha512-RmTOkE7hRU3OVREqFVITWHz6ocgBjv08GoePscAakgVQfciA3SGCEk7mb9IzwW61cKKmlTpHXG6DUE5Ubx+MGQ==", + "license": "MIT", + "dependencies": { + "@codemirror/state": "^6.5.0", + "crelt": "^1.0.6", + "style-mod": "^4.1.0", + "w3c-keyname": "^2.2.4" + } + }, "node_modules/@emotion/babel-plugin": { "version": "11.12.0", "resolved": "https://registry.npmjs.org/@emotion/babel-plugin/-/babel-plugin-11.12.0.tgz", @@ -1893,6 +2005,36 @@ "react-dom": "^17.0.0 || ^18.0.0" } }, + "node_modules/@lezer/common": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.2.3.tgz", + "integrity": "sha512-w7ojc8ejBqr2REPsWxJjrMFsA/ysDCFICn8zEOR9mrqzOu2amhITYuLD8ag6XZf0CFXDrhKqw7+tW8cX66NaDA==", + "license": "MIT" + }, + "node_modules/@lezer/highlight": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.2.1.tgz", + "integrity": "sha512-Z5duk4RN/3zuVO7Jq0pGLJ3qynpxUVsh7IbUbGj88+uV2ApSAn6kWg2au3iJb+0Zi7kKtqffIESgNcRXWZWmSA==", + "license": "MIT", + "dependencies": { + "@lezer/common": "^1.0.0" + } + }, + "node_modules/@lezer/lr": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.4.2.tgz", + "integrity": 
"sha512-pu0K1jCIdnQ12aWNaAVU5bzi7Bd1w54J3ECgANPmYLtQKP0HBj2cE/5coBD66MT10xbtIuUr7tg0Shbsvk0mDA==", + "license": "MIT", + "dependencies": { + "@lezer/common": "^1.0.0" + } + }, + "node_modules/@marijn/find-cluster-break": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@marijn/find-cluster-break/-/find-cluster-break-1.0.2.tgz", + "integrity": "sha512-l0h88YhZFyKdXIFNfSWpyjStDjGHwZ/U7iobcK1cQQD8sejsONdQtTVU+1wVN1PBw40PiiHB1vA5S7VTfQiP9g==", + "license": "MIT" + }, "node_modules/@monaco-editor/loader": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/@monaco-editor/loader/-/loader-1.4.0.tgz", @@ -3312,6 +3454,12 @@ "integrity": "sha512-wkw9yd1kEXOPnvEeEV1Go1MmxtBJL0RR79aOTAApecWFVu7w0NNXNqhcWgvw2YgZDYadliXkl14pa3WXw5jlCQ==", "dev": true }, + "node_modules/@types/js-yaml": { + "version": "4.0.9", + "resolved": "https://registry.npmjs.org/@types/js-yaml/-/js-yaml-4.0.9.tgz", + "integrity": "sha512-k4MGaQl5TGo/iipqb2UDG2UwjXziSWkh0uysQelTlJpX1qGlpUZYm8PnO4DxG1qBomtJUdYJ6qR6xdIah10JLg==", + "license": "MIT" + }, "node_modules/@types/json-schema": { "version": "7.0.15", "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", @@ -3671,6 +3819,59 @@ "url": "https://opencollective.com/typescript-eslint" } }, + "node_modules/@uiw/codemirror-extensions-basic-setup": { + "version": "4.25.1", + "resolved": "https://registry.npmjs.org/@uiw/codemirror-extensions-basic-setup/-/codemirror-extensions-basic-setup-4.25.1.tgz", + "integrity": "sha512-zxgA2QkvP3ZDKxTBc9UltNFTrSeFezGXcZtZj6qcsBxiMzowoEMP5mVwXcKjpzldpZVRuY+JCC+RsekEgid4vg==", + "license": "MIT", + "dependencies": { + "@codemirror/autocomplete": "^6.0.0", + "@codemirror/commands": "^6.0.0", + "@codemirror/language": "^6.0.0", + "@codemirror/lint": "^6.0.0", + "@codemirror/search": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.0.0" + }, + "funding": { + "url": "https://jaywcjlove.github.io/#/sponsor" + }, + "peerDependencies": { + "@codemirror/autocomplete": ">=6.0.0", + "@codemirror/commands": ">=6.0.0", + "@codemirror/language": ">=6.0.0", + "@codemirror/lint": ">=6.0.0", + "@codemirror/search": ">=6.0.0", + "@codemirror/state": ">=6.0.0", + "@codemirror/view": ">=6.0.0" + } + }, + "node_modules/@uiw/react-codemirror": { + "version": "4.25.1", + "resolved": "https://registry.npmjs.org/@uiw/react-codemirror/-/react-codemirror-4.25.1.tgz", + "integrity": "sha512-eESBKHndoYkaEGlKCwRO4KrnTw1HkWBxVpEeqntoWTpoFEUYxdLWUYmkPBVk4/u8YzVy9g91nFfIRpqe5LjApg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.6", + "@codemirror/commands": "^6.1.0", + "@codemirror/state": "^6.1.1", + "@codemirror/theme-one-dark": "^6.0.0", + "@uiw/codemirror-extensions-basic-setup": "4.25.1", + "codemirror": "^6.0.0" + }, + "funding": { + "url": "https://jaywcjlove.github.io/#/sponsor" + }, + "peerDependencies": { + "@babel/runtime": ">=7.11.0", + "@codemirror/state": ">=6.0.0", + "@codemirror/theme-one-dark": ">=6.0.0", + "@codemirror/view": ">=6.0.0", + "codemirror": ">=6.0.0", + "react": ">=17.0.0", + "react-dom": ">=17.0.0" + } + }, "node_modules/@ungap/structured-clone": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", @@ -4050,8 +4251,7 @@ "node_modules/argparse": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true + "integrity": 
"sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" }, "node_modules/aria-query": { "version": "5.3.0", @@ -4647,6 +4847,21 @@ "node": ">=6" } }, + "node_modules/codemirror": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/codemirror/-/codemirror-6.0.2.tgz", + "integrity": "sha512-VhydHotNW5w1UGK0Qj96BwSk/Zqbp9WbnyK2W/eVMv4QyF41INRGpjUhFJY7/uDNuudSc33a/PKr4iDqRduvHw==", + "license": "MIT", + "dependencies": { + "@codemirror/autocomplete": "^6.0.0", + "@codemirror/commands": "^6.0.0", + "@codemirror/language": "^6.0.0", + "@codemirror/lint": "^6.0.0", + "@codemirror/search": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.0.0" + } + }, "node_modules/color-convert": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", @@ -4737,6 +4952,12 @@ "node": ">=10" } }, + "node_modules/crelt": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/crelt/-/crelt-1.0.6.tgz", + "integrity": "sha512-VQ2MBenTq1fWZUH9DJNGti7kKv6EeAuYr3cLwxUWhIu1baTaXh4Ib5W2CqHVqib4/MqbYGJqiL3Zb8GJZr3l4g==", + "license": "MIT" + }, "node_modules/cross-spawn": { "version": "7.0.3", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", @@ -7497,7 +7718,7 @@ "version": "4.1.0", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dev": true, + "license": "MIT", "dependencies": { "argparse": "^2.0.1" }, @@ -10036,6 +10257,12 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/style-mod": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/style-mod/-/style-mod-4.1.2.tgz", + "integrity": "sha512-wnD1HyVqpJUI2+eKZ+eo1UwghftP6yuFheBqqe+bWCotBjC2K1YnteJILRMs3SM4V/0dLEW1SC27MWP5y+mwmw==", + "license": "MIT" + }, "node_modules/stylis": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.2.0.tgz", @@ -10876,6 +11103,12 @@ "resolved": "https://registry.npmjs.org/vscode-uri/-/vscode-uri-3.0.8.tgz", "integrity": "sha512-AyFQ0EVmsOZOlAnxoFOGOq1SQDWAB7C6aqMGS23svWAllfOaxbuFvcT8D1i8z3Gyn8fraVeZNNmN6e9bxxXkKw==" }, + "node_modules/w3c-keyname": { + "version": "2.2.8", + "resolved": "https://registry.npmjs.org/w3c-keyname/-/w3c-keyname-2.2.8.tgz", + "integrity": "sha512-dpojBhNsCNN7T82Tm7k26A6G9ML3NkhDsnw9n/eoxSRlVBB4CEtIQ/KTCLI2Fwf3ataSXRhYFkQi3SlnFwPvPQ==", + "license": "MIT" + }, "node_modules/w3c-xmlserializer": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/w3c-xmlserializer/-/w3c-xmlserializer-5.0.0.tgz", diff --git a/frontend/package.json b/frontend/package.json index 4031184ad..5e334ab25 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -8,6 +8,13 @@ "url": "https://github.com/roflcoopter/viseron.git" }, "dependencies": { + "@codemirror/autocomplete": "6.18.6", + "@codemirror/commands": "6.8.1", + "@codemirror/language": "6.11.2", + "@codemirror/legacy-modes": "6.5.1", + "@codemirror/search": "6.5.11", + "@codemirror/state": "6.5.2", + "@codemirror/view": "6.38.1", "@emotion/react": "^11.13.3", "@emotion/styled": "^11.13.0", "@jy95/material-ui-image": "^4.0.9", @@ -20,9 +27,11 @@ "@tanstack/react-query": "^5.55.4", "@tanstack/react-query-devtools": "^5.55.4", "@tanstack/react-virtual": "^3.10.7", + "@types/js-yaml": "^4.0.9", "@types/node": "^22.5.4", "@types/react": "^18.3.5", "@types/react-dom": "^18.3.0", + "@uiw/react-codemirror": "^4.25.1", 
"ajv": "^8.17.1", "axios": "^1.7.7", "buffer": "^6.0.3", @@ -31,6 +40,7 @@ "hls.js": "^1.6.5", "http-proxy-middleware": "^3.0.2", "js-cookie": "^3.0.5", + "js-yaml": "^4.1.0", "material-ui-popup-state": "^5.3.3", "monaco-editor": "^0.51.0", "monaco-yaml": "^5.2.2", diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx index d9be2703d..71f86b7cf 100644 --- a/frontend/src/App.tsx +++ b/frontend/src/App.tsx @@ -3,9 +3,6 @@ import { lazy } from "react"; import { Navigate, useRoutes } from "react-router-dom"; import "react-toastify/dist/ReactToastify.css"; -import Settings from "pages/settings"; -import Users from "pages/settings/Users"; - const Cameras = lazy(() => import("pages/Cameras")); const CameraRecordings = lazy( () => import("pages/recordings/CameraRecordings"), @@ -22,6 +19,10 @@ const NotFound = lazy(() => import("pages/NotFound")); const Onboarding = lazy(() => import("pages/Onboarding")); const PublicLayout = lazy(() => import("layouts/PublicLayout")); const Recordings = lazy(() => import("pages/recordings/Recordings")); +const Settings = lazy(() => import("pages/settings")); +const SystemEvents = lazy(() => import("pages/settings/SystemEvents")); +const Users = lazy(() => import("pages/settings/Users")); +const TemplateEditor = lazy(() => import("pages/settings/TemplateEditor")); function App() { const routes = useRoutes([ @@ -79,6 +80,14 @@ function App() { path: "/settings/users", element: , }, + { + path: "/settings/system-events", + element: , + }, + { + path: "/settings/template-editor", + element: , + }, ], }, ], diff --git a/frontend/src/components/events/DatePickerDialog.tsx b/frontend/src/components/events/DatePickerDialog.tsx index b68a03a33..60d9b5ed8 100644 --- a/frontend/src/components/events/DatePickerDialog.tsx +++ b/frontend/src/components/events/DatePickerDialog.tsx @@ -7,12 +7,9 @@ import { PickerChangeHandlerContext, } from "@mui/x-date-pickers/models"; import dayjs, { Dayjs } from "dayjs"; -import { useMemo } from "react"; import { useFilteredCameras } from "components/camera/useCameraStore"; -import { useTimespans } from "components/events/utils"; -import { useEventsAmountMultiple } from "lib/api/events"; -import * as types from "lib/types"; +import { useEventsDatesOfInterest } from "lib/api/events"; function HasEvent( props: PickersDayProps & { highlightedDays?: Record }, @@ -61,50 +58,6 @@ function HasEvent( ); } -type HighlightedDays = { - [date: string]: { - events: number; - timespanAvailable: boolean; - }; -}; - -export function getHighlightedDays( - eventsAmount: types.EventsAmount["events_amount"], - availableTimespans: types.HlsAvailableTimespan[], -) { - const result: HighlightedDays = {}; - for (const timespan of availableTimespans) { - // Loop through all dates between start and end - const start = dayjs(timespan.start * 1000); - const end = dayjs(timespan.end * 1000); - for (let d = start; d.isBefore(end); d = d.add(1, "day")) { - const date = d.format("YYYY-MM-DD"); - if (!(date in result)) { - result[date] = { - events: 0, - timespanAvailable: true, - }; - } - } - } - - for (const [date, events] of Object.entries(eventsAmount)) { - const totalEvents = Object.values(events).reduce((a, b) => a + b, 0); - if (totalEvents > 0) { - if (!(date in result)) { - result[date] = { - events: totalEvents, - timespanAvailable: false, - }; - } else { - result[date].events = totalEvents; - } - } - } - - return result; -} - type DatePickerDialogProps = { open: boolean; setOpen: (open: boolean) => void; @@ -122,20 +75,12 @@ export function DatePickerDialog({ 
onChange, }: DatePickerDialogProps) { const filteredCameras = useFilteredCameras(); - const eventsAmountQuery = useEventsAmountMultiple({ + const eventsDateOfInterest = useEventsDatesOfInterest({ camera_identifiers: Object.keys(filteredCameras), + configOptions: { + enabled: open, + }, }); - const { availableTimespans } = useTimespans(null, 5, open); - const highlightedDays = useMemo( - () => - eventsAmountQuery.data - ? getHighlightedDays( - eventsAmountQuery.data.events_amount, - availableTimespans, - ) - : {}, - [eventsAmountQuery.data, availableTimespans], - ); const handleClose = () => { setOpen(false); @@ -153,7 +98,7 @@ export function DatePickerDialog({ }} slotProps={{ day: { - highlightedDays, + highlightedDays: eventsDateOfInterest.data?.dates_of_interest, } as any, actionBar: { actions: ["today", "cancel"], diff --git a/frontend/src/components/header/Breadcrumbs.tsx b/frontend/src/components/header/Breadcrumbs.tsx index ec50050d1..c6914af66 100644 --- a/frontend/src/components/header/Breadcrumbs.tsx +++ b/frontend/src/components/header/Breadcrumbs.tsx @@ -8,6 +8,8 @@ import { Link as RouterLink, useLocation } from "react-router-dom"; import { getCameraNameFromQueryCache, toTitleCase } from "lib/helpers"; +const getTitle = (str: string) => toTitleCase(str.replace("-", " ")); + export default function Breadcrumbs() { const theme = useTheme(); const mediaQuerySmall = useMediaQuery(theme.breakpoints.up("sm")); @@ -20,7 +22,7 @@ export default function Breadcrumbs() { if (!mediaQuerySmall) { return ( - {toTitleCase(pathnames[0])} + {getTitle(pathnames[0])} ); } @@ -42,7 +44,7 @@ export default function Breadcrumbs() { return last ? ( - {toTitleCase(value)} + {getTitle(value)} ) : ( - {toTitleCase(value)} + {getTitle(value)} ); })} diff --git a/frontend/src/components/player/hlsplayer/HlsPlayer.tsx b/frontend/src/components/player/hlsplayer/HlsPlayer.tsx index 4ebb2b797..6e3f668e7 100644 --- a/frontend/src/components/player/hlsplayer/HlsPlayer.tsx +++ b/frontend/src/components/player/hlsplayer/HlsPlayer.tsx @@ -3,7 +3,7 @@ import { useTheme } from "@mui/material/styles"; import dayjs from "dayjs"; import utc from "dayjs/plugin/utc"; import Hls, { LevelLoadedData } from "hls.js"; -import React, { useContext, useEffect, useRef } from "react"; +import React, { useCallback, useContext, useEffect, useRef } from "react"; import { v4 as uuidv4 } from "uuid"; import { useShallow } from "zustand/react/shallow"; @@ -31,7 +31,7 @@ const loadSource = ( if (!hlsRef.current) { return; } - const source = `/api/v1/hls/${camera.identifier}/index.m3u8?start_timestamp=${playingDate}&daily=true`; + const source = `/api/v1/hls/${camera.identifier}/index.m3u8?start_timestamp=${playingDate}&date=${dayjs(playingDate * 1000).format("YYYY-MM-DD")}`; hlsClientIdRef.current = uuidv4(); hlsRef.current.loadSource(source); }; @@ -252,6 +252,34 @@ const useInitializePlayer = ( const delayedInitializationTimeoutRef = useRef(); const delayedRecoveryTimeoutRef = useRef(); + const reInitPlayer = useCallback(() => { + if (Hls.isSupported()) { + initializePlayer( + hlsRef, + hlsClientIdRef, + videoRef, + initialProgramDateTime, + auth, + camera, + playingDateRef, + setHlsRefsError, + delayedInitializationTimeoutRef, + delayedRecoveryTimeoutRef, + ); + } + }, [ + auth, + camera, + hlsClientIdRef, + hlsRef, + initialProgramDateTime, + playingDateRef, + setHlsRefsError, + videoRef, + delayedInitializationTimeoutRef, + delayedRecoveryTimeoutRef, + ]); + useEffect(() => { if (Hls.isSupported()) { addHlsRef(hlsRef); @@ -291,6 
+319,10 @@ const useInitializePlayer = ( hlsRef.current.startLoad(); } }, [connected, hlsRef]); + + return { + reInitPlayer, + }; }; // Seek to the requestedTimestamp if it is within the seekable range @@ -300,6 +332,7 @@ const useSeekToTimestamp = ( videoRef: React.RefObject, initialProgramDateTime: React.MutableRefObject, camera: types.Camera | types.FailedCamera, + reInitPlayer: () => void, ) => { // Avoid running on first render to not call loadSource twice const firstRender = useFirstRender(); @@ -342,7 +375,8 @@ const useSeekToTimestamp = ( // Ignore play errors }); } else { - loadSource(hlsRef, hlsClientIdRef, requestedTimestamp, camera); + // If the fragment is not found, reinitialize the player to load the correct source + reInitPlayer(); } }, [ camera, @@ -350,6 +384,7 @@ const useSeekToTimestamp = ( hlsClientIdRef, hlsRef, initialProgramDateTime, + reInitPlayer, requestedTimestamp, videoRef, ]); @@ -372,7 +407,7 @@ export const HlsPlayer: React.FC = ({ camera }) => { })), ); - useInitializePlayer( + const { reInitPlayer } = useInitializePlayer( hlsRef, hlsClientIdRef, videoRef, @@ -385,6 +420,7 @@ export const HlsPlayer: React.FC = ({ camera }) => { videoRef, initialProgramDateTime, camera, + reInitPlayer, ); return ( diff --git a/frontend/src/context/ViseronContext.tsx b/frontend/src/context/ViseronContext.tsx index 634e87166..fe6b8335a 100644 --- a/frontend/src/context/ViseronContext.tsx +++ b/frontend/src/context/ViseronContext.tsx @@ -1,4 +1,4 @@ -import { useQueryClient } from "@tanstack/react-query"; +import { QueryKey, useQueryClient } from "@tanstack/react-query"; import React, { FC, createContext, useEffect, useState } from "react"; import { useNavigate } from "react-router-dom"; @@ -14,6 +14,7 @@ type SubscriptionManager = { count: number; unsubscribe: SubscriptionUnsubscribe | null; subscribing: boolean; + queryKeys: QueryKey[]; }; export type ViseronContextState = { diff --git a/frontend/src/hooks/useDebouncedTemplateRender.ts b/frontend/src/hooks/useDebouncedTemplateRender.ts new file mode 100644 index 000000000..f86270d5b --- /dev/null +++ b/frontend/src/hooks/useDebouncedTemplateRender.ts @@ -0,0 +1,80 @@ +import { useCallback, useContext, useEffect, useRef, useState } from "react"; + +import { ViseronContext } from "context/ViseronContext"; +import { renderTemplate } from "lib/commands"; + +/** + * Debounced template renderer hook. + * Automatically renders the provided template after a debounce delay when it changes. + * Exposes methods for manual render and clearing results. 
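+ *
+ * Example usage (illustrative only; assumes a component that keeps the
+ * template string in state):
+ *
+ *   const { result, error, loading, renderNow, clear } =
+ *     useDebouncedTemplateRender(template, 500);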
+ */
+export function useDebouncedTemplateRender(template: string, delay = 500) {
+  const { connection } = useContext(ViseronContext);
+
+  const [result, setResult] = useState("");
+  const [error, setError] = useState(null);
+  const [loading, setLoading] = useState(false);
+  const debounceTimeout = useRef(null);
+
+  const renderNow = useCallback(async () => {
+    if (!connection) {
+      return;
+    }
+    setLoading(true);
+    setResult("");
+    try {
+      const response = await renderTemplate(connection, template);
+      setResult(response);
+      setError(null);
+    } catch (e: any) {
+      setError(e?.message || "Failed to render template");
+      setResult("");
+    } finally {
+      setLoading(false);
+    }
+  }, [connection, template]);
+
+  useEffect(() => {
+    if (!template) {
+      setResult("");
+      setError(null);
+      setLoading(false);
+      return () => {};
+    }
+
+    setLoading(true);
+    if (debounceTimeout.current) {
+      clearTimeout(debounceTimeout.current);
+    }
+
+    debounceTimeout.current = setTimeout(async () => {
+      if (!connection) {
+        setLoading(false);
+        return;
+      }
+      try {
+        const response = await renderTemplate(connection, template);
+        setResult(response);
+        setError(null);
+      } catch (e: any) {
+        setError(e?.message || "Failed to render template");
+        setResult("");
+      } finally {
+        setLoading(false);
+      }
+    }, delay);
+
+    return () => {
+      if (debounceTimeout.current) {
+        clearTimeout(debounceTimeout.current);
+      }
+    };
+  }, [template, connection, delay]);
+
+  const clear = useCallback(() => {
+    setResult("");
+    setError(null);
+  }, []);
+
+  return { result, error, loading, renderNow, clear };
+}
diff --git a/frontend/src/lib/api/client.ts b/frontend/src/lib/api/client.ts
index 28b51d99c..b34e5481f 100644
--- a/frontend/src/lib/api/client.ts
+++ b/frontend/src/lib/api/client.ts
@@ -60,11 +60,20 @@ export const useInvalidateQueryOnStateChange = (
       count: 0,
       subscribing: false,
       unsubscribe: null,
+      queryKeys: [],
     };
   }
 
+  if (!subscriptionRef.current[entityId].queryKeys.includes(queryKey)) {
+    subscriptionRef.current[entityId].queryKeys.push(queryKey);
+  }
+
+  const queryKeys = subscriptionRef.current[entityId].queryKeys;
+
   const _stateChanged = (_event: types.StateChangedEvent) => {
-    queryClient.invalidateQueries({ queryKey });
+    queryKeys.forEach((key) => {
+      queryClient.invalidateQueries({ queryKey: key });
+    });
   };
 
   subscriptionRef.current[entityId].count++;
@@ -137,11 +146,20 @@ export const useInvalidateQueryOnEvent = (
       count: 0,
       subscribing: false,
       unsubscribe: null,
+      queryKeys: [],
     };
   }
 
+  if (!subscriptionRef.current[event].queryKeys.includes(queryKey)) {
+    subscriptionRef.current[event].queryKeys.push(queryKey);
+  }
+
+  const queryKeys = subscriptionRef.current[event].queryKeys;
+
   const callback = (_event: types.Event) => {
-    queryClient.invalidateQueries({ queryKey });
+    queryKeys.forEach((key) => {
+      queryClient.invalidateQueries({ queryKey: key });
+    });
   };
 
   subscriptionRef.current[event].count++;
diff --git a/frontend/src/lib/api/events.ts b/frontend/src/lib/api/events.ts
index 7c94b49ba..7b6636f9c 100644
--- a/frontend/src/lib/api/events.ts
+++ b/frontend/src/lib/api/events.ts
@@ -225,3 +225,56 @@ export function useEventsAmountMultiple(
     ...variables.configOptions,
   });
 }
+
+type EventsDatesOfInterestVariables = {
+  camera_identifiers: string[];
+  configOptions?: Omit<
+    UseQueryOptions,
+    "queryKey" | "queryFn"
+  >;
+};
+
+async function eventsDatesOfInterest({
+  camera_identifiers,
+}: EventsDatesOfInterestVariables): Promise {
+  const response = await viseronAPI.post(
+    "events/dates_of_interest",
+    {
+      camera_identifiers,
+    },
+  );
+  return
response.data; +} + +export function useEventsDatesOfInterest( + variables: EventsDatesOfInterestVariables, +): UseQueryResult { + const eventQueryPairs = useMemo(() => { + const _eventQueryPairs: EventQueryPair[] = []; + variables.camera_identifiers.forEach((camera_identifier) => { + _eventQueryPairs.push( + { + event: `${camera_identifier}/camera_event/*/*`, + queryKey: ["events", "dates_of_interest"], + }, + { + event: `${camera_identifier}/recorder/start`, + queryKey: ["events", "dates_of_interest"], + }, + { + event: `${camera_identifier}/recorder/stop`, + queryKey: ["events", "dates_of_interest"], + }, + ); + }); + return _eventQueryPairs; + }, [variables.camera_identifiers]); + + useInvalidateQueryOnEvent(eventQueryPairs, 5); + + return useQuery({ + queryKey: ["events", "dates_of_interest"], + queryFn: async () => eventsDatesOfInterest(variables), + ...variables.configOptions, + }); +} diff --git a/frontend/src/lib/api/system.ts b/frontend/src/lib/api/system.ts new file mode 100644 index 000000000..5e1b4035e --- /dev/null +++ b/frontend/src/lib/api/system.ts @@ -0,0 +1,22 @@ +import { UseQueryOptions, useQuery } from "@tanstack/react-query"; + +import { viseronAPI } from "lib/api/client"; +import * as types from "lib/types"; + +async function systemDispatchedEvents() { + const response = await viseronAPI.get(`system/dispatched_events`); + return response.data; +} + +export function useSystemDispatchedEvents( + configOptions?: Omit< + UseQueryOptions, + "queryKey" | "queryFn" + >, +) { + return useQuery({ + queryKey: ["system", "dispatched_events"], + queryFn: async () => systemDispatchedEvents(), + ...configOptions, + }); +} diff --git a/frontend/src/lib/commands.ts b/frontend/src/lib/commands.ts index fbc1f77c9..c6f4aa86a 100644 --- a/frontend/src/lib/commands.ts +++ b/frontend/src/lib/commands.ts @@ -285,3 +285,9 @@ export const useExportTimespan = () => { return exportTimespanCallback; }; + +export const renderTemplate = async ( + connection: Connection, + template: string, +): Promise => + connection.sendMessagePromise(messages.renderTemplate(template)); diff --git a/frontend/src/lib/messages.ts b/frontend/src/lib/messages.ts index 0749cf68a..218c04289 100644 --- a/frontend/src/lib/messages.ts +++ b/frontend/src/lib/messages.ts @@ -193,3 +193,10 @@ export function exportTimespan( end, } as ExportTimespanMessage; } + +export function renderTemplate(template: string) { + return { + type: "render_template", + template, + }; +} diff --git a/frontend/src/lib/types.ts b/frontend/src/lib/types.ts index 6f7254113..ea434c995 100644 --- a/frontend/src/lib/types.ts +++ b/frontend/src/lib/types.ts @@ -350,6 +350,15 @@ export type EventsAmount = { }; }; +export type EventsDatesOfInterest = { + dates_of_interest: { + [date: string]: { + events: number; + timespan_available: boolean; + }; + }; +}; + export interface Entity { entity_id: string; state: string; @@ -390,3 +399,7 @@ export type DownloadFileResponse = { filename: string; token: string; }; + +export type SystemDispatchedEvents = { + events: string[]; +}; diff --git a/frontend/src/pages/settings/SystemEvents.tsx b/frontend/src/pages/settings/SystemEvents.tsx new file mode 100644 index 000000000..8af21341f --- /dev/null +++ b/frontend/src/pages/settings/SystemEvents.tsx @@ -0,0 +1,140 @@ +import Autocomplete from "@mui/material/Autocomplete"; +import Box from "@mui/material/Box"; +import Button from "@mui/material/Button"; +import Container from "@mui/material/Container"; +import Paper from "@mui/material/Paper"; +import TextField from 
"@mui/material/TextField"; +import Typography from "@mui/material/Typography"; +import yaml from "js-yaml"; +import { useContext, useRef, useState } from "react"; + +import { ViseronContext } from "context/ViseronContext"; +import { useToast } from "hooks/UseToast"; +import { useSystemDispatchedEvents } from "lib/api/system"; +import { subscribeEvent } from "lib/commands"; + +const SystemEvents = () => { + const { connection } = useContext(ViseronContext); + const toast = useToast(); + + const systemDispatchedEvents = useSystemDispatchedEvents({ + refetchInterval: 10000, + }); + + const [event, setEvent] = useState(""); + const [subscribed, setSubscribed] = useState(false); + const [receivedEvents, setReceivedEvents] = useState([]); + const unsubscribeRef = useRef Promise)>(null); + + const handleSubscribe = async () => { + if (!connection) { + return; + } + try { + const unsub = await subscribeEvent(connection, event, (msg) => { + setReceivedEvents((prev) => [msg, ...prev]); + }); + unsubscribeRef.current = unsub; + setSubscribed(true); + } catch (e: any) { + setSubscribed(false); + toast.error( + `Failed to subscribe to event: ${e?.message || "Unknown error"}`, + ); + } + }; + + const handleUnsubscribe = async () => { + if (unsubscribeRef.current) { + await unsubscribeRef.current(); + unsubscribeRef.current = null; + } + setSubscribed(false); + }; + + const handleClearEvents = () => { + setReceivedEvents([]); + }; + + return ( + + + + Listen to events + + { + setEvent(value || ""); + }} + renderInput={(params) => ( + setEvent(e.target.value)} + {...params} + /> + )} + sx={{ mb: 2 }} + /> + + {subscribed ? ( + + ) : ( + + )} + + + + When clicking the text field above, you can see a list of all events + fired since the last restart of Viseron. + + + A star (*) can be used as a wildcard in the event name. 
+ + + + {receivedEvents.length > 0 && ( + + {receivedEvents.map((ev, idx) => ( + + + Event {receivedEvents.length - idx - 1}: + + + {yaml.dump(ev)} + + + ))} + + )} + + ); +}; + +export default SystemEvents; diff --git a/frontend/src/pages/settings/TemplateEditor.tsx b/frontend/src/pages/settings/TemplateEditor.tsx new file mode 100644 index 000000000..23e094e7a --- /dev/null +++ b/frontend/src/pages/settings/TemplateEditor.tsx @@ -0,0 +1,151 @@ +import { StreamLanguage } from "@codemirror/language"; +import { jinja2 } from "@codemirror/legacy-modes/mode/jinja2"; +import ErrorOutlineIcon from "@mui/icons-material/ErrorOutline"; +import { TextField } from "@mui/material"; +import Alert from "@mui/material/Alert"; +import Box from "@mui/material/Box"; +import Button from "@mui/material/Button"; +import Container from "@mui/material/Container"; +import Grid from "@mui/material/Grid2"; +import Paper from "@mui/material/Paper"; +import Typography from "@mui/material/Typography"; +import { useTheme } from "@mui/material/styles"; +import CodeMirror, { basicSetup } from "@uiw/react-codemirror"; +import { useState } from "react"; + +import { useDebouncedTemplateRender } from "hooks/useDebouncedTemplateRender"; + +const extensions = [ + basicSetup({ lintKeymap: true }), + StreamLanguage.define(jinja2), +]; + +const paperStyle = { + padding: 3, + marginBottom: { xs: 2, md: 0 }, + flex: 1, + display: "flex", + flexDirection: "column", + maxHeight: "80vh", + overflow: "auto", + height: "100%", +}; + +type EditorProps = { + template: string; + setTemplate: (value: string) => void; + handleRender: () => Promise | void; + clearTemplate: () => void; + loading: boolean; +}; + +const Editor = ({ + template, + setTemplate, + handleRender, + clearTemplate, + loading, +}: EditorProps) => { + const theme = useTheme(); + + return ( + + + Template Editor + + setTemplate(value)} + style={{ + overflow: "auto", + marginBottom: "16px", + }} + /> + + + + + + ); +}; + +type ResultProps = { + result: string; + error: string | null; +}; + +const Result = ({ result, error }: ResultProps) => ( + + + Result + + {error ? ( + }> + {error} + + ) : result ? ( + + {result} + + ) : null} + +); + +const TemplateEditor = () => { + const [template, setTemplate] = useState(""); + + const { result, error, loading, renderNow, clear } = + useDebouncedTemplateRender(template, 500); + + const handleRender = async () => { + await renderNow(); + }; + + const clearTemplate = () => { + setTemplate(""); + clear(); + }; + + return ( + + + + + + + + + + + ); +}; + +export default TemplateEditor; diff --git a/frontend/src/pages/settings/index.tsx b/frontend/src/pages/settings/index.tsx index fb2115008..feabeb951 100644 --- a/frontend/src/pages/settings/index.tsx +++ b/frontend/src/pages/settings/index.tsx @@ -44,6 +44,24 @@ const Settings = () => { ? 
"Enable authentication to manage users" : "Only admins can manage users", }, + { + name: "System Events", + description: "View system events dispatched by the server", + path: "/settings/system-events", + icon: , + color: "purple", + disabled: false, + disabledReason: null, + }, + { + name: "Template Editor", + description: "Test and render Jinja2 templates", + path: "/settings/template-editor", + icon: , + color: "teal", + disabled: false, + disabledReason: null, + }, { name: "Logs", description: "View system logs", diff --git a/frontend/tests/components/events/DatePicker.test.tsx b/frontend/tests/components/events/DatePicker.test.tsx deleted file mode 100644 index 41515cabd..000000000 --- a/frontend/tests/components/events/DatePicker.test.tsx +++ /dev/null @@ -1,77 +0,0 @@ -import { describe, expect } from "vitest"; - -import { getHighlightedDays } from "components/events/DatePickerDialog"; -import * as types from "lib/types"; - -describe("getHighlightedDays", () => { - it("should return an empty object for an empty input", () => { - const events = {}; - const availableTimespans: types.HlsAvailableTimespan[] = []; - const result = getHighlightedDays(events, availableTimespans); - expect(result).toEqual({}); - }); - - it("should return the correct events and available timespans per day", () => { - const events: types.EventsAmount["events_amount"] = { - "2023-03-03": { - motion: 6, - object: 4, - }, - "2023-03-02": { - motion: 2, - }, - "2023-03-01": { - object: 1, - }, - }; - const availableTimespans: types.HlsAvailableTimespan[] = [ - { - start: 1677312000, - end: 1677398400, - duration: 86400, - }, - { - start: 1677398400, - end: 1677484800, - duration: 86400, - }, - { - start: 1677484800, - end: 1677571200, - duration: 86400, - }, - { - start: 1677740400, - end: 1677744000, - duration: 3600, - }, - ]; - const result = getHighlightedDays(events, availableTimespans); - expect(result).toEqual({ - "2023-02-25": { - events: 0, - timespanAvailable: true, - }, - "2023-02-26": { - events: 0, - timespanAvailable: true, - }, - "2023-02-27": { - events: 0, - timespanAvailable: true, - }, - "2023-03-01": { - events: 1, - timespanAvailable: false, - }, - "2023-03-02": { - events: 2, - timespanAvailable: true, - }, - "2023-03-03": { - events: 10, - timespanAvailable: false, - }, - }); - }); -}); diff --git a/frontend/tests/components/header/Header.test.tsx b/frontend/tests/components/header/Header.test.tsx index df47cf39c..16c11b0a1 100644 --- a/frontend/tests/components/header/Header.test.tsx +++ b/frontend/tests/components/header/Header.test.tsx @@ -15,8 +15,8 @@ describe("Loading Component", () => { test("renders app header without auth", () => { const { queryByRole } = renderWithContext(, { - enabled: false, - onboarding_complete: false, + auth: { enabled: false, onboarding_complete: false }, + user: null, }); expect( queryByRole("button", { diff --git a/frontend/tests/hooks/useDebouncedTemplateRender.test.tsx b/frontend/tests/hooks/useDebouncedTemplateRender.test.tsx new file mode 100644 index 000000000..ffc0d193f --- /dev/null +++ b/frontend/tests/hooks/useDebouncedTemplateRender.test.tsx @@ -0,0 +1,149 @@ +import { act } from "@testing-library/react"; +import { renderHookWithContext } from "tests/utils/renderWithContext"; +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; + +import { useDebouncedTemplateRender } from "hooks/useDebouncedTemplateRender"; +import { renderTemplate } from "lib/commands"; + +vi.mock("lib/commands", () => ({ renderTemplate: vi.fn() })); + 
+const mockConnection = {} as any; + +const DEBOUNCE = 500; + +const flushMicrotasks = async () => { + await Promise.resolve(); + await Promise.resolve(); +}; + +describe("useDebouncedTemplateRender", () => { + beforeEach(() => { + vi.useFakeTimers(); + (renderTemplate as any).mockReset(); + }); + afterEach(() => { + vi.useRealTimers(); + }); + + it("does not call renderTemplate when template is empty", () => { + renderHookWithContext(() => useDebouncedTemplateRender("", DEBOUNCE), { + connection: mockConnection, + }); + expect(renderTemplate).not.toHaveBeenCalled(); + }); + + it("debounces rapid changes and only calls once", async () => { + (renderTemplate as any).mockResolvedValue("OK"); + const { rerender, result } = renderHookWithContext( + ({ template }) => useDebouncedTemplateRender(template, DEBOUNCE), + { initialProps: { template: "a" }, connection: mockConnection }, + ); + + act(() => { + rerender({ template: "ab" }); + rerender({ template: "abc" }); + }); + + expect(renderTemplate).not.toHaveBeenCalled(); + + await act(async () => { + vi.advanceTimersByTime(DEBOUNCE); + await flushMicrotasks(); + }); + + expect(renderTemplate).toHaveBeenCalledTimes(1); + expect(result.current.result).toBe("OK"); + expect(result.current.error).toBeNull(); + }); + + it("manual renderNow triggers immediately", async () => { + (renderTemplate as any).mockResolvedValue("MANUAL"); + const { result } = renderHookWithContext( + () => useDebouncedTemplateRender("hello", DEBOUNCE), + { connection: mockConnection }, + ); + + await act(async () => { + await result.current.renderNow(); + }); + + expect(renderTemplate).toHaveBeenCalledTimes(1); + expect(result.current.result).toBe("MANUAL"); + }); + + it("sets loading correctly during async call", async () => { + let resolveFn: (v: string) => void = () => {}; + (renderTemplate as any).mockImplementation( + () => + new Promise((res) => { + resolveFn = res as any; + }), + ); + + const { result } = renderHookWithContext( + () => useDebouncedTemplateRender("abc", DEBOUNCE), + { connection: mockConnection }, + ); + + await act(async () => { + vi.advanceTimersByTime(DEBOUNCE); + // Don't flush yet; still pending promise + }); + expect(result.current.loading).toBe(true); + + await act(async () => { + resolveFn("DONE"); + await flushMicrotasks(); + }); + + expect(result.current.loading).toBe(false); + expect(result.current.result).toBe("DONE"); + }); + + it("handles error path", async () => { + (renderTemplate as any).mockRejectedValue(new Error("Testing")); + const { result } = renderHookWithContext( + () => useDebouncedTemplateRender("err", DEBOUNCE), + { connection: mockConnection }, + ); + + await act(async () => { + vi.advanceTimersByTime(DEBOUNCE); + await flushMicrotasks(); + }); + + expect(result.current.error).toBe("Testing"); + expect(result.current.result).toBe(""); + }); + + it("clear resets result and error", async () => { + (renderTemplate as any).mockResolvedValue("VALUE"); + const { result } = renderHookWithContext( + () => useDebouncedTemplateRender("x", DEBOUNCE), + { connection: mockConnection }, + ); + + await act(async () => { + vi.advanceTimersByTime(DEBOUNCE); + await flushMicrotasks(); + }); + expect(result.current.result).toBe("VALUE"); + + act(() => { + result.current.clear(); + }); + + expect(result.current.result).toBe(""); + expect(result.current.error).toBeNull(); + }); + + it("does not call renderTemplate when no connection present", async () => { + (renderTemplate as any).mockResolvedValue("SHOULD_NOT"); + renderHookWithContext(() => 
useDebouncedTemplateRender("text", DEBOUNCE)); + await act(async () => { + vi.advanceTimersByTime(DEBOUNCE); + await flushMicrotasks(); + }); + expect(renderTemplate).not.toHaveBeenCalled(); + }); +}); diff --git a/frontend/tests/utils/renderWithContext.tsx b/frontend/tests/utils/renderWithContext.tsx index 665a52a73..e916057c7 100644 --- a/frontend/tests/utils/renderWithContext.tsx +++ b/frontend/tests/utils/renderWithContext.tsx @@ -1,13 +1,18 @@ import CssBaseline from "@mui/material/CssBaseline"; import { QueryClient, QueryClientProvider } from "@tanstack/react-query"; -import { RenderOptions, render } from "@testing-library/react"; +import { + RenderHookOptions, + RenderOptions, + render, + renderHook, +} from "@testing-library/react"; import { useRef } from "react"; import { MemoryRouter } from "react-router-dom"; import ToastContainer from "components/toast/ToastContainer"; import { AuthContext } from "context/AuthContext"; import { ColorModeProvider } from "context/ColorModeContext"; -import { ViseronContext } from "context/ViseronContext"; +import { ViseronContext, ViseronContextState } from "context/ViseronContext"; import * as types from "lib/types"; interface ProvidersWrapperProps { @@ -15,50 +20,57 @@ interface ProvidersWrapperProps { } // Wraps a component in all the providers needed for testing -function customRender( - component: React.ReactElement, - auth: types.AuthEnabledResponse = { - enabled: true, - onboarding_complete: true, - }, - user: types.AuthUserResponse | null = { - id: "123456789", - name: "", - username: "", - role: "admin", - assigned_cameras: null, - }, - queryClient = new QueryClient({ - defaultOptions: { - queries: { - retry: false, - }, - }, - }), - options?: Omit, -) { +export interface TestContextOptions { + auth?: types.AuthEnabledResponse; + user?: types.AuthUserResponse | null; + queryClient?: QueryClient; + connection?: ViseronContextState["connection"]; + viseronOverrides?: Partial; +} + +const defaultAuth: types.AuthEnabledResponse = { + enabled: true, + onboarding_complete: true, +}; + +const defaultUser: types.AuthUserResponse = { + id: "123456789", + name: "", + username: "", + role: "admin", + assigned_cameras: null, +}; + +export function createProvidersWrapper(options: TestContextOptions = {}) { + const { + auth = defaultAuth, + user = defaultUser, + queryClient = new QueryClient({ + defaultOptions: { queries: { retry: false } }, + }), + connection = undefined, + viseronOverrides = {}, + } = options; + function ProvidersWrapper({ children }: ProvidersWrapperProps) { + const viseronValue: ViseronContextState = { + connected: true, + safeMode: false, + version: "0.0.0", + gitCommit: "0000000", + subscriptionRef: useRef({}), + ...viseronOverrides, + connection: + connection !== undefined ? 
connection : viseronOverrides.connection,
+    };
+
     return (
-      
-        
+      
+        
           
             {children}
@@ -68,8 +80,49 @@
       
     );
   }
+  return ProvidersWrapper;
+}
 
-  return render(component, { wrapper: ProvidersWrapper, ...options });
+// Component render helper
+function renderWithContext(
+  component: React.ReactElement,
+  options?: TestContextOptions & Omit,
+) {
+  const { viseronOverrides, connection, auth, user, queryClient, ...rtl } =
+    options || {};
+  const wrapper = createProvidersWrapper({
+    viseronOverrides,
+    connection,
+    auth,
+    user: user || null,
+    queryClient,
+  });
+  return render(component, { wrapper, ...rtl });
+}
+
+// Hook render helper
+function renderHookWithContext(
+  callback: (initialProps: TProps) => TResult,
+  options?: TestContextOptions &
+    RenderHookOptions & { initialProps?: TProps },
+) {
+  const {
+    viseronOverrides,
+    connection,
+    auth,
+    user,
+    queryClient,
+    initialProps,
+    ...rest
+  } = options || ({} as any);
+  const wrapper = createProvidersWrapper({
+    viseronOverrides,
+    connection,
+    auth,
+    user: user || null,
+    queryClient,
+  });
+  return renderHook(callback, { wrapper, initialProps, ...rest });
 }
-export { customRender as renderWithContext };
+export { renderWithContext, renderHookWithContext };
diff --git a/requirements.txt b/requirements.txt
index 345f4c9fa..b0eaf0580 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,6 +10,7 @@ face-recognition==1.3.0
 httpx==0.27.0
 python-immutable==1.1.0
 imutils==0.5.4
+Jinja2==3.1.6
 numpy==1.26.4
 paho-mqtt==2.1.0
 path.py==12.5.0
diff --git a/requirements_ci.txt b/requirements_ci.txt
index 12919e7d8..86db00d41 100644
--- a/requirements_ci.txt
+++ b/requirements_ci.txt
@@ -1,4 +1,5 @@
 # Extra requirements for GitHub Actions CI
+hailort==4.22.0
 opencv-python==4.9.0.80
 opencv-contrib-python==4.9.0.80
 pygobject
\ No newline at end of file
diff --git a/rootfs/etc/cont-init.d/50-check-if-rpi b/rootfs/etc/cont-init.d/50-check-if-rpi
index f1c39dfc6..5280453a0 100644
--- a/rootfs/etc/cont-init.d/50-check-if-rpi
+++ b/rootfs/etc/cont-init.d/50-check-if-rpi
@@ -3,23 +3,56 @@ source /helpers/logger.sh
 
 log_info "********** Checking if we are running on an RPi **********"
 
-# Check if we are running on an RPi3 or RPi4
-# Hardware revision table is found here: https://www.raspberrypi-spy.co.uk/2012/09/checking-your-raspberry-pi-board-version/
-OUTPUT=$(cat /proc/cpuinfo | grep 'Revision' | awk '{print $3}')
-case "$OUTPUT" in
-    "a02082" | "a22082" | "a020d3")
+# Check if we are running on a supported Raspberry Pi
+# Based on https://www.raspberrypi.com/documentation/computers/raspberry-pi.html#getting-the-revision-code-in-your-program
+
+# Get the revision code from /proc/cpuinfo
+REV_CODE=$(awk '/Revision/ {print $3}' /proc/cpuinfo)
+
+# Convert hex revision code to decimal
+CODE=$((0x$REV_CODE))
+
+# Extract fields
+NEW=$(( (CODE >> 23) & 0x1 ))
+MODEL=$(( (CODE >> 4) & 0xff ))
+MEM=$(( (CODE >> 20) & 0x7 ))
+
+# Supported models
+RPI3=(8 13 14) # 0x08, 0x0d, 0x0e = Raspberry Pi 3B, 3B+, 3A+
+RPI4=(17)      # 0x11 = Raspberry Pi 4B
+RPI5=(23)      # 0x17 = Raspberry Pi 5
+
+SUPPORTED=0
+
+for m in "${RPI3[@]}"; do
+  if [[ $NEW -eq 1 && $MODEL -eq $m ]]; then
+    log_info "Detected Raspberry Pi 3 Model (revision: $REV_CODE)"
     export VISERON_RASPBERRYPI3=true
     printf "true" > /var/run/environment/VISERON_RASPBERRYPI3
-    log_info "Running on an RPi3"
-    ;;
+    SUPPORTED=1
+  fi
+done
 
-    "a03111" | "b03111" | "b03112" | "c03111" | "c03112" | "d03114")
+for m in "${RPI4[@]}"; do
+  if [[ $NEW -eq 1 && $MODEL -eq $m ]]; then
+    log_info "Detected
Raspberry Pi 4 Model (revision: $REV_CODE)" export VISERON_RASPBERRYPI4=true printf "true" > /var/run/environment/VISERON_RASPBERRYPI4 - log_info "Running on an RPi4" - ;; + SUPPORTED=1 + fi +done + +for m in "${RPI5[@]}"; do + if [[ $NEW -eq 1 && $MODEL -eq $m ]]; then + log_info "Detected Raspberry Pi 5 Model (revision: $REV_CODE)" + export VISERON_RASPBERRYPI5=true + printf "true" > /var/run/environment/VISERON_RASPBERRYPI5 + SUPPORTED=1 + fi +done + - *) - log_info "Not running on any supported RPi" -esac -log_info "*********************** Done *****************************" +if [[ $SUPPORTED -eq 0 ]]; then + log_info "No supported Raspberry Pi model detected" +fi +log_info "*********************** Done *****************************" \ No newline at end of file diff --git a/rootfs/usr/local/nginx/conf/proxy.conf b/rootfs/usr/local/nginx/conf/proxy.conf index a0d35744b..e2c7bd8ac 100644 --- a/rootfs/usr/local/nginx/conf/proxy.conf +++ b/rootfs/usr/local/nginx/conf/proxy.conf @@ -19,7 +19,6 @@ proxy_no_cache $cookie_session; # Proxy Header Settings proxy_set_header Connection "Upgrade"; -proxy_set_header Early-Data $ssl_early_data; proxy_set_header Host $http_host; proxy_set_header Proxy ""; proxy_set_header Upgrade $http_upgrade; @@ -33,4 +32,4 @@ proxy_set_header X-Forwarded-Ssl on; proxy_set_header X-Forwarded-Uri $request_uri; proxy_set_header X-Original-Method $request_method; proxy_set_header X-Original-URL $scheme://$http_host$request_uri; -proxy_set_header X-Real-IP $remote_addr; \ No newline at end of file +proxy_set_header X-Real-IP $remote_addr; diff --git a/scripts/gen_docs/__main__.py b/scripts/gen_docs/__main__.py index e98d784ef..a010a4024 100644 --- a/scripts/gen_docs/__main__.py +++ b/scripts/gen_docs/__main__.py @@ -21,7 +21,11 @@ CoerceNoneToDict, Deprecated, Maybe, + PathExists, Slug, + StringKey, + Url, + jinja2_template, ) from viseron.types import SupportedDomains @@ -191,11 +195,23 @@ def recurse_options(options): schema.__name__.lower(): True, } - if schema in (vol.Email, vol.Url, vol.FqdnUrl): + if schema in (vol.Email, vol.FqdnUrl): return { "format": schema.__name__.lower(), } + if isinstance(schema, Url): + return { + "type": "string", + "format": schema.__class__.__name__.lower(), + } + + if isinstance(schema, PathExists): + return { + "type": "string", + "format": "file path", + } + if isinstance(schema, vol.Coerce): schema = schema.type @@ -224,7 +240,7 @@ def recurse_options(options): return { "type": "CAMERA_IDENTIFIER", } - if isinstance(schema, Slug): + if isinstance(schema, (Slug, StringKey)): return { "type": "string", } @@ -236,6 +252,12 @@ def recurse_options(options): "value": schema.message, } + if schema == jinja2_template: # pylint: disable=comparison-with-callable + return { + "type": "jinja2_template", + "value": "jinja2_template", + } + if callable(schema): return {"type": "custom_validator", "value": "unable_to_convert"} diff --git a/tests/components/storage/test_config.py b/tests/components/storage/test_config.py index f240b3715..bd5da05d5 100644 --- a/tests/components/storage/test_config.py +++ b/tests/components/storage/test_config.py @@ -120,6 +120,7 @@ def create_tier_snapshots( "motion_detector": None, "object_detector": None, }, + "timelapse": None, }, } diff --git a/tests/components/webserver/api/v1/test_events.py b/tests/components/webserver/api/v1/test_events.py index 00f9f1604..af154da77 100644 --- a/tests/components/webserver/api/v1/test_events.py +++ b/tests/components/webserver/api/v1/test_events.py @@ -172,3 +172,44 @@ def 
test_get_events_amount_utc_offset_positive(self): assert body["events_amount"]["2024-06-22"]["motion"] == 1 assert body["events_amount"]["2024-06-23"]["motion"] == 1 assert body["events_amount"]["2024-06-22"]["face_recognition"] == 1 + + def test_post_dates_of_interest(self): + """Test getting dates of interest.""" + response = self.fetch( + "/api/v1/events/dates_of_interest", + method="POST", + body=json.dumps( + { + "camera_identifiers": ["test"], + } + ), + headers={ + "X-Client-UTC-Offset": "0", + }, + ) + + assert response.code == 200 + + body = json.loads(response.body) + assert body["dates_of_interest"]["2024-06-22"]["events"] == 4 + + def test_post_dates_of_interest_utc_offset_negative(self): + """Test getting dates of interest with utc offset.""" + response = self.fetch( + "/api/v1/events/dates_of_interest", + method="POST", + body=json.dumps( + { + "camera_identifiers": ["test"], + } + ), + headers={ + "X-Client-UTC-Offset": "-120", + }, + ) + + assert response.code == 200 + + body = json.loads(response.body) + assert body["dates_of_interest"]["2024-06-21"]["events"] == 2 + assert body["dates_of_interest"]["2024-06-22"]["events"] == 2 diff --git a/tests/components/webserver/api/v1/test_hls.py b/tests/components/webserver/api/v1/test_hls.py index dba4d149a..1ac2ab716 100644 --- a/tests/components/webserver/api/v1/test_hls.py +++ b/tests/components/webserver/api/v1/test_hls.py @@ -179,6 +179,72 @@ def test_get_available_timespans(self): assert response.code == 200 assert len(json.loads(response.body)["timespans"]) == 2 + def _get_hls_playlist_time_period( + self, + start_timestamp, + end_timestamp, + date, + expected_files_count, + expected_end_tag=0, + ): + """Test getting HLS playlist.""" + start = int(self._now.timestamp()) + start_timestamp + end = ( + int(self._now.timestamp()) + end_timestamp + if end_timestamp is not None + else None + ) + url = f"/api/v1/hls/test/index.m3u8?start_timestamp={start}" + if end is not None: + url += f"&end_timestamp={end}" + if date is not None: + url += f"&date={date}" + mocked_camera = MockCamera( + identifier="test", + ) + with patch( + ( + "viseron.components.webserver.request_handler.ViseronRequestHandler." 
+ "_get_camera" + ), + return_value=mocked_camera, + ), patch( + ( + "viseron.components.webserver.request_handler.ViseronRequestHandler" + "._get_session" + ), + return_value=self._get_db_session(), + ), patch( + "viseron.components.webserver.api.v1.hls._get_init_file", + return_value="/test/init.mp4", + ), patch( + "viseron.components.storage.queries.utcnow", + return_value=self._simulated_now, + ): + response = self.fetch(url) + + assert response.code == 200 + response_string = response.body.decode() + assert response_string.count("#EXTINF") == expected_files_count + assert response_string.count("#EXT-X-ENDLIST") == expected_end_tag + + # Can't use parametrize for these test because we derive from unittest.TestCase + def test_get_hls_playlist_time_period_start(self): + """Test getting HLS playlist for a specific time period.""" + self._get_hls_playlist_time_period(60, None, None, 4) + + def test_get_hls_playlist_time_period_end(self): + """Test getting HLS playlist for a specific time period with end.""" + self._get_hls_playlist_time_period(0, 60, None, 12, 1) + + def test_get_hls_playlist_time_period_date_today(self): + """Test getting HLS playlist for a specific time period with date today.""" + self._get_hls_playlist_time_period(0, None, self._now.date().isoformat(), 15, 0) + + def test_get_hls_playlist_time_period_date_not_today(self): + """Test getting HLS playlist for a specific time period with date not today.""" + self._get_hls_playlist_time_period(0, None, "2023-10-01", 0, 1) + def test_count_files_removed_no_files_removed(): """Test count_files_removed with no files removed.""" diff --git a/tests/components/webserver/api/v1/test_system.py b/tests/components/webserver/api/v1/test_system.py new file mode 100644 index 000000000..28b8b628b --- /dev/null +++ b/tests/components/webserver/api/v1/test_system.py @@ -0,0 +1,43 @@ +"""Test the System API handler.""" +from __future__ import annotations + +import json +from unittest.mock import PropertyMock, patch + +from viseron.components.webserver.auth import Role, User + +from tests.components.webserver.common import TestAppBaseAuth + + +class TestSystemApiHandler(TestAppBaseAuth): + """Test the SystemAPIHandler.""" + + def test_get_dispatched_events_admin(self): + """Test getting dispatched events as admin.""" + with patch( + "viseron.Viseron.dispatched_events", + new_callable=PropertyMock, + return_value=["event1", "event2"], + ): + response = self.fetch_with_auth("/api/v1/system/dispatched_events") + assert response.code == 200 + data = json.loads(response.body) + assert data == {"events": ["event1", "event2"]} + + def test_get_dispatched_events_non_admin(self): + """Test getting dispatched events as non-admin.""" + with patch( + "viseron.components.webserver.request_handler.ViseronRequestHandler.current_user", # pylint: disable=line-too-long + new_callable=PropertyMock, + return_value=User( + name="Test", + username="test", + password="test", + role=Role.READ, + ), + ), patch( + "viseron.components.webserver.request_handler.ViseronRequestHandler.validate_access_token", # pylint: disable=line-too-long + return_value=True, + ): + response = self.fetch_with_auth("/api/v1/system/dispatched_events") + assert response.code == 403 diff --git a/tests/components/webserver/common.py b/tests/components/webserver/common.py index 0c25c712b..e7b8768cc 100644 --- a/tests/components/webserver/common.py +++ b/tests/components/webserver/common.py @@ -12,7 +12,7 @@ from tornado.testing import AsyncHTTPTestCase from tornado.web import 
create_signed_value -from viseron import setup_viseron +from viseron import Viseron, setup_viseron from viseron.components.webserver import Webserver, create_application from viseron.components.webserver.const import COMPONENT @@ -101,7 +101,8 @@ def setUp(self) -> None: "viseron.components.webserver.create_application" ): mocked_load_config.return_value = self.config - self.vis = setup_viseron(start_background_scheduler=False) + self.vis = Viseron(start_background_scheduler=False) + setup_viseron(self.vis) self.webserver: Webserver = self.vis.data[COMPONENT] return super().setUp() diff --git a/tests/helpers/test_template.py b/tests/helpers/test_template.py new file mode 100644 index 000000000..4f360988e --- /dev/null +++ b/tests/helpers/test_template.py @@ -0,0 +1,132 @@ +"""Tests for viseron.helpers.template.""" +from types import SimpleNamespace + +import pytest + +from viseron.helpers.template import ( + StateNamespace, + _DomainNamespace, + render_template, + render_template_condition, +) + +from tests.conftest import MockViseron + + +class DummyStates: + """Dummy states for testing.""" + + def __init__(self, states): + self.current = states + + +def test_state_namespace_getattr_and_getitem(): + """Test StateNamespace and _DomainNamespace attribute and item access.""" + states = { + "binary_sensor.camera_1": SimpleNamespace(state="on"), + "camera.camera_2": SimpleNamespace(state="off"), + } + ns = StateNamespace(states) + # Attribute access returns _DomainNamespace + binary_sensor_ns = ns.binary_sensor + assert isinstance(binary_sensor_ns, _DomainNamespace) + # Item access returns the state object + assert ns["binary_sensor.camera_1"].state == "on" + assert ns["camera.camera_2"].state == "off" + # _DomainNamespace attribute and item access + assert binary_sensor_ns.camera_1.state == "on" + assert binary_sensor_ns["camera_1"].state == "on" + camera_ns = ns.camera + assert camera_ns.camera_2.state == "off" + assert camera_ns["camera_2"].state == "off" + + +def test_state_namespace_missing_key(): + """Test StateNamespace and _DomainNamespace raise KeyError for missing keys.""" + states = {"sensor.x": SimpleNamespace(state="ok")} + ns = StateNamespace(states) + with pytest.raises(KeyError): + _ = ns["sensor.y"] + with pytest.raises(KeyError): + _ = ns.sensor.y + with pytest.raises(KeyError): + _ = ns.sensor["y"] + + +def test_render_template_valid_and_empty(vis: MockViseron): + """Test render_template with valid template, empty template, and extra kwargs.""" + vis.states._current_states = { # pylint: disable=protected-access + "sensor.temp": SimpleNamespace(state="23"), # type: ignore[dict-item] + "switch.light": SimpleNamespace(state="on"), # type: ignore[dict-item] + } + # Valid template + tpl = ( + "Sensor is {{ states.sensor.temp.state }} " + "and switch is {{ states.switch.light.state }}." + ) + result = render_template(vis, tpl) + assert result == "Sensor is 23 and switch is on." 
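+    # Note: StateNamespace also supports item access, so the lookup above
+    # should behave the same written as {{ states['sensor.temp'].state }}
+    # (illustrative; see test_state_namespace_getattr_and_getitem above).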
+ # With extra kwargs + tpl2 = "Value: {{ value }}" + result2 = render_template(vis, tpl2, value=42) + assert result2 == "Value: 42" + # Empty template + assert render_template(vis, "") is None + assert render_template(vis, None) is None + + +@pytest.mark.parametrize( + "template", + [ + ("True"), + ("yes"), + ("on"), + ("enable"), + ("{{ true }}"), + ("{{ True }}"), + ("{{ 1 }}"), + ("{{ states.sensor.x.state }}"), + ("{{ states.sensor.x.state == 'on' }}"), + ], +) +def test_render_template_condition_truthy(vis: MockViseron, template): + """Test render_template_condition for all truthy outputs.""" + vis.states._current_states = { # pylint: disable=protected-access + "sensor.x": SimpleNamespace(state="on"), # type: ignore[dict-item] + } + result, _ = render_template_condition(vis, template) + assert result is True + + +@pytest.mark.parametrize( + "template", + [ + ("False"), + ("no"), + ("off"), + ("disable"), + ("{{ false }}"), + ("{{ False }}"), + ("{{ 0 }}"), + ("{{ -1 }}"), + ("{{ states.sensor.x.state == 'off' }}"), + ("random text"), + ], +) +def test_render_template_condition_false(vis: MockViseron, template): + """Test render_template_condition for all false outputs.""" + vis.states._current_states = { # pylint: disable=protected-access + "sensor.x": SimpleNamespace(state="on"), # type: ignore[dict-item] + } + result, _ = render_template_condition(vis, template) + assert result is False + + +def test_render_template_missing_state_raises(vis: MockViseron): + """Test that render_template raises KeyError if state is missing.""" + vis.states._current_states = { # pylint: disable=protected-access + "sensor.x": SimpleNamespace(state="on"), # type: ignore[dict-item] + } + tpl = "{{ states.sensor.y.state }}" + with pytest.raises(KeyError): + render_template(vis, tpl) diff --git a/tests/test__init__.py b/tests/test__init__.py index 8f57bc6a6..b90b0a366 100644 --- a/tests/test__init__.py +++ b/tests/test__init__.py @@ -35,13 +35,14 @@ def test_setup_viseron_nvr_loaded(vis, caplog): } mocked_viseron = MagicMock(data=data) - with patch("viseron.Viseron", return_value=mocked_viseron): - with patch("viseron.setup_components") as mocked_setup_components: - with patch("viseron.setup_domains") as mocked_setup_domains: - with patch("viseron.load_config") as mocked_load_config: - mocked_load_config.return_value = "Testing" - with patch("viseron.components.get_component"): - setup_viseron(start_background_scheduler=False) + with ( + patch("viseron.Viseron", return_value=mocked_viseron), + patch("viseron.setup_components") as mocked_setup_components, + patch("viseron.setup_domains") as mocked_setup_domains, + patch("viseron.load_config", return_value="Testing") as mocked_load_config, + patch("viseron.components.get_component"), + ): + setup_viseron(mocked_viseron) mocked_setup_components.assert_called_once() mocked_setup_domains.assert_called_once() @@ -79,14 +80,15 @@ def test_setup_viseron_nvr_missing(vis, caplog): } mocked_viseron = MagicMock(data=data) - with patch("viseron.Viseron", return_value=mocked_viseron): - with patch("viseron.setup_components") as mocked_setup_components: - with patch("viseron.setup_component") as mocked_setup_component: - with patch("viseron.setup_domains") as mocked_setup_domains: - with patch("viseron.load_config") as mocked_load_config: - mocked_load_config.return_value = "Testing" - with patch("viseron.components.get_component"): - setup_viseron(start_background_scheduler=False) + with ( + patch("viseron.Viseron", return_value=mocked_viseron), + 
patch("viseron.setup_components") as mocked_setup_components, + patch("viseron.setup_component") as mocked_setup_component, + patch("viseron.setup_domains") as mocked_setup_domains, + patch("viseron.load_config", return_value="Testing") as mocked_load_config, + patch("viseron.components.get_component"), + ): + setup_viseron(mocked_viseron) mocked_setup_components.assert_called_once() mocked_setup_component.assert_called_once() @@ -112,14 +114,15 @@ def test_setup_viseron_cameras_missing(caplog): } mocked_viseron = MagicMock(data=data) - with patch("viseron.Viseron", return_value=mocked_viseron): - with patch("viseron.setup_components") as mocked_setup_components: - with patch("viseron.setup_component") as mocked_setup_component: - with patch("viseron.setup_domains") as mocked_setup_domains: - with patch("viseron.load_config") as mocked_load_config: - mocked_load_config.return_value = "Testing" - with patch("viseron.components.get_component"): - setup_viseron(start_background_scheduler=False) + with ( + patch("viseron.Viseron", return_value=mocked_viseron), + patch("viseron.setup_components") as mocked_setup_components, + patch("viseron.setup_component") as mocked_setup_component, + patch("viseron.setup_domains") as mocked_setup_domains, + patch("viseron.load_config", return_value="Testing") as mocked_load_config, + patch("viseron.components.get_component"), + ): + setup_viseron(mocked_viseron) mocked_setup_components.assert_called_once() mocked_setup_component.assert_not_called() @@ -137,14 +140,15 @@ def test_setup_viseron_cameras_missing_nvr_loaded(caplog): } mocked_viseron = MagicMock(data=data) - with patch("viseron.Viseron", return_value=mocked_viseron): - with patch("viseron.setup_components") as mocked_setup_components: - with patch("viseron.setup_component") as mocked_setup_component: - with patch("viseron.setup_domains") as mocked_setup_domains: - with patch("viseron.load_config") as mocked_load_config: - mocked_load_config.return_value = "Testing" - with patch("viseron.components.get_component"): - setup_viseron(start_background_scheduler=False) + with ( + patch("viseron.Viseron", return_value=mocked_viseron), + patch("viseron.setup_components") as mocked_setup_components, + patch("viseron.setup_component") as mocked_setup_component, + patch("viseron.setup_domains") as mocked_setup_domains, + patch("viseron.load_config", return_value="Testing") as mocked_load_config, + patch("viseron.components.get_component"), + ): + setup_viseron(mocked_viseron) mocked_setup_components.assert_called_once() mocked_setup_component.assert_not_called() diff --git a/viseron/__init__.py b/viseron/__init__.py index efac17f88..ab5010f19 100644 --- a/viseron/__init__.py +++ b/viseron/__init__.py @@ -19,6 +19,7 @@ import voluptuous as vol from apscheduler.schedulers.background import BackgroundScheduler from apscheduler.schedulers.base import SchedulerNotRunningError +from jinja2 import BaseLoader, Environment, StrictUndefined from sqlalchemy import insert from viseron.components import ( @@ -190,7 +191,7 @@ def enable_logging() -> None: ) -def setup_viseron(start_background_scheduler=True) -> Viseron: +def setup_viseron(vis: Viseron): """Set up and run Viseron.""" start = timer() enable_logging() @@ -198,8 +199,6 @@ def setup_viseron(start_background_scheduler=True) -> Viseron: LOGGER.info("-------------------------------------------") LOGGER.info(f"Initializing Viseron {viseron_version if viseron_version else ''}") - vis = Viseron(start_background_scheduler=start_background_scheduler) - try: config = 
load_config() except Exception as error: # pylint: disable=broad-except @@ -243,7 +242,6 @@ def setup_viseron(start_background_scheduler=True) -> Viseron: vis.critical_components_config_store.save(config) LOGGER.info("Viseron initialized in %.1f seconds", timer() - start) - return vis class Viseron: @@ -274,6 +272,8 @@ def __init__(self, start_background_scheduler=True) -> None: self._subprocess_watchdog: SubprocessWatchDog | None = None self._process_watchdog: ProcessWatchDog | None = None + self._dispatched_events: list[str] = [] + self.background_scheduler = BackgroundScheduler(timezone="UTC", daemon=True) if start_background_scheduler: self.background_scheduler.start() @@ -282,11 +282,13 @@ def __init__(self, start_background_scheduler=True) -> None: self._process_watchdog = ProcessWatchDog(self) self.storage: Storage | None = None + self.jinja_env = Environment(loader=BaseLoader(), undefined=StrictUndefined) self.critical_components_config_store = CriticalComponentsConfigStore(self) self.safe_mode = False self.exit_code = 0 self.shutdown_stage: Literal["shutdown", "last_write", "stopping"] | None = None + self.shutdown_event = threading.Event() @property def version(self) -> str: @@ -301,6 +303,11 @@ def git_commit(self) -> str: return git_commit[:7] return "unknown" + @property + def dispatched_events(self) -> list[str]: + """Return the list of dispatched events.""" + return self._dispatched_events + def register_signal_handler(self, viseron_signal, callback): """Register a callback which gets called on signals emitted by Viseron. @@ -373,10 +380,10 @@ def dispatch_event(self, event: str, data: EventData, store: bool = True) -> Non _event: Event[EventData] = Event(event, data, utcnow().timestamp()) if store: self._insert_event(_event) - event = f"event/{event}" - self.data[DATA_STREAM_COMPONENT].publish_data( - event, data=Event(event, data, time.time()) - ) + self.data[DATA_STREAM_COMPONENT].publish_data(f"event/{event}", data=_event) + + if event not in self._dispatched_events: + self._dispatched_events.append(event) @overload def register_domain( @@ -564,6 +571,7 @@ def shutdown(self) -> None: """Shut down Viseron.""" start = timer() LOGGER.info("Initiating shutdown") + self.shutdown_event.set() if self.data.get(DATA_STREAM_COMPONENT, None): data_stream: DataStream = self.data[DATA_STREAM_COMPONENT] @@ -673,11 +681,15 @@ def join( ] = [ thread for thread in threading.enumerate() - if not thread.daemon and thread != threading.current_thread() + if not thread.daemon + and thread != threading.current_thread() + and "setup_domains" not in thread.name ] threads_and_processes += multiprocessing.active_children() - with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor: + with concurrent.futures.ThreadPoolExecutor( + max_workers=100, thread_name_prefix="wait_for_threads_and_processes_to_exit" + ) as executor: thread_or_process_future = { executor.submit(join, thread_or_process): thread_or_process for thread_or_process in threads_and_processes diff --git a/viseron/__main__.py b/viseron/__main__.py index 957393a8a..c6af345c9 100644 --- a/viseron/__main__.py +++ b/viseron/__main__.py @@ -37,7 +37,8 @@ def shutdown_failed(): signal.signal(signal.SIGTERM, signal_term) signal.signal(signal.SIGINT, signal_term) - viseron = setup_viseron() + viseron = Viseron() + setup_viseron(viseron) signal.pause() if viseron: diff --git a/viseron/components/__init__.py b/viseron/components/__init__.py index e8f1d5b46..6f1d661b2 100644 --- a/viseron/components/__init__.py +++ 
b/viseron/components/__init__.py @@ -8,6 +8,7 @@ import traceback from concurrent.futures import ThreadPoolExecutor, as_completed from dataclasses import dataclass +from inspect import signature from timeit import default_timer as timer from typing import TYPE_CHECKING, Any, Literal @@ -168,6 +169,12 @@ def setup_component(self, tries: int = 1) -> bool: slow_setup_warning.start() result = component_module.setup(self._vis, config) except ComponentNotReady as error: + if self._vis.shutdown_event.is_set(): + LOGGER.warning( + f"Component {self.name} setup aborted due to shutdown" + ) + slow_setup_warning.cancel() + return False wait_time = min( tries * COMPONENT_RETRY_INTERVAL, COMPONENT_RETRY_INTERVAL_MAX ) @@ -375,7 +382,10 @@ def _slow_dependency_warning(futures) -> None: if future.result() is True: continue failed.append(future) - slow_dependency_warning.remove() + try: + slow_dependency_warning.remove() + except Exception: # pylint: disable=broad-except + pass if failed: LOGGER.error( @@ -440,10 +450,24 @@ def setup_domain(self, domain_to_setup: DomainToSetup, tries=1): if config: try: slow_setup_warning.start() - result = domain_module.setup( - self._vis, config, domain_to_setup.identifier - ) + sig = signature(domain_module.setup) + if len(sig.parameters) == 4: + # If the setup function has an attempt parameter, we pass it + result = domain_module.setup( + self._vis, config, domain_to_setup.identifier, tries + ) + else: + result = domain_module.setup( + self._vis, config, domain_to_setup.identifier + ) except DomainNotReady as error: + if self._vis.shutdown_event.is_set(): + LOGGER.warning( + f"Domain {domain_to_setup.domain} for " + f"component {self.name} setup aborted due to shutdown" + ) + slow_setup_warning.cancel() + return False # Cancel the slow setup warning here since the retrying blocks domain_to_setup.error = str(error) domain_to_setup.retrying = True @@ -458,10 +482,19 @@ def setup_domain(self, domain_to_setup: DomainToSetup, tries=1): f"Retrying in {wait_time} seconds. 
" f"Error: {str(error)}" ) - time.sleep(wait_time) + elapsed = 0.0 + interval = 0.2 + while elapsed < wait_time: + if self._vis.shutdown_event.is_set(): + LOGGER.warning("Domain setup retry aborted due to shutdown") + return False + time.sleep(interval) + elapsed += interval # Running with ThreadPoolExecutor and awaiting the future does not # cause a max recursion error if we retry for a long time - with ThreadPoolExecutor(max_workers=1) as executor: + with ThreadPoolExecutor( + max_workers=1, thread_name_prefix="Component.setup_domain" + ) as executor: future = executor.submit( self.setup_domain, domain_to_setup, @@ -664,7 +697,9 @@ def setup_domains(vis: Viseron) -> None: # Check that all domain dependencies are resolved domain_dependencies(vis) - with ThreadPoolExecutor(max_workers=100) as executor: + with ThreadPoolExecutor( + max_workers=100, thread_name_prefix="setup_domains" + ) as executor: for domain in vis.data[DOMAINS_TO_SETUP]: for domain_to_setup in vis.data[DOMAINS_TO_SETUP][domain].values(): setup_domain(vis, executor, domain_to_setup) @@ -785,7 +820,9 @@ def join(thread) -> None: if thread.is_alive(): LOGGER.error(f"{thread.name} did not finish in time") - with ThreadPoolExecutor(max_workers=100) as executor: + with ThreadPoolExecutor( + max_workers=100, thread_name_prefix="setup_components" + ) as executor: setup_thread_future = { executor.submit(join, setup_thread): setup_thread for setup_thread in setup_threads diff --git a/viseron/components/ffmpeg/camera.py b/viseron/components/ffmpeg/camera.py index 66839161b..0ab1faab3 100644 --- a/viseron/components/ffmpeg/camera.py +++ b/viseron/components/ffmpeg/camera.py @@ -5,7 +5,7 @@ import os import time from queue import Empty, Full -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Any import cv2 import setproctitle @@ -308,10 +308,10 @@ def get_default_hwaccel_args() -> list[str]: ) -def setup(vis: Viseron, config, identifier) -> bool: +def setup(vis: Viseron, config: dict[str, Any], identifier: str, attempt: int) -> bool: """Set up the ffmpeg camera domain.""" try: - Camera(vis, config[identifier], identifier) + Camera(vis, config[identifier], identifier, attempt) except (FFprobeError, FFprobeTimeout) as error: raise DomainNotReady from error return True @@ -320,7 +320,9 @@ def setup(vis: Viseron, config, identifier) -> bool: class Camera(AbstractCamera): """Represents a camera which is consumed via FFmpeg.""" - def __init__(self, vis: Viseron, config, identifier) -> None: + def __init__( + self, vis: Viseron, config: dict[str, Any], identifier: str, attempt: int + ) -> None: # Add password to SensitiveInformationFilter. 
# It is done in AbstractCamera but since we are calling Stream before # super().__init__ we need to do it here as well @@ -336,7 +338,7 @@ def __init__(self, vis: Viseron, config, identifier) -> None: # Stream must be initialized before super().__init__ is called as it raises # FFprobeError/FFprobeTimeout which is caught in setup() and re-raised as # DomainNotReady - self.stream = Stream(config, self, identifier) + self.stream = Stream(config, self, identifier, attempt) super().__init__(vis, COMPONENT, config, identifier) self._frame_queue: mp.Queue[ # pylint: disable=unsubscriptable-object diff --git a/viseron/components/ffmpeg/stream.py b/viseron/components/ffmpeg/stream.py index 41d9368d3..99b0ec763 100644 --- a/viseron/components/ffmpeg/stream.py +++ b/viseron/components/ffmpeg/stream.py @@ -8,14 +8,6 @@ from dataclasses import dataclass from typing import TYPE_CHECKING, Any -from tenacity import ( - Retrying, - before_sleep_log, - retry_if_exception_type, - stop_after_attempt, - wait_exponential, -) - from viseron.const import ( CAMERA_SEGMENT_DURATION, ENV_CUDA_SUPPORTED, @@ -95,7 +87,11 @@ class Stream: """Represents a stream of frames from a camera.""" def __init__( - self, config: dict[str, Any], camera: Camera, camera_identifier: str + self, + config: dict[str, Any], + camera: Camera, + camera_identifier: str, + attempt: int = 1, ) -> None: self._logger = logging.getLogger(__name__ + "." + camera_identifier) self._logger.addFilter( @@ -114,7 +110,7 @@ def __init__( self._pipe: sp.Popen | None = None self.segment_process: RestartablePopen | None = None self._log_pipe: LogPipe | None = None - self._ffprobe = FFprobe(config, camera_identifier) + self._ffprobe = FFprobe(config, camera_identifier, attempt) self._mainstream = self.get_stream_information(config) self._substream = None @@ -351,6 +347,7 @@ def get_encoder_audio_codec( if stream_audio_codec in [ "pcm_alaw", "pcm_mulaw", + "pcm_s16be", ]: self._logger.warning( f"Container mp4 does not support {stream_audio_codec} audio " @@ -502,8 +499,10 @@ def pipe(self): stderr=self._log_pipe, ) - return sp.Popen( # type: ignore[call-overload] + return RestartablePopen( self.build_command(), + name=f"viseron.camera.{self._camera.identifier}.pipe", + register=False, stdout=sp.PIPE, stderr=self._log_pipe, ) @@ -581,10 +580,13 @@ def record_only(self): class FFprobe: """FFprobe wrapper class.""" - def __init__(self, config: dict[str, Any], camera_identifier: str) -> None: + def __init__( + self, config: dict[str, Any], camera_identifier: str, attempt: int + ) -> None: self._logger = logging.getLogger(__name__ + "." 
+ camera_identifier) self._config = config - self._ffprobe_timeout = FFPROBE_TIMEOUT + self._camera_identifier = camera_identifier + self._ffprobe_timeout = FFPROBE_TIMEOUT * attempt def stream_information( self, stream_url: str, stream_config: dict[str, Any] @@ -655,35 +657,26 @@ def run_ffprobe( ) self._logger.debug(f"FFprobe command: {' '.join(ffprobe_command)}") - for attempt in Retrying( - retry=retry_if_exception_type((sp.TimeoutExpired, FFprobeTimeout)), - stop=stop_after_attempt(10), - wait=wait_exponential(multiplier=2, min=1, max=30), - before_sleep=before_sleep_log(self._logger, logging.ERROR), - reraise=True, - ): - with attempt: - log_pipe = LogPipe( - self._logger, - FFPROBE_LOGLEVELS[self._config[CONFIG_FFPROBE_LOGLEVEL]], - ) - pipe = sp.Popen( # type: ignore[call-overload] - ffprobe_command, - stdout=sp.PIPE, - stderr=log_pipe, - ) - try: - stdout, _ = pipe.communicate(timeout=self._ffprobe_timeout) - pipe.wait(timeout=FFPROBE_TIMEOUT) - except sp.TimeoutExpired as error: - pipe.terminate() - pipe.wait(timeout=FFPROBE_TIMEOUT) - ffprobe_timeout = self._ffprobe_timeout - self._ffprobe_timeout += FFPROBE_TIMEOUT - raise FFprobeTimeout(ffprobe_timeout) from error - finally: - log_pipe.close() - self._ffprobe_timeout = FFPROBE_TIMEOUT + log_pipe = LogPipe( + self._logger, + FFPROBE_LOGLEVELS[self._config[CONFIG_FFPROBE_LOGLEVEL]], + ) + pipe = RestartablePopen( + ffprobe_command, + name=f"viseron.camera.{self._camera_identifier}.ffprobe", + register=False, + stdout=sp.PIPE, + stderr=log_pipe, + ) + try: + stdout, _ = pipe.communicate(timeout=self._ffprobe_timeout) + pipe.wait(timeout=FFPROBE_TIMEOUT) + except sp.TimeoutExpired as error: + pipe.terminate() + pipe.wait(timeout=FFPROBE_TIMEOUT) + raise FFprobeTimeout(self._ffprobe_timeout) from error + finally: + log_pipe.close() try: # Trim away any text before start of JSON object diff --git a/viseron/components/go2rtc/const.py b/viseron/components/go2rtc/const.py index bb4cdbc4c..70d5d1863 100644 --- a/viseron/components/go2rtc/const.py +++ b/viseron/components/go2rtc/const.py @@ -5,4 +5,8 @@ GO2RTC_CONFIG = "/tmp/go2rtc.yaml" # CONFIG_SCHEMA constants -DESC_COMPONENT = "go2rtc configuration." +DESC_COMPONENT = ( + "go2rtc configuration. See the " + "go2rtc documentation " + "for more information on available options." 
+) diff --git a/viseron/components/gstreamer/camera.py b/viseron/components/gstreamer/camera.py index ba38cb4ea..16ec2ebb8 100644 --- a/viseron/components/gstreamer/camera.py +++ b/viseron/components/gstreamer/camera.py @@ -3,7 +3,7 @@ import time from threading import Event -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Any import cv2 import voluptuous as vol @@ -270,10 +270,10 @@ ) -def setup(vis: Viseron, config, identifier) -> bool: +def setup(vis: Viseron, config: dict[str, Any], identifier: str, attempt: int) -> bool: """Set up the gstreamer camera domain.""" try: - Camera(vis, config[identifier], identifier) + Camera(vis, config[identifier], identifier, attempt) except (FFprobeError, FFprobeTimeout) as error: raise DomainNotReady from error return True @@ -282,13 +282,15 @@ def setup(vis: Viseron, config, identifier) -> bool: class Camera(AbstractCamera): """Represents a camera which is consumed via GStreamer.""" - def __init__(self, vis: Viseron, config, identifier) -> None: + def __init__( + self, vis: Viseron, config: dict[str, Any], identifier: str, attempt: int + ) -> None: self._poll_timer = utcnow().timestamp() self._frame_reader = None # Stream must be initialized before super().__init__ is called as it raises # FFprobeError/FFprobeTimeout which is caught in setup() and re-raised as # DomainNotReady - self.stream = Stream(config, self, identifier) + self.stream = Stream(config, self, identifier, attempt) super().__init__(vis, COMPONENT, config, identifier) self._capture_frames = False diff --git a/viseron/components/gstreamer/stream.py b/viseron/components/gstreamer/stream.py index d3f5c430a..dcb904c94 100644 --- a/viseron/components/gstreamer/stream.py +++ b/viseron/components/gstreamer/stream.py @@ -19,6 +19,7 @@ ENV_JETSON_NANO, ENV_RASPBERRYPI3, ENV_RASPBERRYPI4, + ENV_RASPBERRYPI5, ) from viseron.domains.camera.shared_frames import SharedFrame from viseron.helpers import pop_if_full @@ -56,7 +57,11 @@ class Stream(FFmpegStream): """ def __init__( # pylint: disable=super-init-not-called - self, config: dict[str, Any], camera: Camera, camera_identifier: str + self, + config: dict[str, Any], + camera: Camera, + camera_identifier: str, + attempt: int = 1, ) -> None: self._logger = logging.getLogger(__name__ + "." 
+ camera_identifier) self._logger.addFilter( @@ -67,7 +72,7 @@ def __init__( # pylint: disable=super-init-not-called self._camera: Camera = camera # type: ignore[assignment] - self._ffprobe = FFprobe(config, camera_identifier) + self._ffprobe = FFprobe(config, camera_identifier, attempt) self._mainstream = self.get_stream_information(config) self._substream = None # Substream is not implemented for GStreamer @@ -94,6 +99,8 @@ def __init__( # pylint: disable=super-init-not-called self._pipeline = BasePipeline(config, self, camera) elif os.getenv(ENV_RASPBERRYPI4) == "true": self._pipeline = BasePipeline(config, self, camera) + elif os.getenv(ENV_RASPBERRYPI5) == "true": + self._pipeline = BasePipeline(config, self, camera) elif os.getenv(ENV_JETSON_NANO) == "true": self._pipeline = JetsonPipeline(config, self, camera) elif os.getenv(ENV_CUDA_SUPPORTED) == "true": diff --git a/viseron/components/hailo/__init__.py b/viseron/components/hailo/__init__.py new file mode 100644 index 000000000..c9f449a9d --- /dev/null +++ b/viseron/components/hailo/__init__.py @@ -0,0 +1,398 @@ +"""Hailo component.""" + +from __future__ import annotations + +import logging +import multiprocessing as mp +from functools import partial +from queue import Empty, Queue +from typing import Any + +import numpy as np +import voluptuous as vol +from hailo_platform import HEF, FormatType, HailoSchedulingAlgorithm, VDevice +from hailo_platform.pyhailort.pyhailort import AsyncInferJob, FormatOrder + +from viseron import Viseron +from viseron.components.hailo.utils import ( + get_hailo_arch, + get_model, + get_model_size, + inference_callback, + load_labels, +) +from viseron.domains import RequireDomain, setup_domain +from viseron.domains.object_detector import ( + BASE_CONFIG_SCHEMA as OBJECT_DETECTOR_BASE_CONFIG_SCHEMA, +) +from viseron.domains.object_detector.const import CONFIG_CAMERAS +from viseron.domains.object_detector.detected_object import DetectedObject +from viseron.exceptions import ComponentNotReady, ViseronError +from viseron.helpers import letterbox_resize, pop_if_full +from viseron.helpers.child_process_worker import ChildProcessWorker +from viseron.helpers.validators import Maybe, PathExists, Url + +from .const import ( + COMPONENT, + CONFIG_LABEL_PATH, + CONFIG_MAX_DETECTIONS, + CONFIG_MODEL_PATH, + CONFIG_OBJECT_DETECTOR, + DEFAULT_LABEL_PATH, + DEFAULT_MAX_DETECTIONS, + DEFAULT_MODEL_PATH, + DESC_COMPONENT, + DESC_LABEL_PATH, + DESC_MAX_DETECTIONS, + DESC_MODEL_PATH, + DESC_OBJECT_DETECTOR, +) + +LOGGER = logging.getLogger(__name__) + +OBJECT_DETECTOR_SCHEMA = OBJECT_DETECTOR_BASE_CONFIG_SCHEMA.extend( + { + vol.Optional( + CONFIG_MODEL_PATH, + default=DEFAULT_MODEL_PATH, + description=DESC_MODEL_PATH, + ): Maybe(str, vol.Any(PathExists(), Url())), + vol.Optional( + CONFIG_LABEL_PATH, + default=DEFAULT_LABEL_PATH, + description=DESC_LABEL_PATH, + ): str, + vol.Optional( + CONFIG_MAX_DETECTIONS, + default=DEFAULT_MAX_DETECTIONS, + description=DESC_MAX_DETECTIONS, + ): int, + } +) + +CONFIG_SCHEMA = vol.Schema( + { + vol.Required(COMPONENT, description=DESC_COMPONENT): vol.Schema( + { + vol.Required( + CONFIG_OBJECT_DETECTOR, description=DESC_OBJECT_DETECTOR + ): OBJECT_DETECTOR_SCHEMA, + } + ) + }, + extra=vol.ALLOW_EXTRA, +) + + +def setup(vis: Viseron, config: dict[str, Any]) -> bool: + """Set up Hailo component.""" + config = config[COMPONENT] + + try: + vis.data[COMPONENT] = Hailo8Detector(vis, config[CONFIG_OBJECT_DETECTOR]) + except Exception as error: + LOGGER.error("Failed to start Hailo 8 detector: 
%s", error, exc_info=True) + raise ComponentNotReady from error + + if config.get(CONFIG_OBJECT_DETECTOR, None): + for camera_identifier in config[CONFIG_OBJECT_DETECTOR][CONFIG_CAMERAS].keys(): + setup_domain( + vis, + COMPONENT, + CONFIG_OBJECT_DETECTOR, + config, + identifier=camera_identifier, + require_domains=[ + RequireDomain( + domain="camera", + identifier=camera_identifier, + ) + ], + ) + + return True + + +class LoadHailo8Error(ViseronError): + """Error raised on all failures to load Hailo8.""" + + +class Hailo8Detector(ChildProcessWorker): + """Hailo 8 object detector.""" + + def __init__(self, vis: Viseron, config: dict[str, Any]): + hailo_arch = get_hailo_arch() + LOGGER.debug(f"Detected Hailo architecture: {hailo_arch}") + + if hailo_arch is None: + LOGGER.error("Failed to detect Hailo architecture.") + raise ComponentNotReady + + self.model_path = get_model(config[CONFIG_MODEL_PATH], hailo_arch) + self.labels = load_labels(config[CONFIG_LABEL_PATH]) + + self._result_queues: dict[str, Queue] = {} + self._process_initialization_done = mp.Event() + self._process_initialization_error = mp.Event() + self._hailo_inference: HailoInfer + self._model_height: int + self._model_width: int + + self._process_initialization_done = mp.Event() + super().__init__(vis, f"{COMPONENT}.{CONFIG_OBJECT_DETECTOR}") + self.initialize() + + def initialize(self) -> None: + """Initialize Hailo8.""" + self._process_initialization_done.wait(30) + if ( + not self._process_initialization_done.is_set() + or self._process_initialization_error.is_set() + ): + LOGGER.error("Failed to load Hailo8") + self.stop() + raise LoadHailo8Error + + self._model_size_event = mp.Event() + self._model_width = 0 + self._model_height = 0 + get_model_size(self._process_queue) + self._model_size_event.wait(10) + if not self._model_size_event.is_set(): + LOGGER.error("Failed to get model size") + self.stop() + raise LoadHailo8Error("Failed to get model size") + LOGGER.debug(f"Model size: {self._model_width}x{self._model_height}") + + def process_initialization(self) -> None: + """Load network inside the child process.""" + try: + self._hailo_inference = HailoInfer(self.model_path) + ( + self._model_height, + self._model_width, + _, + ) = self._hailo_inference.get_input_shape() + except Exception as error: # pylint: disable=broad-except + LOGGER.error(f"Failed to load Hailo8: {error}") + self._process_initialization_error.set() + self._process_initialization_done.set() + + def work_input(self, item): + """Perform object detection.""" + if item == "get_model_size": + height, width, _ = self._hailo_inference.get_input_shape() + return { + "get_model_size": { + "model_width": width, + "model_height": height, + } + } + + # Run async inference + inference_callback_fn = partial(inference_callback, item=item) + async_job = self._hailo_inference.run([item["frame"]], inference_callback_fn) + async_job.wait(3000) + return item + + def work_output(self, item) -> None: + """Put result into queue.""" + if item.get("get_model_size", None): + self._model_width = item["get_model_size"]["model_width"] + self._model_height = item["get_model_size"]["model_height"] + self._model_size_event.set() + return + + pop_if_full(self._result_queues[item["camera_identifier"]], item) + + def preprocess(self, frame): + """Pre process frame before detection.""" + return letterbox_resize(frame, self.model_width, self.model_height) + + def detect( + self, + frame: np.ndarray, + camera_identifier: str, + result_queue: Queue, + ): + """Perform detection.""" + 
self._result_queues[camera_identifier] = result_queue + pop_if_full( + self.input_queue, + { + "frame": frame, + "camera_identifier": camera_identifier, + }, + ) + try: + item = result_queue.get(timeout=3) + except Empty: + return None + return item["result"] + + def post_process( + self, + detections, + camera_resolution: tuple[int, int], + min_confidence: float, + max_boxes: int = 50, + ) -> list[DetectedObject]: + """Post process detections.""" + all_detections = [] + for class_id, detection in enumerate(detections): + for det in detection: + bbox, score = det[:4], det[4] + if score >= min_confidence: + all_detections.append((class_id, score, bbox)) + + # Sort all detections by score descending + all_detections.sort(reverse=True, key=lambda x: x[1]) + + # Filter to max_boxes highest scoring detections + top_detections = all_detections[:max_boxes] + _detections = [] + for class_id, score, bbox in top_detections: + _detections.append( + DetectedObject.from_relative_letterboxed( + self.labels[int(class_id)], + score, + bbox[1], + bbox[0], + bbox[3], + bbox[2], + frame_res=camera_resolution, + model_res=self.model_res, + ) + ) + + return _detections + + @property + def model_width(self) -> int: + """Return trained model width.""" + return self._model_width + + @property + def model_height(self) -> int: + """Return trained model height.""" + return self._model_height + + @property + def model_res(self): + """Return trained model resolution.""" + return self.model_width, self.model_height + + +class HailoInfer: + """Helper around Hailo SDK to perform asynchronous inference. + + Based on https://raw.githubusercontent.com/hailo-ai/Hailo-Application-Code-Examples/refs/heads/main/runtime/hailo-8/python/common/hailo_inference.py #pylint: disable=line-too-long + """ + + def __init__( + self, + hef_path: str, + batch_size: int = 1, + input_type: str | None = None, + output_type: str | None = None, + priority: int | None = 0, + ) -> None: + """Initialize async inference wrapper for a HEF model.""" + params = VDevice.create_params() + # Set the scheduling algorithm to round-robin to activate the scheduler + params.scheduling_algorithm = HailoSchedulingAlgorithm.ROUND_ROBIN + params.group_id = "SHARED" + vdev = VDevice(params) + + self.target = vdev + self.hef = HEF(hef_path) + + self.infer_model = self.target.create_infer_model(hef_path) + self.infer_model.set_batch_size(batch_size) + + self._set_input_type(input_type) + self._set_output_type(output_type) + + self.config_ctx = self.infer_model.configure() + self.configured_model = self.config_ctx.__enter__() + self.configured_model.set_scheduler_priority(priority) + self.last_infer_job: AsyncInferJob | None = None + + def _set_input_type(self, input_type: str | None = None) -> None: + """Set the input type for the HEF model. 
If the model has multiple inputs, the same type is set for all of them."""
+        if input_type is not None:
+            self.infer_model.input().set_format_type(getattr(FormatType, input_type))
+
+    def _set_output_type(self, output_type: str | None = None) -> None:
+        """Set the output type for each model output."""
+        self.nms_postprocess_enabled = False
+
+        # If the model uses HAILO_NMS_WITH_BYTE_MASK format (e.g., instance segmentation)
+        if (
+            self.infer_model.outputs[0].format.order
+            == FormatOrder.HAILO_NMS_WITH_BYTE_MASK
+        ):
+            # Use UINT8 and skip setting output formats
+            self.nms_postprocess_enabled = True
+            self.output_type = self._output_data_type2dict("UINT8")
+            return
+
+        # Otherwise, set the format type based on the provided output_type argument
+        self.output_type = self._output_data_type2dict(output_type)
+
+        # Apply format to each output layer
+        for name, dtype in self.output_type.items():
+            self.infer_model.output(name).set_format_type(getattr(FormatType, dtype))
+
+    def get_input_shape(self) -> tuple[int, ...]:
+        """Get the shape of the model's input layer."""
+        return self.hef.get_input_vstream_infos()[0].shape  # Assumes one input
+
+    def run(self, input_batch: list[np.ndarray], inference_callback_fn):
+        """Run an asynchronous inference job on a batch of preprocessed inputs."""
+        bindings_list = self.create_bindings(self.configured_model, input_batch)
+        self.configured_model.wait_for_async_ready(timeout_ms=10000)
+
+        # Launch async inference and attach the result handler
+        self.last_infer_job = self.configured_model.run_async(
+            bindings_list, partial(inference_callback_fn, bindings_list=bindings_list)
+        )
+        return self.last_infer_job
+
+    def create_bindings(self, configured_model, input_batch):
+        """Create a list of input-output bindings for a batch of frames."""
+
+        def frame_binding(frame: np.ndarray):
+            output_buffers = {
+                name: np.empty(
+                    self.infer_model.output(name).shape,
+                    dtype=(getattr(np, self.output_type[name].lower())),
+                )
+                for name in self.output_type
+            }
+
+            binding = configured_model.create_bindings(output_buffers=output_buffers)
+            binding.input().set_buffer(np.array(frame))
+            return binding
+
+        return [frame_binding(frame) for frame in input_batch]
+
+    def _output_data_type2dict(self, data_type: str | None) -> dict[str, str]:
+        """Generate a dictionary mapping for output layer data types."""
+        valid_types = {"float32", "uint8", "uint16"}
+        data_type_dict = {}
+
+        for output_info in self.hef.get_output_vstream_infos():
+            name = output_info.name
+            if data_type is None:
+                # Extract type from HEF metadata
+                hef_type = str(output_info.format.type).rsplit(".", maxsplit=1)[-1]
+                data_type_dict[name] = hef_type
+            else:
+                if data_type.lower() not in valid_types:
+                    raise ValueError(
+                        f"Invalid data_type: {data_type}. 
Must be one of {valid_types}"
+                )
+            data_type_dict[name] = data_type
+
+        return data_type_dict
diff --git a/viseron/components/hailo/const.py b/viseron/components/hailo/const.py
new file mode 100644
index 000000000..cff5892eb
--- /dev/null
+++ b/viseron/components/hailo/const.py
@@ -0,0 +1,41 @@
+"""Constants for the Hailo component."""
+from typing import Final
+
+COMPONENT = "hailo"
+
+HAILO8_DEFAULT_URL = (
+    "https://hailo-model-zoo.s3.eu-west-2.amazonaws.com/"
+    "ModelZoo/Compiled/v2.14.0/hailo8/yolov6n.hef"
+)
+HAILO8L_DEFAULT_URL = (
+    "https://hailo-model-zoo.s3.eu-west-2.amazonaws.com/"
+    "ModelZoo/Compiled/v2.16.0/hailo8l/yolov11m.hef"
+)
+
+# CONFIG_SCHEMA constants
+CONFIG_OBJECT_DETECTOR = "object_detector"
+
+# OBJECT_DETECTOR_SCHEMA constants
+CONFIG_MODEL_PATH = "model_path"
+CONFIG_LABEL_PATH = "label_path"
+CONFIG_MAX_DETECTIONS = "max_detections"
+
+
+DEFAULT_MODEL_PATH: Final = None
+DEFAULT_LABEL_PATH = "/detectors/models/darknet/coco.names"
+DEFAULT_MAX_DETECTIONS = 50
+
+
+DESC_COMPONENT = "Hailo configuration."
+DESC_OBJECT_DETECTOR = "Object detector domain config."
+DESC_MODEL_PATH = (
+    "Path or URL to a Hailo-8 model in HEF format."
+    " If a URL is provided, the model will be downloaded on startup."
+    " If not provided, a default model from Hailo's model zoo will be used.
" + "Downloaded models are cached and won't be re-downloaded." +) +DESC_LABEL_PATH = ( + "Path to file containing trained labels. If not provided, the COCO labels file from" + " the darknet component will be used." +) +DESC_MAX_DETECTIONS = "Maximum number of detections to return." diff --git a/viseron/components/hailo/object_detector.py b/viseron/components/hailo/object_detector.py new file mode 100644 index 000000000..ba290ca00 --- /dev/null +++ b/viseron/components/hailo/object_detector.py @@ -0,0 +1,53 @@ +"""Hailo object detector.""" +from __future__ import annotations + +import logging +from queue import Queue +from typing import TYPE_CHECKING + +from viseron.domains.object_detector import AbstractObjectDetector + +from .const import COMPONENT, CONFIG_OBJECT_DETECTOR + +if TYPE_CHECKING: + from viseron import Viseron + from viseron.components.hailo import Hailo8Detector + from viseron.domains.object_detector.detected_object import DetectedObject + + +LOGGER = logging.getLogger(__name__) + + +def setup(vis: Viseron, config, identifier) -> bool: + """Set up the Hailo object_detector domain.""" + ObjectDetector(vis, config, identifier) + + return True + + +class ObjectDetector(AbstractObjectDetector): + """Hailo object detection.""" + + def __init__(self, vis: Viseron, config, camera_identifier) -> None: + super().__init__( + vis, COMPONENT, config[CONFIG_OBJECT_DETECTOR], camera_identifier + ) + self._hailo8: Hailo8Detector = vis.data[COMPONENT] + self._object_result_queue: Queue[list[DetectedObject]] = Queue(maxsize=1) + + def preprocess(self, frame): + """Preprocess frame before detection.""" + return self._hailo8.preprocess(frame) + + def return_objects(self, frame) -> list[DetectedObject] | None: + """Perform object detection.""" + detections = self._hailo8.detect( + frame, + self._camera_identifier, + self._object_result_queue, + ) + if detections is None: + return None + return self._hailo8.post_process( + detections, self._camera.resolution, self.min_confidence + ) diff --git a/viseron/components/hailo/utils.py b/viseron/components/hailo/utils.py new file mode 100644 index 000000000..4f6b7ad7c --- /dev/null +++ b/viseron/components/hailo/utils.py @@ -0,0 +1,139 @@ +"""Utility functions for Hailo.""" + + +import logging +import multiprocessing as mp +import os +import subprocess as sp +import urllib.request +from typing import Any, Literal +from urllib.parse import urlparse + +import numpy as np + +from viseron.components.hailo.const import HAILO8_DEFAULT_URL, HAILO8L_DEFAULT_URL +from viseron.domains.object_detector.const import MODEL_CACHE + +LOGGER = logging.getLogger(__name__) + + +def get_hailo_arch() -> None | Literal["hailo8l"] | Literal["hailo8"]: + """Return detected Hailo device architecture.""" + cmd = ["hailortcli", "fw-control", "identify"] + try: + result = sp.run(cmd, capture_output=True, text=True, check=False) + except FileNotFoundError: + LOGGER.error("hailortcli not found in PATH while detecting Hailo architecture") + return None + except Exception: # pylint: disable=broad-except + LOGGER.exception("Unexpected error while detecting Hailo architecture") + return None + + if result.returncode != 0: + LOGGER.error( + "Failed running '%s': returncode=%s stderr=%s", + " ".join(cmd), + result.returncode, + result.stderr.strip(), + ) + return None + + for line in result.stdout.splitlines(): + if "Device Architecture" in line: + lowered = line.lower() + if "hailo8l" in lowered: + return "hailo8l" + if "hailo8" in lowered: + return "hailo8" + break + + 
LOGGER.error("Could not determine Hailo architecture from hailortcli output") + return None + + +def load_labels(labels: str) -> list[str]: + """Load labels from file.""" + with open(labels, encoding="utf-8") as labels_file: + return labels_file.read().rstrip("\n").split("\n") + + +def get_model_size(process_queue: mp.Queue): + """Get model size by sending a job to the subprocess.""" + process_queue.put("get_model_size") + + +def inference_callback( + completion_info, + bindings_list: list, + item: dict[str, Any], +) -> None: + """Inference callback to handle inference results.""" + if completion_info.exception: + LOGGER.error(f"Inference error: {completion_info.exception}") + return + + for _, bindings in enumerate(bindings_list): + if len(bindings._output_names) == 1: # pylint: disable=protected-access + result = bindings.output().get_buffer() + else: + result = { + name: np.expand_dims(bindings.output(name).get_buffer(), axis=0) + for name in bindings._output_names # pylint: disable=protected-access + } + item["result"] = result + + +def is_url(value: str) -> bool: + """Return True if value appears to be an HTTP(S) URL.""" + parsed = urlparse(value) + return parsed.scheme in {"http", "https"} + + +def get_model_name(model_path: str) -> str: + """Return model filename.""" + return os.path.basename(model_path) + + +def download_model(url: str, cached_model_path: str) -> None: + """Download model to cache.""" + if not url.endswith(".hef"): + raise ValueError("Invalid model URL. Only .hef files are supported.") + try: + urllib.request.urlretrieve(url, cached_model_path) + LOGGER.info(f"Downloaded model to {cached_model_path}") + except Exception as e: + raise RuntimeError(f"Failed to download model from {url}: {str(e)}") from e + + +def get_model(model_path: str | None, hailo_arch: Literal["hailo8", "hailo8l"]) -> str: + """Return locally cached model or download if a URL is provided.""" + if model_path is None: + if hailo_arch == "hailo8l": + model_path = HAILO8L_DEFAULT_URL + else: + model_path = HAILO8_DEFAULT_URL + + os.makedirs(MODEL_CACHE, exist_ok=True) + path_is_url = is_url(model_path) + + # Search for local path + if not path_is_url: + if not model_path.endswith(".hef"): + raise ValueError(f"Provided model path must end with .hef: {model_path}") + if not os.path.exists(model_path): + raise FileNotFoundError(f"Model file not found at: {model_path}") + LOGGER.debug("Using provided model file %s", model_path) + return model_path + + # Determine model name and cache destination + model_name = get_model_name(model_path) + cached_model_path = os.path.join(MODEL_CACHE, model_name) + + # Search for cached path + if os.path.exists(cached_model_path): + LOGGER.debug("Using cached model %s", cached_model_path) + return cached_model_path + + LOGGER.info("Downloading model %s -> %s", model_path, cached_model_path) + download_model(model_path, cached_model_path) + return cached_model_path diff --git a/viseron/components/storage/__init__.py b/viseron/components/storage/__init__.py index 95e1514b6..feebbc5b8 100644 --- a/viseron/components/storage/__init__.py +++ b/viseron/components/storage/__init__.py @@ -34,11 +34,13 @@ CONFIG_TIER_CHECK_SLEEP_BETWEEN_BATCHES, CONFIG_TIER_CHECK_WORKERS, CONFIG_TIERS, + CONFIG_TIMELAPSE, DEFAULT_COMPONENT, DESC_COMPONENT, ENGINE, TIER_CATEGORY_RECORDER, TIER_CATEGORY_SNAPSHOTS, + TIER_CATEGORY_TIMELAPSE, TIER_SUBCATEGORY_EVENT_CLIPS, TIER_SUBCATEGORY_FACE_RECOGNITION, TIER_SUBCATEGORY_LICENSE_PLATE_RECOGNITION, @@ -46,6 +48,7 @@ 
TIER_SUBCATEGORY_OBJECT_DETECTOR, TIER_SUBCATEGORY_SEGMENTS, TIER_SUBCATEGORY_THUMBNAILS, + TIER_SUBCATEGORY_TIMELAPSE, ) from viseron.components.storage.jobs import CleanupManager from viseron.components.storage.models import Base, FilesMeta, Motion, Recordings @@ -55,6 +58,7 @@ SegmentsTierHandler, SnapshotTierHandler, ThumbnailTierHandler, + TimelapseTierHandler, ) from viseron.components.storage.util import ( RequestedFilesCount, @@ -62,6 +66,7 @@ get_segments_path, get_snapshots_path, get_thumbnails_path, + get_timelapse_path, ) from viseron.const import EVENT_DOMAIN_REGISTERED, VISERON_SIGNAL_STOPPING from viseron.domains.camera.const import CONFIG_STORAGE, DOMAIN as CAMERA_DOMAIN @@ -104,6 +109,7 @@ class TierSubcategory(TypedDict): | SegmentsTierHandler | ThumbnailTierHandler | EventClipTierHandler + | TimelapseTierHandler ] @@ -112,6 +118,7 @@ class TierCategories(TypedDict): recorder: list[TierSubcategory] snapshots: list[TierSubcategory] + timelapse: list[TierSubcategory] TIER_CATEGORIES: TierCategories = { @@ -147,6 +154,12 @@ class TierCategories(TypedDict): "tier_handler": SnapshotTierHandler, }, ], + TIER_CATEGORY_TIMELAPSE: [ + { + "subcategory": TIER_SUBCATEGORY_TIMELAPSE, + "tier_handler": TimelapseTierHandler, + }, + ], } @@ -192,8 +205,24 @@ def __init__(self, vis: Viseron, config: dict[str, Any]) -> None: self._config = config self._recordings_tiers = config[CONFIG_RECORDER][CONFIG_TIERS] self._snapshots_tiers = config[CONFIG_SNAPSHOTS][CONFIG_TIERS] + self._timelapse_tiers = ( + config[CONFIG_TIMELAPSE][CONFIG_TIERS] + if config.get(CONFIG_TIMELAPSE) + else [] + ) self._camera_tier_handlers: dict[ - str, dict[str, list[dict[str, SnapshotTierHandler | SegmentsTierHandler]]] + str, + dict[ + str, + list[ + dict[ + str, + SnapshotTierHandler + | SegmentsTierHandler + | TimelapseTierHandler, + ] + ], + ], ] = {} self.camera_requested_files_count: dict[str, RequestedFilesCount] = {} @@ -449,6 +478,43 @@ def get_snapshots_path( ] ] + @overload + def get_timelapse_path(self, camera: AbstractCamera) -> str | None: + ... + + @overload + def get_timelapse_path( + self, camera: AbstractCamera, all_tiers: Literal[False] + ) -> str | None: + ... + + @overload + def get_timelapse_path( + self, camera: AbstractCamera, all_tiers: Literal[True] + ) -> list[str] | None: + ... 
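The @overload stubs above exist only for the type checker: they narrow the return type of get_timelapse_path based on the literal value of all_tiers, while the single runtime implementation that follows handles both cases. A reduced, Viseron-independent sketch of the pattern (names are illustrative):

from __future__ import annotations

from typing import Literal, overload

@overload
def get_paths(all_tiers: Literal[False] = ...) -> str | None: ...

@overload
def get_paths(all_tiers: Literal[True]) -> list[str] | None: ...

def get_paths(all_tiers: bool = False) -> str | list[str] | None:
    # Only this definition exists at runtime; the stubs above are typing-only.
    return ["/tier0", "/tier1"] if all_tiers else "/tier0"

With this in place, a checker infers str | None for get_paths() and list[str] | None for get_paths(all_tiers=True), so callers need no casts.
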
+ + def get_timelapse_path( + self, camera: AbstractCamera, all_tiers: bool = False + ) -> str | list[str] | None: + """Get timelapse path for camera.""" + if not self._timelapse_tiers: + return None + self.create_tier_handlers(camera) + if not all_tiers: + return get_timelapse_path( + self._camera_tier_handlers[camera.identifier][TIER_CATEGORY_TIMELAPSE][ + 0 + ][TIER_SUBCATEGORY_TIMELAPSE].tier, + camera, + ) + return [ + get_timelapse_path(tier_handler[TIER_SUBCATEGORY_TIMELAPSE].tier, camera) + for tier_handler in self._camera_tier_handlers[camera.identifier][ + TIER_CATEGORY_TIMELAPSE + ] + ] + def search_file( self, camera_identifier: str, category: str, subcategory: str, path: str ) -> str | None: @@ -496,6 +562,11 @@ def create_tier_handlers(self, camera: AbstractCamera) -> None: tier_config = _get_tier_config(self._config, camera) for category in TIER_CATEGORIES: + # Skip timelapse if not configured + if category == TIER_CATEGORY_TIMELAPSE and not tier_config.get( + CONFIG_TIMELAPSE + ): + continue self._camera_tier_handlers[camera.identifier].setdefault(category, []) # pylint: disable-next=line-too-long for subcategory in TIER_CATEGORIES[category]: # type: ignore[literal-required] # noqa: E501 @@ -653,4 +724,29 @@ def _get_tier_config(config: dict[str, Any], camera: AbstractCamera) -> dict[str ) )(tier_config[CONFIG_SNAPSHOTS][CONFIG_TIERS]) + # Handle timelapse tiers (only if timelapse is configured) + if tier_config.get(CONFIG_TIMELAPSE): + _timelapse_tier: dict[str, Any] = {} + if ( + camera.config[CONFIG_STORAGE] + and camera.config[CONFIG_STORAGE][CONFIG_TIMELAPSE] != UNDEFINED + ): + _timelapse_tier = camera.config[CONFIG_STORAGE][CONFIG_TIMELAPSE][ + CONFIG_TIERS + ] + tier_config[CONFIG_TIMELAPSE][CONFIG_TIERS] = _timelapse_tier + + if _timelapse_tier: + LOGGER.debug( + f"Camera {camera.name} has custom timelapse tiers, " + "overwriting storage timelapse tiers" + ) + # Validate the tier schema to fill in defaults + tier_config[CONFIG_TIMELAPSE][CONFIG_TIERS] = vol.Schema( + vol.All( + [TIER_SCHEMA_SNAPSHOTS], + vol.Length(min=1), + ) + )(tier_config[CONFIG_TIMELAPSE][CONFIG_TIERS]) + return tier_config diff --git a/viseron/components/storage/check_tier.py b/viseron/components/storage/check_tier.py index 2f050528c..6107fc9c8 100644 --- a/viseron/components/storage/check_tier.py +++ b/viseron/components/storage/check_tier.py @@ -202,7 +202,8 @@ def work_input(self, item: DataItem | DataItemMoveFile | DataItemDeleteFile): self.delete_file(item) except Exception as e: # pylint: disable=broad-except LOGGER.error( - "Error processing command: %s", + "Error processing command: %s, error: %s", + item, e, ) item.error = str(e) diff --git a/viseron/components/storage/config.py b/viseron/components/storage/config.py index ca6e8a7d1..22e58a97a 100644 --- a/viseron/components/storage/config.py +++ b/viseron/components/storage/config.py @@ -15,6 +15,7 @@ CONFIG_FACE_RECOGNITION, CONFIG_GB, CONFIG_HOURS, + CONFIG_INTERVAL, CONFIG_LICENSE_PLATE_RECOGNITION, CONFIG_MAX_AGE, CONFIG_MAX_SIZE, @@ -35,6 +36,7 @@ CONFIG_TIER_CHECK_SLEEP_BETWEEN_BATCHES, CONFIG_TIER_CHECK_WORKERS, CONFIG_TIERS, + CONFIG_TIMELAPSE, DEFAULT_CHECK_INTERVAL, DEFAULT_CHECK_INTERVAL_DAYS, DEFAULT_CHECK_INTERVAL_HOURS, @@ -46,6 +48,7 @@ DEFAULT_FACE_RECOGNITION, DEFAULT_GB, DEFAULT_HOURS, + DEFAULT_INTERVAL, DEFAULT_LICENSE_PLATE_RECOGNITION, DEFAULT_MAX_AGE, DEFAULT_MAX_SIZE, @@ -59,12 +62,14 @@ DEFAULT_POLL, DEFAULT_RECORDER, DEFAULT_RECORDER_TIERS, + DEFAULT_SECONDS, DEFAULT_SNAPSHOTS, DEFAULT_SNAPSHOTS_TIERS, 
DEFAULT_TIER_CHECK_BATCH_SIZE, DEFAULT_TIER_CHECK_CPU_LIMIT, DEFAULT_TIER_CHECK_SLEEP_BETWEEN_BATCHES, DEFAULT_TIER_CHECK_WORKERS, + DEFAULT_TIMELAPSE, DESC_CHECK_INTERVAL, DESC_CHECK_INTERVAL_DAYS, DESC_CHECK_INTERVAL_HOURS, @@ -74,6 +79,7 @@ DESC_DOMAIN_TIERS, DESC_EVENTS, DESC_FACE_RECOGNITION, + DESC_INTERVAL, DESC_LICENSE_PLATE_RECOGNITION, DESC_MAX_AGE, DESC_MAX_DAYS, @@ -102,8 +108,11 @@ DESC_TIER_CHECK_CPU_LIMIT, DESC_TIER_CHECK_SLEEP_BETWEEN_BATCHES, DESC_TIER_CHECK_WORKERS, + DESC_TIMELAPSE, + DESC_TIMELAPSE_TIERS, TIER_CATEGORY_RECORDER, TIER_CATEGORY_SNAPSHOTS, + TIER_CATEGORY_TIMELAPSE, TIER_SUBCATEGORY_EVENT_CLIPS, TIER_SUBCATEGORY_SEGMENTS, TIER_SUBCATEGORY_THUMBNAILS, @@ -388,6 +397,53 @@ def get_snapshots_schema(undefined_defaults=False): SNAPSHOTS_SCHEMA = get_snapshots_schema() + +TIER_SCHEMA_TIMELAPSE = vol.All( + TIER_SCHEMA_SNAPSHOTS.extend( + { + vol.Optional( + CONFIG_INTERVAL, + default=DEFAULT_INTERVAL, + description=DESC_INTERVAL, + ): { + vol.Optional( + CONFIG_DAYS, + default=DEFAULT_DAYS, + description=DESC_MAX_DAYS, + ): Maybe(vol.Coerce(int)), + vol.Optional( + CONFIG_HOURS, + default=DEFAULT_HOURS, + description=DESC_MAX_HOURS, + ): Maybe(vol.Coerce(int)), + vol.Optional( + CONFIG_MINUTES, + default=DEFAULT_MINUTES, + description=DESC_MAX_MINUTES, + ): Maybe(vol.Coerce(int)), + vol.Optional( + CONFIG_SECONDS, + default=DEFAULT_SECONDS, + description=DESC_CHECK_INTERVAL_SECONDS, + ): Maybe(vol.Coerce(int)), + }, + } + ), +) + + +def get_timelapse_schema(): + """Get timelapse schema.""" + return { + vol.Required(CONFIG_TIERS, description=DESC_TIMELAPSE_TIERS): vol.All( + [TIER_SCHEMA_TIMELAPSE], + vol.Length(min=1), + ), + } + + +TIMELAPSE_SCHEMA = get_timelapse_schema() + STORAGE_SCHEMA = vol.Schema( { vol.Optional( @@ -399,7 +455,7 @@ def get_snapshots_schema(undefined_defaults=False): CONFIG_TIER_CHECK_WORKERS, default=DEFAULT_TIER_CHECK_WORKERS, description=DESC_TIER_CHECK_WORKERS, - ): Maybe(vol.Coerce(int)), + ): Maybe(vol.All(vol.Coerce(int), vol.Range(min=1))), vol.Optional( CONFIG_TIER_CHECK_BATCH_SIZE, default=DEFAULT_TIER_CHECK_BATCH_SIZE, @@ -420,6 +476,11 @@ def get_snapshots_schema(undefined_defaults=False): default=DEFAULT_SNAPSHOTS, description=DESC_SNAPSHOTS, ): SNAPSHOTS_SCHEMA, + vol.Optional( + CONFIG_TIMELAPSE, + default=DEFAULT_TIMELAPSE, + description=DESC_TIMELAPSE, + ): Maybe(TIMELAPSE_SCHEMA), } ) @@ -447,6 +508,14 @@ def _check_path_exists(tier: Tier, category: str): ) return + if category == TIER_CATEGORY_TIMELAPSE and tier[CONFIG_PATH] == "/": + if not os.path.exists(f"/{TIER_CATEGORY_TIMELAPSE}"): + raise vol.Invalid( + f"The /{TIER_CATEGORY_TIMELAPSE} folder does not exist. " + "Please mount it to the container." + ) + return + if not os.path.exists(tier[CONFIG_PATH]): raise vol.Invalid( f"The {tier[CONFIG_PATH]} folder does not exist. " @@ -585,6 +654,22 @@ def _validate_snapshots_tiers( previous_tier = _tier +def _validate_timelapse_tiers( + component_config: dict[str, Any], +): + # Check timelapse config + timelapse_config = component_config.get(CONFIG_TIMELAPSE) + if not timelapse_config: + return + + previous_tier = None + paths: list[str] = [] + for tier in timelapse_config.get(CONFIG_TIERS, []): + _tier = Tier(path=tier[CONFIG_PATH], max_age=tier[CONFIG_MAX_AGE]) + _check_tier(_tier, previous_tier, paths, CONFIG_TIMELAPSE) + previous_tier = _tier + + def validate_tiers(config: dict[str, Any]) -> dict[str, Any]: """Validate tiers. 
@@ -602,4 +687,7 @@ def validate_tiers(config: dict[str, Any]) -> dict[str, Any]: if component_config.get(CONFIG_SNAPSHOTS, None): _validate_snapshots_tiers(component_config) + if component_config.get(CONFIG_TIMELAPSE): + _validate_timelapse_tiers(component_config) + return config diff --git a/viseron/components/storage/const.py b/viseron/components/storage/const.py index be9468fd3..c03f051f1 100644 --- a/viseron/components/storage/const.py +++ b/viseron/components/storage/const.py @@ -17,6 +17,7 @@ class CleanupJobNames(Enum): ORPHANED_FILES = "cleanup_orphaned_files" ORPHANED_DB_FILES = "cleanup_orphaned_db_files" + ZERO_SIZE_FILES = "cleanup_zero_size_files" EMPTY_FOLDERS = "cleanup_empty_folders" ORPHANED_THUMBNAILS = "cleanup_orphaned_thumbnails" ORPHANED_EVENT_CLIPS = "cleanup_orphaned_clips" @@ -27,17 +28,14 @@ class CleanupJobNames(Enum): OLD_EVENTS = "cleanup_old_events" -EVENT_FILE_CREATED = ( - "file_created/{camera_identifier}/{category}/{subcategory}/{file_name}" -) -EVENT_FILE_DELETED = ( - "file_deleted/{camera_identifier}/{category}/{subcategory}/{file_name}" -) +EVENT_FILE_CREATED = "file_created/{camera_identifier}/{category}/{subcategory}" +EVENT_FILE_DELETED = "file_deleted/{camera_identifier}/{category}/{subcategory}" EVENT_CHECK_TIER = "check_tier/{camera_identifier}/{tier_id}/{category}/{subcategory}" # Tier categories TIER_CATEGORY_RECORDER: Final = "recorder" TIER_CATEGORY_SNAPSHOTS: Final = "snapshots" +TIER_CATEGORY_TIMELAPSE: Final = "timelapse" # Tier subcategories TIER_SUBCATEGORY_SEGMENTS: Final = "segments" @@ -47,6 +45,7 @@ class CleanupJobNames(Enum): TIER_SUBCATEGORY_OBJECT_DETECTOR: Final = "object_detector" TIER_SUBCATEGORY_LICENSE_PLATE_RECOGNITION: Final = "license_plate_recognition" TIER_SUBCATEGORY_MOTION_DETECTOR: Final = "motion_detector" +TIER_SUBCATEGORY_TIMELAPSE: Final = "timelapse" # Storage configuration @@ -78,7 +77,9 @@ class CleanupJobNames(Enum): CONFIG_OBJECT_DETECTOR: Final = "object_detector" CONFIG_LICENSE_PLATE_RECOGNITION: Final = "license_plate_recognition" CONFIG_MOTION_DETECTOR: Final = "motion_detector" +CONFIG_TIMELAPSE: Final = "timelapse" CONFIG_TIERS: Final = "tiers" +CONFIG_INTERVAL: Final = "interval" DEFAULT_TIER_CHECK_CPU_LIMIT: Final = 10 @@ -105,6 +106,7 @@ class CleanupJobNames(Enum): }, }, ] +DEFAULT_TIMELAPSE: Final = None DEFAULT_FACE_RECOGNITION: Final = None DEFAULT_OBJECT_DETECTOR: Final = None DEFAULT_LICENSE_PLATE_RECOGNITION: Final = None @@ -124,12 +126,14 @@ class CleanupJobNames(Enum): DEFAULT_DAYS: Final = None DEFAULT_HOURS: Final = None DEFAULT_MINUTES: Final = None +DEFAULT_SECONDS: Final = None DEFAULT_MIN_SIZE: dict[str, Any] = {} DEFAULT_MAX_SIZE: dict[str, Any] = {} DEFAULT_MIN_AGE: dict[str, Any] = {} DEFAULT_MAX_AGE: dict[str, Any] = {} DEFAULT_CONTINUOUS: Final = None DEFAULT_EVENTS: Final = None +DEFAULT_INTERVAL: dict[str, Any] = {} DESC_TIER_CHECK_CPU_LIMIT = ( "CPU limit for the tier check process. " @@ -171,10 +175,16 @@ class CleanupJobNames(Enum): "Snapshots will be taken for object detection, motion detection, and any post " "processor that scans the image, for example face and license plate recognition." ) +DESC_TIMELAPSE = ( + "Configuration for timelapse videos. " + "Timelapse videos are created by combining images or video segments over time " + "to show changes in a compressed time format." +) DESC_SNAPSHOTS_TIERS = ( "Default tiers for all domains, unless overridden in the domain configuration.
" f"{DESC_RECORDER_TIERS} " ) +DESC_TIMELAPSE_TIERS = "Tiers for timelapse videos. " f"{DESC_RECORDER_TIERS} " DESC_DOMAIN_TIERS = DESC_RECORDER_TIERS DESC_FACE_RECOGNITION = ( "Override the default snapshot tiers for face recognition. " @@ -225,3 +235,4 @@ class CleanupJobNames(Enum): DESC_MAX_AGE = "Maximum age of files to keep in this tier." DESC_CONTINUOUS = "Retention rules for continuous recordings." DESC_EVENTS = "Retention rules for event recordings." +DESC_INTERVAL = "Time interval between timelapse frame extractions." diff --git a/viseron/components/storage/jobs.py b/viseron/components/storage/jobs.py index a1ebb7059..e0b3de93a 100644 --- a/viseron/components/storage/jobs.py +++ b/viseron/components/storage/jobs.py @@ -201,6 +201,10 @@ def _run(self) -> None: paths += self._storage.get_event_clips_path(camera, all_tiers=True) paths += self._storage.get_segments_path(camera, all_tiers=True) paths += self._storage.get_thumbnails_path(camera, all_tiers=True) + if timelapse_path := self._storage.get_timelapse_path( + camera, all_tiers=True + ): + paths += timelapse_path for domain in SnapshotDomain: paths += self._storage.get_snapshots_path( @@ -325,6 +329,91 @@ def _run(self) -> None: ) +class ZeroSizeFilesCleanup(BaseCleanupJob): + """Cleanup job that handles zero-size files in the database. + + For any Files in the database with zero size, the job will attempt to update the + size. If the size on disk is zero the file is removed. + """ + + @property + def name(self) -> str: + """Return job name.""" + return CleanupJobNames.ZERO_SIZE_FILES.value + + def _run(self) -> None: + now = time.time() + LOGGER.debug("Running %s", self.name) + processed = 0 + updated = 0 + deleted = 0 + last_id = 0 + + # Only consider files older than 5 minutes + cutoff_time = utcnow() - datetime.timedelta(minutes=5) + + with self._storage.get_session() as session: + while True: + if self.kill_event.is_set(): + break + + batch = ( + session.execute( + select(Files) + .where( + and_( + Files.id > last_id, + Files.size == 0, + Files.created_at < cutoff_time, + ) + ) + .order_by(Files.id) + .limit(BATCH_SIZE) + ) + .scalars() + .all() + ) + + if not batch: + break + + last_id = batch[-1].id + + to_delete_ids: list[int] = [] + for file_row in batch: + if self.kill_event.is_set(): + break + processed += 1 + path = file_row.path + try: + stat_size = os.path.getsize(path) + except OSError: + stat_size = 0 + + if stat_size > 0: + file_row.size = stat_size + updated += 1 + else: + if os.path.exists(path): + os.remove(path) + to_delete_ids.append(file_row.id) + deleted += 1 + + if to_delete_ids: + session.execute(delete(Files).where(Files.id.in_(to_delete_ids))) + session.commit() + time.sleep(0.5) + + LOGGER.debug( + "%s processed %d files, updated %d sizes, deleted %d records, took %s", + self.name, + processed, + updated, + deleted, + time.time() - now, + ) + + class EmptyFoldersCleanup(BaseCleanupJob): """Cleanup job that removes empty directories from the storage locations. 
@@ -353,6 +442,10 @@ def _run(self) -> None: paths += self._storage.get_event_clips_path(camera, all_tiers=True) paths += self._storage.get_segments_path(camera, all_tiers=True) paths += self._storage.get_thumbnails_path(camera, all_tiers=True) + if timelapse_path := self._storage.get_timelapse_path( + camera, all_tiers=True + ): + paths += timelapse_path for domain in SnapshotDomain: paths += self._storage.get_snapshots_path( @@ -747,6 +840,7 @@ def __init__(self, vis: Viseron, storage: Storage): OrphanedDatabaseFilesCleanup( vis, storage, CronTrigger(hour=0, jitter=3600) ), + ZeroSizeFilesCleanup(vis, storage, CronTrigger(hour=0, jitter=3600)), EmptyFoldersCleanup(vis, storage, CronTrigger(hour=0, jitter=3600)), OrphanedThumbnailsCleanup(vis, storage, CronTrigger(hour=0, jitter=3600)), OrphanedEventClipsCleanup(vis, storage, CronTrigger(hour=0, jitter=3600)), diff --git a/viseron/components/storage/storage_subprocess.py b/viseron/components/storage/storage_subprocess.py index 87584dec0..45e72ab70 100644 --- a/viseron/components/storage/storage_subprocess.py +++ b/viseron/components/storage/storage_subprocess.py @@ -5,7 +5,6 @@ import datetime import logging import multiprocessing as mp -import subprocess as sp import sys import time from collections.abc import Callable @@ -189,20 +188,81 @@ def initializer(cpulimit: int | None): if pid and cpulimit is not None: command = f"cpulimit -l {cpulimit} -p {pid} -z -q" LOGGER.debug(f"Running command: {command}") - sp.Popen(command, shell=True) + RestartablePopen( + command, + register=False, + shell=True, + ) -def worker_task(worker: Worker, process_queue: Queue, output_queue: Queue): - """Worker thread task.""" +def worker_task_files( + worker: Worker, + file_queue: Queue[DataItemDeleteFile | DataItemMoveFile], + output_queue: Queue[DataItemDeleteFile | DataItemMoveFile], +): + """Worker thread that only processes file operation commands.""" while True: try: - job = process_queue.get(block=True, timeout=1) + job = file_queue.get(timeout=1) worker.work_input(job) output_queue.put(job) except Empty: continue except Exception as exc: # pylint: disable=broad-except - LOGGER.exception(f"Error in worker thread: {exc}") + LOGGER.exception(f"Error in file worker thread: {exc}") + + +def worker_task_mixed( + worker: Worker, + check_queue: Queue[DataItem], + file_queue: Queue[DataItemDeleteFile | DataItemMoveFile], + output_queue: Queue[DataItem | DataItemDeleteFile | DataItemMoveFile], + name: str, +): + """Worker thread that prioritizes file operations but also handles check_tier. + + This ensures that file operations are not blocked by slow check_tier jobs. + """ + job: DataItem | DataItemDeleteFile | DataItemMoveFile + while True: + try: + try: + job = file_queue.get_nowait() + except Empty: + job = check_queue.get(timeout=1) + worker.work_input(job) + output_queue.put(job) + except Empty: + continue + except Exception as exc: # pylint: disable=broad-except + LOGGER.exception(f"Error in mixed worker thread {name}: {exc}") + + +def dispatcher_task( + process_queue: Queue[DataItem | DataItemDeleteFile | DataItemMoveFile], + check_queue: Queue[DataItem], + file_queue: Queue[DataItemDeleteFile | DataItemMoveFile], +): + """Dispatcher thread routing jobs to dedicated queues. + + check_tier commands can be slow. File operations should not be blocked by them, + so they get their own queue and worker. 
+ """ + while True: + try: + job = process_queue.get(timeout=1) + except Empty: + continue + + try: + if job.cmd == "check_tier": + check_queue.put(job) + elif job.cmd in ("move_file", "delete_file"): + file_queue.put(job) + else: + LOGGER.debug("Unknown command %s", job.cmd) + except Exception as exc: # pylint: disable=broad-except + LOGGER.exception(f"Dispatcher error routing job: {exc}") def main(): @@ -210,6 +270,8 @@ def main(): parser = get_parser() args = parser.parse_args() setup_logger(args.loglevel) + process_queue: Queue[DataItem | DataItemDeleteFile | DataItemMoveFile] + output_queue: Queue[DataItem | DataItemDeleteFile | DataItemMoveFile] process_queue, output_queue = connect( "127.0.0.1", int(args.manager_port), args.manager_authkey ) @@ -228,16 +290,40 @@ def main(): ThreadWatchDog(background_scheduler) LOGGER.debug(f"Starting {args.workers} worker threads") - threads: list[RestartableThread] = [] + + check_queue: Queue[DataItem] = Queue() + file_queue: Queue[DataItemDeleteFile | DataItemMoveFile] = Queue() + + dispatcher = RestartableThread( + name="storage_subprocess.dispatcher", + target=dispatcher_task, + args=(process_queue, check_queue, file_queue), + daemon=True, + ) + dispatcher.start() + for i in range(args.workers): thread = RestartableThread( - name=f"storage_subprocess.worker.{i}", - target=worker_task, - args=(worker, process_queue, output_queue), + name=f"storage_subprocess.mixed_worker.{i}", + target=worker_task_mixed, + args=( + worker, + check_queue, + file_queue, + output_queue, + f"mixed_worker.{i}", + ), daemon=True, ) thread.start() - threads.append(thread) + + thread = RestartableThread( + name="storage_subprocess.file_worker", + target=worker_task_files, + args=(worker, file_queue, output_queue), + daemon=True, + ) + thread.start() while True: time.sleep(1) diff --git a/viseron/components/storage/tier_handler.py b/viseron/components/storage/tier_handler.py index 9e824b3cb..e52a39703 100644 --- a/viseron/components/storage/tier_handler.py +++ b/viseron/components/storage/tier_handler.py @@ -33,6 +33,7 @@ CONFIG_DAYS, CONFIG_EVENTS, CONFIG_HOURS, + CONFIG_INTERVAL, CONFIG_MAX_AGE, CONFIG_MAX_SIZE, CONFIG_MIN_AGE, @@ -77,6 +78,7 @@ get_event_clips_path, get_segments_path, get_thumbnails_path, + get_timelapse_path, ) from viseron.components.webserver.const import COMPONENT as WEBSERVER_COMPONENT from viseron.const import VISERON_SIGNAL_LAST_WRITE, VISERON_SIGNAL_STOPPING @@ -362,7 +364,6 @@ def _on_created(self, event: FileCreatedEvent) -> None: camera_identifier=self._camera.identifier, category=self._category, subcategory=self._subcategory, - file_name="*", ), EventFileCreated( camera_identifier=self._camera.identifier, @@ -371,6 +372,7 @@ def _on_created(self, event: FileCreatedEvent) -> None: file_name=os.path.basename(event.src_path), path=event.src_path, ), + store=False, ) self.check_tier() @@ -423,7 +425,6 @@ def _on_deleted(self, event: FileDeletedEvent) -> None: camera_identifier=self._camera.identifier, category=self._category, subcategory=self._subcategory, - file_name=os.path.basename(event.src_path), ), EventFileDeleted( camera_identifier=self._camera.identifier, @@ -432,6 +433,7 @@ def _on_deleted(self, event: FileDeletedEvent) -> None: file_name=os.path.basename(event.src_path), path=event.src_path, ), + store=False, ) def _shutdown(self) -> None: @@ -1100,6 +1102,7 @@ def _move_file_callback( subcategory=curr_tier_subcategory, ), EventEmptyData(), + store=False, ) return @@ -1184,3 +1187,61 @@ def add_file_handler( ) ], ) + + +class 
TimelapseTierHandler(TierHandler): + """Handle timelapse files.""" + + def initialize(self): + """Initialize timelapse tier.""" + super().initialize() + + self._path = get_timelapse_path(self._tier, self._camera) + self._interval = calculate_age(self._tier.get(CONFIG_INTERVAL, {})) + self.add_file_handler(self._path, rf"{self._path}/(.*.jpg$)") + + def _on_created(self, event: FileCreatedEvent) -> None: + """Handle file creation with interval-based cleanup.""" + super()._on_created(event) + + # If no interval is set, keep all files + if not self._interval: + return + + # Check if there's already a file within the interval + try: + with self._storage.get_session() as session: + current_file_stmt = select(Files.orig_ctime).where( + Files.path == event.src_path + ) + current_file_result = session.execute(current_file_stmt).scalar_one() + current_file_datetime = current_file_result + + interval_start = current_file_datetime - self._interval + interval_end = current_file_datetime + + stmt = select(Files).where( + Files.tier_id == self._tier_id, + Files.camera_identifier == self._camera.identifier, + Files.category == self._category, + Files.subcategory == self._subcategory, + Files.path != event.src_path, + Files.orig_ctime >= interval_start, + Files.orig_ctime <= interval_end, + ) + + result = session.execute(stmt).scalars().all() + + if result: + self._logger.debug( + f"File within interval already exists, removing current file: " + f"{event.src_path}" + ) + delete_file(self._storage, event.src_path) + + delete_stmt = delete(Files).where(Files.path == event.src_path) + session.execute(delete_stmt) + session.commit() + + except Exception as e: # pylint: disable=broad-except + self._logger.error(f"Error during timelapse interval cleanup: {e}") diff --git a/viseron/components/storage/util.py b/viseron/components/storage/util.py index 475e06e8b..89629cefb 100644 --- a/viseron/components/storage/util.py +++ b/viseron/components/storage/util.py @@ -15,10 +15,12 @@ CONFIG_MB, CONFIG_MINUTES, CONFIG_PATH, + CONFIG_SECONDS, TIER_CATEGORY_SNAPSHOTS, TIER_SUBCATEGORY_EVENT_CLIPS, TIER_SUBCATEGORY_SEGMENTS, TIER_SUBCATEGORY_THUMBNAILS, + TIER_SUBCATEGORY_TIMELAPSE, ) from viseron.events import EventData from viseron.types import SnapshotDomain @@ -36,6 +38,7 @@ def calculate_age(age: dict[str, Any]) -> timedelta: days=age[CONFIG_DAYS] if age[CONFIG_DAYS] else 0, hours=age[CONFIG_HOURS] if age[CONFIG_HOURS] else 0, minutes=age[CONFIG_MINUTES] if age[CONFIG_MINUTES] else 0, + seconds=age.get(CONFIG_SECONDS, None) if age.get(CONFIG_SECONDS, None) else 0, ) @@ -95,6 +98,15 @@ def get_snapshots_path( ) +def get_timelapse_path( + tier: dict[str, Any], camera: AbstractCamera | FailedCamera +) -> str: + """Get timelapse path for camera.""" + return os.path.join( + tier[CONFIG_PATH], TIER_SUBCATEGORY_TIMELAPSE, camera.identifier + ) + + @dataclass class EventFile(EventData): """Event data for file events.""" diff --git a/viseron/components/webhook/__init__.py b/viseron/components/webhook/__init__.py new file mode 100644 index 000000000..cae687e5f --- /dev/null +++ b/viseron/components/webhook/__init__.py @@ -0,0 +1,231 @@ +"""Webhook component.""" +from __future__ import annotations + +import logging +from typing import TYPE_CHECKING, Any + +import requests +import voluptuous as vol + +from viseron.events import Event +from viseron.helpers.template import render_template, render_template_condition +from viseron.helpers.validators import ( + CoerceNoneToDict, + Maybe, + Slug, + StringKey, + jinja2_template, +) + 
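TimelapseTierHandler._on_created above thins incoming timelapse frames: a newly created file is deleted again when another file for the same tier, camera, and subcategory already has an orig_ctime within the configured interval of the new file. Stripped of the database plumbing, the decision is roughly this (an illustrative sketch, not the handler's actual code):

from datetime import datetime, timedelta

def keep_new_frame(
    new_ctime: datetime, kept_ctimes: list[datetime], interval: timedelta
) -> bool:
    """Return False when a kept frame already falls within the interval."""
    return not any(
        new_ctime - interval <= ctime <= new_ctime for ctime in kept_ctimes
    )
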
+from .const import ( + COMPONENT, + CONFIG_CONDITION, + CONFIG_CONTENT_TYPE, + CONFIG_EVENT, + CONFIG_HEADERS, + CONFIG_METHOD, + CONFIG_PASSWORD, + CONFIG_PAYLOAD, + CONFIG_TIMEOUT, + CONFIG_TRIGGER, + CONFIG_URL, + CONFIG_USERNAME, + CONFIG_VERIFY_SSL, + DEFAULT_CONDITION, + DEFAULT_CONTENT_TYPE, + DEFAULT_HEADERS, + DEFAULT_METHOD, + DEFAULT_PASSWORD, + DEFAULT_PAYLOAD, + DEFAULT_TIMEOUT, + DEFAULT_USERNAME, + DEFAULT_VERIFY_SSL, + DESC_COMPONENT, + DESC_CONDITION, + DESC_CONTENT_TYPE, + DESC_EVENT, + DESC_HEADER, + DESC_HEADERS, + DESC_HOOK, + DESC_METHOD, + DESC_PASSWORD, + DESC_PAYLOAD, + DESC_TIMEOUT, + DESC_TRIGGER, + DESC_URL, + DESC_USERNAME, + DESC_VERIFY_SSL, + INCLUSION_GROUP_AUTHENTICATION, + MESSAGE_AUTHENTICATION, + SUPPORTED_METHODS, +) + +if TYPE_CHECKING: + from viseron import Viseron + +LOGGER = logging.getLogger(__name__) + +HOOK_SCHEMA = vol.Schema( + { + vol.Required(CONFIG_TRIGGER, description=DESC_TRIGGER): { + vol.Required( + CONFIG_EVENT, + description=DESC_EVENT, + ): str, + vol.Optional( + CONFIG_CONDITION, + default=DEFAULT_CONDITION, + description=DESC_CONDITION, + ): Maybe(jinja2_template), + }, + vol.Required( + CONFIG_URL, + description=DESC_URL, + ): Maybe(jinja2_template), + vol.Optional( + CONFIG_METHOD, + description=DESC_METHOD, + default=DEFAULT_METHOD, + ): vol.All(vol.Lower, vol.In(SUPPORTED_METHODS)), + vol.Optional( + CONFIG_HEADERS, + default=DEFAULT_HEADERS, + description=DESC_HEADERS, + ): vol.All( + CoerceNoneToDict(), + {StringKey(description=DESC_HEADER): jinja2_template}, + ), + vol.Inclusive( + CONFIG_USERNAME, + INCLUSION_GROUP_AUTHENTICATION, + default=DEFAULT_USERNAME, + description=DESC_USERNAME, + msg=MESSAGE_AUTHENTICATION, + ): Maybe(jinja2_template), + vol.Inclusive( + CONFIG_PASSWORD, + INCLUSION_GROUP_AUTHENTICATION, + default=DEFAULT_PASSWORD, + description=DESC_PASSWORD, + msg=MESSAGE_AUTHENTICATION, + ): Maybe(jinja2_template), + vol.Optional( + CONFIG_PAYLOAD, + default=DEFAULT_PAYLOAD, + description=DESC_PAYLOAD, + ): Maybe(jinja2_template), + vol.Optional( + CONFIG_TIMEOUT, + default=DEFAULT_TIMEOUT, + description=DESC_TIMEOUT, + ): vol.Coerce(int), + vol.Optional( + CONFIG_CONTENT_TYPE, + default=DEFAULT_CONTENT_TYPE, + description=DESC_CONTENT_TYPE, + ): str, + vol.Optional( + CONFIG_VERIFY_SSL, + default=DEFAULT_VERIFY_SSL, + description=DESC_VERIFY_SSL, + ): bool, + } +) + +CONFIG_SCHEMA = vol.Schema( + { + vol.Required(COMPONENT, description=DESC_COMPONENT): { + Slug(description=DESC_HOOK): HOOK_SCHEMA, + }, + }, + extra=vol.ALLOW_EXTRA, +) + + +def setup(vis: Viseron, config: dict[str, Any]) -> bool: + """Set up the webhook component.""" + Webhook(vis, config[COMPONENT]) + return True + + +class Webhook: + """Initialize the webhook component.""" + + def __init__(self, vis: Viseron, config: dict[str, Any]) -> None: + self.vis = vis + self.config = config + self._setup_hooks() + + def _setup_hook(self, hook_name: str, hook_conf: dict[str, Any]) -> None: + """Set up a single webhook.""" + + def _handle_trigger(event_data: Event) -> None: + """Handle the trigger event.""" + LOGGER.debug( + f"Handling trigger for webhook '{hook_name}' " + f"with event data: {event_data.as_json()}" + ) + self._handle_event(hook_conf, event_data.data, hook_name) + + trigger = hook_conf[CONFIG_TRIGGER] + event_type = trigger[CONFIG_EVENT] + self.vis.listen_event(event_type, _handle_trigger) + LOGGER.debug(f"Registered webhook '{hook_name}' for event '{event_type}'") + + def _setup_hooks(self): + for hook_name, hook_conf in 
self.config.items(): + self._setup_hook(hook_name, hook_conf) + + def _handle_event( + self, hook_conf: dict[str, Any], event: dict[str, Any], hook_name: str + ): + condition_template = hook_conf[CONFIG_TRIGGER][CONFIG_CONDITION] + if condition_template: + result, rendered_condition = render_template_condition( + self.vis, condition_template, event=event + ) + if not result: + LOGGER.debug( + f"Webhook '{hook_name}' condition not met, skipping webhook. " + f"Condition: {rendered_condition}" + ) + return + + url = render_template(self.vis, hook_conf[CONFIG_URL], event=event) + if not url: + LOGGER.error(f"Webhook '{hook_name}' URL is empty, skipping webhook") + return + + payload = render_template(self.vis, hook_conf[CONFIG_PAYLOAD], event=event) + headers = {} + for header, value in hook_conf[CONFIG_HEADERS].items(): + rendered_value = render_template(self.vis, value, event=event) + if rendered_value is not None: + headers[str(header)] = str(rendered_value) + + auth = None + if hook_conf[CONFIG_USERNAME] and hook_conf[CONFIG_PASSWORD]: + auth = (hook_conf[CONFIG_USERNAME], hook_conf[CONFIG_PASSWORD]) + if hook_conf[CONFIG_CONTENT_TYPE]: + headers["Content-Type"] = hook_conf[CONFIG_CONTENT_TYPE] + + try: + LOGGER.debug( + f"Sending webhook '{hook_name}' " + f"with method: {hook_conf[CONFIG_METHOD].upper()}, " + f"url: {url}, " + f"headers: {headers}, " + f"payload: {payload}" + ) + resp = requests.request( + method=hook_conf[CONFIG_METHOD], + url=url, + data=payload, + headers=headers, + timeout=hook_conf[CONFIG_TIMEOUT], + verify=hook_conf[CONFIG_VERIFY_SSL], + auth=auth, + ) + LOGGER.debug(f"Webhook '{hook_name}' status code: {resp.status_code}") + except Exception as e: # pylint: disable=broad-except + LOGGER.error(f"Webhook '{hook_name}' error: {e}") diff --git a/viseron/components/webhook/const.py b/viseron/components/webhook/const.py new file mode 100644 index 000000000..d8f1e0dbd --- /dev/null +++ b/viseron/components/webhook/const.py @@ -0,0 +1,56 @@ +"""Webhook constants.""" +from typing import Final + +COMPONENT = "webhook" + + +# CONFIG_SCHEMA constants +DESC_COMPONENT: Final = "Webhook component configuration." + +SUPPORTED_METHODS = ["get", "patch", "post", "put", "delete"] + +DESC_HOOK = "Hook configuration." +CONFIG_TRIGGER: Final = "trigger" +CONFIG_EVENT: Final = "event" +CONFIG_CONDITION: Final = "condition" +CONFIG_URL: Final = "url" +CONFIG_METHOD: Final = "method" +CONFIG_HEADERS: Final = "headers" +CONFIG_USERNAME: Final = "username" +CONFIG_PASSWORD: Final = "password" +CONFIG_PAYLOAD: Final = "payload" +CONFIG_TIMEOUT: Final = "timeout" +CONFIG_CONTENT_TYPE: Final = "content_type" +CONFIG_VERIFY_SSL: Final = "verify_ssl" + +DEFAULT_CONDITION: Final = None +DEFAULT_HEADERS: Final = None +DEFAULT_USERNAME: Final = None +DEFAULT_PASSWORD: Final = None +DEFAULT_PAYLOAD: Final = None +DEFAULT_CONTENT_TYPE: Final = "application/json" +DEFAULT_TIMEOUT: Final = 10 +DEFAULT_METHOD: Final = "get" +DEFAULT_VERIFY_SSL: Final = True + +DESC_TRIGGER: Final = "The trigger configuration for the webhook." +DESC_EVENT: Final = "The event type that triggers the webhook." +DESC_CONDITION: Final = ( + "Template condition to check before sending the webhook. " + "If set, the webhook will only be sent if the template evaluates to a " + "truthy value (True, true, 1, yes, on)." +) +DESC_URL: Final = "The URL to send the webhook request to." +DESC_METHOD: Final = "The HTTP method to use for the webhook request." +DESC_HEADERS: Final = "Headers to include in the webhook request." 
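Assembled from the schema and descriptions above, a hook entry in the Viseron config could look like the following sketch. Everything concrete here is a placeholder: the event name merely follows the file_created/{camera_identifier}/{category}/{subcategory} pattern from the storage component, camera_1 and the URL are invented, and the payload assumes the triggering event exposes a path field.

webhook:
  recording_segment_created:
    trigger:
      event: file_created/camera_1/recorder/segments
      condition: "{{ event.path is defined }}"
    url: https://example.com/viseron-hook
    method: post
    headers:
      X-Source: viseron
    payload: '{"file": "{{ event.path }}"}'
    timeout: 5
    verify_ssl: true

The condition template must render to a truthy string (True, true, 1, yes, on) for the request to be sent, and username/password, when given together, are passed as HTTP basic auth.
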
+DESC_HEADER: Final = "Header key for the webhook request." +DESC_USERNAME: Final = "Username for basic authentication." +DESC_PASSWORD: Final = "Password for basic authentication." +DESC_PAYLOAD: Final = "Payload to send with the webhook request." +DESC_TIMEOUT: Final = "The timeout for the webhook request in seconds." +DESC_CONTENT_TYPE: Final = "The content type of the webhook request." +DESC_VERIFY_SSL: Final = "Whether to verify SSL certificates for the webhook request." + +INCLUSION_GROUP_AUTHENTICATION: Final = "authentication" + +MESSAGE_AUTHENTICATION = "username and password must be provided together" diff --git a/viseron/components/webserver/__init__.py b/viseron/components/webserver/__init__.py index 4a92a8d47..3ded06de5 100644 --- a/viseron/components/webserver/__init__.py +++ b/viseron/components/webserver/__init__.py @@ -53,6 +53,7 @@ get_cameras, get_config, get_entities, + handle_render_template, ping, restart_viseron, save_config, @@ -139,6 +140,7 @@ def setup(vis: Viseron, config) -> bool: webserver.register_websocket_command(export_recording) webserver.register_websocket_command(export_snapshot) webserver.register_websocket_command(export_timespan) + webserver.register_websocket_command(handle_render_template) webserver.start() diff --git a/viseron/components/webserver/api/v1/events.py b/viseron/components/webserver/api/v1/events.py index dc13401cb..e8e48d738 100644 --- a/viseron/components/webserver/api/v1/events.py +++ b/viseron/components/webserver/api/v1/events.py @@ -8,9 +8,10 @@ from typing import TYPE_CHECKING, Any import voluptuous as vol -from sqlalchemy import select +from sqlalchemy import func, select from viseron.components.storage.models import ( + Files, Motion, Objects, PostProcessorResults, @@ -69,6 +70,17 @@ class EventsAPIHandler(BaseAPIHandler): } ), }, + { + "requires_role": [Role.ADMIN, Role.READ, Role.WRITE], + "path_pattern": r"/events/dates_of_interest", + "supported_methods": ["POST"], + "method": "post_events_dates_of_interest", + "json_body_schema": vol.Schema( + { + vol.Required("camera_identifiers"): [str], + } + ), + }, ] def _motion_events( @@ -335,46 +347,64 @@ def _events_amount( camera_identifiers: list[str], ) -> dict[str, dict[str, Any]]: with get_session() as session: - stmt = select(Motion.start_time).where( - Motion.camera_identifier.in_(camera_identifiers) + stmt = ( + select( + func.date(Motion.start_time + self.utc_offset), + func.count(), # pylint: disable=not-callable + ) + .where(Motion.camera_identifier.in_(camera_identifiers)) + .group_by(func.date(Motion.start_time + self.utc_offset)) ) - motion_events = session.execute(stmt).scalars().all() + motion_events = session.execute(stmt).all() - stmt = select(Recordings.start_time).where( - Recordings.camera_identifier.in_(camera_identifiers) + stmt = ( + select( + func.date(Recordings.start_time + self.utc_offset), + func.count(), # pylint: disable=not-callable + ) + .where(Recordings.camera_identifier.in_(camera_identifiers)) + .group_by(func.date(Recordings.start_time + self.utc_offset)) ) - recording_events = session.execute(stmt).scalars().all() + recording_events = session.execute(stmt).all() - stmt = select(Objects.created_at).where( - Objects.camera_identifier.in_(camera_identifiers) + stmt = ( + select( + func.date(Objects.created_at + self.utc_offset), + func.count(), # pylint: disable=not-callable + ) + .where(Objects.camera_identifier.in_(camera_identifiers)) + .group_by(func.date(Objects.created_at + self.utc_offset)) ) - object_events = 
session.execute(stmt).scalars().all() + object_events = session.execute(stmt).all() - stmt_pp = select(PostProcessorResults).where( - PostProcessorResults.camera_identifier.in_(camera_identifiers) + stmt_pp = ( + select( + func.date(PostProcessorResults.created_at + self.utc_offset), + PostProcessorResults.domain, + func.count(), # pylint: disable=not-callable + ) + .where(PostProcessorResults.camera_identifier.in_(camera_identifiers)) + .group_by( + func.date(PostProcessorResults.created_at + self.utc_offset), + PostProcessorResults.domain, + ) ) - post_processor_events = session.execute(stmt_pp).scalars().all() + post_processor_events = session.execute(stmt_pp).all() events_amount: dict[str, dict[str, Any]] = {} - for event in motion_events: - event_day = (event + self.utc_offset).date().isoformat() - events_amount.setdefault(event_day, {}).setdefault("motion", 0) - events_amount[event_day]["motion"] += 1 - - for event in recording_events: - event_day = (event + self.utc_offset).date().isoformat() - events_amount.setdefault(event_day, {}).setdefault("recording", 0) - events_amount[event_day]["recording"] += 1 - - for event in object_events: - event_day = (event + self.utc_offset).date().isoformat() - events_amount.setdefault(event_day, {}).setdefault("object", 0) - events_amount[event_day]["object"] += 1 - - for event_pp in post_processor_events: - event_day = (event_pp.created_at + self.utc_offset).date().isoformat() - events_amount.setdefault(event_day, {}).setdefault(event_pp.domain, 0) - events_amount[event_day][event_pp.domain] += 1 + day: datetime.date + count: int + for day, count in motion_events: + events_amount.setdefault(day.isoformat(), {}).setdefault("motion", count) + + for day, count in recording_events: + events_amount.setdefault(day.isoformat(), {}).setdefault("recording", count) + + for day, count in object_events: + events_amount.setdefault(day.isoformat(), {}).setdefault("object", count) + + for day, domain, count in post_processor_events: + events_amount.setdefault(day.isoformat(), {}).setdefault(domain, count) return events_amount @@ -413,3 +443,68 @@ async def post_events_amount_multiple(self): self.json_body["camera_identifiers"], ) await self.response_success(response={"events_amount": events_amount}) + + async def post_events_dates_of_interest(self): + """Get dates of interest for multiple cameras. + + This returns a list of dates with the amount of events and whether + there are timespans available for that date. 
+ """ + camera_identifiers = self.json_body["camera_identifiers"] + if not camera_identifiers: + self.response_error( + HTTPStatus.BAD_REQUEST, reason="No camera identifiers provided" + ) + return + + events_amount = await self.run_in_executor( + self._events_amount, + self._get_session, + camera_identifiers, + ) + events_per_day = {} + for date, event_types in events_amount.items(): + events_per_day[date] = sum(event_types.values()) + + timespans_per_day = await self.run_in_executor( + _get_timespans_per_day, + self._get_session, + camera_identifiers, + self.utc_offset, + ) + + combined = {} + for date, event_count in events_per_day.items(): + combined[date] = { + "events": event_count, + "timespanAvailable": date in timespans_per_day, + } + + # Add dates that have timespans but no events + for date in timespans_per_day: + if date not in combined: + combined[date] = { + "events": 0, + "timespanAvailable": True, + } + await self.response_success(response={"dates_of_interest": combined}) + + +def _get_timespans_per_day( + get_session: Callable[[], Session], + camera_identifiers: list[str], + utc_offset: datetime.timedelta, +) -> list[str]: + """Get the dates with timespans available for multiple cameras.""" + with get_session() as session: + stmt = ( + select( + func.date(Files.created_at + utc_offset), + ) + .where(Files.camera_identifier.in_(camera_identifiers)) + .group_by(func.date(Files.created_at + utc_offset)) + .order_by(func.date(Files.created_at + utc_offset)) + ) + dates = session.execute(stmt).all() + date_list = [str(d[0]) for d in dates] + return date_list diff --git a/viseron/components/webserver/api/v1/hls.py b/viseron/components/webserver/api/v1/hls.py index 415c8b759..431acd331 100644 --- a/viseron/components/webserver/api/v1/hls.py +++ b/viseron/components/webserver/api/v1/hls.py @@ -24,9 +24,8 @@ generate_playlist, get_available_timespans, ) -from viseron.helpers import daterange_to_utc, utcnow +from viseron.helpers import client_current_datetime, daterange_to_utc, utcnow from viseron.helpers.fixed_size_dict import FixedSizeDict -from viseron.helpers.validators import request_argument_no_value if TYPE_CHECKING: from sqlalchemy.orm import Session @@ -87,7 +86,7 @@ class HlsAPIHandler(BaseAPIHandler): vol.Optional("end_timestamp", default=None): vol.Maybe( vol.Coerce(int) ), - vol.Optional("daily", default=False): request_argument_no_value, + vol.Optional("date", default=None): vol.Maybe(str), } ), }, @@ -160,9 +159,10 @@ async def get_hls_playlist_time_period( self._get_session, camera, hls_client_id, + self.utc_offset, self.request_arguments["start_timestamp"], self.request_arguments["end_timestamp"], - self.request_arguments["daily"], + self.request_arguments["date"], ) if not playlist: self.response_error( @@ -315,11 +315,23 @@ def _generate_playlist_time_period( get_session: Callable[[], Session], camera: AbstractCamera | FailedCamera, hls_client_id: str | None, + utc_offset: datetime.timedelta, start_timestamp: int, end_timestamp: int | None = None, - end_playlist_at_timestamp: bool = False, + date: str | None = None, ) -> str | None: """Generate the HLS playlist for a time period.""" + end_playlist = False + if date and end_timestamp is None: + # If a date is provided, convert to timestamp range + _, time_to = daterange_to_utc(date, utc_offset) + end_timestamp = int(time_to.timestamp()) + # If the date is not today, playlist should end + if date != client_current_datetime(utc_offset).date().isoformat(): + end_playlist = True + elif end_timestamp is not None: + 
end_playlist = True + files = get_time_period_fragments( [camera.identifier], start_timestamp, end_timestamp, get_session ) @@ -333,8 +345,6 @@ def _generate_playlist_time_period( for file in files ] - end_playlist = bool(end_timestamp) if not end_playlist_at_timestamp else False - media_sequence = ( update_hls_client(hls_client_id, fragments) if end_timestamp is None and hls_client_id diff --git a/viseron/components/webserver/api/v1/system.py b/viseron/components/webserver/api/v1/system.py new file mode 100644 index 000000000..60c0faab0 --- /dev/null +++ b/viseron/components/webserver/api/v1/system.py @@ -0,0 +1,28 @@ +"""System API handler.""" + +import logging + +from viseron.components.webserver.api.handlers import BaseAPIHandler +from viseron.components.webserver.auth import Role + +LOGGER = logging.getLogger(__name__) + + +class SystemAPIHandler(BaseAPIHandler): + """Handler for API calls related to the system.""" + + routes = [ + { + "requires_role": [Role.ADMIN], + "path_pattern": r"/system/dispatched_events", + "supported_methods": ["GET"], + "method": "get_dispatched_events", + }, + ] + + async def get_dispatched_events(self) -> None: + """Return dispatched events.""" + self._vis.dispatched_events.sort() + await self.response_success( + response={"events": self._vis.dispatched_events}, + ) diff --git a/viseron/components/webserver/websocket_api/commands.py b/viseron/components/webserver/websocket_api/commands.py index 9afc03403..0259da898 100644 --- a/viseron/components/webserver/websocket_api/commands.py +++ b/viseron/components/webserver/websocket_api/commands.py @@ -52,6 +52,8 @@ ) from viseron.exceptions import Unauthorized from viseron.helpers import create_directory, daterange_to_utc, get_utc_offset +from viseron.helpers.template import render_template +from viseron.helpers.validators import jinja2_template from .messages import ( BASE_MESSAGE_SCHEMA, @@ -404,7 +406,6 @@ async def forward_timespans( camera_identifier=camera_identifier, category=TIER_CATEGORY_RECORDER, subcategory=TIER_SUBCATEGORY_SEGMENTS, - file_name="*", ), forward_timespans, ioloop=connection.ioloop, @@ -416,7 +417,6 @@ async def forward_timespans( camera_identifier=camera_identifier, category=TIER_CATEGORY_RECORDER, subcategory=TIER_SUBCATEGORY_SEGMENTS, - file_name="*", ), forward_timespans, ioloop=connection.ioloop, @@ -692,3 +692,30 @@ def _result() -> dict[str, Any] | str: await connection.async_send_message( cancel_subscription_message(message["command_id"]) ) + + +@websocket_command( + { + vol.Required("type"): "render_template", + vol.Required("template"): str, + } +) +async def handle_render_template(connection: WebSocketHandler, message) -> None: + """Render a Jinja2 template.""" + template = message["template"] + try: + jinja2_template(template) + rendered = await connection.run_in_executor( + render_template, connection.vis, template + ) + except Exception as exception: # pylint: disable=broad-except + LOGGER.debug("Failed to render template: %s", exception) + await connection.async_send_message( + error_message( + message["command_id"], + WS_ERROR_NOT_FOUND, + f"Failed to render template: {exception}", + ) + ) + return + await connection.async_send_message(result_message(message["command_id"], rendered)) diff --git a/viseron/const.py b/viseron/const.py index f2a618915..4322b9592 100644 --- a/viseron/const.py +++ b/viseron/const.py @@ -97,6 +97,7 @@ ENV_OPENCL_SUPPORTED = "VISERON_OPENCL_SUPPORTED" ENV_RASPBERRYPI3 = "VISERON_RASPBERRYPI3" ENV_RASPBERRYPI4 = "VISERON_RASPBERRYPI4" 
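A client exercising the new `render_template` websocket command might look like the following sketch. The endpoint URL and the `command_id` handling are assumptions inferred from the handler above (which reads `message["command_id"]` and answers with `result_message` or `error_message`); the third-party `websockets` package is used purely for illustration.

```python
# Hypothetical client for the render_template command. The endpoint path and
# port are assumptions; the message shape follows the @websocket_command schema.
import asyncio
import json

import websockets


async def render(template: str) -> None:
    async with websockets.connect("ws://localhost:8888/websocket") as ws:
        await ws.send(
            json.dumps(
                {"command_id": 1, "type": "render_template", "template": template}
            )
        )
        # Success yields a result message with the rendered string; an invalid
        # template yields an error message carrying the exception text.
        print(json.loads(await ws.recv()))


asyncio.run(render("{{ states['binary_sensor.camera_one_motion'].state }}"))
```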
+ENV_RASPBERRYPI5 = "VISERON_RASPBERRYPI5" ENV_JETSON_NANO = "VISERON_JETSON_NANO" ENV_PROFILE_MEMORY = "VISERON_PROFILE_MEMORY" ENV_LOG_MAX_BYTES = "VISERON_LOG_MAX_BYTES" diff --git a/viseron/domains/camera/__init__.py b/viseron/domains/camera/__init__.py index ff852e9b8..cadb438ee 100644 --- a/viseron/domains/camera/__init__.py +++ b/viseron/domains/camera/__init__.py @@ -167,6 +167,10 @@ def __init__(self, vis: Viseron, component: str, config, identifier: str) -> Non self.snapshots_motion_folder: str = self._storage.get_snapshots_path( self, SnapshotDomain.MOTION_DETECTOR ) + self.timelapse_folder: str | None = self._storage.get_timelapse_path(self) + self.temp_timelapse_folder: str | None = ( + TEMP_DIR + self.timelapse_folder if self.timelapse_folder else None + ) self.fragmenter: Fragmenter = Fragmenter(vis, self) if self.config[CONFIG_PASSWORD]: diff --git a/viseron/domains/camera/fragmenter.py b/viseron/domains/camera/fragmenter.py index 41532356d..0a0130c70 100644 --- a/viseron/domains/camera/fragmenter.py +++ b/viseron/domains/camera/fragmenter.py @@ -41,6 +41,9 @@ from viseron.components.storage import Storage from viseron.domains.camera import AbstractCamera +# Constants +TIMELAPSE_FFMPEG_TIMEOUT = 10 + def _get_open_files(path: str, process: psutil.Process) -> list[str]: """Get open files for a process.""" @@ -173,6 +176,7 @@ def work_output(self, item: dict | None): subcategory=TIER_SUBCATEGORY_SEGMENTS, ), EventEmptyData(), + store=False, ) self._storage.cleanup_manager.run_job(CleanupJobNames.ORPHANED_FILES) @@ -211,8 +215,106 @@ def _mp4box_command(self, file: str): return False return True + def _segment_hook_mp4box(self, file: str): + """ + Perform per fragment tasks before moving fragment to storage. + + Currently only used for extracting timelapse frames from fragments. + """ + if ( + self._camera.timelapse_folder is None + or self._camera.temp_timelapse_folder is None + ): + return + + frame_filename = os.path.splitext(os.path.basename(file))[0] + ".jpg" + tmp_frame_path = os.path.join( + self._camera.temp_timelapse_folder, frame_filename + ) + frame_path = os.path.join(self._camera.timelapse_folder, frame_filename) + init_path = os.path.join( + self.temp_segments_folder, file.split(".")[0], "clip_init.mp4" + ) + segment_path = os.path.join( + self.temp_segments_folder, file.split(".")[0], "clip_1.m4s" + ) + + self._extract_timelapse_frame( + init_path, segment_path, tmp_frame_path, frame_path + ) + + def _segment_hook(self, file: str): + """ + Perform per fragment tasks before moving fragment to storage. + + Currently only used for extracting timelapse frames from fragments. 
+ """ + if ( + self._camera.timelapse_folder is None + or self._camera.temp_timelapse_folder is None + ): + return + + frame_filename = os.path.splitext(os.path.basename(file))[0] + ".jpg" + tmp_frame_path = os.path.join( + self._camera.temp_timelapse_folder, frame_filename + ) + frame_path = os.path.join(self._camera.timelapse_folder, frame_filename) + init_path = os.path.join(self.temp_segments_folder, "init.mp4") + segment_path = os.path.join(self.temp_segments_folder, file) + + self._extract_timelapse_frame( + init_path, segment_path, tmp_frame_path, frame_path + ) + + def _extract_timelapse_frame( + self, init_path: str, segment_path: str, tmp_frame_path: str, frame_path: str + ): + """Extract a timelapse frame from segment files.""" + try: + # Run ffmpeg command to extract first keyframe of the segment + cmd = [ + "bash", + "-c", + f"cat '{init_path}' '{segment_path}' | " + f"ffmpeg -skip_frame nokey -i pipe:0 -frames:v 1 " + f"-update true -f mjpeg '{tmp_frame_path}' -y", + ] + + result = sp.run( + cmd, + capture_output=True, + text=True, + timeout=TIMELAPSE_FFMPEG_TIMEOUT, + check=False, + ) + if result.returncode == 0: + self._logger.debug(f"Timelapse: Extracted frame {tmp_frame_path}") + shutil.move(tmp_frame_path, frame_path) + else: + self._logger.warning( + f"Timelapse: Failed to extract frame {frame_path}: " + f"{result.stderr}" + ) + os.remove(tmp_frame_path) + + except FileNotFoundError: + self._logger.debug(f"{tmp_frame_path} not found") + + except (OSError, sp.TimeoutExpired, sp.SubprocessError) as error: + self._logger.error( + f"Timelapse: Error extracting frame {frame_path}: {error}" + ) + + finally: + try: + os.remove(tmp_frame_path) + except FileNotFoundError: + pass + def _move_to_segments_folder_mp4box(self, file: str): """Move fragmented mp4 created by mp4box to segments folder.""" + self._segment_hook_mp4box(file) try: shutil.move( os.path.join( @@ -235,6 +337,7 @@ def _move_to_segments_folder_mp4box(self, file: str): def _move_to_segments_folder(self, file: str): """Move fragmented mp4 created by encoder to segments folder.""" + self._segment_hook(file) try: shutil.move( os.path.join(self.temp_segments_folder, file), @@ -371,6 +474,8 @@ def __init__( self._camera = camera self._storage: Storage = vis.data[STORAGE_COMPONENT] os.makedirs(camera.temp_segments_folder, exist_ok=True) + if camera.temp_timelapse_folder is not None: + os.makedirs(camera.temp_timelapse_folder, exist_ok=True) self._storage.ignore_file("init.mp4") self._log_pipe_ffmpeg = LogPipe( diff --git a/viseron/domains/motion_detector/__init__.py b/viseron/domains/motion_detector/__init__.py index 0daf82315..ccea5310b 100644 --- a/viseron/domains/motion_detector/__init__.py +++ b/viseron/domains/motion_detector/__init__.py @@ -133,7 +133,9 @@ def as_dict(self) -> dict[str, Any]: return { "camera_identifier": self.camera_identifier, "motion_detected": self.motion_detected, - "motion_contours": self.motion_contours, + "max_area": ( + self.motion_contours.max_area if self.motion_contours else None + ), } diff --git a/viseron/domains/object_detector/const.py b/viseron/domains/object_detector/const.py index 1c6f0b4dd..fbfd05916 100644 --- a/viseron/domains/object_detector/const.py +++ b/viseron/domains/object_detector/const.py @@ -1,8 +1,12 @@ """Object detector domain constants.""" +import os from typing import Any, Final +from viseron.const import CONFIG_DIR + DOMAIN: Final = "object_detector" +MODEL_CACHE: Final = os.path.join(CONFIG_DIR, "models") # Data stream topic constants 
DATA_OBJECT_DETECTOR_SCAN = "object_detector/{camera_identifier}/scan"
diff --git a/viseron/events.py b/viseron/events.py
index ef7d00148..594a2d6b2 100644
--- a/viseron/events.py
+++ b/viseron/events.py
@@ -2,11 +2,14 @@

 from __future__ import annotations

+import json
 from dataclasses import dataclass
 from typing import Any, Generic

 from typing_extensions import TypeVar

+from viseron.helpers.json import JSONEncoder
+
 T = TypeVar("T")


@@ -21,11 +24,15 @@ class Event(Generic[T]):
     def as_dict(self) -> dict[str, Any]:
         """Convert Event to dict."""
         return {
-            "name": self.name.split("/", 1)[1],
+            "name": self.name,
             "data": self.data,
             "timestamp": self.timestamp,
         }

+    def as_json(self) -> str:
+        """Convert Event to JSON string."""
+        return json.dumps(self.as_dict(), cls=JSONEncoder, allow_nan=False)
+

 class EventData:
     """Base class that holds event data."""
diff --git a/viseron/helpers/__init__.py b/viseron/helpers/__init__.py
index 0ca167663..b69951a7a 100644
--- a/viseron/helpers/__init__.py
+++ b/viseron/helpers/__init__.py
@@ -60,6 +60,11 @@ def daterange_to_utc(
     return time_from, time_to


+def client_current_datetime(utc_offset: datetime.timedelta) -> datetime.datetime:
+    """Return the current datetime adjusted to the client's timezone."""
+    return utcnow() + utc_offset
+
+
 def calculate_relative_contours(
     contours, resolution: tuple[int, int]
 ) -> list[np.ndarray]:
diff --git a/viseron/helpers/child_process_worker.py b/viseron/helpers/child_process_worker.py
index dc5504932..cc0ea7fe8 100644
--- a/viseron/helpers/child_process_worker.py
+++ b/viseron/helpers/child_process_worker.py
@@ -4,6 +4,7 @@

 import logging
 import multiprocessing as mp
+import os
 from abc import ABC, abstractmethod
 from multiprocessing.synchronize import Event
 from queue import Empty, Queue
@@ -119,6 +120,7 @@ def _process_frames(
         self, exit_event: Event, process_queue: mp.Queue, output_queue: mp.Queue
     ) -> None:
         """Process frame and send it to the detector."""
+        os.setsid()
         remove_shm_from_resource_tracker()
         setproctitle.setproctitle(self.child_process_name)
         self.process_initialization()
diff --git a/viseron/helpers/template.py b/viseron/helpers/template.py
new file mode 100644
index 000000000..e2efd6dab
--- /dev/null
+++ b/viseron/helpers/template.py
@@ -0,0 +1,86 @@
+"""Jinja2 template helpers for Viseron."""
+from __future__ import annotations
+
+from numbers import Number
+from typing import TYPE_CHECKING, Any, Literal
+
+if TYPE_CHECKING:
+    from viseron import Viseron
+
+
+class StateNamespace:
+    """Namespace for accessing states in a domain-specific manner.
+
+    This class allows access to states in templates by using dot notation,
+    e.g., `states.binary_sensor.camera_1.state`.
+    """
+
+    def __init__(self, states_dict):
+        self._states = states_dict
+
+    def __getattr__(self, domain):
+        """Return a domain-specific namespace for accessing states."""
+        return _DomainNamespace(self._states, domain)
+
+    def __getitem__(self, key):
+        """Return a state by its key."""
+        return self._states[key]
+
+
+class _DomainNamespace:
+    def __init__(self, states_dict, domain):
+        self._states = states_dict
+        self._domain = domain
+
+    def __getattr__(self, entity):
+        key = f"{self._domain}.{entity}"
+        return self._states[key]
+
+    def __getitem__(self, entity):
+        key = f"{self._domain}.{entity}"
+        return self._states[key]
+
+
+def render_template(vis: Viseron, template_str: str | None, **kwargs) -> None | str:
+    """Render a Jinja2 template with the states and any other arbitrary data."""
+    if not template_str:
+        return None
+    states_ns = StateNamespace(vis.states.current)
+    template = vis.jinja_env.from_string(template_str)
+    return template.render(states=states_ns, **kwargs)
+
+
+def _template_boolean(value: Any) -> bool:
+    """Convert a rendered template value to a boolean."""
+    if isinstance(value, bool):
+        return value
+    if isinstance(value, str):
+        value = value.lower().strip()
+        if value in ("1", "true", "yes", "on", "enable"):
+            return True
+    elif isinstance(value, Number):
+        # type ignore: https://github.com/python/mypy/issues/3186
+        return value != 0  # type: ignore[comparison-overlap]
+    return False
+
+
+def render_template_condition(
+    vis: Viseron, template_str: str | None, **kwargs
+) -> tuple[Literal[False], None] | tuple[bool, str]:
+    """Render a Jinja2 template condition.
+
+    Returns True if the condition evaluates to a truthy value, otherwise False.
+    Considers any number greater than 0 as truthy.
+    """
+    rendered_condition = render_template(vis, template_str, **kwargs)
+    if rendered_condition is None:
+        return False, rendered_condition
+    try:
+        if float(rendered_condition) > 0:
+            return True, rendered_condition
+    except (ValueError, TypeError):
+        pass
+    return (
+        _template_boolean(rendered_condition),
+        rendered_condition,
+    )
diff --git a/viseron/helpers/validators.py b/viseron/helpers/validators.py
index e202d5b9d..48c8e35ec 100644
--- a/viseron/helpers/validators.py
+++ b/viseron/helpers/validators.py
@@ -4,6 +4,7 @@
 from typing import Any

 import voluptuous as vol
+from jinja2 import BaseLoader, Environment

 from viseron.helpers import slugify

@@ -165,6 +166,21 @@ def request_argument_no_value(value) -> bool:
     return False


+def jinja2_template(value: Any) -> str:
+    """Validate that value is a valid Jinja2 template."""
+    if not isinstance(value, str):
+        msg = f"Expected Jinja2 template, got {value}"
+        raise vol.Invalid(msg)
+
+    env = Environment(loader=BaseLoader())
+    try:
+        env.compile(value)
+    except Exception as e:
+        msg = f"Invalid Jinja2 template: {e}"
+        raise vol.Invalid(msg) from e
+    return value
+
+
 class CameraIdentifier(vol.Required):
     """Validate Camera Identifier."""

@@ -240,6 +256,62 @@ def __call__(self, value):
         raise vol.Invalid("Invalid slug.")


+class StringKey:
+    """Ensure that a config key is a string."""
+
+    def __init__(
+        self,
+        description: str,
+    ) -> None:
+        self.description = description
+
+    def __call__(self, value):
+        """Ensure string key."""
+        if not isinstance(value, str):
+            msg = f"Expected string. Got {value}"
+            LOGGER.error(msg)
+            raise vol.Invalid(msg)
+        return value
+
+
+class Url:
+    """Wrap voluptuous.Url but as a class instead.
+
+    This allows for special handling when generating docs with scripts/gen_docs.py.
+ """ + + def __init__( + self, + ) -> None: + self.url_validator = vol.Url() # pylint: disable=no-value-for-parameter + + def __call__(self, value): + """Validate URL.""" + return self.url_validator( + value, + ) + + +class PathExists: + """Wrap voluptuous.PathExists but as a class instead. + + This allows for special handling when generating docs with scripts/gen_docs.py. + """ + + def __init__( + self, + ) -> None: + self.path_exists_validator = ( + vol.PathExists() # pylint: disable=no-value-for-parameter + ) + + def __call__(self, value): + """Validate path exists.""" + return self.path_exists_validator( # pylint: disable=not-callable + value, + ) + + def request_argument_bool(value): """Boolean HTTP request argument. diff --git a/viseron/states.py b/viseron/states.py index 8dbbba0db..13e81b192 100644 --- a/viseron/states.py +++ b/viseron/states.py @@ -85,6 +85,11 @@ def __init__(self, vis: Viseron) -> None: self._current_states: dict[str, State] = {} + @property + def current(self) -> dict[str, State]: + """Return current states.""" + return self._current_states + def set_state(self, entity: Entity) -> None: """Set the state in the states registry.""" LOGGER.debug( diff --git a/viseron/watchdog/process_watchdog.py b/viseron/watchdog/process_watchdog.py index 19f7d41a9..b76ac01a6 100644 --- a/viseron/watchdog/process_watchdog.py +++ b/viseron/watchdog/process_watchdog.py @@ -3,6 +3,7 @@ import logging import multiprocessing as mp +import os from collections.abc import Callable from typing import TYPE_CHECKING @@ -38,6 +39,7 @@ def __init__( self._grace_period = grace_period self._kwargs = kwargs self._kwargs["name"] = name + self._original_target: Callable | None = self._kwargs.get("target") self._process: mp.Process | None = None self._started = False self._start_time: float | None = None @@ -87,6 +89,25 @@ def exitcode(self) -> int | None: def start(self) -> None: """Start the process.""" + # Always (re)set the wrapped target so that restarts also create a new + # process that calls os.setsid() before executing the user target. + if self._original_target: + original_target = self._original_target + + def wrapped_target(*targs, **tkwargs): + """Wrap original target to establish its own session ID. + + Creating a new session (setsid) ensures the child process becomes + the leader of a new session and process group. This makes signal + management (e.g. terminating entire groups) more robust and + prevents the process from receiving signals intended for the + parent group. + """ + os.setsid() + original_target(*targs, **tkwargs) + + self._kwargs["target"] = wrapped_target + if self._create_process_method: self._process = self._create_process_method() else: diff --git a/viseron/watchdog/subprocess_watchdog.py b/viseron/watchdog/subprocess_watchdog.py index e1a02558b..111941dbc 100644 --- a/viseron/watchdog/subprocess_watchdog.py +++ b/viseron/watchdog/subprocess_watchdog.py @@ -28,12 +28,14 @@ def __init__( grace_period=20, register=True, stage: str | None = VISERON_SIGNAL_SHUTDOWN, + start_new_session: bool = True, **kwargs, ) -> None: self._args = args self._name = name self._grace_period = grace_period self._kwargs = kwargs + self._kwargs["start_new_session"] = start_new_session self._subprocess: sp.Popen | None = None self._started = False self.start()
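Both watchdogs now place their children in a new session: the process watchdog wraps the target with `os.setsid()` and the subprocess watchdog passes `start_new_session=True` to `Popen`. The standalone sketch below illustrates the effect this has on signal handling; the `sleep` command stands in for any supervised child.

```python
# Minimal sketch of the new-session behaviour, independent of Viseron.
# A child started with start_new_session=True becomes the leader of its own
# session and process group, so the whole group (including any grandchildren
# it spawns, e.g. ffmpeg pipelines) can be terminated without signalling the
# parent's group.
import os
import signal
import subprocess as sp

proc = sp.Popen(["sleep", "1000"], start_new_session=True)

# The child's process-group id equals its pid and differs from ours.
assert os.getpgid(proc.pid) == proc.pid
assert os.getpgid(proc.pid) != os.getpgid(0)

# One call takes down the entire group.
os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
proc.wait()
```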